code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.

"""
AMF Remoting support.

A Remoting request from the client consists of a short preamble, headers, and
bodies. The preamble contains basic information about the nature of the
request. Headers can be used to request debugging information, send
authentication info, tag transactions, etc. Bodies contain actual Remoting
requests and responses. A single Remoting envelope can contain several
requests; Remoting supports batching out of the box.

Client headers and bodies need not be responded to in a one-to-one manner.
That is, a body or header may not require a response. Debug information is
requested by a header but sent back as a body object. The response index is
therefore essential for the Adobe Flash Player to understand the response.

@see: U{Remoting Envelope on OSFlash
    <http://osflash.org/documentation/amf/envelopes/remoting>}
@see: U{Remoting Headers on OSFlash
    <http://osflash.org/amf/envelopes/remoting/headers>}
@see: U{Remoting Debug Headers on OSFlash
    <http://osflash.org/documentation/amf/envelopes/remoting/debuginfo>}
@since: 0.1
"""

import pyamf
from pyamf import util

__all__ = ['Envelope', 'Request', 'Response', 'decode', 'encode']

#: Successful call.
STATUS_OK = 0
#: Reserved for runtime errors.
STATUS_ERROR = 1
#: Debug information.
STATUS_DEBUG = 2

#: Maps each status code to the target suffix used on the wire, e.g. a
#: response to request '/1' with STATUS_OK is targeted at '/1/onResult'.
STATUS_CODES = {
    STATUS_OK: '/onResult',
    STATUS_ERROR: '/onStatus',
    STATUS_DEBUG: '/onDebugEvents'
}

#: AMF mimetype.
CONTENT_TYPE = 'application/x-amf'

#: Remoting error codes (trailing comma unpacks the single-element range).
ERROR_CALL_FAILED, = range(1)

#: Maps the numeric error constants above to their AMF error code strings.
ERROR_CODES = {
    ERROR_CALL_FAILED: 'Server.Call.Failed'
}

#: Well-known header names the client may send to control gateway URLs and
#: persistent headers.
APPEND_TO_GATEWAY_URL = 'AppendToGatewayUrl'
REPLACE_GATEWAY_URL = 'ReplaceGatewayUrl'
REQUEST_PERSISTENT_HEADER = 'RequestPersistentHeader'
class RemotingError(pyamf.BaseError):
    """
    Generic remoting error class.

    Base class for all remoting-specific errors raised by this module.
    """
class RemotingCallFailed(RemotingError):
    """
    Raised if B{Server.Call.Failed} received.
    """

# Register the exception against its AMF error code so decoded faults with
# this code can be mapped back to a Python exception.
pyamf.add_error_class(RemotingCallFailed, ERROR_CODES[ERROR_CALL_FAILED])
class HeaderCollection(dict):
    """
    Collection of AMF message headers.

    Behaves as a C{dict} of header name -> value, and additionally tracks
    which headers the peer flagged as required (must-understand).
    """

    def __init__(self, raw_headers=None):
        """
        @param raw_headers: Iterable of C{(name, required, value)} triples as
            produced by L{_read_header}. Defaults to no headers. (Previously
            this used a shared mutable C{{}} default.)
        """
        self.required = []

        if raw_headers is None:
            raw_headers = {}

        for (k, ig, v) in raw_headers:
            self[k] = v

            if ig:
                self.required.append(k)

    def is_required(self, idx):
        """
        Whether the header named C{idx} is flagged as required.

        @raise KeyError: Unknown header found.
        """
        if idx not in self:
            raise KeyError("Unknown header %s" % str(idx))

        return idx in self.required

    def set_required(self, idx, value=True):
        """
        Set (or clear) the required flag for the header named C{idx}.

        @param value: C{True} marks the header required, C{False} clears the
            flag. (Previously C{value} was ignored and the flag was always
            set.)
        @raise KeyError: Unknown header found.
        """
        if idx not in self:
            raise KeyError("Unknown header %s" % str(idx))

        if value:
            if idx not in self.required:
                self.required.append(idx)
        elif idx in self.required:
            self.required.remove(idx)

    def __len__(self):
        return len(self.keys())
class Envelope(object):
    """
    I wrap an entire request, encapsulating headers and bodies.

    There can be more than one request in a single transaction.

    @ivar amfVersion: AMF encoding version. See L{pyamf.ENCODING_TYPES}
    @type amfVersion: C{int} or C{None}
    @ivar headers: AMF headers, a list of name, value pairs. Global to each
        request.
    @type headers: L{HeaderCollection}
    @ivar bodies: A list of requests/response messages
    @type bodies: C{list} containing tuples of the key of the request and the
        L{Message}.
    """

    def __init__(self, amfVersion=None):
        self.amfVersion = amfVersion
        self.headers = HeaderCollection()
        self.bodies = []

    def __repr__(self):
        r = "<Envelope amfVersion=%r>\n" % (self.amfVersion,)

        for h in self.headers:
            r += " " + repr(h) + "\n"

        for request in iter(self):
            r += " " + repr(request) + "\n"

        r += "</Envelope>"

        return r

    def __setitem__(self, name, value):
        """
        Map a request/response to C{name}, replacing any existing body with
        the same name.

        @raise TypeError: C{value} is not a L{Message} instance.
        """
        if not isinstance(value, Message):
            raise TypeError("Message instance expected")

        found = False

        for idx, body in enumerate(self.bodies):
            if name == body[0]:
                self.bodies[idx] = (name, value)
                found = True

        if not found:
            self.bodies.append((name, value))

        value.envelope = self

    def __getitem__(self, name):
        for body in self.bodies:
            if name == body[0]:
                return body[1]

        raise KeyError("'%r'" % (name,))

    def __nonzero__(self):
        return len(self.bodies) != 0 or len(self.headers) != 0

    # Python 3 spelling of __nonzero__
    __bool__ = __nonzero__

    def __iter__(self):
        # note: no explicit `raise StopIteration` here - inside a generator
        # that is a RuntimeError under PEP 479 (Python 3.7+) and was
        # redundant anyway; the generator ends naturally.
        for body in self.bodies:
            yield body[0], body[1]

    def __len__(self):
        return len(self.bodies)

    def iteritems(self):
        for body in self.bodies:
            yield body

    def keys(self):
        return [body[0] for body in self.bodies]

    def items(self):
        return self.bodies

    def __contains__(self, name):
        for body in self.bodies:
            if name == body[0]:
                return True

        return False

    def __eq__(self, other):
        if isinstance(other, Envelope):
            return (self.amfVersion == other.amfVersion and
                self.headers == other.headers and
                self.bodies == other.bodies)

        if hasattr(other, 'keys') and hasattr(other, 'items'):
            keys, o_keys = self.keys(), other.keys()

            if len(o_keys) != len(keys):
                return False

            for k in o_keys:
                if k not in keys:
                    return False

                keys.remove(k)

            for k, v in other.items():
                if self[k] != v:
                    return False

            return True

        # previously this fell off the end and returned None; NotImplemented
        # lets Python fall back to identity comparison (i.e. False).
        return NotImplemented
class Message(object):
    """
    A single request/response pair, containing one body of data plus the
    headers shared with the other messages in the same L{Envelope}.

    @ivar envelope: The parent L{envelope<Envelope>} of this AMF Message.
    @ivar body: The body of the message.
    @ivar headers: The message headers. Dict like in behaviour.
    """

    def __init__(self, envelope, body):
        self.envelope = envelope
        self.body = body

    @property
    def headers(self):
        # headers live on the envelope and are shared by every message in it
        return self.envelope.headers
class Request(Message):
    """
    An AMF Request payload.

    @ivar target: The C{string} target of the request
    """

    def __init__(self, target, body=None, envelope=None):
        """
        @param target: The remote service method being called.
        @param body: The arguments of the call. Defaults to an empty list.
            (Previously a shared mutable C{[]} default was used, so arguments
            appended to one default-constructed request leaked into all
            others.)
        @param envelope: The parent L{Envelope}, if any.
        """
        if body is None:
            body = []

        Message.__init__(self, envelope, body)

        self.target = target

    def __repr__(self):
        return "<%s target=%s>%s</%s>" % (
            type(self).__name__, repr(self.target), repr(self.body),
            type(self).__name__)
class Response(Message):
    """
    An AMF Response.

    @ivar status: The status of the message. Default is L{STATUS_OK}.
    @type status: Member of L{STATUS_CODES}.
    """

    def __init__(self, body, status=STATUS_OK, envelope=None):
        Message.__init__(self, envelope, body)

        self.status = status

    def __repr__(self):
        cls_name = type(self).__name__

        return "<%s status=%s>%s</%s>" % (
            cls_name, _get_status(self.status), repr(self.body), cls_name)
class BaseFault(object):
"""
I represent a fault message (C{mx.rpc.Fault}).
@ivar level: The level of the fault.
@ivar code: A simple code describing the fault.
@ivar details: Any extra details of the fault.
@ivar description: A longer description of the fault.
@see: U{mx.rpc.Fault on Livedocs
<http://livedocs.adobe.com/flex/201/langref/mx/rpc/Fault.html>}
"""
level = None
class __amf__:
static = ('level', 'code', 'type', 'details', 'description')
def __init__(self, *args, **kwargs):
self.code = kwargs.get('code', '')
self.type = kwargs.get('type', '')
self.details = kwargs.get('details', '')
self.description = kwargs.get('description', '')
def __repr__(self):
x = '%s level=%s' % (self.__class__.__name__, self.level)
if self.code not in ('', None):
x += ' code=%s' % repr(self.code)
if self.type not in ('', None):
x += ' type=%s' % repr(self.type)
if self.description not in ('', None):
x += ' description=%s' % repr(self.description)
if self.details not in ('', None):
x += '\nTraceback:\n%s' % (repr(self.details),)
return x
def raiseException(self):
"""
Raises an exception based on the fault object. There is no traceback
available.
"""
raise get_exception_from_fault(self), self.description, None
class ErrorFault(BaseFault):
    """
    I represent an error level fault.
    """
    # faults decoded with level == 'error' map to this class via
    # L{get_fault_class}
    level = 'error'
def _read_header(stream, decoder, strict=False):
    """
    Read an AMF L{Message} header from the stream.

    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param decoder: An AMF0 decoder.
    @param strict: Use strict decoding policy. Default is C{False}. Will raise
        a L{pyamf.DecodeError} if the data that was read from the stream does
        not match the header length.
    @return: A C{tuple} containing the name of the header, a C{bool}
        determining if understanding this header is required and the decoded
        data.
    @note: Quite what understanding required headers actually means is unknown.
    """
    name = stream.read_utf8_string(stream.read_ushort())
    required = bool(stream.read_uchar())

    declared_len = stream.read_ulong()
    start = stream.tell()

    data = decoder.readElement()

    # in strict mode, verify the payload consumed exactly the declared length
    if strict and start + declared_len != stream.tell():
        raise pyamf.DecodeError(
            "Data read from stream does not match header length")

    return (name, required, data)
def _write_header(name, header, required, stream, encoder, strict=False):
    """
    Write an AMF message header.

    @param name: Name of the header.
    @param header: Header value.
    @param required: Whether understanding this header is required (?).
    @param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that
        will receive the encoded header.
    @param encoder: An encoder capable of encoding C{AMF0}.
    @param strict: Use strict encoding policy. Default is C{False}. Will write
        the correct header length after writing the header.
    """
    stream.write_ushort(len(name))
    stream.write_utf8_string(name)
    stream.write_uchar(required)

    # reserve 4 bytes for the payload length; backfilled below when strict
    length_pos = stream.tell()
    stream.write_ulong(0)

    body_start = stream.tell()
    encoder.writeElement(header)
    body_end = stream.tell()

    if strict:
        stream.seek(length_pos)
        stream.write_ulong(body_end - body_start)
        stream.seek(body_end)
def _read_body(stream, decoder, strict=False, logger=None):
    """
    Read an AMF message body from the stream.

    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param decoder: An AMF0 decoder.
    @param strict: Use strict decoding policy. Default is `False`.
    @param logger: Used to log interesting events whilst reading a remoting
        body.
    @type logger: A C{logging.Logger} instance or C{None}.
    @return: A C{tuple} containing the C{id} of the request and the L{Request}
        or L{Response}
    """
    def _read_args():
        # we have to go through this insanity because it seems that amf0
        # does not keep the array of args in the object references lookup

        # 0x11 marks an embedded AMF3 payload (only valid when the envelope
        # version enabled AMF3 - see decoder.use_amf3)
        type_byte = stream.peek(1)

        if type_byte == '\x11':
            if not decoder.use_amf3:
                raise pyamf.DecodeError(
                    "Unexpected AMF3 type with incorrect message type")

            return decoder.readElement()

        # 0x0a is the AMF0 strict-array marker
        if type_byte != '\x0a':
            raise pyamf.DecodeError("Array type required for request body")

        stream.read(1)
        x = stream.read_ulong()

        # decode each argument individually rather than via the array
        # decoder (see the reference-lookup note above)
        return [decoder.readElement() for i in xrange(x)]

    target = stream.read_utf8_string(stream.read_ushort())
    response = stream.read_utf8_string(stream.read_ushort())

    status = STATUS_OK
    is_request = True

    # response targets carry a status suffix (e.g. '/1/onResult'); strip it
    # and remember which status it was
    for code, s in STATUS_CODES.iteritems():
        if not target.endswith(s):
            continue

        is_request = False
        status = code
        target = target[:0 - len(s)]

    if logger:
        logger.debug('Remoting target: %r' % (target,))

    data_len = stream.read_ulong()
    pos = stream.tell()

    if is_request:
        data = _read_args()
    else:
        data = decoder.readElement()

    if strict and pos + data_len != stream.tell():
        raise pyamf.DecodeError("Data read from stream does not match body "
            "length (%d != %d)" % (pos + data_len, stream.tell(),))

    if is_request:
        # for requests, the 'response' field carries the client's reply index
        return response, Request(target, body=data)

    if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
        # translate the raw error payload into a fault object
        data = get_fault(data)

    return target, Response(data, status)
def _write_body(name, message, stream, encoder, strict=False):
    """
    Write an AMF message body.

    @param name: The name of the request.
    @param message: The AMF L{Message}
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param encoder: An AMF0 encoder.
    @param strict: Use strict encoding policy. Default is `False`.
    @raise TypeError: C{message} is neither a L{Request} nor a L{Response}.
    """
    if not isinstance(message, (Request, Response)):
        raise TypeError("Unknown message type")

    is_request = isinstance(message, Request)

    if is_request:
        target = unicode(message.target)
    else:
        # responses are targeted at the request's reply index plus the
        # status suffix, e.g. '/1/onResult'
        target = u"%s%s" % (name, _get_status(message.status))

    target = target.encode('utf8')

    stream.write_ushort(len(target))
    stream.write_utf8_string(target)

    response = name if is_request else 'null'

    stream.write_ushort(len(response))
    stream.write_utf8_string(response)

    def _encode_body(msg):
        if isinstance(msg, Response):
            encoder.writeElement(msg.body)

            return

        # request args go out as an AMF0 strict array (0x0a marker)
        stream.write('\x0a')
        stream.write_ulong(len(msg.body))

        for arg in msg.body:
            encoder.writeElement(arg)

    if not strict:
        # non-strict mode writes a zero placeholder for the body length
        stream.write_ulong(0)
        _encode_body(message)

        return

    # strict mode: reserve the length field, encode, then backfill it
    length_pos = stream.tell()
    stream.write_ulong(0)

    body_start = stream.tell()
    _encode_body(message)
    body_end = stream.tell()

    stream.seek(length_pos)
    stream.write_ulong(body_end - body_start)
    stream.seek(body_end)
def _get_status(status):
    """
    Get the on-the-wire target suffix (e.g. C{'/onResult'}) for a status
    code.

    @param status: Member of L{STATUS_CODES}.
    @raise ValueError: Unknown status code.
    @see: L{STATUS_CODES}
    """
    if status not in STATUS_CODES:
        # the offending code is now included in the message (old TODO)
        raise ValueError("Unknown status code %r" % (status,))

    return STATUS_CODES[status]
def get_fault_class(level, **kwargs):
    """
    Return the L{BaseFault} subclass for the given fault level.

    Only the C{'error'} level has a dedicated class; any other level falls
    back to L{BaseFault}.
    """
    return ErrorFault if level == 'error' else BaseFault
def get_fault(data):
    """
    Build a fault instance from a decoded fault mapping.

    @param data: Mapping of fault attributes; the C{'level'} key is removed
        from it in place and defaults to C{'error'} when absent.
    """
    try:
        level = data.pop('level')
    except KeyError:
        level = 'error'

    attrs = {}

    # coerce unicode keys to native strings so they can be passed as
    # keyword arguments (Python 2)
    for key, value in data.iteritems():
        attrs[str(key) if isinstance(key, unicode) else key] = value

    return get_fault_class(level, **attrs)(**attrs)
def decode(stream, strict=False, logger=None, timezone_offset=None):
    """
    Decodes the incoming stream as a remoting message.

    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param strict: Enforce strict decoding. Default is `False`.
    @param logger: Used to log interesting events whilst decoding a remoting
        message.
    @type logger: U{logging.Logger<http://
        docs.python.org/library/logging.html#loggers>}
    @param timezone_offset: The difference between the current timezone and
        UTC. Date/times should always be handled in UTC to avoid confusion but
        this is required for legacy systems.
    @type timezone_offset: U{datetime.datetime.timedelta<http://
        docs.python.org/library/datetime.html#datetime.timedelta}
    @raise pyamf.DecodeError: Malformed stream.
    @raise RuntimeError: Strict mode and the buffer was not fully consumed.
    @return: Message L{envelope<Envelope>}.
    """
    if not isinstance(stream, util.BufferedByteStream):
        stream = util.BufferedByteStream(stream)

    if logger:
        logger.debug('remoting.decode start')

    msg = Envelope()

    msg.amfVersion = stream.read_ushort()

    # see http://osflash.org/documentation/amf/envelopes/remoting#preamble
    # why we are doing this...
    if msg.amfVersion > 0x09:
        raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
            msg.amfVersion)

    # the envelope itself is always decoded as AMF0; AMF3 payloads are
    # embedded via the 0x11 marker when use_amf3 is set (see _read_body)
    decoder = pyamf.get_decoder(pyamf.AMF0, stream, strict=strict,
        timezone_offset=timezone_offset)
    context = decoder.context

    decoder.use_amf3 = msg.amfVersion == pyamf.AMF3

    header_count = stream.read_ushort()

    for i in xrange(header_count):
        name, required, data = _read_header(stream, decoder, strict)
        msg.headers[name] = data

        if required:
            msg.headers.set_required(name)

    body_count = stream.read_short()

    for i in xrange(body_count):
        # object references must not leak between bodies
        context.clear()

        target, payload = _read_body(stream, decoder, strict, logger)
        msg[target] = payload

    if strict and stream.remaining() > 0:
        raise RuntimeError("Unable to fully consume the buffer")

    if logger:
        logger.debug('remoting.decode end')

    return msg
def encode(msg, strict=False, logger=None, timezone_offset=None):
    """
    Encodes and returns the L{msg<Envelope>} as an AMF stream.

    @param strict: Enforce strict encoding. Default is C{False}. Specifically
        header/body lengths will be written correctly, instead of the default
        0. Default is `False`. Introduced in 0.4.
    @param logger: Used to log interesting events whilst decoding a remoting
        message.
    @type logger: U{logging.Logger<http://
        docs.python.org/library/logging.html#loggers>}
    @param timezone_offset: The difference between the current timezone and
        UTC. Date/times should always be handled in UTC to avoid confusion but
        this is required for legacy systems.
    @type timezone_offset: U{datetime.datetime.timedelta<http://
        docs.python.org/library/datetime.html#datetime.timedelta}
    @rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    """
    stream = util.BufferedByteStream()

    # the envelope is always encoded as AMF0; AMF3 is enabled per-element
    # below when the envelope version requires it
    encoder = pyamf.get_encoder(pyamf.AMF0, stream, strict=strict,
        timezone_offset=timezone_offset)

    if msg.amfVersion == pyamf.AMF3:
        encoder.use_amf3 = True

    stream.write_ushort(msg.amfVersion)

    stream.write_ushort(len(msg.headers))

    for name, header in msg.headers.iteritems():
        _write_header(name, header, int(msg.headers.is_required(name)),
            stream, encoder, strict)

    stream.write_short(len(msg))

    for name, message in msg.iteritems():
        # object references must not leak between bodies
        encoder.context.clear()

        _write_body(name, message, stream, encoder, strict)

    # rewind so callers can read the encoded stream from the beginning
    stream.seek(0)

    return stream
def get_exception_from_fault(fault):
    """
    Map a fault's error code to its registered exception class.

    Falls back to L{RemotingError} when the code is not registered in
    C{pyamf.ERROR_CLASS_MAP}.
    """
    return pyamf.ERROR_CLASS_MAP.get(fault.code, RemotingError)


pyamf.register_class(ErrorFault)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.

"""
Gateway for Google App Engine.

This gateway allows you to expose functions in Google App Engine web
applications to AMF clients and servers.

@see: U{Google App Engine homepage
    <http://code.google.com/appengine/docs/python/overview.html>}
@since: 0.3.1
"""

import sys
import os.path

# remove this package's directory from sys.path so the `google` import below
# resolves to the App Engine SDK package rather than anything local
try:
    sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
except ValueError:
    pass

# __import__ returns the top-level 'google' package; drill down for webapp
google = __import__('google.appengine.ext.webapp')
webapp = google.appengine.ext.webapp

from pyamf import remoting, DecodeError
from pyamf.remoting import gateway

__all__ = ['WebAppGateway']
class WebAppGateway(webapp.RequestHandler, gateway.BaseGateway):
    """
    Google App Engine Remoting Gateway.

    Subclasses C{webapp.RequestHandler} so an instance can be wired straight
    into a C{WSGIApplication} url map.
    """

    __name__ = None

    def __init__(self, *args, **kwargs):
        gateway.BaseGateway.__init__(self, *args, **kwargs)

    def getResponse(self, request):
        """
        Processes the AMF request, returning an AMF response.

        :param request: The AMF Request.
        :type request: :class:`Envelope<pyamf.remoting.Envelope>`
        :rtype: :class:`Envelope<pyamf.remoting.Envelope>`
        :return: The AMF Response.
        """
        response = remoting.Envelope(request.amfVersion)

        for name, message in request:
            # expose the AMF message to the service via the HTTP request
            self.request.amf_request = message

            processor = self.getProcessor(message)
            response[name] = processor(message, http_request=self.request)

        return response

    def _write_plain_error(self, status, body):
        """
        Finalise a plain-text error response with the given HTTP status.

        Factors out the error sequence previously duplicated three times in
        L{post}.
        """
        self.error(status)

        self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers['Server'] = gateway.SERVER_NAME
        self.response.out.write(body)

    def get(self):
        # AMF requests must be POSTed; answer GET with 405
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers['Server'] = gateway.SERVER_NAME

        self.error(405)

        self.response.out.write("405 Method Not Allowed\n\n"
            "To access this PyAMF gateway you must use POST requests "
            "(%s received)" % self.request.method)

    def post(self):
        """
        Decode the AMF request body, process each message and write the
        encoded AMF response. A failure in any stage produces a plain-text
        error page instead.
        """
        body = self.request.body_file.read()
        stream = None

        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(body, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except (DecodeError, IOError):
            if self.logger:
                self.logger.exception('Error decoding AMF request')

            response = ("400 Bad Request\n\nThe request body was unable to "
                "be successfully decoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            self._write_plain_error(400, response)

            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Unexpected error decoding AMF request')

            response = ('500 Internal Server Error\n\n'
                'An unexpected error occurred.')

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            self._write_plain_error(500, response)

            return

        if self.logger:
            self.logger.debug("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(request)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Error processing AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be successfully processed.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            self._write_plain_error(500, response)

            return

        if self.logger:
            self.logger.debug("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except:
            if self.logger:
                self.logger.exception('Error encoding AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be encoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            self._write_plain_error(500, response)

            return

        response = stream.getvalue()

        self.response.headers['Content-Type'] = remoting.CONTENT_TYPE
        self.response.headers['Content-Length'] = str(len(response))
        self.response.headers['Server'] = gateway.SERVER_NAME

        self.response.out.write(response)

    def __call__(self, request, *args, **kwargs):
        app = webapp.WSGIApplication

        # try to get the threadlocal WSGIApplication.app instance
        app = getattr(app, 'app', app)

        response = app.response_class()

        self.initialize(request, response)
        self.dispatch()

        return response
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.

"""
Gateway for the Django framework.

This gateway allows you to expose functions in Django to AMF clients and
servers.

@see: U{Django homepage<http://djangoproject.com>}
@since: 0.1.0
"""

# __import__('django.http') returns the top-level 'django' package
django = __import__('django.http')
http = django.http

# likewise for django.conf; rebind 'conf' to the submodule
conf = __import__('django.conf')
conf = conf.conf

import pyamf
from pyamf import remoting
from pyamf.remoting import gateway

__all__ = ['DjangoGateway']
class DjangoGateway(gateway.BaseGateway):
    """
    An instance of this class is suitable as a Django view.

    An example usage would be through C{urlconf}::

        from django.conf.urls.defaults import *

        urlpatterns = patterns('',
            (r'^gateway/', 'yourproject.yourapp.gateway.gw_instance'),
        )

    where C{yourproject.yourapp.gateway.gw_instance} refers to an instance of
    this class.

    @ivar expose_request: The standard Django view always has the request
        object as the first parameter. To disable this functionality, set this
        to C{False}.
    """

    # opt out of Django's CSRF protection; AMF clients cannot send a token
    csrf_exempt = True

    def __init__(self, *args, **kwargs):
        kwargs['expose_request'] = kwargs.get('expose_request', True)

        # fall back to project settings for the timezone offset and debug
        # flag when they are not supplied explicitly
        try:
            tz = conf.settings.AMF_TIME_OFFSET
        except AttributeError:
            tz = None

        try:
            debug = conf.settings.DEBUG
        except AttributeError:
            debug = False

        kwargs['timezone_offset'] = kwargs.get('timezone_offset', tz)
        kwargs['debug'] = kwargs.get('debug', debug)

        gateway.BaseGateway.__init__(self, *args, **kwargs)

    def getResponse(self, http_request, request):
        """
        Processes the AMF request, returning an AMF response.

        @param http_request: The underlying HTTP Request.
        @type http_request: U{HTTPRequest<http://docs.djangoproject.com
            /en/dev/ref/request-response/#httprequest-objects>}
        @param request: The AMF Request.
        @type request: L{Envelope<pyamf.remoting.Envelope>}
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        """
        response = remoting.Envelope(request.amfVersion)

        for name, message in request:
            # expose the AMF message to the service via the HTTP request
            http_request.amf_request = message

            processor = self.getProcessor(message)
            response[name] = processor(message, http_request=http_request)

        return response

    def __call__(self, http_request):
        """
        Processes and dispatches the request.

        Decodes the AMF payload, dispatches each message to its service, and
        returns the encoded AMF response; failures in any stage return a
        plain-text HTTP error response instead.
        """
        if http_request.method != 'POST':
            return http.HttpResponseNotAllowed(['POST'])

        stream = None

        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(http_request.raw_post_data,
                strict=self.strict, logger=self.logger,
                timezone_offset=timezone_offset)
        except (pyamf.DecodeError, IOError):
            if self.logger:
                self.logger.exception('Error decoding AMF request')

            response = ("400 Bad Request\n\nThe request body was unable to "
                "be successfully decoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            # support for Django 0.96 (no status kwarg on HttpResponse)
            http_response = http.HttpResponse(mimetype='text/plain',
                content=response)
            http_response.status_code = 400

            return http_response
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Unexpected error decoding AMF request')

            response = ('500 Internal Server Error\n\n'
                'An unexpected error occurred.')

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return http.HttpResponseServerError(mimetype='text/plain',
                content=response)

        if self.logger:
            self.logger.debug("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(http_request, request)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Error processing AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be successfully processed.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return http.HttpResponseServerError(mimetype='text/plain',
                content=response)

        if self.logger:
            self.logger.debug("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except:
            if self.logger:
                self.logger.exception('Error encoding AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be encoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return http.HttpResponseServerError(
                mimetype='text/plain', content=response)

        buf = stream.getvalue()

        http_response = http.HttpResponse(mimetype=remoting.CONTENT_TYPE)
        http_response['Server'] = gateway.SERVER_NAME
        http_response['Content-Length'] = str(len(buf))

        http_response.write(buf)

        return http_response
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.

"""
Twisted server implementation.

This gateway allows you to expose functions in Twisted to AMF clients and
servers.

@see: U{Twisted homepage<http://twistedmatrix.com>}
@since: 0.1.0
"""

import sys
import os.path

# remove the current directory and this package's directory from sys.path so
# that 'import twisted' below cannot pick up a local shadowing module
try:
    sys.path.remove('')
except ValueError:
    pass

try:
    sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
except ValueError:
    pass

twisted = __import__('twisted')

# import the submodules so they become attributes of the 'twisted' package
__import__('twisted.internet.defer')
__import__('twisted.internet.threads')
__import__('twisted.web.resource')
__import__('twisted.web.server')

defer = twisted.internet.defer
threads = twisted.internet.threads
resource = twisted.web.resource
server = twisted.web.server

from pyamf import remoting
from pyamf.remoting import gateway, amf0, amf3

__all__ = ['TwistedGateway']
class AMF0RequestProcessor(amf0.RequestProcessor):
    """
    A Twisted friendly implementation of
    L{amf0.RequestProcessor<pyamf.remoting.amf0.RequestProcessor>}
    """

    def __call__(self, request, *args, **kwargs):
        """
        Calls the underlying service method.

        The pipeline runs resolve-service -> authenticate -> preprocess ->
        invoke, each step wrapped in C{maybeDeferred} so services may return
        Deferreds at any stage; any failure is converted to an AMF error
        response via C{eb}.

        @return: A C{Deferred} that will contain the AMF L{Response}.
        @rtype: C{twisted.internet.defer.Deferred}
        """
        try:
            service_request = self.gateway.getServiceRequest(
                request, request.target)
        except gateway.UnknownServiceError:
            return defer.succeed(self.buildErrorResponse(request))

        response = remoting.Response(None)
        deferred_response = defer.Deferred()

        def eb(failure):
            # any failure along the chain becomes an AMF error response
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.info(failure.getTraceback())

            deferred_response.callback(self.buildErrorResponse(
                request, (failure.type, failure.value, failure.tb)))

        def response_cb(result):
            if self.gateway.logger:
                self.gateway.logger.debug("AMF Response: %s" % (result,))

            response.body = result

            deferred_response.callback(response)

        def preprocess_cb(result):
            d = defer.maybeDeferred(self._getBody, request, response,
                service_request, **kwargs)

            d.addCallback(response_cb).addErrback(eb)

        def auth_cb(result):
            # authenticateRequest must return exactly True to proceed
            if result is not True:
                response.status = remoting.STATUS_ERROR
                response.body = remoting.ErrorFault(code='AuthenticationError',
                    description='Authentication failed')

                deferred_response.callback(response)

                return

            d = defer.maybeDeferred(self.gateway.preprocessRequest,
                service_request, *args, **kwargs)

            d.addCallback(preprocess_cb).addErrback(eb)

        # we have a valid service, now attempt authentication
        d = defer.maybeDeferred(self.authenticateRequest, request,
            service_request, **kwargs)

        d.addCallback(auth_cb).addErrback(eb)

        return deferred_response
class AMF3RequestProcessor(amf3.RequestProcessor):
    """
    A Twisted friendly implementation of
    L{amf3.RequestProcessor<pyamf.remoting.amf3.RequestProcessor>}
    """

    def _processRemotingMessage(self, amf_request, ro_request, **kwargs):
        """
        Handle a RemoteObject message: resolve the service, preprocess,
        invoke, then wrap the result (or failure) in an acknowledgement or
        error message.

        @return: A C{Deferred} firing with an AMF L{remoting.Response}.
        """
        ro_response = amf3.generate_acknowledgement(ro_request)

        try:
            service_name = ro_request.operation

            # the destination, when supplied, namespaces the operation
            if hasattr(ro_request, 'destination') and ro_request.destination:
                service_name = '%s.%s' % (ro_request.destination, service_name)

            service_request = self.gateway.getServiceRequest(amf_request,
                service_name)
        except gateway.UnknownServiceError:
            return defer.succeed(remoting.Response(
                self.buildErrorResponse(ro_request),
                status=remoting.STATUS_ERROR))

        deferred_response = defer.Deferred()

        def eb(failure):
            # failures become an AMF error response wrapping the fault
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.error(failure.getTraceback())

            ro_response = self.buildErrorResponse(ro_request, (failure.type,
                failure.value, failure.tb))

            deferred_response.callback(remoting.Response(ro_response,
                status=remoting.STATUS_ERROR))

        def response_cb(result):
            ro_response.body = result
            res = remoting.Response(ro_response)

            if self.gateway.logger:
                self.gateway.logger.debug("AMF Response: %r" % (res,))

            deferred_response.callback(res)

        def process_cb(result):
            d = defer.maybeDeferred(self.gateway.callServiceRequest,
                service_request, *ro_request.body, **kwargs)

            d.addCallback(response_cb).addErrback(eb)

        d = defer.maybeDeferred(self.gateway.preprocessRequest, service_request,
            *ro_request.body, **kwargs)

        d.addCallback(process_cb).addErrback(eb)

        return deferred_response

    def __call__(self, amf_request, **kwargs):
        """
        Calls the underlying service method.

        @return: A C{deferred} that will contain the AMF L{Response}.
        @rtype: C{Deferred<twisted.internet.defer.Deferred>}
        """
        deferred_response = defer.Deferred()
        ro_request = amf_request.body[0]

        def cb(amf_response):
            deferred_response.callback(amf_response)

        def eb(failure):
            # on failure, answer with an error message for the RO request
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.error(failure.getTraceback())

            deferred_response.callback(self.buildErrorResponse(ro_request,
                (failure.type, failure.value, failure.tb)))

        d = defer.maybeDeferred(self._getBody, amf_request, ro_request, **kwargs)

        d.addCallback(cb).addErrback(eb)

        return deferred_response
class TwistedGateway(gateway.BaseGateway, resource.Resource):
"""
Twisted Remoting gateway for C{twisted.web}.
@ivar expose_request: Forces the underlying HTTP request to be the first
argument to any service call.
@type expose_request: C{bool}
"""
allowedMethods = ('POST',)
def __init__(self, *args, **kwargs):
if 'expose_request' not in kwargs:
kwargs['expose_request'] = True
gateway.BaseGateway.__init__(self, *args, **kwargs)
resource.Resource.__init__(self)
def _finaliseRequest(self, request, status, content, mimetype='text/plain'):
"""
Finalises the request.
@param request: The HTTP Request.
@type request: C{http.Request}
@param status: The HTTP status code.
@type status: C{int}
@param content: The content of the response.
@type content: C{str}
@param mimetype: The MIME type of the request.
@type mimetype: C{str}
"""
request.setResponseCode(status)
request.setHeader("Content-Type", mimetype)
request.setHeader("Content-Length", str(len(content)))
request.setHeader("Server", gateway.SERVER_NAME)
request.write(content)
request.finish()
    def render_POST(self, request):
        """
        Read remoting request from the client.

        Decoding is pushed to a worker thread via C{deferToThread}; the HTTP
        response is finished asynchronously, hence C{NOT_DONE_YET} is
        returned.

        @type request: The HTTP Request.
        @param request: C{twisted.web.http.Request}
        """
        def handleDecodeError(failure):
            """
            Return HTTP 400 Bad Request.
            """
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.logger:
                self.logger.error(errMesg)
                self.logger.error(failure.getTraceback())

            body = "400 Bad Request\n\nThe request body was unable to " \
                "be successfully decoded."

            if self.debug:
                body += "\n\nTraceback:\n\n%s" % failure.getTraceback()

            self._finaliseRequest(request, 400, body)

        # rewind the content stream before reading the full body
        request.content.seek(0, 0)

        timezone_offset = self._get_timezone_offset()

        d = threads.deferToThread(remoting.decode, request.content.read(),
            strict=self.strict, logger=self.logger,
            timezone_offset=timezone_offset)

        def cb(amf_request):
            if self.logger:
                self.logger.debug("AMF Request: %r" % amf_request)

            x = self.getResponse(request, amf_request)

            x.addCallback(self.sendResponse, request)

        # Process the request
        d.addCallback(cb).addErrback(handleDecodeError)

        return server.NOT_DONE_YET
def sendResponse(self, amf_response, request):
def cb(result):
self._finaliseRequest(request, 200, result.getvalue(),
remoting.CONTENT_TYPE)
def eb(failure):
"""
Return 500 Internal Server Error.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "500 Internal Server Error\n\nThere was an error encoding " \
"the response."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 500, body)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.encode, amf_response,
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
d.addCallback(cb).addErrback(eb)
def getProcessor(self, request):
"""
Determines the request processor, based on the request.
@param request: The AMF message.
@type request: L{Request<pyamf.remoting.Request>}
"""
if request.target == 'null':
return AMF3RequestProcessor(self)
return AMF0RequestProcessor(self)
def getResponse(self, http_request, amf_request):
"""
Processes the AMF request, returning an AMF L{Response}.
@param http_request: The underlying HTTP Request
@type http_request: C{twisted.web.http.Request}
@param amf_request: The AMF Request.
@type amf_request: L{Envelope<pyamf.remoting.Envelope>}
"""
response = remoting.Envelope(amf_request.amfVersion)
dl = []
def cb(body, name):
response[name] = body
for name, message in amf_request:
processor = self.getProcessor(message)
http_request.amf_request = message
d = defer.maybeDeferred(
processor, message, http_request=http_request)
dl.append(d.addCallback(cb, name))
def cb2(result):
return response
def eb(failure):
"""
Return 500 Internal Server Error.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "500 Internal Server Error\n\nThe request was unable to " \
"be successfully processed."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(http_request, 500, body)
d = defer.DeferredList(dl)
return d.addCallback(cb2).addErrback(eb)
def authenticateRequest(self, service_request, username, password, **kwargs):
"""
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: C{Deferred}.
@rtype: C{twisted.internet.defer.Deferred}
"""
authenticator = self.getAuthenticator(service_request)
if self.logger:
self.logger.debug('Authenticator expands to: %r' % authenticator)
if authenticator is None:
return defer.succeed(True)
args = (username, password)
if hasattr(authenticator, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return defer.maybeDeferred(authenticator, *args)
def preprocessRequest(self, service_request, *args, **kwargs):
"""
Preprocesses a request.
"""
processor = self.getPreprocessor(service_request)
if self.logger:
self.logger.debug('Preprocessor expands to: %r' % processor)
if processor is None:
return
args = (service_request,) + args
if hasattr(processor, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return defer.maybeDeferred(processor, *args)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting server implementations.
@since: 0.1.0
"""
import sys
import types
import datetime
import pyamf
from pyamf import remoting, util, python
# Work out which Python implementation is serving (older interpreters may
# not provide platform.python_implementation, hence the fallback).
try:
    from platform import python_implementation

    impl = python_implementation()
except ImportError:
    impl = 'Python'

#: Value used for the HTTP ``Server`` header on gateway responses.
SERVER_NAME = 'PyAMF/%s %s/%s' % (
    pyamf.version, impl,
    '.'.join(str(part) for part in sys.version_info[:3])
)
class BaseServiceError(pyamf.BaseError):
    """
    Base service error.

    Root of the service-level exception hierarchy raised by the gateway.
    """
class UnknownServiceError(BaseServiceError):
    """
    Client made a request for an unknown service.
    """

    #: AMF status code reported back to the client for this error.
    _amf_code = 'Service.ResourceNotFound'
class UnknownServiceMethodError(BaseServiceError):
    """
    Client made a request for an unknown method.
    """

    #: AMF status code reported back to the client for this error.
    _amf_code = 'Service.MethodNotFound'
class InvalidServiceMethodError(BaseServiceError):
    """
    Client made a request for an invalid methodname.
    """

    #: AMF status code reported back to the client for this error.
    _amf_code = 'Service.MethodInvalid'
class ServiceWrapper(object):
    """
    Wraps a supplied service with extra functionality.

    @ivar service: The original service.
    @type service: C{callable}
    @ivar description: A description of the service.
    @type description: C{str}
    @ivar authenticator: Optional per-service authenticator callable.
    @ivar expose_request: Whether the HTTP request is exposed as the first
        call argument; C{None} means "defer to the gateway default".
    @ivar preprocessor: Optional per-service preprocessor callable.
    """

    def __init__(self, service, description=None, authenticator=None,
        expose_request=None, preprocessor=None):
        self.service = service
        self.description = description
        self.authenticator = authenticator
        self.expose_request = expose_request
        self.preprocessor = preprocessor

    def __cmp__(self, other):
        # Python 2 three-way comparison: wrappers compare by full state, and
        # a wrapper compares equal to its bare service object.
        if isinstance(other, ServiceWrapper):
            return cmp(self.__dict__, other.__dict__)

        return cmp(self.service, other)

    def _get_service_func(self, method, params):
        """
        Resolves the callable to invoke for C{method}.

        @raise InvalidServiceMethodError: Calls to private methods are not
            allowed.
        @raise UnknownServiceMethodError: Unknown method.
        @raise InvalidServiceMethodError: Service method must be callable.
        """
        service = None

        # if the wrapped service is a class, instantiate it per call
        if isinstance(self.service, (type, types.ClassType)):
            service = self.service()
        else:
            service = self.service

        if method is not None:
            method = str(method)

            # a leading underscore marks a private method
            if method.startswith('_'):
                raise InvalidServiceMethodError(
                    "Calls to private methods are not allowed")

            try:
                func = getattr(service, method)
            except AttributeError:
                raise UnknownServiceMethodError(
                    "Unknown method %s" % str(method))

            if not python.callable(func):
                raise InvalidServiceMethodError(
                    "Service method %s must be callable" % str(method))

            return func

        # no method name: the service itself is invoked directly
        if not python.callable(service):
            raise UnknownServiceMethodError(
                "Unknown method %s" % str(self.service))

        return service

    def __call__(self, method, params):
        """
        Executes the service.

        If the service is a class, it will be instantiated.

        @param method: The method to call on the service.
        @type method: C{None} or C{mixed}
        @param params: The params to pass to the service.
        @type params: C{list} or C{tuple}
        @return: The result of the execution.
        @rtype: C{mixed}
        """
        func = self._get_service_func(method, params)

        return func(*params)

    def getMethods(self):
        """
        Gets a C{dict} of valid method callables for the underlying service
        object.
        """
        callables = {}

        for name in dir(self.service):
            method = getattr(self.service, name)

            # skip private names and non-callable attributes
            if name.startswith('_') or not python.callable(method):
                continue

            callables[name] = method

        return callables

    def getAuthenticator(self, service_request=None):
        """
        Returns the authenticator for C{service_request}: the target
        method's (or service's) C{_pyamf_authenticator} attribute when set,
        otherwise this wrapper's default authenticator.
        """
        if service_request is None:
            return self.authenticator

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_authenticator'):
                return self.service._pyamf_authenticator

        if service_request.method not in methods:
            return self.authenticator

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_authenticator'):
            return method._pyamf_authenticator

        return self.authenticator

    def mustExposeRequest(self, service_request=None):
        """
        Whether the HTTP request must be exposed as the first call argument.
        Checks the target method (or service) first, then falls back to this
        wrapper's default.
        """
        if service_request is None:
            return self.expose_request

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_expose_request'):
                return self.service._pyamf_expose_request

            return self.expose_request

        if service_request.method not in methods:
            return self.expose_request

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_expose_request'):
            return method._pyamf_expose_request

        return self.expose_request

    def getPreprocessor(self, service_request=None):
        """
        Returns the preprocessor for C{service_request}: the target method's
        (or service's) C{_pyamf_preprocessor} attribute when set, otherwise
        this wrapper's default preprocessor.
        """
        if service_request is None:
            return self.preprocessor

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_preprocessor'):
                return self.service._pyamf_preprocessor

        if service_request.method not in methods:
            return self.preprocessor

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_preprocessor'):
            return method._pyamf_preprocessor

        return self.preprocessor
class ServiceRequest(object):
    """
    A single remoting service invocation.

    @ivar request: The request to service.
    @type request: L{Envelope<pyamf.remoting.Envelope>}
    @ivar service: Facilitates the request.
    @type service: L{ServiceWrapper}
    @ivar method: The method to call on the service. A value of C{None}
        means that the service will be called directly.
    @type method: C{None} or C{str}
    """

    def __init__(self, amf_request, service, method):
        self.method = method
        self.service = service
        self.request = amf_request

    def __call__(self, *args):
        # delegate straight to the wrapped service
        return self.service(self.method, args)
class ServiceCollection(dict):
    """
    I hold a collection of services, mapping names to objects.

    Membership tests accept either a service name (string) or a service
    object (matched against the stored values).
    """

    def __contains__(self, value):
        """
        @param value: A service name or a service object.
        @return: Whether the name is a key, or the object is a stored value.
        @rtype: C{bool}
        """
        if isinstance(value, basestring):
            # O(1) hash lookup instead of materialising and scanning the
            # key list; dict.__contains__ is called explicitly to avoid
            # recursing into this override.
            return dict.__contains__(self, value)

        return value in self.values()
class BaseGateway(object):
    """
    Generic Remoting gateway.

    @ivar services: A map of service names to callables.
    @type services: L{ServiceCollection}
    @ivar authenticator: A callable that will check the credentials of
        the request before allowing access to the service. Will return a
        C{bool} value.
    @type authenticator: C{Callable} or C{None}
    @ivar preprocessor: Called before the actual service method is invoked.
        Useful for setting up sessions etc.
    @type preprocessor: C{Callable} or C{None}
    @ivar logger: A logging instance.
    @ivar strict: Defines whether the gateway should use strict en/decoding.
    @type strict: C{bool}
    @ivar timezone_offset: A U{datetime.datetime.timedelta<http://
        docs.python.org/library/datetime.html#datetime.timedelta} between UTC
        and the timezone to be encoded. Most dates should be handled as UTC to
        avoid confusion but for older legacy systems this is not an option.
        Supplying an int as this will be interpreted in seconds.
    @ivar debug: Provides debugging information when an error occurs. Use only
        in non production settings.
    @type debug: C{bool}
    """

    #: Class used to build per-call service requests; subclasses may override.
    _request_class = ServiceRequest

    def __init__(self, services=None, **kwargs):
        if services is None:
            services = {}

        # any mapping providing iteritems() is acceptable
        if not hasattr(services, 'iteritems'):
            raise TypeError("dict type required for services")

        self.services = ServiceCollection()
        self.authenticator = kwargs.pop('authenticator', None)
        self.preprocessor = kwargs.pop('preprocessor', None)
        self.expose_request = kwargs.pop('expose_request', False)
        self.strict = kwargs.pop('strict', False)
        self.logger = kwargs.pop('logger', None)
        self.timezone_offset = kwargs.pop('timezone_offset', None)
        self.debug = kwargs.pop('debug', False)

        # reject unrecognised keyword arguments early
        if kwargs:
            raise TypeError('Unknown kwargs: %r' % (kwargs,))

        for name, service in services.iteritems():
            self.addService(service, name)

    def addService(self, service, name=None, description=None,
        authenticator=None, expose_request=None, preprocessor=None):
        """
        Adds a service to the gateway.

        @param service: The service to add to the gateway.
        @type service: C{callable}, class instance, or a module
        @param name: The name of the service.
        @type name: C{str}
        @raise pyamf.remoting.RemotingError: Service already exists.
        @raise TypeError: C{service} cannot be a scalar value.
        @raise TypeError: C{service} must be C{callable} or a module.
        """
        if isinstance(service, (int, long, float, basestring)):
            raise TypeError("Service cannot be a scalar value")

        allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
            types.MethodType, types.InstanceType, types.ObjectType)

        if not python.callable(service) and not isinstance(service, allowed_types):
            raise TypeError("Service must be a callable, module, or an object")

        if name is None:
            # TODO: include the module in the name
            if isinstance(service, (type, types.ClassType)):
                name = service.__name__
            elif isinstance(service, types.FunctionType):
                name = service.func_name
            elif isinstance(service, types.ModuleType):
                name = service.__name__
            else:
                name = str(service)

        if name in self.services:
            raise remoting.RemotingError("Service %s already exists" % name)

        self.services[name] = ServiceWrapper(service, description,
            authenticator, expose_request, preprocessor)

    def _get_timezone_offset(self):
        """
        Normalises C{timezone_offset} to a C{datetime.timedelta} (ints are
        interpreted as seconds) or C{None} when unset.
        """
        if self.timezone_offset is None:
            return None

        if isinstance(self.timezone_offset, datetime.timedelta):
            return self.timezone_offset

        return datetime.timedelta(seconds=self.timezone_offset)

    def removeService(self, service):
        """
        Removes a service from the gateway.

        @param service: Either the name of the service or the service object
            itself.
        @type service: C{str}, C{callable} or a class instance
        @raise NameError: Service not found.
        """
        for name, wrapper in self.services.iteritems():
            if service in (name, wrapper.service):
                # safe to mutate here: we return immediately, so the
                # iterator is never advanced after the deletion
                del self.services[name]

                return

        raise NameError("Service %r not found" % (service,))

    def getServiceRequest(self, request, target):
        """
        Returns a service based on the message.

        @raise UnknownServiceError: Unknown service.
        @param request: The AMF request.
        @type request: L{Request<pyamf.remoting.Request>}
        @param target: The request target, either a bare service name or a
            dotted C{'<service>.<method>'} path.
        @rtype: L{ServiceRequest}
        """
        try:
            # target may name a registered service directly ...
            return self._request_class(
                request.envelope, self.services[target], None)
        except KeyError:
            pass

        try:
            # ... or be a dotted '<service>.<method>' path
            sp = target.split('.')
            name, meth = '.'.join(sp[:-1]), sp[-1]

            return self._request_class(
                request.envelope, self.services[name], meth)
        except (ValueError, KeyError):
            pass

        raise UnknownServiceError("Unknown service %s" % target)

    def getProcessor(self, request):
        """
        Returns request processor.

        @param request: The AMF message.
        @type request: L{Request<remoting.Request>}
        """
        # a 'null' (or empty) target marks an AMF3/RemoteObject request
        if request.target == 'null' or not request.target:
            from pyamf.remoting import amf3

            return amf3.RequestProcessor(self)
        else:
            from pyamf.remoting import amf0

            return amf0.RequestProcessor(self)

    def getResponse(self, amf_request):
        """
        Returns the response to the request.

        Any implementing gateway must define this function.

        @param amf_request: The AMF request.
        @type amf_request: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF response.
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        """
        raise NotImplementedError

    def mustExposeRequest(self, service_request):
        """
        Decides whether the underlying http request should be exposed as the
        first argument to the method call. This is granular, looking at the
        service method first, then at the service level and finally checking
        the gateway.

        @rtype: C{bool}
        """
        expose_request = service_request.service.mustExposeRequest(service_request)

        if expose_request is None:
            # nothing set at method/service level - fall back to the gateway
            if self.expose_request is None:
                return False

            return self.expose_request

        return expose_request

    def getAuthenticator(self, service_request):
        """
        Gets an authenticator callable based on the service_request. This is
        granular, looking at the service method first, then at the service
        level and finally to see if there is a global authenticator function
        for the gateway. Returns C{None} if one could not be found.
        """
        auth = service_request.service.getAuthenticator(service_request)

        if auth is None:
            return self.authenticator

        return auth

    def authenticateRequest(self, service_request, username, password, **kwargs):
        """
        Processes an authentication request. If no authenticator is supplied,
        then authentication succeeds.

        @return: Returns a C{bool} based on the result of authorization. A
            value of C{False} will stop processing the request and return an
            error to the client.
        @rtype: C{bool}
        """
        authenticator = self.getAuthenticator(service_request)

        if authenticator is None:
            return True

        args = (username, password)

        # authenticators decorated with expose_request receive the http
        # request as their first argument
        if hasattr(authenticator, '_pyamf_expose_request'):
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        return authenticator(*args) == True

    def getPreprocessor(self, service_request):
        """
        Gets a preprocessor callable based on the service_request. This is
        granular, looking at the service method first, then at the service
        level and finally to see if there is a global preprocessor function
        for the gateway. Returns C{None} if one could not be found.
        """
        preproc = service_request.service.getPreprocessor(service_request)

        if preproc is None:
            return self.preprocessor

        return preproc

    def preprocessRequest(self, service_request, *args, **kwargs):
        """
        Preprocesses a request.

        @return: C{None} if no preprocessor is configured, otherwise the
            preprocessor's result.
        """
        processor = self.getPreprocessor(service_request)

        if processor is None:
            return

        args = (service_request,) + args

        if hasattr(processor, '_pyamf_expose_request'):
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        return processor(*args)

    def callServiceRequest(self, service_request, *args, **kwargs):
        """
        Executes the service_request call.
        """
        if self.mustExposeRequest(service_request):
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        return service_request(*args)
def authenticate(func, c, expose_request=False):
    """
    A decorator that facilitates authentication per method. Setting
    C{expose_request} to C{True} will set the underlying request object (if
    there is one), usually HTTP and set it to the first argument of the
    authenticating callable. If there is no request object, the default is
    C{None}.

    @raise TypeError: C{func} and authenticator must be callable.
    """
    if not python.callable(func):
        raise TypeError('func must be callable')

    if not python.callable(c):
        raise TypeError('Authenticator must be callable')

    # unbound methods carry their attributes on the underlying function
    target = func.im_func if isinstance(func, types.UnboundMethodType) else func

    if expose_request is True:
        # the local parameter shadows the module-level expose_request
        # decorator, so look it up via globals()
        c = globals()['expose_request'](c)

    target._pyamf_authenticator = c

    return func
def expose_request(func):
    """
    A decorator that adds an expose_request flag to the underlying callable.

    @raise TypeError: C{func} must be callable.
    """
    if not python.callable(func):
        raise TypeError("func must be callable")

    # unbound methods carry their attributes on the underlying function
    target = func.im_func if isinstance(func, types.UnboundMethodType) else func
    target._pyamf_expose_request = True

    return func
def preprocess(func, c, expose_request=False):
    """
    A decorator that facilitates preprocessing per method. Setting
    C{expose_request} to C{True} will set the underlying request object (if
    there is one), usually HTTP and set it to the first argument of the
    preprocessing callable. If there is no request object, the default is
    C{None}.

    @raise TypeError: C{func} and preprocessor must be callable.
    """
    if not python.callable(func):
        raise TypeError('func must be callable')

    if not python.callable(c):
        raise TypeError('Preprocessor must be callable')

    # unbound methods carry their attributes on the underlying function
    target = func.im_func if isinstance(func, types.UnboundMethodType) else func

    if expose_request is True:
        # the local parameter shadows the module-level expose_request
        # decorator, so look it up via globals()
        c = globals()['expose_request'](c)

    target._pyamf_preprocessor = c

    return func
def format_exception():
    """
    Render the currently-handled exception's traceback into a string,
    using the project's buffered stream type.
    """
    import traceback

    buf = util.BufferedByteStream()
    traceback.print_exc(file=buf)

    return buf.getvalue()
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
WSGI server implementation.
The Python Web Server Gateway Interface (WSGI) is a simple and universal
interface between web servers and web applications or frameworks.
The WSGI interface has two sides: the "server" or "gateway" side, and the
"application" or "framework" side. The server side invokes a callable
object (usually a function or a method) that is provided by the application
side. Additionally WSGI provides middlewares; a WSGI middleware implements
both sides of the API, so that it can be inserted "between" a WSGI server
and a WSGI application -- the middleware will act as an application from
the server's point of view, and as a server from the application's point
of view.
@see: U{WSGI homepage (external)<http://wsgi.org>}
@see: U{PEP-333 (external)<http://www.python.org/peps/pep-0333.html>}
@since: 0.1.0
"""
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['WSGIGateway']
class WSGIGateway(gateway.BaseGateway):
    """
    WSGI Remoting Gateway.
    """

    def getResponse(self, request, environ):
        """
        Processes the AMF request, returning an AMF response.

        @param request: The AMF Request.
        @type request: L{Envelope<pyamf.remoting.Envelope>}
        @param environ: The WSGI environ C{dict} for the active HTTP request.
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF Response.
        """
        response = remoting.Envelope(request.amfVersion)

        for name, message in request:
            processor = self.getProcessor(message)
            # expose the current AMF message to the processor via the environ
            environ['pyamf.request'] = message
            response[name] = processor(message, http_request=environ)

        return response

    def _sendPlainTextResponse(self, start_response, status, response):
        """
        Starts a plain-text WSGI response and returns the iterable body.

        Factored out so C{badRequestMethod} and the error paths in
        C{__call__} share a single header-building implementation.

        @param start_response: The WSGI C{start_response} callable.
        @param status: The HTTP status line, e.g. C{'400 Bad Request'}.
        @type status: C{str}
        @param response: The response body.
        @type response: C{str}
        @return: Single-element list containing C{response}.
        """
        start_response(status, [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response))),
            ('Server', gateway.SERVER_NAME),
        ])

        return [response]

    def badRequestMethod(self, environ, start_response):
        """
        Return HTTP 400 Bad Request.
        """
        response = "400 Bad Request\n\nTo access this PyAMF gateway you " \
            "must use POST requests (%s received)" % environ['REQUEST_METHOD']

        return self._sendPlainTextResponse(
            start_response, '400 Bad Request', response)

    def __call__(self, environ, start_response):
        """
        WSGI entry point: decode, process and encode one AMF request.

        @rtype: C{list}
        @return: Single-element list containing the response body string.
        """
        if environ['REQUEST_METHOD'] != 'POST':
            return self.badRequestMethod(environ, start_response)

        body = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
        stream = None
        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(body, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except (pyamf.DecodeError, IOError):
            if self.logger:
                self.logger.exception('Error decoding AMF request')

            response = "400 Bad Request\n\nThe request body was unable to " \
                "be successfully decoded."

            # only leak tracebacks to the client when debugging is enabled
            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return self._sendPlainTextResponse(
                start_response, '400 Bad Request', response)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Unexpected error decoding AMF request')

            response = ("500 Internal Server Error\n\nAn unexpected error "
                "occurred whilst decoding.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return self._sendPlainTextResponse(
                start_response, '500 Internal Server Error', response)

        if self.logger:
            self.logger.debug("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(request, environ)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception('Error processing AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be successfully processed.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return self._sendPlainTextResponse(
                start_response, '500 Internal Server Error', response)

        if self.logger:
            self.logger.debug("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                timezone_offset=timezone_offset)
        except:
            if self.logger:
                self.logger.exception('Error encoding AMF request')

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be encoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            return self._sendPlainTextResponse(
                start_response, '500 Internal Server Error', response)

        response = stream.getvalue()

        start_response('200 OK', [
            ('Content-Type', remoting.CONTENT_TYPE),
            ('Content-Length', str(len(response))),
            ('Server', gateway.SERVER_NAME),
        ])

        return [response]
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Data Management Service implementation.
This module contains the message classes used with Flex Data Management
Service.
@since: 0.1.0
"""
import pyamf
from pyamf.flex.messaging import AsyncMessage, AcknowledgeMessage, ErrorMessage
#: Namespace for C{flex.data} messages.
NAMESPACE = 'flex.data.messages'
__all__ = [
'DataMessage',
'SequencedMessage',
'PagedMessage',
'DataErrorMessage'
]
class DataMessage(AsyncMessage):
    """
    I am used to transport an operation that occurred on a managed object
    or collection.

    This class of message is transmitted between clients subscribed to a
    remote destination as well as between server nodes within a cluster.
    The payload of this message describes all of the relevant details of
    the operation. This information is used to replicate updates and detect
    conflicts.

    @see: U{DataMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/data/messages/DataMessage.html>}
    """

    def __init__(self):
        AsyncMessage.__init__(self)

        #: Provides access to the identity map which defines the
        #: unique identity of the item affected by this DataMessage
        #: (relevant for create/update/delete but not fill operations).
        self.identity = None
        #: Provides access to the operation/command of this DataMessage.
        #:
        #: Operations indicate how the remote destination should process
        #: this message.
        self.operation = None
class SequencedMessage(AcknowledgeMessage):
    """
    Response to L{DataMessage} requests.

    @see: U{SequencedMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/data/messages/SequencedMessage.html>}
    """

    def __init__(self):
        AcknowledgeMessage.__init__(self)

        #: Provides access to the sequence id for this message.
        #:
        #: The sequence id is a unique identifier for a sequence
        #: within a remote destination. This value is only unique for
        #: the endpoint and destination contacted.
        self.sequenceId = None
        #: NOTE(review): undocumented upstream - presumably proxy
        #: information for the sequence; confirm against the Flex SDK.
        self.sequenceProxies = None
        #: Provides access to the sequence size for this message.
        #:
        #: The sequence size indicates how many items reside in the
        #: remote sequence.
        self.sequenceSize = None
        #: NOTE(review): undocumented upstream - presumably the originating
        #: L{DataMessage}; confirm against the Flex SDK.
        self.dataMessage = None
class PagedMessage(SequencedMessage):
    """
    This message provides information about a partial sequence result.

    @see: U{PagedMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/data/messages/PagedMessage.html>}
    """

    def __init__(self):
        SequencedMessage.__init__(self)

        #: Provides access to the number of total pages in a sequence
        #: based on the current page size.
        self.pageCount = None
        #: Provides access to the index of the current page in a sequence.
        self.pageIndex = None
class DataErrorMessage(ErrorMessage):
    """
    Special cases of ErrorMessage will be sent when a data conflict
    occurs.

    This message provides the conflict information in addition to
    the L{ErrorMessage<pyamf.flex.messaging.ErrorMessage>} information.

    @see: U{DataErrorMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/data/messages/DataErrorMessage.html>}
    """

    def __init__(self):
        ErrorMessage.__init__(self)

        #: The client originated message which caused the conflict.
        self.cause = None
        #: An array of properties that were found to be conflicting
        #: between the client and server objects.
        self.propertyNames = None
        #: The value that the server had for the object with the
        #: conflicting properties.
        self.serverObject = None


# Register all classes in this module with PyAMF under the
# flex.data.messages namespace.
pyamf.register_package(globals(), NAMESPACE)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@since: 0.1
"""
import uuid
import pyamf.util
from pyamf import amf3
__all__ = [
'RemotingMessage',
'CommandMessage',
'AcknowledgeMessage',
'ErrorMessage',
'AbstractMessage',
'AsyncMessage'
]
NAMESPACE = 'flex.messaging.messages'
SMALL_FLAG_MORE = 0x80
class AbstractMessage(object):
    """
    Abstract base class for all Flex messages.

    Messages have two customizable sections; headers and data. The headers
    property provides access to specialized meta information for a specific
    message instance. The data property contains the instance specific data
    that needs to be delivered and processed by the decoder.

    @see: U{AbstractMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AbstractMessage.html>}

    @ivar body: Specific data that needs to be delivered to the remote
        destination.
    @type body: C{mixed}
    @ivar clientId: Indicates which client sent the message.
    @type clientId: C{str}
    @ivar destination: Message destination.
    @type destination: C{str}
    @ivar headers: Message headers. Core header names start with DS.
    @type headers: C{dict}
    @ivar messageId: Unique Message ID.
    @type messageId: C{str}
    @ivar timeToLive: How long the message should be considered valid and
        deliverable.
    @type timeToLive: C{int}
    @ivar timestamp: Timestamp when the message was generated.
    @type timestamp: C{int}
    """

    class __amf__:
        # NOTE: the order of `static` is significant - SMALL_ATTRIBUTES
        # below zips it against SMALL_ATTRIBUTE_FLAGS, so it defines the
        # flag-bit -> attribute mapping of the small-message wire format.
        amf3 = True
        static = ('body', 'clientId', 'destination', 'headers', 'messageId',
            'timestamp', 'timeToLive')

    #: Each message pushed from the server will contain this header identifying
    #: the client that will receive the message.
    DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
    #: Messages are tagged with the endpoint id for the channel they are sent
    #: over.
    ENDPOINT_HEADER = "DSEndpoint"
    #: Messages that need to set remote credentials for a destination carry the
    #: C{Base64} encoded credentials in this header.
    REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
    #: The request timeout value is set on outbound messages by services or
    #: channels and the value controls how long the responder will wait for an
    #: acknowledgement, result or fault response for the message before timing
    #: out the request.
    REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"

    #: One flag bit per attribute in C{__amf__.static}, in the same order.
    SMALL_ATTRIBUTE_FLAGS = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
    SMALL_ATTRIBUTES = dict(zip(
        SMALL_ATTRIBUTE_FLAGS,
        __amf__.static
    ))

    #: Flag bits of the second flag byte: uuid-encoded clientId/messageId.
    SMALL_UUID_FLAGS = [0x01, 0x02]
    SMALL_UUIDS = dict(zip(
        SMALL_UUID_FLAGS,
        ['clientId', 'messageId']
    ))

    def __new__(cls, *args, **kwargs):
        # NOTE(review): __init__ is invoked explicitly so the attributes
        # always exist, even when the instance is created outside the normal
        # constructor protocol; on normal construction __init__ then runs a
        # second time, which is harmless as it only re-assigns the same
        # attributes.
        obj = object.__new__(cls)
        obj.__init__(*args, **kwargs)

        return obj

    def __init__(self, *args, **kwargs):
        self.body = kwargs.get('body', None)
        self.clientId = kwargs.get('clientId', None)
        self.destination = kwargs.get('destination', None)
        self.headers = kwargs.get('headers', {})
        self.messageId = kwargs.get('messageId', None)
        self.timestamp = kwargs.get('timestamp', None)
        self.timeToLive = kwargs.get('timeToLive', None)

    def __repr__(self):
        m = '<%s ' % self.__class__.__name__

        for k in self.__dict__:
            m += ' %s=%r' % (k, getattr(self, k))

        return m + " />"

    def decodeSmallAttribute(self, attr, input):
        """
        Decodes a single small-message attribute from C{input}. Time values
        arrive as milliseconds and are converted to datetimes.

        @since: 0.5
        """
        obj = input.readObject()

        if attr in ['timestamp', 'timeToLive']:
            # wire value is in milliseconds
            return pyamf.util.get_datetime(obj / 1000.0)

        return obj

    def encodeSmallAttribute(self, attr):
        """
        Returns the wire value for a small-message attribute, or a falsy
        value when the attribute should not be written in the first flag
        byte (UUIDs are carried in the second, uuid flag byte instead).

        @since: 0.5
        """
        obj = getattr(self, attr)

        if not obj:
            return obj

        if attr in ['timestamp', 'timeToLive']:
            # wire value is in milliseconds
            return pyamf.util.get_timestamp(obj) * 1000.0
        elif attr in ['clientId', 'messageId']:
            if isinstance(obj, uuid.UUID):
                # handled via SMALL_UUID_FLAGS in __writeamf__
                return None

        return obj

    def __readamf__(self, input):
        # read the variable-length flag bytes, then one object per set bit
        flags = read_flags(input)

        if len(flags) > 2:
            raise pyamf.DecodeError('Expected <=2 (got %d) flags for the '
                'AbstractMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        for index, byte in enumerate(flags):
            if index == 0:
                # first byte: plain attributes, in __amf__.static order
                for flag in self.SMALL_ATTRIBUTE_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_ATTRIBUTES[flag]
                        setattr(self, attr, self.decodeSmallAttribute(attr, input))
            elif index == 1:
                # second byte: uuid-encoded clientId/messageId
                for flag in self.SMALL_UUID_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_UUIDS[flag]
                        setattr(self, attr, decode_uuid(input.readObject()))

    def __writeamf__(self, output):
        flag_attrs = []
        uuid_attrs = []
        byte = 0

        # first flag byte: one bit per non-empty plain attribute
        for flag in self.SMALL_ATTRIBUTE_FLAGS:
            value = self.encodeSmallAttribute(self.SMALL_ATTRIBUTES[flag])

            if value:
                byte |= flag
                flag_attrs.append(value)

        flags = byte
        byte = 0

        # second flag byte: clientId/messageId written as raw uuid bytes
        for flag in self.SMALL_UUID_FLAGS:
            attr = self.SMALL_UUIDS[flag]
            value = getattr(self, attr)

            if not value:
                continue

            byte |= flag
            uuid_attrs.append(amf3.ByteArray(value.bytes))

        if not byte:
            output.writeUnsignedByte(flags)
        else:
            # SMALL_FLAG_MORE marks that another flag byte follows
            output.writeUnsignedByte(flags | SMALL_FLAG_MORE)
            output.writeUnsignedByte(byte)

        [output.writeObject(attr) for attr in flag_attrs]
        [output.writeObject(attr) for attr in uuid_attrs]

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this object. If one is not
        available, L{NotImplementedError} will be raised.

        @since: 0.5
        """
        raise NotImplementedError
class AsyncMessage(AbstractMessage):
    """
    I am the base class for all asynchronous Flex messages.

    @see: U{AsyncMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AsyncMessage.html>}

    @ivar correlationId: Correlation id of the message.
    @type correlationId: C{str}
    """

    #: Messages that were sent with a defined subtopic property indicate their
    #: target subtopic in this header.
    SUBTOPIC_HEADER = "DSSubtopic"

    class __amf__:
        static = ('correlationId',)

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        self.correlationId = kwargs.get('correlationId', None)

    def __readamf__(self, input):
        AbstractMessage.__readamf__(self, input)

        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AsyncMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        byte = flags[0]

        # 0x01: correlationId as a plain object; 0x02: as an encoded uuid
        if byte & 0x01:
            self.correlationId = input.readObject()

        if byte & 0x02:
            self.correlationId = decode_uuid(input.readObject())

    def __writeamf__(self, output):
        AbstractMessage.__writeamf__(self, output)

        if not isinstance(self.correlationId, uuid.UUID):
            output.writeUnsignedByte(0x01)
            output.writeObject(self.correlationId)
        else:
            # UUIDs are written as their raw 16 bytes under flag 0x02
            output.writeUnsignedByte(0x02)
            output.writeObject(pyamf.amf3.ByteArray(self.correlationId.bytes))

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this async message.

        @since: 0.5
        """
        return AsyncMessageExt(**self.__dict__)
class AcknowledgeMessage(AsyncMessage):
    """
    I acknowledge the receipt of a message that was sent previously.

    Every message sent within the messaging system must receive an
    acknowledgement.

    @see: U{AcknowledgeMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AcknowledgeMessage.html>}
    """

    #: Used to indicate that the acknowledgement is for a message that
    #: generated an error.
    ERROR_HINT_HEADER = "DSErrorHint"

    def __readamf__(self, input):
        """
        Decode the AcknowledgeMessage portion of an AMF 'small message'.
        This portion carries no attributes of its own, but its flag
        byte(s) must still be consumed from the stream.
        """
        AsyncMessage.__readamf__(self, input)

        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AcknowledgeMessage portion of the small message for %r' % (
                len(flags), self.__class__))

    def __writeamf__(self, output):
        """
        Encode the AcknowledgeMessage portion: a single empty flag byte.
        """
        AsyncMessage.__writeamf__(self, output)

        output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this acknowledge message.

        @since: 0.5
        """
        return AcknowledgeMessageExt(**self.__dict__)
class CommandMessage(AsyncMessage):
    """
    Provides a mechanism for sending commands related to publish/subscribe
    messaging, ping, and cluster operations.

    @see: U{CommandMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/CommandMessage.html>}

    @ivar operation: The command, one of the C{*_OPERATION} constants.
    @type operation: C{int}
    @ivar messageRefType: Reference type of the command (undocumented
        upstream).
    @type messageRefType: C{str}
    """

    #: The server message type for authentication commands.
    AUTHENTICATION_MESSAGE_REF_TYPE = "flex.messaging.messages.AuthenticationMessage"

    #: This is used to test connectivity over the current channel to the remote
    #: endpoint.
    PING_OPERATION = 5

    #: This is used by a remote destination to sync missed or cached messages
    #: back to a client as a result of a client issued poll command.
    SYNC_OPERATION = 4

    #: This is used to request a list of failover endpoint URIs for the remote
    #: destination based on cluster membership.
    CLUSTER_REQUEST_OPERATION = 7

    #: This is used to send credentials to the endpoint so that the user can be
    #: logged in over the current channel. The credentials need to be C{Base64}
    #: encoded and stored in the body of the message.
    LOGIN_OPERATION = 8

    #: This is used to log the user out of the current channel, and will
    #: invalidate the server session if the channel is HTTP based.
    LOGOUT_OPERATION = 9

    #: This is used to poll a remote destination for pending, undelivered
    #: messages.
    POLL_OPERATION = 2

    #: Subscribe commands issued by a consumer pass the consumer's C{selector}
    #: expression in this header.
    SELECTOR_HEADER = "DSSelector"

    #: This is used to indicate that the client's session with a remote
    #: destination has timed out.
    SESSION_INVALIDATE_OPERATION = 10

    #: This is used to subscribe to a remote destination.
    SUBSCRIBE_OPERATION = 0

    #: This is the default operation for new L{CommandMessage} instances.
    UNKNOWN_OPERATION = 1000

    #: This is used to unsubscribe from a remote destination.
    UNSUBSCRIBE_OPERATION = 1

    #: This operation is used to indicate that a channel has disconnected.
    DISCONNECT_OPERATION = 12

    class __amf__:
        static = ('operation',)

    def __init__(self, *args, **kwargs):
        AsyncMessage.__init__(self, *args, **kwargs)

        self.operation = kwargs.get('operation', None)

    def __readamf__(self, input):
        """
        Decode the CommandMessage portion of an AMF 'small message'.
        """
        AsyncMessage.__readamf__(self, input)

        flags = read_flags(input)

        if not flags:
            return

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'CommandMessage portion of the small message for %r' % (
                len(flags), self.__class__))

        byte = flags[0]

        if byte & 0x01:
            self.operation = input.readObject()

    def __writeamf__(self, output):
        """
        Encode the CommandMessage portion of an AMF 'small message'.
        """
        AsyncMessage.__writeamf__(self, output)

        # Compare against None instead of relying on truthiness:
        # SUBSCRIBE_OPERATION is 0 and would otherwise never be written,
        # silently dropping the operation from subscribe messages.
        if self.operation is not None:
            output.writeUnsignedByte(0x01)
            output.writeObject(self.operation)
        else:
            output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this command message.

        @since: 0.5
        """
        return CommandMessageExt(**self.__dict__)
class ErrorMessage(AcknowledgeMessage):
    """
    I am the Flex error message to be returned to the client.

    This class is used to report errors within the messaging system.

    @see: U{ErrorMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/ErrorMessage.html>}
    """

    #: If a message may not have been delivered, the faultCode will contain
    #: this constant.
    MESSAGE_DELIVERY_IN_DOUBT = "Client.Error.DeliveryInDoubt"

    #: Header name for the retryable hint header.
    #:
    #: This is used to indicate that the operation that generated the error may
    #: be retryable rather than fatal.
    RETRYABLE_HINT_HEADER = "DSRetryableErrorHint"

    class __amf__:
        static = ('extendedData', 'faultCode', 'faultDetail', 'faultString',
            'rootCause')

    def __init__(self, *args, **kwargs):
        AcknowledgeMessage.__init__(self, *args, **kwargs)

        # extendedData: custom error payload chosen by the remote
        #   destination to help client-side error processing.
        # faultCode: the error's identifying code.
        # faultDetail: detailed description of what caused the error.
        # faultString: a simple description of the error.
        # rootCause: traceback/underlying-error information, if any.
        for name, fallback in (
                ('extendedData', {}),
                ('faultCode', None),
                ('faultDetail', None),
                ('faultString', None),
                ('rootCause', {})):
            setattr(self, name, kwargs.get(name, fallback))

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this error message.

        @since: 0.5
        """
        raise NotImplementedError
class RemotingMessage(AbstractMessage):
    """
    I am used to send RPC requests to a remote endpoint.

    @see: U{RemotingMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/RemotingMessage.html>}

    @ivar operation: Name of the remote method/operation that should be
        called.
    @ivar source: Name of the service to be called, including package name.
        Provided for backwards compatibility.
    """

    class __amf__:
        static = ('operation', 'source')

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        for name in ('operation', 'source'):
            setattr(self, name, kwargs.get(name))
class AcknowledgeMessageExt(AcknowledgeMessage):
    """
    An L{AcknowledgeMessage}, but implementing C{ISmallMessage}.

    Registered with PyAMF under the C{'DSK'} alias.

    @since: 0.5
    """

    class __amf__:
        external = True
class CommandMessageExt(CommandMessage):
    """
    A L{CommandMessage}, but implementing C{ISmallMessage}.

    Registered with PyAMF under the C{'DSC'} alias.

    @since: 0.5
    """

    class __amf__:
        external = True
class AsyncMessageExt(AsyncMessage):
    """
    A L{AsyncMessage}, but implementing C{ISmallMessage}.

    Registered with PyAMF under the C{'DSA'} alias.

    @since: 0.5
    """

    class __amf__:
        external = True
def read_flags(input):
    """
    Read the variable-length run of flag bytes that prefixes each section
    of an AMF 'small message'. The C{SMALL_FLAG_MORE} bit of each byte
    signals that another flag byte follows; the bit is stripped from the
    returned values.

    @return: List of at least one flag byte, continuation bit cleared.
    @since: 0.5
    """
    flags = []

    while True:
        byte = input.readUnsignedByte()

        if byte & SMALL_FLAG_MORE:
            # Strip the continuation marker and keep reading.
            flags.append(byte ^ SMALL_FLAG_MORE)
        else:
            flags.append(byte)

            break

    return flags
def decode_uuid(obj):
    """
    Decode a L{ByteArray} contents to a C{uuid.UUID} instance.

    @param obj: An object whose C{str()} yields the 16 raw UUID bytes
        (e.g. a ByteArray on Python 2).
    @since: 0.5
    """
    # str() extracts the raw byte string from the ByteArray (Python 2).
    return uuid.UUID(bytes=str(obj))
# Register every message class in this module with PyAMF under NAMESPACE.
pyamf.register_package(globals(), package=NAMESPACE)

# The *Ext classes additionally get the short 'small message' aliases used
# on the wire by BlazeDS/LCDS.
pyamf.register_class(AcknowledgeMessageExt, 'DSK')
pyamf.register_class(CommandMessageExt, 'DSC')
pyamf.register_class(AsyncMessageExt, 'DSA')
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Compatibility classes/functions for Flex.
@note: Not available in ActionScript 1.0 and 2.0.
@see: U{Flex on Wikipedia<http://en.wikipedia.org/wiki/Adobe_Flex>}
@since: 0.1
"""
import pyamf
__all__ = ['ArrayCollection', 'ObjectProxy']
class ArrayCollection(list):
    """
    I represent the ActionScript 3 based class
    C{flex.messaging.io.ArrayCollection} used in the Flex framework.

    The C{ArrayCollection} class is a wrapper class that exposes an Array
    as a collection that can be accessed and manipulated using the
    methods and properties of the `ICollectionView` or `IList`
    interfaces in the Flex framework.

    @see: U{ArrayCollection on Livedocs <http://
        livedocs.adobe.com/flex/201/langref/mx/collections/ArrayCollection.html>}

    @note: This class does not implement the RemoteObject part of the
        documentation.

    @ivar length: [read-only] The number of items in this collection.
        Introduced in 0.4.
    @type length: C{int}
    """

    class __amf__:
        external = True
        amf3 = True
        exclude = ('length',)

    def __init__(self, source=None):
        """
        @param source: Optional iterable whose items seed the collection.
        @raise TypeError: C{source} is a C{dict}.
        """
        if source is not None:
            if isinstance(source, dict):
                raise TypeError('Cannot convert dicts to ArrayCollection')

            if hasattr(source, '__iter__'):
                self.extend(source)

    def __repr__(self):
        return "<flex.messaging.io.ArrayCollection %s>" % list.__repr__(self)

    def __readamf__(self, input):
        """
        Decode this collection from an AMF stream. The payload is either
        an object exposing a C{source} attribute or a plain iterable.
        """
        data = input.readObject()

        if hasattr(data, 'source'):
            data = data.source
        else:
            if not hasattr(data, '__iter__'):
                raise pyamf.DecodeError('Unable to read a list when decoding '
                    'ArrayCollection')

        self.extend(data)

    def __writeamf__(self, output):
        # meh, this needs to be re-thought out
        output.encoder.writeList(list(self), is_proxy=True)

    def _get_length(self):
        return len(self)

    def _set_length(self, length):
        raise AttributeError("Property length is read-only")

    length = property(_get_length, _set_length)

    def addItem(self, item):
        """
        Adds the specified item to the end of the list.

        @param item: The object to add to the collection.
        @since: 0.4
        """
        self.append(item)

    def addItemAt(self, item, index):
        """
        Adds the item at the specified index.

        @param item: The object to add to the collection.
        @param index: The index at which to place the item.
        @raise IndexError: If index is less than 0 or greater than the length
            of the list.
        @since: 0.4
        """
        # index == len(self) is valid here: it appends.
        if index < 0 or index > len(self):
            raise IndexError

        self.insert(index, item)

    def getItemAt(self, index, prefetch=0):
        """
        Gets the item at the specified index.

        @param index: The index in the list from which to retrieve the item.
        @type index: C{int}
        @param prefetch: This param is ignored and is only here as part of the
            interface.
        @raise IndexError: if `index < 0` or `index >= length`
        @since: 0.4
        """
        # '>=' enforces the documented contract explicitly; previously
        # index == length slipped past the check and only failed inside
        # list.__getitem__.
        if index < 0 or index >= len(self):
            raise IndexError

        return self.__getitem__(index)

    def getItemIndex(self, item):
        """
        Returns the index of the item if it is in the list such that
        C{getItemAt(index) == item}.

        @return: The index of the item or C{-1} if the item is not in the list.
        @since: 0.4
        """
        try:
            return self.index(item)
        except ValueError:
            return -1

    def removeAll(self):
        """
        Removes all items from the list.

        @since: 0.4
        """
        # Single O(n) slice delete instead of popping one item at a time.
        del self[:]

    def removeItemAt(self, index):
        """
        Removes the item at the specified index and returns it. Any items that
        were after this index are now one index earlier.

        @param index: The index from which to remove the item.
        @return: The item that was removed.
        @raise IndexError: If index is less than 0 or greater than length.
        @since: 0.4
        """
        # '>=' makes the bounds check explicit (index == length is invalid).
        if index < 0 or index >= len(self):
            raise IndexError

        x = self[index]
        del self[index]

        return x

    def setItemAt(self, item, index):
        """
        Places the item at the specified index. If an item was already at that
        index the new item will replace it and it will be returned.

        @return: The item that was replaced, or C{None}.
        @raise IndexError: If index is less than 0 or greater than length.
        @since: 0.4
        """
        # '>=' makes the bounds check explicit (index == length is invalid).
        if index < 0 or index >= len(self):
            raise IndexError

        tmp = self.__getitem__(index)
        self.__setitem__(index, item)

        return tmp

    def toArray(self):
        """
        Returns an Array that is populated in the same order as the C{IList}
        implementation.

        @return: The array.
        @rtype: C{list}
        """
        # Note: returns self (this instance IS a list), not a copy.
        return self
class ObjectProxy(object):
    """
    I represent the ActionScript 3 based class C{flex.messaging.io.ObjectProxy}
    used in the Flex framework. Flex's C{ObjectProxy} class allows an anonymous,
    dynamic ActionScript Object to be bindable and report change events.

    All attribute access other than C{_amf_object} itself is delegated to
    the wrapped object.

    @see: U{ObjectProxy on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/utils/ObjectProxy.html>}
    """

    class __amf__:
        external = True
        amf3 = True

    def __init__(self, object=None):
        if object is None:
            self._amf_object = pyamf.ASObject()
        else:
            self._amf_object = object

    def __repr__(self):
        return "<flex.messaging.io.ObjectProxy %r>" % self._amf_object

    def __getattr__(self, name):
        # NOTE(review): __getattr__ only fires when normal lookup fails,
        # so this branch looks defensive (it would raise KeyError if
        # _amf_object were truly absent) — confirm before removing.
        if name == '_amf_object':
            return self.__dict__['_amf_object']

        # Delegate every other attribute to the wrapped object.
        return getattr(self.__dict__['_amf_object'], name)

    def __setattr__(self, name, value):
        if name == '_amf_object':
            # Store the wrapped object directly, bypassing delegation.
            self.__dict__['_amf_object'] = value
        else:
            setattr(self._amf_object, name, value)

    def __readamf__(self, input):
        self._amf_object = input.readObject()

    def __writeamf__(self, output):
        output.encoder.writeObject(self._amf_object, is_proxy=True)
def unproxy_object(obj):
    """
    Returns the unproxied version of the object: an L{ObjectProxy} yields
    its wrapped object, an L{ArrayCollection} yields a plain C{list}, and
    anything else is returned unchanged.
    """
    if isinstance(obj, ObjectProxy):
        return obj._amf_object

    if isinstance(obj, ArrayCollection):
        return list(obj)

    return obj
def proxy_object(obj):
    """
    Returns a proxied representation of C{obj}.

    Conversion
    ==========
      - C{list}/C{tuple}: L{ArrayCollection}
      - C{dict} (and subclasses): L{ObjectProxy}
      - Everything else: C{obj}

    @since: 0.6
    """
    # Exact type check on purpose: list/tuple *subclasses* are not
    # converted, matching the original behaviour.
    kind = type(obj)

    if kind is list or kind is tuple:
        return ArrayCollection(obj)

    if isinstance(obj, dict):
        return ObjectProxy(obj)

    return obj
# Expose every class in this module to PyAMF under the
# 'flex.messaging.io' ActionScript namespace.
pyamf.register_package(globals(), package='flex.messaging.io')
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Class alias base functionality.
@since: 0.6
"""
import inspect
import pyamf
from pyamf import python, util
class UnknownClassAlias(Exception):
    """
    Raised if the AMF stream specifies an ActionScript class that does not
    have a Python class alias.

    @see: L{register_class}
    """
class ClassAlias(object):
"""
Class alias. Provides class/instance meta data to the En/Decoder to allow
fine grain control and some performance increases.
"""
    def __init__(self, klass, alias=None, **kwargs):
        """
        @param klass: The Python class being aliased.
        @param alias: The ActionScript alias. C{None} makes the alias
            anonymous (AMF3 untyped objects use an empty class name).
        @raise TypeError: C{klass} is not a class, or unexpected keyword
            arguments were supplied.
        @raise ValueError: C{alias} is the empty string.
        """
        if not isinstance(klass, python.class_types):
            raise TypeError('klass must be a class type, got %r' % type(klass))

        self.checkClass(klass)

        self.klass = klass
        self.alias = alias

        if hasattr(self.alias, 'decode'):
            # Normalise byte-string aliases to unicode.
            self.alias = self.alias.decode('utf-8')

        self.static_attrs = kwargs.pop('static_attrs', None)
        self.exclude_attrs = kwargs.pop('exclude_attrs', None)
        self.readonly_attrs = kwargs.pop('readonly_attrs', None)
        self.proxy_attrs = kwargs.pop('proxy_attrs', None)
        self.amf3 = kwargs.pop('amf3', None)
        self.external = kwargs.pop('external', None)
        self.dynamic = kwargs.pop('dynamic', None)
        self.synonym_attrs = kwargs.pop('synonym_attrs', {})

        self._compiled = False
        self.anonymous = False
        self.sealed = None
        self.bases = None

        if self.alias is None:
            self.anonymous = True
            # we don't set this to None because AMF3 untyped objects have a
            # class name of ''
            self.alias = ''
        else:
            if self.alias == '':
                raise ValueError('Cannot set class alias as \'\'')

        if not kwargs.pop('defer', False):
            # Compile eagerly unless the caller explicitly defers.
            self.compile()

        if kwargs:
            raise TypeError('Unexpected keyword arguments %r' % (kwargs,))
def _checkExternal(self):
k = self.klass
if not hasattr(k, '__readamf__'):
raise AttributeError("An externalised class was specified, but"
" no __readamf__ attribute was found for %r" % (k,))
if not hasattr(k, '__writeamf__'):
raise AttributeError("An externalised class was specified, but"
" no __writeamf__ attribute was found for %r" % (k,))
if not hasattr(k.__readamf__, '__call__'):
raise TypeError("%s.__readamf__ must be callable" % (k.__name__,))
if not hasattr(k.__writeamf__, '__call__'):
raise TypeError("%s.__writeamf__ must be callable" % (k.__name__,))
    def compile(self):
        """
        This compiles the alias into a form that can be of most benefit to the
        en/decoder.

        Gathers attribute sets from keyword configuration, C{__slots__},
        properties and base-class aliases, then normalises everything via
        L{_finalise_compile}. Safe to call more than once.
        """
        if self._compiled:
            # Idempotent: only compile once.
            return

        self.decodable_properties = set()
        self.encodable_properties = set()
        self.inherited_dynamic = None
        self.inherited_sealed = None
        self.bases = []

        # Normalise the user-supplied collections to sets/lists.
        self.exclude_attrs = set(self.exclude_attrs or [])
        self.readonly_attrs = set(self.readonly_attrs or [])
        self.static_attrs = list(self.static_attrs or [])
        self.static_attrs_set = set(self.static_attrs)
        self.proxy_attrs = set(self.proxy_attrs or [])

        self.sealed = util.is_class_sealed(self.klass)

        if self.external:
            self._checkExternal()
            self._finalise_compile()

            # this class is external so no more compiling is necessary
            return

        if hasattr(self.klass, '__slots__'):
            self.decodable_properties.update(self.klass.__slots__)
            self.encodable_properties.update(self.klass.__slots__)

        for k, v in self.klass.__dict__.iteritems():
            if not isinstance(v, property):
                continue

            if v.fget:
                self.encodable_properties.update([k])

            if v.fset:
                self.decodable_properties.update([k])
            else:
                # A getter without a setter makes the property read-only.
                self.readonly_attrs.update([k])

        mro = inspect.getmro(self.klass)[1:]

        # Merge configuration from every base class alias.
        for c in mro:
            self._compile_base_class(c)

        self.getCustomProperties()

        self._finalise_compile()
    def _compile_base_class(self, klass):
        """
        Fold the (compiled) alias configuration of base class C{klass}
        into this alias: attribute sets are merged, inherited static
        attributes are prepended, and dynamic/sealed behaviour is
        recorded for L{_finalise_compile} to resolve.
        """
        if klass is object:
            return

        try:
            alias = pyamf.get_class_alias(klass)
        except UnknownClassAlias:
            # Auto-register base classes that have no alias yet.
            alias = pyamf.register_class(klass)

        alias.compile()

        self.bases.append((klass, alias))

        if alias.exclude_attrs:
            self.exclude_attrs.update(alias.exclude_attrs)

        if alias.readonly_attrs:
            self.readonly_attrs.update(alias.readonly_attrs)

        if alias.static_attrs:
            self.static_attrs_set.update(alias.static_attrs)

            # Base-class static attrs go first, preserving base order.
            for a in alias.static_attrs:
                if a not in self.static_attrs:
                    self.static_attrs.insert(0, a)

        if alias.proxy_attrs:
            self.proxy_attrs.update(alias.proxy_attrs)

        if alias.encodable_properties:
            self.encodable_properties.update(alias.encodable_properties)

        if alias.decodable_properties:
            self.decodable_properties.update(alias.decodable_properties)

        if self.amf3 is None and alias.amf3:
            self.amf3 = alias.amf3

        if self.dynamic is None and alias.dynamic is not None:
            self.inherited_dynamic = alias.dynamic

        if alias.sealed is not None:
            self.inherited_sealed = alias.sealed

        if alias.synonym_attrs:
            # Inherited synonyms first; our own entries win on conflict.
            self.synonym_attrs, x = alias.synonym_attrs.copy(), self.synonym_attrs
            self.synonym_attrs.update(x)
def _finalise_compile(self):
if self.dynamic is None:
self.dynamic = True
if self.inherited_dynamic is not None:
if self.inherited_dynamic is False and not self.sealed and self.inherited_sealed:
self.dynamic = True
else:
self.dynamic = self.inherited_dynamic
if self.sealed:
self.dynamic = False
if self.amf3 is None:
self.amf3 = False
if self.external is None:
self.external = False
if self.static_attrs:
self.encodable_properties.update(self.static_attrs)
self.decodable_properties.update(self.static_attrs)
if self.static_attrs:
if self.exclude_attrs:
self.static_attrs_set.difference_update(self.exclude_attrs)
for a in self.static_attrs_set:
if a not in self.static_attrs:
self.static_attrs.remove(a)
if not self.exclude_attrs:
self.exclude_attrs = None
else:
self.encodable_properties.difference_update(self.exclude_attrs)
self.decodable_properties.difference_update(self.exclude_attrs)
if self.exclude_attrs is not None:
self.exclude_attrs = list(self.exclude_attrs)
self.exclude_attrs.sort()
if not self.readonly_attrs:
self.readonly_attrs = None
else:
self.decodable_properties.difference_update(self.readonly_attrs)
if self.readonly_attrs is not None:
self.readonly_attrs = list(self.readonly_attrs)
self.readonly_attrs.sort()
if not self.proxy_attrs:
self.proxy_attrs = None
else:
self.proxy_attrs = list(self.proxy_attrs)
self.proxy_attrs.sort()
if len(self.decodable_properties) == 0:
self.decodable_properties = None
else:
self.decodable_properties = list(self.decodable_properties)
self.decodable_properties.sort()
if len(self.encodable_properties) == 0:
self.encodable_properties = None
else:
self.encodable_properties = list(self.encodable_properties)
self.encodable_properties.sort()
self.non_static_encodable_properties = None
if self.encodable_properties:
self.non_static_encodable_properties = set(self.encodable_properties)
if self.static_attrs:
self.non_static_encodable_properties.difference_update(self.static_attrs)
self.shortcut_encode = True
self.shortcut_decode = True
if (self.encodable_properties or self.static_attrs or
self.exclude_attrs or self.proxy_attrs or self.external or
self.synonym_attrs):
self.shortcut_encode = False
if (self.decodable_properties or self.static_attrs or
self.exclude_attrs or self.readonly_attrs or
not self.dynamic or self.external or self.synonym_attrs):
self.shortcut_decode = False
self.is_dict = False
if issubclass(self.klass, dict) or self.klass is dict:
self.is_dict = True
self._compiled = True
    def is_compiled(self):
        """
        Whether L{compile} has already run for this alias.

        @rtype: C{bool}
        """
        return self._compiled
    def __str__(self):
        # The alias string itself is the most useful short form.
        return self.alias
def __repr__(self):
k = self.__class__
return '<%s.%s alias=%r class=%r @ 0x%x>' % (k.__module__, k.__name__,
self.alias, self.klass, id(self))
def __eq__(self, other):
if isinstance(other, basestring):
return self.alias == other
elif isinstance(other, self.__class__):
return self.klass == other.klass
elif isinstance(other, python.class_types):
return self.klass == other
else:
return False
    def __hash__(self):
        # Identity hash: each registered alias is a distinct object.
        return id(self)
    def checkClass(self, klass):
        """
        This function is used to check if the class being aliased fits certain
        criteria. The default is to check that C{__new__} is available or the
        C{__init__} constructor does not need additional arguments. If this is
        the case then L{TypeError} will be raised.

        @since: 0.4
        """
        # Check for __new__ support.
        if hasattr(klass, '__new__') and hasattr(klass.__new__, '__call__'):
            # Should be good to go.
            return

        # Check that the constructor of the class doesn't require any
        # additional arguments.
        if not (hasattr(klass, '__init__') and hasattr(klass.__init__, '__call__')):
            return

        # Python 2 only: unwrap the unbound method to reach its function.
        klass_func = klass.__init__.im_func

        if not hasattr(klass_func, 'func_code'):
            # Can't examine it, assume it's OK.
            return

        # One slot is always consumed by 'self'; defaults cover the rest.
        if klass_func.func_defaults:
            available_arguments = len(klass_func.func_defaults) + 1
        else:
            available_arguments = 1

        needed_arguments = klass_func.func_code.co_argcount

        if available_arguments >= needed_arguments:
            # Looks good to me.
            return

        spec = inspect.getargspec(klass_func)

        raise TypeError("__init__ doesn't support additional arguments: %s"
            % inspect.formatargspec(*spec))
    def getEncodableAttributes(self, obj, codec=None):
        """
        Must return a C{dict} of attributes to be encoded, even if its empty.

        @param obj: The object whose attributes are being gathered.
        @param codec: An optional argument that will contain the encoder
            instance calling this function.
        @since: 0.5
        """
        if not self._compiled:
            self.compile()

        if self.is_dict:
            return dict(obj)

        # Fast path: plain dynamic instances expose __dict__ directly.
        if self.shortcut_encode and self.dynamic:
            return obj.__dict__.copy()

        attrs = {}

        if self.static_attrs:
            for attr in self.static_attrs:
                attrs[attr] = getattr(obj, attr, pyamf.Undefined)

        if not self.dynamic:
            # Non-dynamic aliases only emit the declared properties.
            if self.non_static_encodable_properties:
                for attr in self.non_static_encodable_properties:
                    attrs[attr] = getattr(obj, attr)

            return attrs

        dynamic_props = util.get_properties(obj)

        if not self.shortcut_encode:
            dynamic_props = set(dynamic_props)

            if self.encodable_properties:
                dynamic_props.update(self.encodable_properties)

            if self.static_attrs:
                dynamic_props.difference_update(self.static_attrs)

            if self.exclude_attrs:
                dynamic_props.difference_update(self.exclude_attrs)

        for attr in dynamic_props:
            attrs[attr] = getattr(obj, attr)

        # Swap configured attribute values for their AMF proxies.
        if self.proxy_attrs is not None and attrs and codec:
            context = codec.context

            for k, v in attrs.copy().iteritems():
                if k in self.proxy_attrs:
                    attrs[k] = context.getProxyForObject(v)

        # Rename attributes according to the synonym map.
        if self.synonym_attrs:
            missing = object()

            for k, v in self.synonym_attrs.iteritems():
                value = attrs.pop(k, missing)

                if value is missing:
                    continue

                attrs[v] = value

        return attrs
def getDecodableAttributes(self, obj, attrs, codec=None):
"""
Returns a dictionary of attributes for C{obj} that has been filtered,
based on the supplied C{attrs}. This allows for fine grain control
over what will finally end up on the object or not.
@param obj: The object that will recieve the attributes.
@param attrs: The C{attrs} dictionary that has been decoded.
@param codec: An optional argument that will contain the decoder
instance calling this function.
@return: A dictionary of attributes that can be applied to C{obj}
@since: 0.5
"""
if not self._compiled:
self.compile()
changed = False
props = set(attrs.keys())
if self.static_attrs:
missing_attrs = self.static_attrs_set.difference(props)
if missing_attrs:
raise AttributeError('Static attributes %r expected '
'when decoding %r' % (missing_attrs, self.klass))
props.difference_update(self.static_attrs)
if not props:
return attrs
if not self.dynamic:
if not self.decodable_properties:
props = set()
else:
props.intersection_update(self.decodable_properties)
changed = True
if self.readonly_attrs:
props.difference_update(self.readonly_attrs)
changed = True
if self.exclude_attrs:
props.difference_update(self.exclude_attrs)
changed = True
if self.proxy_attrs is not None and codec:
context = codec.context
for k in self.proxy_attrs:
try:
v = attrs[k]
except KeyError:
continue
attrs[k] = context.getObjectForProxy(v)
if self.synonym_attrs:
missing = object()
for k, v in self.synonym_attrs.iteritems():
value = attrs.pop(k, missing)
if value is missing:
continue
attrs[v] = value
if not changed:
return attrs
a = {}
[a.__setitem__(p, attrs[p]) for p in props]
return a
    def applyAttributes(self, obj, attrs, codec=None):
        """
        Applies the collection of attributes C{attrs} to aliased object C{obj}.
        Called when decoding reading aliased objects from an AMF byte stream.

        Override this to provide fine grain control of application of
        attributes to C{obj}.

        @param codec: An optional argument that will contain the en/decoder
            instance calling this function.
        """
        if not self._compiled:
            self.compile()

        if self.shortcut_decode:
            if self.is_dict:
                obj.update(attrs)

                return

            if not self.sealed:
                # Fast path: write straight into the instance dict.
                obj.__dict__.update(attrs)

                return
        else:
            # Filter through the full decodable-attribute machinery first.
            attrs = self.getDecodableAttributes(obj, attrs, codec=codec)

        util.set_attrs(obj, attrs)
    def getCustomProperties(self):
        """
        Override this to provide known static properties based on the aliased
        class.

        @since: 0.5
        """
        # Intentionally a no-op hook for subclasses.
def createInstance(self, codec=None):
"""
Creates an instance of the klass.
@return: Instance of C{self.klass}.
"""
if type(self.klass) is type:
return self.klass.__new__(self.klass)
return self.klass()
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF3 implementation.
C{AMF3} is the default serialization for
U{ActionScript<http://en.wikipedia.org/wiki/ActionScript>} 3.0 and provides
various advantages over L{AMF0<pyamf.amf0>}, which is used for ActionScript 1.0
and 2.0. It adds support for sending C{int} and C{uint} objects as integers and
supports data types that are available only in ActionScript 3.0, such as
L{ByteArray} and L{ArrayCollection}.
@see: U{Official AMF3 Specification in English
<http://opensource.adobe.com/wiki/download/attachments/1114283/amf3_spec_05_05_08.pdf>}
@see: U{Official AMF3 Specification in Japanese
<http://opensource.adobe.com/wiki/download/attachments/1114283/JP_amf3_spec_121207.pdf>}
@see: U{AMF3 documentation on OSFlash
<http://osflash.org/documentation/amf3>}
@since: 0.1
"""
import datetime
import zlib
import pyamf
from pyamf import codec, util, xml, python
__all__ = [
'ByteArray',
'Context',
'Encoder',
'Decoder',
'use_proxies_default',
]
#: If True encode/decode lists/tuples to L{ArrayCollection
#: <pyamf.flex.ArrayCollection>} and dicts to L{ObjectProxy
#: <pyamf.flex.ObjectProxy>}
use_proxies_default = False
#: The undefined type is represented by the undefined type marker. No further
#: information is encoded for this value.
TYPE_UNDEFINED = '\x00'
#: The null type is represented by the null type marker. No further
#: information is encoded for this value.
TYPE_NULL = '\x01'
#: The false type is represented by the false type marker and is used to
#: encode a Boolean value of C{false}. No further information is encoded for
#: this value.
TYPE_BOOL_FALSE = '\x02'
#: The true type is represented by the true type marker and is used to encode
#: a Boolean value of C{true}. No further information is encoded for this
#: value.
TYPE_BOOL_TRUE = '\x03'
#: In AMF 3 integers are serialized using a variable length signed 29-bit
#: integer.
#: @see: U{Parsing Integers on OSFlash (external)
#: <http://osflash.org/documentation/amf3/parsing_integers>}
TYPE_INTEGER = '\x04'
#: This type is used to encode an ActionScript Number or an ActionScript
#: C{int} of value greater than or equal to 2^28 or an ActionScript uint of
#: value greater than or equal to 2^29. The encoded value is is always an 8
#: byte IEEE-754 double precision floating point value in network byte order
#: (sign bit in low memory). The AMF 3 number type is encoded in the same
#: manner as the AMF 0 L{Number<pyamf.amf0.TYPE_NUMBER>} type.
TYPE_NUMBER = '\x05'
#: ActionScript String values are represented using a single string type in
#: AMF 3 - the concept of string and long string types from AMF 0 is not used.
#: Strings can be sent as a reference to a previously occurring String by
#: using an index to the implicit string reference table. Strings are encoding
#: using UTF-8 - however the header may either describe a string literal or a
#: string reference.
TYPE_STRING = '\x06'
#: ActionScript 3.0 introduced a new XML type however the legacy C{XMLDocument}
#: type from ActionScript 1.0 and 2.0 is retained in the language as
#: C{flash.xml.XMLDocument}. Similar to AMF 0, the structure of an
#: C{XMLDocument} needs to be flattened into a string representation for
#: serialization. As with other strings in AMF, the content is encoded in
#: UTF-8. XMLDocuments can be sent as a reference to a previously occurring
#: C{XMLDocument} instance by using an index to the implicit object reference
#: table.
#: @see: U{OSFlash documentation (external)
#: <http://osflash.org/documentation/amf3#x07_-_xml_legacy_flash.xml.xmldocument_class>}
TYPE_XML = '\x07'
#: In AMF 3 an ActionScript Date is serialized simply as the number of
#: milliseconds elapsed since the epoch of midnight, 1st Jan 1970 in the
#: UTC time zone. Local time zone information is not sent.
TYPE_DATE = '\x08'
#: ActionScript Arrays are described based on the nature of their indices,
#: i.e. their type and how they are positioned in the Array.
TYPE_ARRAY = '\x09'
#: A single AMF 3 type handles ActionScript Objects and custom user classes.
TYPE_OBJECT = '\x0A'
#: ActionScript 3.0 introduces a new top-level XML class that supports
#: U{E4X<http://en.wikipedia.org/wiki/E4X>} syntax.
#: For serialization purposes the XML type needs to be flattened into a
#: string representation. As with other strings in AMF, the content is
#: encoded using UTF-8.
TYPE_XMLSTRING = '\x0B'
#: ActionScript 3.0 introduces the L{ByteArray} type to hold an Array
#: of bytes. AMF 3 serializes this type using a variable length encoding
#: 29-bit integer for the byte-length prefix followed by the raw bytes
#: of the L{ByteArray}.
#: @see: U{Parsing ByteArrays on OSFlash (external)
#: <http://osflash.org/documentation/amf3/parsing_byte_arrays>}
TYPE_BYTEARRAY = '\x0C'
#: Reference bit.
REFERENCE_BIT = 0x01

#: The maximum that can be represented by a signed 29 bit integer.
MAX_29B_INT = 0x0FFFFFFF

#: The minimum that can be represented by a signed 29 bit integer.
MIN_29B_INT = -0x10000000

#: Cache of encoded integers — presumably keyed by value and populated
#: lazily by the encoder (the population site is not in this chunk).
ENCODED_INT_CACHE = {}
class ObjectEncoding:
    """
    AMF object encodings. These are the two-bit trait flags that describe
    how an object's members are laid out on the wire (see bits 1-2 of the
    trait header read/written by L{Decoder}/L{Encoder}).
    """
    #: Property list encoding.
    #: The remaining integer-data represents the number of class members that
    #: exist. The property names are read as string-data. The values are then
    #: read as AMF3-data.
    STATIC = 0x00
    #: Externalizable object.
    #: What follows is the value of the "inner" object, including type code.
    #: This value appears for objects that implement IExternalizable, such as
    #: L{ArrayCollection} and L{ObjectProxy}.
    EXTERNAL = 0x01
    #: Name-value encoding.
    #: The property names and values are encoded as string-data followed by
    #: AMF3-data until there is an empty string property name. If there is a
    #: class-def reference there are no property names and the number of values
    #: is equal to the number of properties in the class-def.
    DYNAMIC = 0x02
    #: Proxy object.
    PROXY = 0x03
class DataOutput(object):
    """
    Writing half of the ActionScript 3.0 C{IDataOutput} interface.

    Wraps the stream of an AMF3 encoder and exposes the binary write
    operations that C{flash.utils.ByteArray} offers. L{DataInput} is the
    matching reader.

    @see: U{IDataOutput on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/flash/utils/IDataOutput.html>}
    """

    def __init__(self, encoder):
        """
        @param encoder: Encoder whose stream receives the written bytes.
        @type encoder: L{amf3.Encoder<pyamf.amf3.Encoder>}
        """
        self.encoder = encoder
        self.stream = encoder.stream

    def writeBoolean(self, value):
        """
        Writes a single byte: C{1} for C{True}, C{0} for C{False}.

        @type value: C{bool}
        @raise ValueError: C{value} is not a C{bool}.
        """
        if not isinstance(value, bool):
            raise ValueError("Non-boolean value found")

        self.stream.write_uchar(1 if value is True else 0)

    def writeByte(self, value):
        """
        Writes a signed byte.

        @type value: C{int}
        """
        self.stream.write_char(value)

    def writeUnsignedByte(self, value):
        """
        Writes an unsigned byte.

        @type value: C{int}
        @since: 0.5
        """
        return self.stream.write_uchar(value)

    def writeDouble(self, value):
        """
        Writes a 64-bit (double precision) IEEE 754 float.

        @type value: C{number}
        """
        self.stream.write_double(value)

    def writeFloat(self, value):
        """
        Writes a 32-bit (single precision) IEEE 754 float.

        @type value: C{float}
        """
        self.stream.write_float(value)

    def writeInt(self, value):
        """
        Writes a signed 32-bit integer.

        @type value: C{int}
        """
        self.stream.write_long(value)

    def writeMultiByte(self, value, charset):
        """
        Writes C{value}, encoded with the given character set.

        @type value: C{str}
        @param value: The string value to be written.
        @type charset: C{str}
        @param charset: The character set to use, e.g. C{shift-jis},
            C{cn-gb}, C{iso-8859-1}.
        @see: U{Supported character sets on Livedocs (external)
        <http://livedocs.adobe.com/flex/201/langref/charset-codes.html>}
        """
        encoded = value.encode(charset) if type(value) is unicode else value

        self.stream.write(encoded)

    def writeObject(self, value):
        """
        Serialises C{value} to the stream in AMF3 format.

        @param value: The object to be serialized.
        """
        self.encoder.writeElement(value)

    def writeShort(self, value):
        """
        Writes a signed 16-bit integer.

        @type value: C{int}
        """
        self.stream.write_short(value)

    def writeUnsignedShort(self, value):
        """
        Writes an unsigned 16-bit integer.

        @type value: C{int}
        @since: 0.5
        """
        self.stream.write_ushort(value)

    def writeUnsignedInt(self, value):
        """
        Writes an unsigned 32-bit integer.

        @type value: C{int}
        """
        self.stream.write_ulong(value)

    def writeUTF(self, value):
        """
        Writes a UTF-8 string, prefixed with its byte length as an
        unsigned 16-bit integer.

        @type value: C{str}
        """
        scratch = util.BufferedByteStream()
        scratch.write_utf8_string(value)
        encoded = scratch.getvalue()

        self.stream.write_ushort(len(encoded))
        self.stream.write(encoded)

    def writeUTFBytes(self, value):
        """
        Writes a UTF-8 string without a length prefix (cf. L{writeUTF}).

        @type value: C{str}
        """
        text = value if isinstance(value, unicode) else unicode(value, 'utf8')

        self.stream.write_utf8_string(text)
class DataInput(object):
    """
    Reading half of the ActionScript 3.0 C{IDataInput} interface.

    Wraps the stream of an AMF3 decoder and exposes the binary read
    operations that C{flash.utils.ByteArray} offers. L{DataOutput} is the
    matching writer.

    @see: U{IDataInput on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/flash/utils/IDataInput.html>}
    """

    def __init__(self, decoder=None):
        """
        @param decoder: AMF3 decoder whose stream is read from.
        @type decoder: L{amf3.Decoder<pyamf.amf3.Decoder>}
        """
        self.decoder = decoder
        self.stream = decoder.stream

    def readBoolean(self):
        """
        Reads one byte and maps byte value 1 to C{True} and 0 to C{False}.

        @raise ValueError: the byte read is neither 0 nor 1.
        @rtype: C{bool}
        """
        flag = self.stream.read(1)

        if flag == '\x01':
            return True

        if flag == '\x00':
            return False

        raise ValueError("Error reading boolean")

    def readByte(self):
        """
        Reads a signed byte.

        @rtype: C{int}
        @return: A value in the range -128 to 127.
        """
        return self.stream.read_char()

    def readDouble(self):
        """
        Reads a 64-bit (double precision) IEEE 754 float.

        @rtype: C{number}
        """
        return self.stream.read_double()

    def readFloat(self):
        """
        Reads a 32-bit (single precision) IEEE 754 float.

        @rtype: C{number}
        """
        return self.stream.read_float()

    def readInt(self):
        """
        Reads a signed 32-bit integer.

        @rtype: C{int}
        @return: A value in the range -2147483648 to 2147483647.
        """
        return self.stream.read_long()

    def readMultiByte(self, length, charset):
        """
        Reads C{length} bytes and decodes them using C{charset}.

        @type length: C{int}
        @param length: The number of bytes from the data stream to read.
        @type charset: C{str}
        @param charset: The string denoting the character set to use.
        @rtype: C{str}
        """
        #FIXME nick: how to work out the code point byte size (on the fly)?
        raw = self.stream.read(length)

        return unicode(raw, charset)

    def readObject(self):
        """
        Reads a full AMF3-serialised object from the stream.

        @return: The deserialized object.
        """
        return self.decoder.readElement()

    def readShort(self):
        """
        Reads a signed 16-bit integer.

        @rtype: C{uint}
        @return: A value in the range -32768 to 32767.
        """
        return self.stream.read_short()

    def readUnsignedByte(self):
        """
        Reads an unsigned byte.

        @rtype: C{uint}
        @return: A value in the range 0 to 255.
        """
        return self.stream.read_uchar()

    def readUnsignedInt(self):
        """
        Reads an unsigned 32-bit integer.

        @rtype: C{uint}
        @return: A value in the range 0 to 4294967295.
        """
        return self.stream.read_ulong()

    def readUnsignedShort(self):
        """
        Reads an unsigned 16-bit integer.

        @rtype: C{uint}
        @return: A value in the range 0 to 65535.
        """
        return self.stream.read_ushort()

    def readUTF(self):
        """
        Reads a UTF-8 string prefixed with an unsigned short byte length.

        @rtype: C{str}
        """
        size = self.stream.read_ushort()

        return self.stream.read_utf8_string(size)

    def readUTFBytes(self, length):
        """
        Reads C{length} UTF-8 bytes and returns the decoded string
        (cf. L{readUTF}, which reads its own length prefix).

        @type length: C{int}
        @rtype: C{str}
        """
        return self.readMultiByte(length, 'utf-8')
class ByteArray(util.BufferedByteStream, DataInput, DataOutput):
    """
    I am a C{StringIO} type object containing byte data from the AMF stream.
    ActionScript 3.0 introduced the C{flash.utils.ByteArray} class to support
    the manipulation of raw data in the form of an Array of bytes.
    Supports C{zlib} compression.
    Possible uses of the C{ByteArray} class:
     - Creating a custom protocol to connect to a client.
     - Writing your own AMF/Remoting packet.
     - Optimizing the size of your data by using custom data types.
    @see: U{ByteArray on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/flash/utils/ByteArray.html>}
    """

    class __amf__:
        # Forces this class to be encoded with AMF3 even inside AMF0 payloads.
        amf3 = True

    def __init__(self, *args, **kwargs):
        # Each ByteArray owns a private Context so its reference tables never
        # leak into (or from) any other en/decode run.
        self.context = Context()
        util.BufferedByteStream.__init__(self, *args, **kwargs)
        # Wire up our own Decoder/Encoder over *this* stream so that the
        # DataInput/DataOutput mixin methods operate in place.
        DataInput.__init__(self, Decoder(self, self.context))
        DataOutput.__init__(self, Encoder(self, self.context))
        self.compressed = False

    def readObject(self, *args, **kwargs):
        # Reset reference tables so every read is self-contained.
        self.context.clear()
        return super(ByteArray, self).readObject(*args, **kwargs)

    def writeObject(self, *args, **kwargs):
        # Reset reference tables so every write is self-contained.
        self.context.clear()
        return super(ByteArray, self).writeObject(*args, **kwargs)

    def __cmp__(self, other):
        # Compare by raw byte content (Python 2 rich-comparison fallback).
        if isinstance(other, ByteArray):
            return cmp(self.getvalue(), other.getvalue())
        return cmp(self.getvalue(), other)

    def __str__(self):
        buf = self.getvalue()
        if not self.compressed:
            return buf
        buf = zlib.compress(buf)
        #FIXME nick: hacked
        # NOTE(review): replaces the second byte of the zlib header with
        # 0xda - presumably to match the header the Flash Player emits;
        # confirm against a captured Flash stream before changing.
        return buf[0] + '\xda' + buf[2:]

    def compress(self):
        """
        Forces compression of the underlying stream.
        """
        self.compressed = True
class ClassDefinition(object):
    """
    Internal helper used by L{Encoder}/L{Decoder} to hold the transient
    trait information (encoding style, sealed attribute count) derived
    from a class alias.
    """

    def __init__(self, alias):
        self.alias = alias
        # Filled in by the context / encoder once the traits are written.
        self.reference = None

        alias.compile()

        self.attr_len = len(alias.static_attrs) if alias.static_attrs else 0

        # Work out which trait encoding this alias maps to.
        self.encoding = ObjectEncoding.DYNAMIC

        if alias.external:
            self.encoding = ObjectEncoding.EXTERNAL
        elif not alias.dynamic:
            props = alias.encodable_properties

            if props is None or len(alias.static_attrs) == len(props):
                self.encoding = ObjectEncoding.STATIC

    def __repr__(self):
        return '<%s.ClassDefinition reference=%r encoding=%r alias=%r at 0x%x>' % (
            self.__class__.__module__, self.reference, self.encoding, self.alias, id(self))
class Context(codec.Context):
    """
    AMF3 en/decoding context: the per-run reference tables for strings,
    class trait definitions and flex proxy pairings.

    @ivar strings: A list of string references.
    @type strings: C{list}
    @ivar classes: A list of L{ClassDefinition}.
    @type classes: C{list}
    """

    def __init__(self):
        self.class_idx = 0
        self.class_ref = {}
        self.classes = {}
        self.strings = codec.IndexedCollection(use_hash=True)

        codec.Context.__init__(self)

    def clear(self):
        """
        Resets all reference tables, ready for a fresh AMF packet.
        """
        codec.Context.clear(self)

        self.class_idx = 0
        self.class_ref = {}
        self.classes = {}
        self.proxied_objects = {}
        self.strings.clear()

    def getString(self, ref):
        """
        Looks up a previously seen string by its reference index.

        @param ref: The reference index.
        @rtype: C{str} or C{None}
        """
        return self.strings.getByReference(ref)

    def getStringReference(self, s):
        """
        Returns the reference index of C{s} if it has been seen before.

        @type s: C{str}
        @rtype: C{int} or C{None}
        """
        return self.strings.getReferenceTo(s)

    def addString(self, s):
        """
        Remembers C{s} and returns its reference index; an existing
        reference is reused. The empty string is never referenced and
        yields C{-1}.

        @type s: C{str}
        @rtype: C{int}
        @raise TypeError: The parameter C{s} is not of C{basestring} type.
        """
        if not isinstance(s, basestring):
            raise TypeError

        if not s:
            return -1

        return self.strings.append(s)

    def getClassByReference(self, ref):
        """
        Returns the class definition stored under index C{ref}, if any.
        """
        return self.class_ref.get(ref)

    def getClass(self, klass):
        """
        Returns the class definition registered for C{klass}, if any.
        """
        return self.classes.get(klass)

    def addClass(self, alias, klass):
        """
        Registers C{alias} (a L{ClassDefinition}) for C{klass} and returns
        the index it was stored under.
        """
        idx = self.class_idx

        self.class_ref[idx] = alias
        self.classes[klass] = alias
        alias.reference = idx

        self.class_idx = idx + 1

        return idx

    def getObjectForProxy(self, proxy):
        """
        Returns the unproxied version of C{proxy} as stored in the context,
        unproxying and caching it on first sight.

        @see: L{pyamf.flex.unproxy_object}
        @since: 0.6
        """
        raw = self.proxied_objects.get(id(proxy))

        if raw is not None:
            return raw

        from pyamf import flex

        raw = flex.unproxy_object(proxy)
        self.addProxyObject(raw, proxy)

        return raw

    def addProxyObject(self, obj, proxied):
        """
        Remembers both directions of the proxied/unproxied pairing for
        later retrieval.

        @since: 0.6
        """
        table = self.proxied_objects

        table[id(obj)] = proxied
        table[id(proxied)] = obj

    def getProxyForObject(self, obj):
        """
        Returns the proxied version of C{obj} as stored in the context,
        creating and caching one on first sight.

        @see: L{pyamf.flex.proxy_object}
        @since: 0.6
        """
        wrapped = self.proxied_objects.get(id(obj))

        if wrapped is not None:
            return wrapped

        from pyamf import flex

        wrapped = flex.proxy_object(obj)
        self.addProxyObject(obj, wrapped)

        return wrapped
class Decoder(codec.Decoder):
    """
    Decodes an AMF3 data stream.
    """

    def __init__(self, *args, **kwargs):
        # When true, flex proxy objects (e.g. ArrayCollection) are unwrapped
        # to their plain equivalents via readProxy as they are decoded.
        self.use_proxies = kwargs.pop('use_proxies', use_proxies_default)
        codec.Decoder.__init__(self, *args, **kwargs)

    def buildContext(self):
        # Fresh AMF3 reference tables (strings / objects / class traits).
        return Context()

    def getTypeFunc(self, data):
        # Dispatches on the AMF3 type-marker byte; implicitly returns None
        # for markers this decoder does not understand.
        if data == TYPE_UNDEFINED:
            return self.readUndefined
        elif data == TYPE_NULL:
            return self.readNull
        elif data == TYPE_BOOL_FALSE:
            return self.readBoolFalse
        elif data == TYPE_BOOL_TRUE:
            return self.readBoolTrue
        elif data == TYPE_INTEGER:
            return self.readInteger
        elif data == TYPE_NUMBER:
            return self.readNumber
        elif data == TYPE_STRING:
            return self.readString
        elif data == TYPE_XML:
            return self.readXML
        elif data == TYPE_DATE:
            return self.readDate
        elif data == TYPE_ARRAY:
            return self.readArray
        elif data == TYPE_OBJECT:
            return self.readObject
        elif data == TYPE_XMLSTRING:
            return self.readXMLString
        elif data == TYPE_BYTEARRAY:
            return self.readByteArray

    def readProxy(self, obj):
        """
        Decodes a proxied object from the stream.
        @since: 0.6
        """
        return self.context.getObjectForProxy(obj)

    def readUndefined(self):
        """
        Read undefined.
        """
        return pyamf.Undefined

    def readNull(self):
        """
        Read null.
        @return: C{None}
        @rtype: C{None}
        """
        return None

    def readBoolFalse(self):
        """
        Returns C{False}.
        @return: C{False}
        @rtype: C{bool}
        """
        return False

    def readBoolTrue(self):
        """
        Returns C{True}.
        @return: C{True}
        @rtype: C{bool}
        """
        return True

    def readNumber(self):
        """
        Read number (a 64-bit IEEE 754 double).
        """
        return self.stream.read_double()

    def readInteger(self, signed=True):
        """
        Reads and returns a variable-length 29-bit integer from the stream.
        @type signed: C{bool}
        @see: U{Parsing integers on OSFlash
        <http://osflash.org/amf3/parsing_integers>} for the AMF3 integer data
        format.
        """
        return decode_int(self.stream, signed)

    def _readLength(self):
        # U29 header: low bit set means an inline value of (x >> 1) bytes
        # follows; low bit clear means (x >> 1) is a reference-table index.
        # Returns (length_or_index, is_reference).
        x = decode_int(self.stream, False)
        return (x >> 1, x & REFERENCE_BIT == 0)

    def readBytes(self):
        """
        Reads and returns a utf-8 encoded byte array.
        """
        length, is_reference = self._readLength()
        if is_reference:
            return self.context.getString(length)
        if length == 0:
            # The empty string is never added to the reference table.
            return ''
        result = self.stream.read(length)
        self.context.addString(result)
        return result

    def readString(self):
        """
        Reads and returns a string from the stream.
        """
        length, is_reference = self._readLength()
        if is_reference:
            result = self.context.getString(length)
            # Convert the referenced raw bytes to a unicode string (cached
            # by the context).
            return self.context.getStringForBytes(result)
        if length == 0:
            return ''
        result = self.stream.read(length)
        self.context.addString(result)
        return self.context.getStringForBytes(result)

    def readDate(self):
        """
        Read date from the stream.
        The timezone is ignored as the date is always in UTC.
        """
        ref = self.readInteger(False)
        if ref & REFERENCE_BIT == 0:
            # Object reference, not an inline date.
            return self.context.getObject(ref >> 1)
        # Milliseconds since the Unix epoch, as a double.
        ms = self.stream.read_double()
        result = util.get_datetime(ms / 1000.0)
        if self.timezone_offset is not None:
            result += self.timezone_offset
        self.context.addObject(result)
        return result

    def readArray(self):
        """
        Reads an array from the stream.
        @warning: There is a very specific problem with AMF3 where the first
        three bytes of an encoded empty C{dict} will mirror that of an encoded
        C{{'': 1, '2': 2}}
        """
        size = self.readInteger(False)
        if size & REFERENCE_BIT == 0:
            return self.context.getObject(size >> 1)
        size >>= 1
        # An empty key terminates the associative portion; if it is empty
        # straight away the array is purely dense.
        key = self.readBytes()
        if key == '':
            # integer indexes only -> python list
            result = []
            self.context.addObject(result)
            for i in xrange(size):
                result.append(self.readElement())
            return result
        # Mixed associative + dense portions -> pyamf.MixedArray.
        result = pyamf.MixedArray()
        self.context.addObject(result)
        while key:
            result[key] = self.readElement()
            key = self.readBytes()
        # The dense portion follows the associative one.
        for i in xrange(size):
            el = self.readElement()
            result[i] = el
        return result

    def _getClassDefinition(self, ref):
        """
        Reads class definition from the stream.
        """
        # After the object-reference bit has been consumed by the caller,
        # the header is: bit 0 = trait-reference flag, bits 1-2 = encoding,
        # remaining bits = sealed (static) attribute count.
        is_ref = ref & REFERENCE_BIT == 0
        ref >>= 1
        if is_ref:
            class_def = self.context.getClassByReference(ref)
            return class_def
        name = self.readBytes()
        alias = None
        if name == '':
            # Anonymous object -> decode as pyamf.ASObject.
            name = pyamf.ASObject
        try:
            alias = pyamf.get_class_alias(name)
        except pyamf.UnknownClassAlias:
            if self.strict:
                raise
            # Unknown class: fall back to a transient typed-object alias.
            alias = pyamf.TypedObjectClassAlias(name)
        class_def = ClassDefinition(alias)
        class_def.encoding = ref & 0x03
        class_def.attr_len = ref >> 2
        class_def.static_properties = []
        if class_def.attr_len > 0:
            # Sealed member names precede any values.
            for i in xrange(class_def.attr_len):
                key = self.readBytes()
                class_def.static_properties.append(key)
        self.context.addClass(class_def, alias.klass)
        return class_def

    def _readStatic(self, class_def, obj):
        # Values for the sealed members, in declaration order.
        for attr in class_def.static_properties:
            obj[attr] = self.readElement()

    def _readDynamic(self, class_def, obj):
        # Name/value pairs until the empty-string terminator name.
        attr = self.readBytes()
        while attr:
            obj[attr] = self.readElement()
            attr = self.readBytes()

    def readObject(self):
        """
        Reads an object from the stream.
        """
        ref = self.readInteger(False)
        if ref & REFERENCE_BIT == 0:
            # Back-reference to an already-decoded object.
            obj = self.context.getObject(ref >> 1)
            if obj is None:
                raise pyamf.ReferenceError('Unknown reference %d' % (ref >> 1,))
            if self.use_proxies is True:
                obj = self.readProxy(obj)
            return obj
        ref >>= 1
        class_def = self._getClassDefinition(ref)
        alias = class_def.alias
        obj = alias.createInstance(codec=self)
        obj_attrs = dict()
        # Register before reading members so cyclic references resolve.
        self.context.addObject(obj)
        if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
            # Externalizable: the object reads its own body.
            obj.__readamf__(DataInput(self))
            if self.use_proxies is True:
                obj = self.readProxy(obj)
            return obj
        elif class_def.encoding == ObjectEncoding.DYNAMIC:
            # Sealed members first, then dynamic name/value pairs.
            self._readStatic(class_def, obj_attrs)
            self._readDynamic(class_def, obj_attrs)
        elif class_def.encoding == ObjectEncoding.STATIC:
            self._readStatic(class_def, obj_attrs)
        else:
            raise pyamf.DecodeError("Unknown object encoding")
        alias.applyAttributes(obj, obj_attrs, codec=self)
        if self.use_proxies is True:
            obj = self.readProxy(obj)
        return obj

    def readXML(self):
        """
        Reads an xml object from the stream.
        @return: An etree interface compatible object
        @see: L{xml.set_default_interface}
        """
        ref = self.readInteger(False)
        if ref & REFERENCE_BIT == 0:
            return self.context.getObject(ref >> 1)
        xmlstring = self.stream.read(ref >> 1)
        x = xml.fromstring(xmlstring)
        self.context.addObject(x)
        return x

    def readXMLString(self):
        """
        Reads a string from the data stream and converts it into
        an XML Tree.
        @see: L{readXML}
        """
        return self.readXML()

    def readByteArray(self):
        """
        Reads a string of data from the stream.
        Detects if the L{ByteArray} was compressed using C{zlib}.
        @see: L{ByteArray}
        @note: This is not supported in ActionScript 1.0 and 2.0.
        """
        ref = self.readInteger(False)
        if ref & REFERENCE_BIT == 0:
            return self.context.getObject(ref >> 1)
        buffer = self.stream.read(ref >> 1)
        # Compression is detected, not flagged: try to inflate and fall back
        # to the raw bytes if zlib rejects them.
        try:
            buffer = zlib.decompress(buffer)
            compressed = True
        except zlib.error:
            compressed = False
        obj = ByteArray(buffer)
        obj.compressed = compressed
        self.context.addObject(obj)
        return obj
class Encoder(codec.Encoder):
    """
    Encodes an AMF3 data stream.
    """

    def __init__(self, *args, **kwargs):
        # When true, lists/dicts are wrapped in flex proxies before encoding.
        self.use_proxies = kwargs.pop('use_proxies', use_proxies_default)
        # When true, repeated strings are emitted as references to save space.
        self.string_references = kwargs.pop('string_references', True)
        codec.Encoder.__init__(self, *args, **kwargs)

    def buildContext(self):
        # Fresh AMF3 reference tables for this encode run.
        return Context()

    def getTypeFunc(self, data):
        """
        @see: L{codec.Encoder.getTypeFunc}
        """
        t = type(data)
        if t in python.int_types:
            return self.writeInteger
        elif t is ByteArray:
            return self.writeByteArray
        elif t is pyamf.MixedArray:
            return self.writeDict
        return codec.Encoder.getTypeFunc(self, data)

    def writeUndefined(self, n):
        """
        Writes an C{pyamf.Undefined} value to the stream.
        """
        self.stream.write(TYPE_UNDEFINED)

    def writeNull(self, n):
        """
        Writes a C{null} value to the stream.
        """
        self.stream.write(TYPE_NULL)

    def writeBoolean(self, n):
        """
        Writes a Boolean to the stream.
        """
        t = TYPE_BOOL_TRUE
        if n is False:
            t = TYPE_BOOL_FALSE
        self.stream.write(t)

    def _writeInteger(self, n):
        """
        Writes the raw U29 encoding of C{n}, without a type marker.
        @param n: The integer data to be encoded to the AMF3 data stream.
        @type n: integer data
        @see: U{Parsing Integers on OSFlash
        <http://osflash.org/documentation/amf3/parsing_integers>}
        for more info.
        """
        self.stream.write(encode_int(n))

    def writeInteger(self, n):
        """
        Writes an integer to the stream.
        @type n: integer data
        @param n: The integer data to be encoded to the AMF3 data stream.
        """
        # Values outside the signed 29-bit range must go out as doubles.
        if n < MIN_29B_INT or n > MAX_29B_INT:
            self.writeNumber(float(n))
            return
        self.stream.write(TYPE_INTEGER)
        self.stream.write(encode_int(n))

    def writeNumber(self, n):
        """
        Writes a float to the stream.
        @type n: C{float}
        """
        self.stream.write(TYPE_NUMBER)
        self.stream.write_double(n)

    def serialiseBytes(self, b):
        # The empty string is a special one-byte encoding and is never
        # added to the reference table.
        if len(b) == 0:
            self.stream.write_uchar(REFERENCE_BIT)
            return
        if self.string_references:
            ref = self.context.getStringReference(b)
            if ref != -1:
                # Reference: index with the low (inline) bit clear.
                self._writeInteger(ref << 1)
                return
            self.context.addString(b)
        # Inline: byte length with the low bit set, then the raw bytes.
        self._writeInteger((len(b) << 1) | REFERENCE_BIT)
        self.stream.write(b)

    def serialiseString(self, s):
        """
        Writes a raw string to the stream.
        @type s: C{str}
        @param s: The string data to be encoded to the AMF3 data stream.
        """
        if type(s) is unicode:
            s = self.context.getBytesForString(s)
        self.serialiseBytes(s)

    def writeBytes(self, b):
        """
        Writes a raw string to the stream.
        """
        self.stream.write(TYPE_STRING)
        self.serialiseBytes(b)

    def writeString(self, s):
        """
        Writes a string to the stream. It will be B{UTF-8} encoded.
        """
        s = self.context.getBytesForString(s)
        self.writeBytes(s)

    def writeDate(self, n):
        """
        Writes a C{datetime} instance to the stream.
        @type n: L{datetime}
        @param n: The C{Date} data to be encoded to the AMF3 data stream.
        @raise EncodeError: A C{datetime.time} instance was given - AMF3 has
            no encoding for bare times.
        """
        if isinstance(n, datetime.time):
            raise pyamf.EncodeError('A datetime.time instance was found but '
                'AMF3 has no way to encode time objects. Please use '
                'datetime.datetime instead (got:%r)' % (n,))
        self.stream.write(TYPE_DATE)
        ref = self.context.getObjectReference(n)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(n)
        # Inline marker, then milliseconds since the epoch as a double.
        self.stream.write_uchar(REFERENCE_BIT)
        if self.timezone_offset is not None:
            n -= self.timezone_offset
        ms = util.get_timestamp(n)
        self.stream.write_double(ms * 1000.0)

    def writeList(self, n, is_proxy=False):
        """
        Writes a C{tuple}, C{set} or C{list} to the stream.
        @type n: One of C{__builtin__.tuple}, C{__builtin__.set}
            or C{__builtin__.list}
        @param n: The C{list} data to be encoded to the AMF3 data stream.
        @param is_proxy: Set by L{writeProxy} to avoid infinite recursion
            when proxying is enabled.
        """
        if self.use_proxies and not is_proxy:
            self.writeProxy(n)
            return
        self.stream.write(TYPE_ARRAY)
        ref = self.context.getObjectReference(n)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(n)
        self._writeInteger((len(n) << 1) | REFERENCE_BIT)
        # Empty associative portion terminator, then the dense elements.
        self.stream.write('\x01')
        [self.writeElement(x) for x in n]

    def writeDict(self, n):
        """
        Writes a C{dict} to the stream.
        @type n: C{__builtin__.dict}
        @param n: The C{dict} data to be encoded to the AMF3 data stream.
        @raise ValueError: Non C{int}/C{str} key value found in the C{dict}
        @raise EncodeError: C{dict} contains empty string keys.
        """
        # Design bug in AMF3 that cannot read/write empty key strings
        # for more info
        if '' in n:
            raise pyamf.EncodeError("dicts cannot contain empty string keys")
        if self.use_proxies:
            self.writeProxy(n)
            return
        self.stream.write(TYPE_ARRAY)
        ref = self.context.getObjectReference(n)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(n)
        # The AMF3 spec demands that all str based indicies be listed first
        keys = n.keys()
        int_keys = []
        str_keys = []
        for x in keys:
            if isinstance(x, python.int_types):
                int_keys.append(x)
            elif isinstance(x, python.str_types):
                str_keys.append(x)
            else:
                raise ValueError("Non int/str key value found in dict")
        # Make sure the integer keys are within range
        l = len(int_keys)
        for x in int_keys:
            # NOTE(review): `l < x <= 0` can never be true (it requires
            # x > l >= 0 and x <= 0 at the same time), so out-of-range keys
            # are never demoted here; presumably something like
            # `x >= l or x < 0` was intended. Also note this branch appends
            # the int itself where the branch below appends str(x). Confirm
            # against the AMF3 dense-array rules before changing.
            if l < x <= 0:
                # treat as a string key
                str_keys.append(x)
                del int_keys[int_keys.index(x)]
        int_keys.sort()
        # If integer keys don't start at 0, they will be treated as strings
        if len(int_keys) > 0 and int_keys[0] != 0:
            # NOTE(review): deletes from int_keys while iterating it, which
            # skips every other element - verify intent before relying on it.
            for x in int_keys:
                str_keys.append(str(x))
                del int_keys[int_keys.index(x)]
        # Dense length header, associative pairs, terminator, then the
        # dense values in index order.
        self._writeInteger(len(int_keys) << 1 | REFERENCE_BIT)
        for x in str_keys:
            self.serialiseString(x)
            self.writeElement(n[x])
        self.stream.write_uchar(0x01)
        for k in int_keys:
            self.writeElement(n[k])

    def writeProxy(self, obj):
        """
        Encodes a proxied object to the stream.
        @since: 0.6
        """
        proxy = self.context.getProxyForObject(obj)
        self.writeObject(proxy, is_proxy=True)

    def writeObject(self, obj, is_proxy=False):
        """
        Writes an object to the stream.
        """
        if self.use_proxies and not is_proxy:
            self.writeProxy(obj)
            return
        self.stream.write(TYPE_OBJECT)
        ref = self.context.getObjectReference(obj)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(obj)
        # object is not referenced, serialise it
        kls = obj.__class__
        definition = self.context.getClass(kls)
        alias = None
        class_ref = False  # if the class definition is a reference
        if definition:
            class_ref = True
            alias = definition.alias
        else:
            alias = self.context.getClassAlias(kls)
            definition = ClassDefinition(alias)
            self.context.addClass(definition, alias.klass)
        if class_ref:
            # definition.reference already holds the pre-encoded trait
            # reference bytes (set on first encode, below).
            self.stream.write(definition.reference)
        else:
            # Build the inline trait header: attr count, encoding bits and
            # the two inline flags.
            ref = 0
            if definition.encoding != ObjectEncoding.EXTERNAL:
                ref += definition.attr_len << 4
            final_reference = encode_int(ref | definition.encoding << 2 |
                REFERENCE_BIT << 1 | REFERENCE_BIT)
            self.stream.write(final_reference)
            # work out what the final reference for the class will be.
            # this is okay because the next time an object of the same
            # class is encoded, class_ref will be True and never get here
            # again.
            definition.reference = encode_int(
                definition.reference << 2 | REFERENCE_BIT)
            if alias.anonymous:
                self.stream.write('\x01')
            else:
                self.serialiseString(alias.alias)
        if alias.external:
            # Externalizable: the object writes its own body.
            obj.__writeamf__(DataOutput(self))
            return
        attrs = alias.getEncodableAttributes(obj, codec=self)
        if alias.static_attrs:
            if not class_ref:
                # Sealed member names are only written with inline traits.
                [self.serialiseString(attr) for attr in alias.static_attrs]
            for attr in alias.static_attrs:
                value = attrs.pop(attr)
                self.writeElement(value)
            if definition.encoding == ObjectEncoding.STATIC:
                return
        if definition.encoding == ObjectEncoding.DYNAMIC:
            if attrs:
                for attr, value in attrs.iteritems():
                    if type(attr) in python.int_types:
                        attr = str(attr)
                    self.serialiseString(attr)
                    self.writeElement(value)
            # Empty-name terminator for the dynamic section.
            self.stream.write('\x01')

    def writeByteArray(self, n):
        """
        Writes a L{ByteArray} to the data stream.
        @param n: The L{ByteArray} data to be encoded to the AMF3 data stream.
        @type n: L{ByteArray}
        """
        self.stream.write(TYPE_BYTEARRAY)
        ref = self.context.getObjectReference(n)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(n)
        # str(n) applies the ByteArray's own (possibly compressed) rendering.
        buf = str(n)
        l = len(buf)
        self._writeInteger(l << 1 | REFERENCE_BIT)
        self.stream.write(buf)

    def writeXML(self, n):
        """
        Writes a XML string to the data stream.
        @type n: L{ET<xml.ET>}
        @param n: The XML Document to be encoded to the AMF3 data stream.
        """
        self.stream.write(TYPE_XMLSTRING)
        ref = self.context.getObjectReference(n)
        if ref != -1:
            self._writeInteger(ref << 1)
            return
        self.context.addObject(n)
        self.serialiseString(xml.tostring(n).encode('utf-8'))
def encode_int(n):
    """
    Encodes an int as a variable length signed 29-bit integer as defined by
    the spec.

    @param n: The integer to be encoded
    @return: The encoded string
    @rtype: C{str}
    @raise OverflowError: C{n} is outside the signed 29-bit range.
    """
    global ENCODED_INT_CACHE

    try:
        return ENCODED_INT_CACHE[n]
    except KeyError:
        pass

    if n < MIN_29B_INT or n > MAX_29B_INT:
        raise OverflowError("Out of range")

    # Remember the caller's value: negative ints are re-mapped below and the
    # cache must be keyed on the original argument. (Previously the shifted
    # value was used as the key, so a negative input was cached under an
    # out-of-range positive key - never producing a cache hit for the
    # negative input itself, and poisoning the cache so that a later call
    # with that out-of-range key returned bytes instead of raising.)
    original = n

    if n < 0:
        # Two's-complement style mapping into the unsigned 29-bit space.
        n += 0x20000000

    encoded = ''
    real_value = None

    # Values needing the 4-byte form: the first three bytes carry 7 bits
    # each (high bit = continuation), the last byte carries 8 bits.
    if n > 0x1fffff:
        real_value = n
        n >>= 1
        encoded += chr(0x80 | ((n >> 21) & 0xff))

    if n > 0x3fff:
        encoded += chr(0x80 | ((n >> 14) & 0xff))

    if n > 0x7f:
        encoded += chr(0x80 | ((n >> 7) & 0xff))

    if real_value is not None:
        n = real_value

    if n > 0x1fffff:
        # Final byte of the 4-byte form: full 8 bits.
        encoded += chr(n & 0xff)
    else:
        # Final byte of the 1-3 byte forms: 7 bits, continuation bit clear.
        encoded += chr(n & 0x7f)

    ENCODED_INT_CACHE[original] = encoded

    return encoded
def decode_int(stream, signed=False):
    """
    Reads a variable-length 29-bit integer from C{stream} and returns it.

    @param stream: Source of bytes; must expose C{read_uchar}.
    @param signed: Interpret the 29-bit value as signed.
    @rtype: C{int}
    """
    result = 0
    byte = stream.read_uchar()
    seen = 0

    # Up to three leading bytes contribute 7 bits each; the high bit is the
    # continuation flag.
    while byte & 0x80 and seen < 3:
        result = (result << 7) | (byte & 0x7f)
        byte = stream.read_uchar()
        seen += 1

    if seen < 3:
        result = (result << 7) | byte
    else:
        # The fourth byte contributes a full 8 bits.
        result = (result << 8) | byte

        if result & 0x10000000:
            if signed:
                result -= 0x20000000
            else:
                result = (result << 1) + 1

    return result
pyamf.register_class(ByteArray)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Python compatibility values and helpers.
"""
try:
import __builtin__ as builtins
except ImportError:
import builtins
import types
#: All builtin function/method/code object types.
func_types = (
    types.BuiltinFunctionType, types.BuiltinMethodType, types.CodeType,
    types.FunctionType, types.GeneratorType, types.LambdaType, types.MethodType)
class_types = [type]
int_types = [int]
str_types = [str]
# Probe for Python 2-only types; on Python 3 these names do not exist and
# the NameError leaves the lists unchanged.
try:
    int_types.append(long)
except NameError:
    pass
try:
    str_types.append(unicode)
except NameError:
    pass
# Old-style (classic) classes, Python 2 only.
try:
    class_types.append(types.ClassType)
except:
    pass
# Freeze into tuples so they are usable directly with isinstance().
int_types = tuple(int_types)
str_types = tuple(str_types)
class_types = tuple(class_types)
# 1e300000 overflows to +/- infinity on every platform.
PosInf = 1e300000
NegInf = -1e300000
# we do this instead of float('nan') because windows throws a wobbler.
NaN = PosInf / PosInf
def isNaN(val):
    """
    Determine whether C{val} is IEEE-754 NaN.

    @param val: Anything accepted by C{float()}.
    @rtype: C{bool}
    @since: 0.5
    """
    f = float(val)

    # NaN is the only value that does not compare equal to itself; this is
    # portable and avoids the fragile str() round-trip comparison.
    return f != f
def isPosInf(val):
    """
    Determine whether C{val} is IEEE-754 positive infinity.

    @param val: Anything accepted by C{float()}.
    @rtype: C{bool}
    @since: 0.5
    """
    f = float(val)

    # Only +inf is positive and unchanged by halving; finite values shrink,
    # NaN fails the > 0 test. Avoids the fragile str() round-trip and any
    # dependency on the module-level PosInf constant.
    return f > 0 and f * 0.5 == f
def isNegInf(val):
    """
    Determine whether C{val} is IEEE-754 negative infinity.

    @param val: Anything accepted by C{float()}.
    @rtype: C{bool}
    @since: 0.5
    """
    f = float(val)

    # Only -inf is negative and unchanged by halving; finite values shrink
    # in magnitude, NaN fails the < 0 test. Avoids the fragile str()
    # round-trip and any dependency on the module-level NegInf constant.
    return f < 0 and f * 0.5 == f
# callable() was removed in Python 3.0/3.1 (restored in 3.2); fall back to
# the __call__ duck-type check when the builtin is missing.
try:
    callable = builtins.callable
except NameError:
    def callable(obj):
        """
        Compatibility function for Python 3.x
        """
        return hasattr(obj, '__call__')
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details. | Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting tests.
@since: 0.1.0
"""
| Python |
from google.appengine.ext import db
class PetModel(db.Model):
    """
    Test fixture exercising the standard GAE datastore property types.
    """
    # 'borrowed' from http://code.google.com/appengine/docs/datastore/entitiesandmodels.html
    name = db.StringProperty(required=True)
    type = db.StringProperty(required=True, choices=set(["cat", "dog", "bird"]))
    birthdate = db.DateProperty()
    weight_in_pounds = db.IntegerProperty()
    spayed_or_neutered = db.BooleanProperty()
class PetExpando(db.Expando):
    """
    Same declared properties as L{PetModel}, but on an C{Expando} so that
    arbitrary extra attributes may also be stored.
    """
    name = db.StringProperty(required=True)
    type = db.StringProperty(required=True, choices=set(["cat", "dog", "bird"]))
    birthdate = db.DateProperty()
    weight_in_pounds = db.IntegerProperty()
    spayed_or_neutered = db.BooleanProperty()
class ListModel(db.Model):
    """
    Test fixture with a single C{long} list property.
    """
    numbers = db.ListProperty(long)
class GettableModelStub(db.Model):
    """
    Stub that records the arguments of every C{get()} call instead of
    fetching from the datastore.
    """
    # Class-level on purpose: accumulates the recorded calls across all
    # uses of the stub.
    gets = []

    @staticmethod
    def get(*args, **kwargs):
        # Shadows db.Model.get; records the call and returns None.
        GettableModelStub.gets.append([args, kwargs])
class Author(db.Model):
    # Referenced by L{Novel.author}.
    name = db.StringProperty()
class Novel(db.Model):
    title = db.StringProperty()
    # Reference property for exercising entity relations.
    author = db.ReferenceProperty(Author)
class EmptyModel(db.Model):
    """
    A model that has no properties but also has no entities in the datastore.
    """
| Python |
# The simplest Django settings possible
# In-memory sqlite keeps the test run self-contained.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = ':memory:'
INSTALLED_APPS = ('adapters',)
from django.db import models
class SimplestModel(models.Model):
    """
    The simplest Django model you can have
    """


class TimeClass(models.Model):
    """
    A model with all the time based fields
    """
    t = models.TimeField()
    d = models.DateField()
    dt = models.DateTimeField()


class ParentReference(models.Model):
    """
    Has a foreign key to L{ChildReference}
    """
    name = models.CharField(max_length=100)
    # string reference because ChildReference is defined below
    bar = models.ForeignKey('ChildReference', null=True)


class ChildReference(models.Model):
    """
    Has a foreign key relation to L{ParentReference}
    """
    name = models.CharField(max_length=100)
    foo = models.ForeignKey(ParentReference)


class NotSaved(models.Model):
    # used to test encoding of instances that were never saved (no pk)
    name = models.CharField(max_length=100)


class Publication(models.Model):
    title = models.CharField(max_length=30)

    def __unicode__(self):
        return self.title

    class Meta:
        ordering = ('title',)


class Reporter(models.Model):
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()

    def __unicode__(self):
        return u"%s %s" % (self.first_name, self.last_name)


class Article(models.Model):
    # exercises both a many-to-many and a nullable foreign key relation
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication)
    reporter = models.ForeignKey(Reporter, null=True)

    def __unicode__(self):
        return self.headline

    class Meta:
        ordering = ('headline',)
# concrete inheritance (multi-table: Restaurant gets an implicit parent link)
class Place(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)


class Restaurant(Place):
    serves_hot_dogs = models.BooleanField()
    serves_pizza = models.BooleanField()


# abstract inheritance (CommonInfo never gets its own table)
class CommonInfo(models.Model):
    name = models.CharField(max_length=100)
    age = models.PositiveIntegerField()

    class Meta:
        abstract = True


class Student(CommonInfo):
    home_group = models.CharField(max_length=5)


# foreign keys
class NullForeignKey(models.Model):
    foobar = models.ForeignKey(SimplestModel, null=True)


class BlankForeignKey(models.Model):
    foobar = models.ForeignKey(SimplestModel, blank=True)


class StaticRelation(models.Model):
    gak = models.ForeignKey(SimplestModel)


class FileModel(models.Model):
    # FileField attributes are treated as read-only by the adapter
    file = models.FileField(upload_to='file_model')
    text = models.CharField(max_length=64)
# Profile requires PIL for ImageField; skip the fixture when PIL is absent
try:
    import PIL

    class Profile(models.Model):
        file = models.ImageField(upload_to='profile')
        text = models.CharField(max_length=64)
except ImportError:
    pass


class DBColumnModel(models.Model):
    """
    Foreign key with a custom db_column name.

    @see: #807
    """
    bar = models.ForeignKey(SimplestModel, db_column='custom')
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
File included to make the directory a Python package.
The test_*.py files are special in this directory in that they refer to the
top level module names of the adapter to test. An attempt will be made to
import that module but ignored if it fails (not available on the system).
"""
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Test utilities.
@since: 0.1.0
"""
import unittest
import copy
import pyamf
from pyamf import python
class ClassicSpam:
    """
    An old-style (classic) class used to exercise encoding of non
    new-style instances; implements the external AMF read/write hooks.
    """
    def __readamf__(self, input):
        pass

    def __writeamf__(self, output):
        pass
class Spam(object):
    """
    A generic object to use for object encoding.

    @param d: Optional mapping of initial attributes to set on the
        instance. Defaults to no attributes.
    """
    def __init__(self, d=None):
        # avoid a mutable default argument ({}): the default dict would be
        # shared between every instantiation of the class
        if d is None:
            d = {}

        self.__dict__.update(d)

    def __readamf__(self, input):
        pass

    def __writeamf__(self, output):
        pass
class EncoderMixIn(object):
    """
    A mixin class that provides an AMF* encoder and some helpful methods to do
    testing.
    """

    # subclasses set this to the AMF version constant (e.g. pyamf.AMF0/AMF3)
    amf_type = None

    def setUp(self):
        self.encoder = pyamf.get_encoder(encoding=self.amf_type)
        self.buf = self.encoder.stream
        self.context = self.encoder.context

    def tearDown(self):
        pass

    def encode(self, *args):
        """
        Encode C{args} in sequence and return the raw AMF bytes produced.
        """
        # reuse the encoder's stream: rewind and empty it first
        self.buf.seek(0, 0)
        self.buf.truncate()

        for arg in args:
            self.encoder.writeElement(arg)

        return self.buf.getvalue()

    def assertEncoded(self, arg, *args, **kwargs):
        """
        Assert that encoding C{arg} produces the byte parts C{args}.
        Pass C{clear=False} to keep the codec context between calls.
        """
        if kwargs.get('clear', True):
            self.context.clear()

        assert_buffer(self, self.encode(arg), args)
class DecoderMixIn(object):
    """
    A mixin class that provides an AMF* decoder and some helpful methods to do
    testing.
    """

    # subclasses set this to the AMF version constant (e.g. pyamf.AMF0/AMF3)
    amf_type = None

    def setUp(self):
        self.decoder = pyamf.get_decoder(encoding=self.amf_type)
        self.buf = self.decoder.stream
        self.context = self.decoder.context

    def tearDown(self):
        pass

    def decode(self, bytes, raw=False):
        """
        Decode the AMF byte string (or nested list of parts) C{bytes}.

        Returns the single decoded element, or a list when C{raw} is True
        or when more than one element was read.
        """
        if not isinstance(bytes, basestring):
            bytes = _join(bytes)

        # load the bytes into the decoder's stream and rewind
        self.buf.seek(0, 0)
        self.buf.truncate()
        self.buf.write(bytes)
        self.buf.seek(0, 0)

        ret = []

        while not self.buf.at_eof():
            ret.append(self.decoder.readElement())

        if raw:
            return ret

        if len(ret) == 1:
            return ret[0]

        return ret

    def assertDecoded(self, decoded, bytes, raw=False, clear=True):
        """
        Assert that C{bytes} decodes to C{decoded} and that the whole
        buffer was consumed.
        """
        if clear:
            self.context.clear()

        ret = self.decode(bytes, raw)

        self.assertEqual(ret, decoded)
        self.assertEqual(self.buf.remaining(), 0)
class ClassCacheClearingTestCase(unittest.TestCase):
    """
    Base TestCase that snapshots pyamf's global class registry around each
    test, so registrations made by one test cannot leak into the next.
    """
    def setUp(self):
        unittest.TestCase.setUp(self)

        self._class_cache = pyamf.CLASS_CACHE.copy()
        self._class_loaders = copy.copy(pyamf.CLASS_LOADERS)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

        # restore the registry exactly as it was before the test ran
        pyamf.CLASS_CACHE = self._class_cache
        pyamf.CLASS_LOADERS = self._class_loaders

    def assertBuffer(self, first, second, msg=None):
        """
        Assert that byte string C{first} matches the part spec C{second}.
        """
        assert_buffer(self, first, second, msg)

    def assertEncodes(self, obj, buffer, encoding=pyamf.AMF3):
        """
        Assert that encoding C{obj} yields C{buffer}: a byte string for an
        exact match, or a nested part spec for order-insensitive chunks.
        """
        bytes = pyamf.encode(obj, encoding=encoding).getvalue()

        if isinstance(buffer, basestring):
            self.assertEqual(bytes, buffer)

            return

        self.assertBuffer(bytes, buffer)

    def assertDecodes(self, bytes, cb, encoding=pyamf.AMF3, raw=False):
        """
        Assert that decoding C{bytes} produces C{cb}; when C{cb} is
        callable it is invoked with the result instead (custom checks).
        """
        if not isinstance(bytes, basestring):
            bytes = _join(bytes)

        ret = list(pyamf.decode(bytes, encoding=encoding))

        if not raw and len(ret) == 1:
            ret = ret[0]

        if python.callable(cb):
            cb(ret)
        else:
            self.assertEqual(ret, cb)
def assert_buffer(testcase, val, s, msg=None):
    """
    Fail C{testcase} unless the byte string C{val} matches the expected
    buffer part specification C{s} (see L{check_buffer}).
    """
    if check_buffer(val, s):
        return

    testcase.fail(msg or ('%r != %r' % (val, s)))
def check_buffer(buf, parts, inner=False):
    """
    Check that byte string C{buf} is composed of C{parts}, in order.

    A nested tuple/list inside C{parts} marks a group whose members may
    appear in ANY order (useful for dict-driven output). Returns the
    unconsumed remainder when called recursively (C{inner=True}),
    otherwise True/False.
    """
    assert isinstance(parts, (tuple, list))

    # work on a private copy; the unordered branch mutates the list
    parts = [p for p in parts]

    for part in parts:
        if inner is False:
            if isinstance(part, (tuple, list)):
                buf = check_buffer(buf, part, inner=True)
            else:
                if not buf.startswith(part):
                    return False

                buf = buf[len(part):]
        else:
            # unordered group: sweep the remaining parts repeatedly,
            # consuming whichever prefix currently matches
            for k in parts[:]:
                for p in parts[:]:
                    if isinstance(p, (tuple, list)):
                        buf = check_buffer(buf, p, inner=True)
                    else:
                        if buf.startswith(p):
                            parts.remove(p)
                            buf = buf[len(p):]

            return buf

    # top level: a full match must consume the entire buffer
    return len(buf) == 0
def replace_dict(src, dest):
    """
    Mutate C{dest} in place until it holds exactly the same items as
    C{src}: keys missing from C{src} are removed, every other key is
    (re)bound to the object held by C{src}.
    """
    processed = set()

    # drop or rebind the keys dest already has
    for key in list(dest.copy().keys()):
        processed.add(key)

        if key not in src:
            del dest[key]
        elif dest[key] is not src[key]:
            dest[key] = src[key]

    # copy over anything dest was missing entirely
    for key in src.keys():
        if key not in processed:
            dest[key] = src[key]

    assert src == dest
class NullFileDescriptor(object):
    """
    A file-like object whose C{write} silently discards everything it is
    given.
    """
    def write(self, *args, **kwargs):
        """
        Accept any arguments and do nothing.
        """
def get_fqcn(klass):
    """
    Return the fully qualified class name of C{klass}, i.e.
    C{module.ClassName}.
    """
    return '.'.join([klass.__module__, klass.__name__])
def expectedFailureIfAppengine(func):
    """
    Decorator: mark C{func} as an expected failure when the tests are
    actually running inside the Google App Engine runtime, otherwise
    return it untouched.
    """
    try:
        from google import appengine
    except ImportError:
        # SDK not even installed - plain environment
        return func

    import os

    # SERVER_SOFTWARE is only set by the GAE runtime itself; merely having
    # the SDK importable is not enough to expect a failure
    if os.environ.get('SERVER_SOFTWARE', None) is None:
        return func

    return unittest.expectedFailure(func)
def _join(parts):
    """
    Recursively flatten a nested sequence of strings into a single string,
    preserving order.
    """
    out = ''

    for part in parts:
        if isinstance(part, basestring):
            out += part
        else:
            out += _join(part)

    return out
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Unit tests.
@since: 0.1.0
"""
import os.path
try:
import unittest2 as unittest
import sys
sys.modules['unittest'] = unittest
except ImportError:
import unittest
if not hasattr(unittest.TestCase, 'assertIdentical'):
    def assertIdentical(self, first, second, msg=None):
        """
        Fail the test if C{first} is not C{second}. This is an
        object-identity-equality test, not an object equality (i.e. C{__eq__}) test.

        @param msg: if msg is None, then the failure message will be
        '%r is not %r' % (first, second)
        """
        if first is not second:
            raise AssertionError(msg or '%r is not %r' % (first, second))

        return first

    # monkey-patch so every TestCase in the suite gains the helper
    unittest.TestCase.assertIdentical = assertIdentical
if not hasattr(unittest.TestCase, 'assertNotIdentical'):
    def assertNotIdentical(self, first, second, msg=None):
        """
        Fail the test if C{first} is C{second}. This is an
        object-identity-equality test, not an object equality
        (i.e. C{__eq__}) test.

        @param msg: if msg is None, then the failure message will be
        '%r is %r' % (first, second)
        """
        if first is second:
            raise AssertionError(msg or '%r is %r' % (first, second))

        return first

    # monkey-patch so every TestCase in the suite gains the helper
    unittest.TestCase.assertNotIdentical = assertNotIdentical
if not hasattr(unittest.TestCase, 'patch'):
    import inspect

    def unpatch(self):
        # restore every attribute replaced via patch(), in recorded order
        for orig, part, replaced in self.__patches:
            setattr(orig, part, replaced)

    def patch(self, orig, replace):
        """
        Temporarily replace the dotted attribute path C{orig} (resolved in
        the caller's namespace) with C{replace}. Automatically undone via
        C{addCleanup} when the test finishes.
        """
        if not hasattr(self, '__patches'):
            self.__patches = []
            self.addCleanup(unpatch, self)

        # resolve the dotted name relative to the *caller's* globals/locals
        f = inspect.stack()[1][0]
        parts = orig.split('.')

        v = f.f_globals.copy()
        v.update(f.f_locals)

        orig = v[parts[0]]

        for part in parts[1:-1]:
            orig = getattr(orig, part)

        # remember the original attribute so unpatch() can restore it
        to_replace = getattr(orig, parts[-1])
        self.__patches.append((orig, parts[-1], to_replace))

        setattr(orig, parts[-1], replace)

    unittest.TestCase.patch = patch
def get_suite():
    """
    Discover the entire test suite.
    """
    loader = unittest.TestLoader()

    # this could be cleaned up but it works ..
    # walk this file's path backwards to find the directory that CONTAINS
    # the top-level 'pyamf' package, and hand that to discover()
    tld = __file__.split(os.path.sep)
    tld.reverse()

    for i, x in enumerate(tld):
        if x == 'pyamf':
            tld.reverse()
            tld = os.path.sep.join(tld[:-1 - i])

            break

    return loader.discover('pyamf', top_level_dir=tld)
def main():
    """
    Entry point used when the package is executed with C{-m}: discover and
    run the full test suite with a plain text runner.
    """
    unittest.TextTestRunner().run(get_suite())


if __name__ == '__main__':
    main()
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Unit tests for Remoting gateways.
@since: 0.1.0
"""
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Because there is disparity between Python packaging (and it is being sorted
out ...) we currently provide our own way to get the string of a version tuple.
@since: 0.6
"""
class Version(tuple):
    """
    An immutable version number held as a tuple of components. The string
    form is computed lazily via L{get_version} and cached afterwards.
    """
    # cached result of get_version(); filled in on first str()
    _version = None

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __str__(self):
        if not self._version:
            self._version = get_version(self)

        return self._version
def get_version(_version):
    """
    Build the canonical string form of a version tuple, e.g.
    C{(0, 6, 'beta')} -> C{'0.6beta'}. Integer components are joined with
    dots; string components (like 'beta') are appended directly.
    """
    rendered = []
    first = True

    for part in _version:
        # a dot precedes every integer component except the very first one
        if not first and isinstance(part, int):
            rendered.append('.')

        rendered.append(str(part))
        first = False

    return ''.join(rendered).strip('.')
| Python |
"""
"""
from django.contrib.auth import models

import pyamf.adapters

# hide sensitive/derived attributes on User and freeze the natural key
models.User.__amf__ = {
    'exclude': ('message_set', 'password'),
    'readonly': ('username',)
}

# ensure that the adapter that we depend on is loaded ..
pyamf.adapters.get_adapter('django.db.models.base')

# expose every model in django.contrib.auth under its module path
pyamf.register_package(models, models.__name__)
# Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
SQLAlchemy adapter module.
@see: U{SQLAlchemy homepage<http://www.sqlalchemy.org>}
@since: 0.4
"""
from sqlalchemy.orm import collections

import pyamf
from pyamf.adapters import util

# encode SQLAlchemy's instrumented collection wrappers as their plain
# Python equivalents so they degrade cleanly on the wire
pyamf.add_type(collections.InstrumentedList, util.to_list)
pyamf.add_type(collections.InstrumentedDict, util.to_dict)
pyamf.add_type(collections.InstrumentedSet, util.to_set)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
Elixir adapter module. Elixir adds a number of properties to the mapped instances.
@see: U{Elixir homepage<http://elixir.ematia.de>}
@since: 0.6
"""
import elixir.entity
import pyamf
from pyamf import adapters
# Elixir entities are SQLAlchemy mapped classes; reuse that adapter and
# teach it to recognise entities
adapter = adapters.get_adapter('sqlalchemy.orm')

adapter.class_checkers.append(elixir.entity.is_entity)


class ElixirAdapter(adapter.SaMappedClassAlias):
    """
    Class alias for Elixir entities, layered on top of the SQLAlchemy
    mapped-class alias; strips Elixir bookkeeping and relation columns.
    """

    # Elixir-internal attribute that must never hit the wire
    EXCLUDED_ATTRS = adapter.SaMappedClassAlias.EXCLUDED_ATTRS + [
        '_global_session']

    def getCustomProperties(self):
        adapter.SaMappedClassAlias.getCustomProperties(self)

        self.descriptor = self.klass._descriptor
        self.parent_descriptor = None

        if self.descriptor.parent:
            self.parent_descriptor = self.descriptor.parent._descriptor

        foreign_constraints = []

        # collect the raw foreign key column names so they can be excluded
        for constraint in self.descriptor.constraints:
            for col in constraint.columns:
                col = str(col)

                if adapter.__version__.startswith('0.6'):
                    foreign_constraints.append(col)
                else:
                    # older SQLAlchemy qualifies the column with the table
                    # name; strip the 'tablename.' prefix
                    if col.startswith(self.descriptor.tablename + '.'):
                        foreign_constraints.append(col[len(self.descriptor.tablename) + 1:])

        if self.descriptor.polymorphic:
            # the polymorphic discriminator column is framework internal
            self.exclude_attrs.update([self.descriptor.polymorphic])

        self.exclude_attrs.update(foreign_constraints)

    def _compile_base_class(self, klass):
        # stop alias compilation at the Elixir framework base classes
        if klass is elixir.EntityBase or klass is elixir.Entity:
            return

        pyamf.ClassAlias._compile_base_class(self, klass)


pyamf.register_alias_type(ElixirAdapter, elixir.entity.is_entity)
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Adapter module for U{google.appengine.ext.blobstore<http://
code.google.com/appengine/docs/python/blobstore/>}.
@since: 0.6
"""
from google.appengine.ext import blobstore
import pyamf
bi = blobstore.BlobInfo
class BlobInfoStub(object):
    """
    Since C{blobstore.BlobInfo} requires __init__ args, we stub the object until
    C{applyAttributes} is called which then magically converts it to the correct
    type.
    """
    # intentionally empty: decoded attributes are attached dynamically
class BlobInfoClassAlias(pyamf.ClassAlias):
    """
    Fine grain control over C{blobstore.BlobInfo} instances. Required to encode
    the C{key} attribute correctly.
    """
    def createInstance(self, *args, **kwargs):
        # decode onto a stub first: BlobInfo cannot be built without a key
        return BlobInfoStub()

    def getEncodableAttributes(self, obj, codec=None):
        """
        Returns a dict of key/value pairs for PyAMF to encode.
        """
        attrs = {
            'content_type': obj.content_type,
            'filename': obj.filename,
            'size': obj.size,
            'creation': obj.creation,
            'key': str(obj.key())
        }

        return attrs

    def applyAttributes(self, obj, attrs, **kwargs):
        """
        Applies C{attrs} to C{obj}. Since C{blobstore.BlobInfo} objects are
        read-only entities, we only care about the C{key} attribute.

        @raise pyamf.DecodeError: Missing or invalid C{key} attribute.
        """
        assert type(obj) is BlobInfoStub

        key = attrs.pop('key', None)

        if not key:
            raise pyamf.DecodeError("Unable to build blobstore.BlobInfo "
                "instance. Missing 'key' attribute.")

        try:
            key = blobstore.BlobKey(key)
        except Exception:
            # was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; only convert genuine errors
            raise pyamf.DecodeError("Unable to build a valid blobstore.BlobKey "
                "instance. Key supplied was %r" % (key,))

        # morph the stub into a real BlobInfo in place
        obj.__class__ = blobstore.BlobInfo
        obj.__init__(key)
# register the custom alias and the dotted class name
# 'google.appengine.ext.blobstore.BlobInfo'
pyamf.register_alias_type(BlobInfoClassAlias, bi)
pyamf.register_class(bi, '.'.join([blobstore.__name__, bi.__name__]))

# bi was only a short-lived local alias for blobstore.BlobInfo
del bi
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{collections<http://docs.python.org/library/collections.html>} adapter module.
@since: 0.5
"""
import collections
import pyamf
from pyamf.adapters import util
# encode deque/defaultdict as their plain list/dict equivalents; the
# hasattr guards keep this working on ancient Python versions
if hasattr(collections, 'deque'):
    pyamf.add_type(collections.deque, util.to_list)

if hasattr(collections, 'defaultdict'):
    pyamf.add_type(collections.defaultdict, util.to_dict)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Adapter for the U{decimal<http://docs.python.org/library/decimal.html>} module.
@since: 0.4
"""
import decimal
import pyamf
def convert_Decimal(x, encoder):
    """
    Called when an instance of U{decimal.Decimal<http://
    docs.python.org/library/decimal.html#decimal-objects>} is about to be
    encoded to an AMF stream.

    @return: C{x} converted to a float when the encoder is NOT in 'strict'
        mode. In strict mode an L{pyamf.EncodeError} with a friendly
        message is raised instead, since exact conversion cannot be
        guaranteed.
    """
    if encoder.strict is not False:
        raise pyamf.EncodeError('Unable to encode decimal.Decimal instances as '
            'there is no way to guarantee exact conversion. Use strict=False to '
            'convert to a float.')

    return float(x)
# Decimal may be missing on very old interpreters; register only if present
if hasattr(decimal, 'Decimal'):
    pyamf.add_type(decimal.Decimal, convert_Decimal)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.db.models} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4.1
"""
from django.db.models.base import Model
from django.db.models import fields
from django.db.models.fields import related, files
import datetime
import pyamf
class DjangoReferenceCollection(dict):
    """
    This helper class holds a dict of klass to pk/objects loaded from the
    underlying db.

    @since: 0.5
    """
    def _getClass(self, klass):
        """
        Return (creating on demand) the pk -> instance map for C{klass}.
        """
        # setdefault avoids the full key scan the previous
        # `klass not in self.keys()` check performed and matches the GAE
        # adapter's GAEReferenceCollection implementation
        return self.setdefault(klass, {})

    def getClassKey(self, klass, key):
        """
        Return an instance based on klass/key.

        If an instance cannot be found then C{KeyError} is raised.

        @param klass: The class of the instance.
        @param key: The primary_key of the instance.
        @return: The instance linked to the C{klass}/C{key}.
        @rtype: Instance of C{klass}.
        """
        d = self._getClass(klass)

        return d[key]

    def addClassKey(self, klass, key, obj):
        """
        Adds an object to the collection, based on klass and key.

        @param klass: The class of the object.
        @param key: The datastore key of the object.
        @param obj: The loaded instance from the datastore.
        """
        d = self._getClass(klass)

        d[key] = obj
class DjangoClassAlias(pyamf.ClassAlias):
    """
    Class alias for Django model instances: categorises the model's fields
    and relations at compile time and converts field values between the
    Django and AMF type systems on encode/decode.
    """

    def getCustomProperties(self):
        # split the model's fields into plain fields vs relations
        self.fields = {}
        self.relations = {}
        self.columns = []

        self.meta = self.klass._meta

        for name in self.meta.get_all_field_names():
            x = self.meta.get_field_by_name(name)[0]

            if isinstance(x, files.FileField):
                # only the file name is encoded, never the contents, so
                # the attribute must not be written back on decode
                self.readonly_attrs.update([name])

            if isinstance(x, related.RelatedObject):
                # reverse side of a relation - handled by the other model
                continue

            if isinstance(x, related.ManyToManyField):
                self.relations[name] = x
            elif not isinstance(x, related.ForeignKey):
                self.fields[name] = x
            else:
                self.relations[name] = x

        parent_fields = []

        # multi-table inheritance: parent links are an implementation detail
        for field in self.meta.parents.values():
            parent_fields.append(field.attname)
            del self.relations[field.name]

        self.exclude_attrs.update(parent_fields)

        props = self.fields.keys()

        self.encodable_properties.update(props)
        self.decodable_properties.update(props)

        # Django's internal instance state must never hit the wire
        self.exclude_attrs.update(['_state'])

    def _compile_base_class(self, klass):
        # stop alias compilation at Django's Model base class
        if klass is Model:
            return

        pyamf.ClassAlias._compile_base_class(self, klass)

    def _encodeValue(self, field, value):
        """
        Convert a single Django field value to its AMF representation.
        """
        if value is fields.NOT_PROVIDED:
            return pyamf.Undefined

        if value is None:
            return value

        # deal with dates ..
        if isinstance(field, fields.DateTimeField):
            return value
        elif isinstance(field, fields.DateField):
            # AMF only has a datetime type; promote the date to midnight
            return datetime.datetime(value.year, value.month, value.day, 0, 0, 0)
        elif isinstance(field, fields.TimeField):
            # encode a bare time-of-day on the epoch date
            return datetime.datetime(1970, 1, 1,
                value.hour, value.minute, value.second, value.microsecond)
        elif isinstance(value, files.FieldFile):
            # send only the file's name, never its contents
            return value.name

        return value

    def _decodeValue(self, field, value):
        """
        Convert an AMF-decoded value back into the type the Django field
        expects.
        """
        if value is pyamf.Undefined:
            return fields.NOT_PROVIDED

        if isinstance(field, fields.AutoField) and value == 0:
            # Flash sends 0 to mean 'no primary key yet'
            return None
        elif isinstance(field, fields.DateTimeField):
            # deal with dates
            return value
        elif isinstance(field, fields.DateField):
            if not value:
                return None

            return datetime.date(value.year, value.month, value.day)
        elif isinstance(field, fields.TimeField):
            if not value:
                return None

            return datetime.time(value.hour, value.minute, value.second, value.microsecond)

        return value

    def getEncodableAttributes(self, obj, **kwargs):
        attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs)

        if not attrs:
            attrs = {}

        for name, prop in self.fields.iteritems():
            if name not in attrs.keys():
                continue

            attrs[name] = self._encodeValue(prop, getattr(obj, name))

        keys = attrs.keys()

        # private attributes are an implementation detail
        for key in keys:
            if key.startswith('_'):
                del attrs[key]

        for name, relation in self.relations.iteritems():
            if '_%s_cache' % name in obj.__dict__:
                # related object already loaded by Django - safe to encode
                attrs[name] = getattr(obj, name)

            if isinstance(relation, related.ManyToManyField):
                attrs[name] = [x for x in getattr(obj, name).all()]
            else:
                # do not leak the raw foreign key column alongside the
                # resolved relation attribute
                del attrs[relation.attname]

        return attrs

    def getDecodableAttributes(self, obj, attrs, **kwargs):
        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs)

        for n in self.decodable_properties:
            if n in self.relations:
                continue

            try:
                f = self.fields[n]
            except KeyError:
                continue

            attrs[f.attname] = self._decodeValue(f, attrs[n])

        # primary key of django object must always be set first for
        # relationships with other model objects to work properly
        # and dict.iteritems() does not guarantee order
        #
        # django also forces the use only one attribute as primary key, so
        # our obj._meta.pk.attname check is sufficient)
        pk_attr = obj._meta.pk.attname
        pk = attrs.pop(pk_attr, None)

        if pk:
            if pk is fields.NOT_PROVIDED:
                attrs[pk_attr] = pk
            else:
                # load the object from the database
                try:
                    loaded_instance = self.klass.objects.filter(pk=pk)[0]
                    obj.__dict__ = loaded_instance.__dict__
                except IndexError:
                    pass

        if not getattr(obj, pk_attr):
            # unsaved instance: drop empty many-to-many lists so Django
            # does not choke assigning relations to an object without a pk
            for name, relation in self.relations.iteritems():
                if isinstance(relation, related.ManyToManyField):
                    try:
                        if len(attrs[name]) == 0:
                            del attrs[name]
                    except KeyError:
                        pass

        return attrs
def getDjangoObjects(context):
    """
    Returns a reference to the C{django_objects} on the context. If it doesn't
    exist then it is created.

    @rtype: Instance of L{DjangoReferenceCollection}
    @since: 0.5
    """
    # setdefault replaces the previous try/except KeyError dance and keeps
    # this consistent with the GAE adapter's getGAEObjects implementation
    return context.extra.setdefault('django_objects', DjangoReferenceCollection())
def writeDjangoObject(obj, encoder=None):
    """
    The Django ORM creates new instances of objects for each db request.
    This is a problem for PyAMF as it uses the C{id(obj)} of the object to do
    reference checking.

    We could just ignore the problem, but the objects are conceptually the
    same so the effort should be made to attempt to resolve references for a
    given object graph.

    We create a new map on the encoder context object which contains a dict of
    C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
    use the primary key to do the reference checking.

    @since: 0.5
    """
    s = obj.pk

    if s is None:
        # unsaved instance: no primary key to key the reference cache on
        encoder.writeObject(obj)

        return

    django_objects = getDjangoObjects(encoder.context)
    kls = obj.__class__

    try:
        referenced_object = django_objects.getClassKey(kls, s)
    except KeyError:
        # first time this class/pk pair is seen - remember it for later
        referenced_object = obj
        django_objects.addClassKey(kls, s, obj)

    encoder.writeObject(referenced_object)
# initialise the module here: hook into pyamf
# DjangoClassAlias handles attribute conversion for every Model subclass;
# writeDjangoObject adds pk-based reference resolution on encode
pyamf.register_alias_type(DjangoClassAlias, Model)
pyamf.add_type(Model, writeDjangoObject)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Google App Engine adapter module.
Sets up basic type mapping and class mappings for using the Datastore API
in Google App Engine.
@see: U{Datastore API on Google App Engine<http://
code.google.com/appengine/docs/python/datastore>}
@since: 0.3.1
"""
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import datetime
import pyamf
from pyamf.adapters import util
class ModelStub(object):
    """
    Stand-in used while a typed C{db.Model}/C{db.Expando} instance is read
    from an AMF stream. Attributes are collected on this stub and the
    instance is later transmuted into the real type.

    @ivar klass: The referenced class, either C{db.Model} or C{db.Expando};
        some model method calls are proxied to it during decoding.
    @see: L{DataStoreClassAlias.applyAttributes}
    """

    def __init__(self, klass):
        self.klass = klass

    def properties(self):
        # proxy straight through to the real model class
        return self.klass.properties()

    def dynamic_properties(self):
        # a bare stub never carries expando attributes
        return []
class GAEReferenceCollection(dict):
    """
    This helper class holds a dict of klass to key/objects loaded from the
    Datastore.

    @since: 0.4.1
    """
    def _getClass(self, klass):
        """
        Return (creating on demand) the key -> instance map for C{klass}.

        @raise TypeError: C{klass} is not a datastore model class.
        """
        if not issubclass(klass, (db.Model, db.Expando)):
            raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))

        return self.setdefault(klass, {})

    def getClassKey(self, klass, key):
        """
        Return an instance based on klass/key.

        If an instance cannot be found then C{KeyError} is raised.

        @param klass: The class of the instance.
        @param key: The key of the instance.
        @return: The instance linked to the C{klass}/C{key}.
        @rtype: Instance of L{klass}.
        """
        d = self._getClass(klass)

        return d[key]

    def addClassKey(self, klass, key, obj):
        """
        Adds an object to the collection, based on klass and key.

        @param klass: The class of the object.
        @param key: The datastore key of the object.
        @param obj: The loaded instance from the datastore.
        """
        d = self._getClass(klass)

        d[key] = obj
class DataStoreClassAlias(pyamf.ClassAlias):
    """
    This class contains all the business logic to interact with Google's
    Datastore API's. Any C{db.Model} or C{db.Expando} classes will use this
    class alias for encoding/decoding.

    We also add a number of indexes to the encoder context to aggressively
    decrease the number of Datastore API's that we need to complete.
    """

    # The name of the attribute used to represent the key
    KEY_ATTR = '_key'

    def _compile_base_class(self, klass):
        # stop alias compilation at the datastore framework base classes
        if klass in (db.Model, polymodel.PolyModel):
            return

        pyamf.ClassAlias._compile_base_class(self, klass)

    def getCustomProperties(self):
        # categorise the model's properties once, at alias compile time
        props = [self.KEY_ATTR]
        self.reference_properties = {}
        self.properties = {}
        reverse_props = []

        for name, prop in self.klass.properties().iteritems():
            self.properties[name] = prop
            props.append(name)

            if isinstance(prop, db.ReferenceProperty):
                self.reference_properties[name] = prop

        if issubclass(self.klass, polymodel.PolyModel):
            # _class is PolyModel bookkeeping, not user data
            del self.properties['_class']
            props.remove('_class')

        # check if the property is a defined as a collection_name. These types
        # of properties are read-only and the datastore freaks out if you
        # attempt to meddle with it. We delete the attribute entirely ..
        for name, value in self.klass.__dict__.iteritems():
            if isinstance(value, db._ReverseReferenceProperty):
                reverse_props.append(name)

        self.encodable_properties.update(self.properties.keys())
        self.decodable_properties.update(self.properties.keys())
        self.readonly_attrs.update(reverse_props)

        if not self.reference_properties:
            self.reference_properties = None

        if not self.properties:
            self.properties = None

        # honour an explicit exclusion of the synthetic key attribute
        self.no_key_attr = self.KEY_ATTR in self.exclude_attrs

    def getEncodableAttributes(self, obj, codec=None):
        attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, codec=codec)

        gae_objects = getGAEObjects(codec.context) if codec else None

        if self.reference_properties and gae_objects:
            # resolve references through the per-codec cache so the
            # datastore is hit at most once per referenced entity
            for name, prop in self.reference_properties.iteritems():
                klass = prop.reference_class
                key = prop.get_value_for_datastore(obj)

                if not key:
                    continue

                try:
                    attrs[name] = gae_objects.getClassKey(klass, key)
                except KeyError:
                    ref_obj = getattr(obj, name)
                    gae_objects.addClassKey(klass, key, ref_obj)
                    attrs[name] = ref_obj

        # private attributes are an implementation detail
        for k in attrs.keys()[:]:
            if k.startswith('_'):
                del attrs[k]

        # expando instances can carry dynamic attributes as well
        for attr in obj.dynamic_properties():
            attrs[attr] = getattr(obj, attr)

        if not self.no_key_attr:
            # unsaved entities have no key yet; encode None instead
            attrs[self.KEY_ATTR] = str(obj.key()) if obj.is_saved() else None

        return attrs

    def createInstance(self, codec=None):
        # decode onto a stub; the real class is applied later in
        # getDecodableAttributes
        return ModelStub(self.klass)

    def getDecodableAttributes(self, obj, attrs, codec=None):
        key = attrs.setdefault(self.KEY_ATTR, None)
        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, codec=codec)

        del attrs[self.KEY_ATTR]
        new_obj = None

        # attempt to load the object from the datastore if KEY_ATTR exists.
        if key and codec:
            new_obj = loadInstanceFromDatastore(self.klass, key, codec)

        # clean up the stub
        if isinstance(obj, ModelStub) and hasattr(obj, 'klass'):
            del obj.klass

        if new_obj:
            obj.__dict__ = new_obj.__dict__.copy()

        # transmute the stub into the real model type in place
        obj.__class__ = self.klass
        apply_init = True

        if self.properties:
            # coerce decoded values into the exact types the datastore
            # properties expect
            for k in [k for k in attrs.keys() if k in self.properties.keys()]:
                prop = self.properties[k]
                v = attrs[k]

                if isinstance(prop, db.FloatProperty) and isinstance(v, (int, long)):
                    attrs[k] = float(v)
                elif isinstance(prop, db.IntegerProperty) and isinstance(v, float):
                    x = long(v)

                    # only convert the type if there is no mantissa - otherwise
                    # let the chips fall where they may
                    if x == v:
                        attrs[k] = x
                elif isinstance(prop, db.ListProperty) and v is None:
                    attrs[k] = []
                elif isinstance(v, datetime.datetime):
                    # Date/Time Property fields expect specific types of data
                    # whereas PyAMF only decodes into datetime.datetime objects.
                    if isinstance(prop, db.DateProperty):
                        attrs[k] = v.date()
                    elif isinstance(prop, db.TimeProperty):
                        attrs[k] = v.time()

                if new_obj is None and isinstance(v, ModelStub) and prop.required and k in self.reference_properties:
                    # a required reference that decoded to a stub cannot
                    # satisfy the constructor; defer initialisation
                    apply_init = False
                    del attrs[k]

        # If the object does not exist in the datastore, we must fire the
        # class constructor. This sets internal attributes that pyamf has
        # no business messing with ..
        if new_obj is None and apply_init is True:
            obj.__init__(**attrs)

        return attrs
def getGAEObjects(context):
    """
    Returns a reference to the C{gae_objects} on the context. If it doesn't
    exist then it is created.

    @param context: The context to load the C{gae_objects} index from.
    @return: The C{gae_objects} index reference.
    @rtype: Instance of L{GAEReferenceCollection}
    @since: 0.4.1
    """
    # one collection per encode/decode run, shared via the codec context
    return context.extra.setdefault('gae_objects', GAEReferenceCollection())
def loadInstanceFromDatastore(klass, key, codec=None):
    """
    Attempt to load an instance from the datastore, based on C{klass}
    and C{key}. We create an index on the codec's context (if it exists)
    so we can check that first before accessing the datastore.

    @param klass: The class that will be loaded from the datastore.
    @type klass: Sub-class of C{db.Model} or C{db.Expando}
    @param key: The key which is used to uniquely identify the instance in the
        datastore.
    @type key: C{str}
    @param codec: The codec to reference the C{gae_objects} index. If
        supplied, the codec must have a context attribute.
    @return: The loaded instance from the datastore.
    @rtype: Instance of C{klass}.
    @since: 0.4.1
    """
    if not issubclass(klass, (db.Model, db.Expando)):
        raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))

    if not isinstance(key, basestring):
        # bug fix: the format string and the args tuple were previously
        # passed as two separate TypeError arguments, so the %s was never
        # interpolated; apply the % operator explicitly
        raise TypeError('string expected for key, got %r' % (key,))

    key = str(key)

    if codec is None:
        # no context available - go straight to the datastore
        return klass.get(key)

    gae_objects = getGAEObjects(codec.context)

    try:
        return gae_objects.getClassKey(klass, key)
    except KeyError:
        pass

    # cache miss: fetch once and remember for the rest of this run
    obj = klass.get(key)
    gae_objects.addClassKey(klass, key, obj)

    return obj
def writeGAEObject(obj, encoder=None):
    """
    The GAE Datastore creates new instances of objects for each get request.
    This is a problem for PyAMF as it uses the id(obj) of the object to do
    reference checking.

    We could just ignore the problem, but the objects are conceptually the
    same so the effort should be made to attempt to resolve references for a
    given object graph.

    We create a new map on the encoder context object which contains a dict of
    C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
    use the datastore key to do the reference checking.

    @since: 0.4.1
    """
    if not obj.is_saved():
        # unsaved entity: no datastore key to key the reference cache on
        encoder.writeObject(obj)

        return

    context = encoder.context
    kls = obj.__class__
    s = obj.key()

    gae_objects = getGAEObjects(context)

    try:
        referenced_object = gae_objects.getClassKey(kls, s)
    except KeyError:
        # first time this class/key pair is seen - remember it for later
        referenced_object = obj
        gae_objects.addClassKey(kls, s, obj)

    encoder.writeObject(referenced_object)
# initialise the module here: hook into pyamf
pyamf.register_alias_type(DataStoreClassAlias, db.Model)
# a db.Query is iterable; flatten it to a plain list when encoding
pyamf.add_type(db.Query, util.to_list)
pyamf.add_type(db.Model, writeGAEObject)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Adapter for the stdlib C{sets} module.
@since: 0.4
"""
import sets
import pyamf
from pyamf.adapters import util
# encode both set flavours from the deprecated ``sets`` module as tuples;
# the hasattr guards tolerate stripped-down implementations
if hasattr(sets, 'ImmutableSet'):
    pyamf.add_type(sets.ImmutableSet, util.to_tuple)

if hasattr(sets, 'Set'):
    pyamf.add_type(sets.Set, util.to_tuple)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Useful helpers for adapters.
@since: 0.4
"""
import __builtin__

# Python 2.3 compatibility: fall back to the sets module when the builtin
# set type is not available
if not hasattr(__builtin__, 'set'):
    from sets import Set as set
def to_list(obj, encoder):
    """
    Converts an arbitrary iterable C{obj} to a C{list}.

    @param encoder: The encoder requesting the conversion (unused).
    """
    return [element for element in obj]
def to_dict(obj, encoder):
    """
    Converts an arbitrary object C{obj} to a C{dict}.

    @param encoder: The encoder requesting the conversion (unused).
    """
    converted = dict(obj)

    return converted
def to_set(obj, encoder):
    """
    Converts an arbitrary iterable C{obj} to a C{set}.

    @param encoder: The encoder requesting the conversion (unused).
    """
    return set(iter(obj))
def to_tuple(x, encoder):
    """
    Converts an arbitrary iterable C{x} to a C{tuple}.

    @param encoder: The encoder requesting the conversion (unused).
    """
    return tuple(item for item in x)
def to_string(x, encoder):
    """
    Converts an arbitrary object C{x} to a string via C{str}.

    @param encoder: The encoder requesting the conversion (unused).
    @since: 0.5
    """
    converted = str(x)

    return converted
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.utils.translation} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4.2
"""
from django.utils.translation import ugettext_lazy
import pyamf
def convert_lazy(l, encoder=None):
    """
    Convert a Django lazy translation proxy C{l} to a concrete string so
    it can be encoded; the proxy class records which string type it
    delegates to.

    @raise ValueError: The proxy delegates to neither unicode nor str.
    """
    if l.__class__._delegate_unicode:
        return unicode(l)

    if l.__class__._delegate_str:
        return str(l)

    raise ValueError('Don\'t know how to convert lazy value %s' % (repr(l),))


# register against the actual proxy class that ugettext_lazy generates
pyamf.add_type(type(ugettext_lazy('foo')), convert_lazy)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{array<http://docs.python.org/library/array.html>} adapter module.
Will convert all array.array instances to a python list before encoding. All
type information is lost (but degrades nicely).
@since: 0.5
"""
import array
import pyamf
from pyamf.adapters import util
# Encode array.array instances as plain lists (type info degrades nicely).
if hasattr(array, 'ArrayType'):
    pyamf.add_type(array.ArrayType, util.to_list)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Django query adapter module.
Sets up basic type mapping and class mappings for a
Django models.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.1b
"""
from django.db.models import query
import pyamf
from pyamf.adapters import util
pyamf.add_type(query.QuerySet, util.to_list)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
The adapter package provides additional functionality for other Python
packages. This includes registering classes, setting up type maps etc.
@since: 0.1.0
"""
import os.path
import glob
from pyamf.util import imports
adapters_registered = False
class PackageImporter(object):
    """
    Callable hook used for lazy adapter loading.

    An instance remembers an adapter module name and, when invoked, imports
    the matching C{pyamf.adapters} submodule.
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, mod):
        full_name = '%s.%s' % ('pyamf.adapters', self.name)

        __import__(full_name)
def register_adapters():
    """
    Discovers the adapter modules shipped with pyamf and registers each one
    to be imported lazily when its target package is first imported.

    Subsequent calls are no-ops.
    """
    global adapters_registered

    if adapters_registered is True:
        return

    try:
        import pkg_resources

        packageDir = pkg_resources.resource_filename('pyamf', 'adapters')
    except Exception:
        # bug fix: previously a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt. Best effort - fall back to the
        # directory this module lives in.
        packageDir = os.path.dirname(__file__)

    for f in glob.glob(os.path.join(packageDir, '*.py')):
        mod = os.path.basename(f).split(os.path.extsep, 1)[0]

        # adapter modules are named '_dotted_target_path'; skip everything
        # else, including this __init__ module
        if mod == '__init__' or not mod.startswith('_'):
            continue

        try:
            # '_django_db' -> 'django.db'
            register_adapter(mod[1:].replace('_', '.'), PackageImporter(mod))
        except ImportError:
            # the target package is not importable - nothing to adapt
            pass

    adapters_registered = True
def register_adapter(mod, func):
    """
    Schedules C{func} to run when the module named by C{mod} is imported.

    If the module has already been imported, the callable fires immediately.
    The same module may be registered any number of times; callables run in
    registration order. The root module must exist (i.e. be importable)
    otherwise an `ImportError` will be thrown.

    @param mod: Fully qualified module string, as used in an import
        statement, e.g. 'foo.bar.baz'. Must map to a real module for the
        callable to ever fire.
    @param func: Called with one argument - the newly imported C{module}
        object.
    @type func: callable
    @raise TypeError: C{func} must be callable
    """
    if not hasattr(func, '__call__'):
        raise TypeError('func must be callable')

    imports.when_imported(mod, func)
def get_adapter(mod):
    """
    Imports (if necessary) and returns the adapter module for C{mod}.

    E.g. C{get_adapter('django.db')} returns the
    C{pyamf.adapters._django_db} module.
    """
    base_name = '_' + mod.replace('.', '_')
    full_import = '%s.%s' % (__name__, base_name)
    ret = __import__(full_import)
    # __import__ returns the top-level package; walk down to the leaf module
    for attr in full_import.split('.')[1:]:
        ret = getattr(ret, attr)
    return ret
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
SQLAlchemy adapter module.
@see: U{SQLAlchemy homepage<http://www.sqlalchemy.org>}
@since: 0.4
"""
from sqlalchemy import orm, __version__
try:
    from sqlalchemy.orm import class_mapper
except ImportError:
    # older SQLAlchemy exposes class_mapper from orm.util
    from sqlalchemy.orm.util import class_mapper
import pyamf
# Exception class raised for unmapped classes; learned below by probing.
UnmappedInstanceError = None
try:
    # probe with a class that is guaranteed not to be mapped in order to
    # capture the exception type this SQLAlchemy version raises
    class_mapper(dict)
except Exception, e:
    UnmappedInstanceError = e.__class__
# Callables that may veto is_class_sa_mapped for specific classes.
class_checkers = []
class SaMappedClassAlias(pyamf.ClassAlias):
    """
    L{pyamf.ClassAlias} for classes mapped by a SQLAlchemy mapper.

    Adds two synthetic attributes to the encoded payload: the instance's
    primary key (L{KEY_ATTR}) and the names of mapped attributes that have
    not been loaded from the database yet (L{LAZY_ATTR}).
    """

    # synthetic attribute carrying the primary key values of the instance
    KEY_ATTR = 'sa_key'
    # synthetic attribute listing properties not yet loaded from the DB
    LAZY_ATTR = 'sa_lazy'
    # SQLAlchemy bookkeeping attributes that must never be encoded/decoded
    EXCLUDED_ATTRS = [
        '_entity_name', '_instance_key', '_sa_adapter', '_sa_appender',
        '_sa_class_manager', '_sa_initiator', '_sa_instance_state',
        '_sa_instrumented', '_sa_iterator', '_sa_remover', '_sa_session_id',
        '_state'
    ]

    STATE_ATTR = '_sa_instance_state'

    # SQLAlchemy 0.4.x kept instance state under a different attribute name
    if __version__.startswith('0.4'):
        STATE_ATTR = '_state'

    def getCustomProperties(self):
        """
        Discovers the mapped properties of the aliased class and excludes
        the SQLAlchemy internals from (de)encoding.
        """
        self.mapper = class_mapper(self.klass)
        self.exclude_attrs.update(self.EXCLUDED_ATTRS)
        self.properties = []
        for prop in self.mapper.iterate_properties:
            self.properties.append(prop.key)
        self.encodable_properties.update(self.properties)
        self.decodable_properties.update(self.properties)
        # allow users to opt out of the synthetic attributes by excluding them
        self.exclude_sa_key = self.KEY_ATTR in self.exclude_attrs
        self.exclude_sa_lazy = self.LAZY_ATTR in self.exclude_attrs

    def getEncodableAttributes(self, obj, **kwargs):
        """
        Returns a C{tuple} containing a dict of static and dynamic attributes
        for C{obj}, augmented with the L{KEY_ATTR}/L{LAZY_ATTR} entries
        unless they have been excluded.
        """
        attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs)
        if not self.exclude_sa_key:
            # primary_key_from_instance actually changes obj.__dict__ if
            # primary key properties do not already exist in obj.__dict__
            attrs[self.KEY_ATTR] = self.mapper.primary_key_from_instance(obj)
        if not self.exclude_sa_lazy:
            lazy_attrs = []
            # a mapped property missing from obj.__dict__ is still unloaded
            for attr in self.properties:
                if attr not in obj.__dict__:
                    lazy_attrs.append(attr)
            attrs[self.LAZY_ATTR] = lazy_attrs
        return attrs

    def getDecodableAttributes(self, obj, attrs, **kwargs):
        """
        Strips the synthetic L{KEY_ATTR}/L{LAZY_ATTR} entries and removes
        lazy-loaded values so that SQLAlchemy's instance state remains
        consistent after decoding.
        """
        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs)
        # Delete lazy-loaded attrs.
        #
        # Doing it this way ensures that lazy-loaded attributes are not
        # attached to the object, even if there is a default value specified
        # in the __init__ method.
        #
        # This is the correct behavior, because SQLAlchemy ignores __init__.
        # So, an object retreived from a DB with SQLAlchemy will not have a
        # lazy-loaded value, even if __init__ specifies a default value.
        if self.LAZY_ATTR in attrs:
            obj_state = None
            if hasattr(orm.attributes, 'instance_state'):
                obj_state = orm.attributes.instance_state(obj)
            for lazy_attr in attrs[self.LAZY_ATTR]:
                if lazy_attr in obj.__dict__:
                    # Delete directly from the dict, so
                    # SA callbacks are not triggered.
                    del obj.__dict__[lazy_attr]
                # Delete from committed_state so SA thinks this attribute was
                # never modified.
                #
                # If the attribute was set in the __init__ method,
                # SA will think it is modified and will try to update
                # it in the database.
                if obj_state is not None:
                    if lazy_attr in obj_state.committed_state:
                        del obj_state.committed_state[lazy_attr]
                    if lazy_attr in obj_state.dict:
                        del obj_state.dict[lazy_attr]
                if lazy_attr in attrs:
                    del attrs[lazy_attr]
            del attrs[self.LAZY_ATTR]
        if self.KEY_ATTR in attrs:
            del attrs[self.KEY_ATTR]
        return attrs

    def createInstance(self, *args, **kwargs):
        """
        Creates a bare instance via SQLAlchemy's class manager, bypassing
        C{__init__} - mirroring how SA itself re-creates persisted objects.
        """
        self.compile()
        return self.mapper.class_manager.new_instance()
def is_class_sa_mapped(klass):
    """
    Whether C{klass} (or the class of the supplied instance) is mapped by a
    SQLAlchemy mapper and not vetoed by any registered checker.

    @rtype: C{bool}
    """
    if not isinstance(klass, type):
        klass = type(klass)

    for checker in class_checkers:
        if checker(klass):
            return False

    try:
        class_mapper(klass)
    except UnmappedInstanceError:
        return False

    return True
pyamf.register_alias_type(SaMappedClassAlias, is_class_sa_mapped)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.db.models.fields} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4
"""
from django.db.models import fields
import pyamf
def convert_NOT_PROVIDED(x, encoder):
    """
    Maps Django's C{fields.NOT_PROVIDED} sentinel to the AMF C{undefined}
    value.

    @rtype: L{Undefined<pyamf.Undefined>}
    """
    return pyamf.Undefined
pyamf.add_type(lambda x: x is fields.NOT_PROVIDED, convert_NOT_PROVIDED)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Provides XML support.
@since: 0.6
"""
#: list of supported third party packages that support the C{etree}
#: interface. At least enough for our needs anyway.
ETREE_MODULES = [
'lxml.etree',
'xml.etree.cElementTree',
'cElementTree',
'xml.etree.ElementTree',
'elementtree.ElementTree'
]
#: A tuple of class/type objects that are used to represent XML objects.
types = None
#: A mapping of type -> module for all known xml types.
modules = {}
#: The module that will be used to create C{ElementTree} instances.
ET = None
__all__ = ['set_default_interface']
def set_default_interface(etree):
    """
    Sets the default interface that PyAMF will use to deal with XML entities
    (both objects and blobs).

    Returns the previously active interface (or C{None}).
    """
    global types, ET, modules

    element_type = _get_etree_type(etree)

    known = set(types or [])
    known.add(element_type)

    types = tuple(known)
    modules[element_type] = etree

    previous = ET
    ET = etree

    return previous
def find_libs():
    """
    Run through L{ETREE_MODULES} looking for importable C{ElementTree}
    implementations so that any known type can be encoded.

    C implementations are tried first, then the pure Python versions. The
    downside is that B{all} available libraries get imported while I{only}
    one is ever used - they are small and the flexibility seems to outweigh
    the cost.

    @return: a tuple of (element types, type -> module mapping)
    """
    from pyamf.util import get_module

    found_types = []
    mapping = {}

    for mod_name in ETREE_MODULES:
        try:
            etree = get_module(mod_name)
        except ImportError:
            continue

        element_type = _get_etree_type(etree)

        found_types.append(element_type)
        mapping[element_type] = etree

    return tuple(found_types), mapping
def is_xml(obj):
    """
    Determines whether C{obj} is a recognised XML element type.

    If L{types} is not populated then L{find_libs} will be called (via
    L{_bootstrap}); returns C{False} when no etree library is available.
    """
    global types

    try:
        _bootstrap()
    except ImportError:
        return False

    return isinstance(obj, types)
def _get_type(e):
"""
Returns the type associated with handling XML objects from this etree
interface.
"""
try:
return e.__class__
except AttributeError:
return type(e)
def _get_etree_type(etree):
    """
    Returns the element type that C{etree} uses for parsed XML, discovered
    by parsing a minimal probe document.
    """
    probe = etree.fromstring('<foo/>')

    return _get_type(probe)
def _no_et():
raise ImportError('Unable to find at least one compatible ElementTree '
'library, use pyamf.set_default_etree to enable XML support')
def _bootstrap():
    """
    Lazily initialises the module level L{types}, L{modules} and L{ET}
    globals on first use.

    @raise ImportError: no compatible ElementTree library could be found.
    """
    global types, modules, ET
    if types is None:
        types, modules = find_libs()
    if ET is None:
        try:
            # prefer the first (most capable) implementation found
            etree = modules[types[0]]
        except IndexError:
            # nothing found at all - _no_et always raises
            _no_et()
        set_default_interface(etree)
def tostring(element, *args, **kwargs):
    """
    Serialises C{element} using whichever etree implementation produced it.

    Helper func to provide easy access to the (possibly) moving target that
    is C{ET}.
    """
    global modules

    _bootstrap()

    element_type = _get_type(element)
    impl = modules.get(element_type, None)

    if not impl:
        raise RuntimeError('Unable to find the etree implementation related '
            'to %r (type %r)' % (element, element_type))

    return impl.tostring(element, *args, **kwargs)
def fromstring(*args, **kwargs):
    """
    Parses XML using the default etree implementation.

    Helper func to provide easy access to the (possibly) moving target that
    is C{ET}.
    """
    global ET

    _bootstrap()

    return ET.fromstring(*args, **kwargs)
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{PyAMF<http://pyamf.org>} provides Action Message Format (U{AMF
<http://en.wikipedia.org/wiki/Action_Message_Format>}) support for Python that
is compatible with the Adobe U{Flash Player
<http://en.wikipedia.org/wiki/Flash_Player>}.
@since: October 2007
@status: Production/Stable
"""
import types
import inspect
from pyamf import util, _version
from pyamf.adapters import register_adapters
from pyamf import python
from pyamf.alias import ClassAlias, UnknownClassAlias
__all__ = [
'register_class',
'register_class_loader',
'encode',
'decode',
'__version__',
'version'
]
#: PyAMF version number.
__version__ = version = _version.version
#: Class alias mapping support. Contains two types of keys: The string alias
#: related to the class and the class object itself. Both point to the linked
#: L{ClassAlias} object.
#: @see: L{register_class}, L{unregister_class}, and L{register_package}
CLASS_CACHE = {}
#: Class loaders. An iterable of callables that are handed a string alias and
#: return a class object or C{None} it not handled.
#: @see: L{register_class_loader} and L{unregister_class_loader}
CLASS_LOADERS = set()
#: Custom type map.
#: @see: L{get_type}, L{add_type}, and L{remove_type}
TYPE_MAP = {}
#: Maps error classes to string codes.
#: @see: L{add_error_class} and L{remove_error_class}
ERROR_CLASS_MAP = {
TypeError.__name__: TypeError,
KeyError.__name__: KeyError,
LookupError.__name__: LookupError,
IndexError.__name__: IndexError,
NameError.__name__: NameError,
ValueError.__name__: ValueError
}
#: Alias mapping support.
#: @see: L{get_class_alias}, L{register_alias_type}, and L{unregister_alias_type}
ALIAS_TYPES = {}
#: Specifies that objects are serialized using AMF for ActionScript 1.0
#: and 2.0 that were introduced in the Adobe Flash Player 6.
AMF0 = 0
#: Specifies that objects are serialized using AMF for ActionScript 3.0
#: that was introduced in the Adobe Flash Player 9.
AMF3 = 3
#: Supported AMF encoding types.
#: @see: L{AMF0}, L{AMF3}, and L{DEFAULT_ENCODING}
ENCODING_TYPES = (AMF0, AMF3)
#: Default encoding
DEFAULT_ENCODING = AMF3
class UndefinedType(object):
    """
    Sentinel type mirroring the C{undefined} value of the Adobe Flash
    Player client.
    """

    def __repr__(self):
        return 'pyamf.Undefined'
#: Represents the C{undefined} value in the Adobe Flash Player client.
Undefined = UndefinedType()
class BaseError(Exception):
    """
    Base AMF Error.

    All AMF related errors should be subclassed from this class, allowing
    callers to catch every PyAMF error with a single except clause.
    """
class DecodeError(BaseError):
    """
    Raised if there is an error in decoding an AMF data stream (malformed
    or unsupported input).
    """
class EOStream(BaseError):
    """
    Raised if the data stream has come to a natural end - not necessarily
    an error condition for the caller.
    """
# NOTE: intentionally shadows the builtin ReferenceError within this module.
class ReferenceError(BaseError):
    """
    Raised if an AMF data stream refers to a non-existent object or string
    reference (in the case of AMF3).
    """
class EncodeError(BaseError):
    """
    Raised if the element could not be encoded to AMF (no suitable
    representation exists for the object).
    """
class ASObject(dict):
    """
    Represents a Flash Actionscript Object (typed or untyped).

    Exposes the underlying C{dict} entries through attribute access, so
    C{getattr}/C{setattr} calls behave as they would on an AS object.
    """

    class __amf__:
        dynamic = True

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError('Unknown attribute \'%s\'' % (name,))

    def __setattr__(self, name, value):
        self[name] = value

    def __repr__(self):
        return dict.__repr__(self)

    def __hash__(self):
        return id(self)
class MixedArray(dict):
    """
    Used to be able to specify the C{mixedarray} type.

    Marker subclass of C{dict}; carries no behaviour of its own.
    """
class TypedObject(dict):
    """
    This class is used when a strongly typed object is decoded but there is no
    registered class to apply it to.

    This object can only be used for standard streams - i.e. not externalized
    data. If encountered, a L{DecodeError} will be raised.

    @ivar alias: The alias of the typed object.
    @type alias: C{string}
    @since: 0.4
    """
    def __init__(self, alias):
        dict.__init__(self)
        # remembered so encode/decode errors can report which alias failed
        self.alias = alias
    def __readamf__(self, o):
        # externalised data cannot be decoded without a registered class
        raise DecodeError('Unable to decode an externalised stream with '
            'class alias \'%s\'.\n\nA class alias was found and because '
            'strict mode is False an attempt was made to decode the object '
            'automatically. To decode this stream, a registered class with '
            'the alias and a corresponding __readamf__ method will be '
            'required.' % (self.alias,))
    def __writeamf__(self, o):
        # symmetric restriction applies when encoding
        raise EncodeError('Unable to encode an externalised stream with '
            'class alias \'%s\'.\n\nA class alias was found and because '
            'strict mode is False an attempt was made to encode the object '
            'automatically. To encode this stream, a registered class with '
            'the alias and a corresponding __writeamf__ method will be '
            'required.' % (self.alias,))
class TypedObjectClassAlias(ClassAlias):
    """
    The meta class for L{TypedObject} used to adapt PyAMF.

    @since: 0.4
    """
    klass = TypedObject
    def __init__(self, *args, **kwargs):
        # the alias can arrive positionally or as a keyword argument
        ClassAlias.__init__(self, self.klass, kwargs.pop('alias', args[0]))
    def createInstance(self, codec=None):
        return self.klass(self.alias)
    def checkClass(kls, klass):
        # deliberately accepts any class (no-op check)
        # NOTE(review): first parameter is named 'kls' rather than 'self'
        pass
class ErrorAlias(ClassAlias):
    """
    Adapts Python exception objects to Adobe Flash Player error objects.

    @since: 0.5
    """
    def getCustomProperties(self):
        # 'args' is Python exception plumbing, not part of the AS contract
        self.exclude_attrs.update(['args'])
    def getEncodableAttributes(self, obj, **kwargs):
        attrs = ClassAlias.getEncodableAttributes(self, obj, **kwargs)
        # mirror the ActionScript Error interface (message/name)
        attrs['message'] = str(obj)
        attrs['name'] = obj.__class__.__name__
        return attrs
def register_class(klass, alias=None):
    """
    Registers a class to be used in the data streaming. This is the
    equivalent to the C{[RemoteClass(alias="foobar")]} AS3 metatag.

    @return: The registered L{ClassAlias} instance.
    @see: L{unregister_class}
    """
    meta = util.get_class_meta(klass)

    if alias is not None:
        meta['alias'] = alias

    alias_type = util.get_class_alias(klass) or ClassAlias
    registered = alias_type(klass, defer=True, **meta)

    if not registered.anonymous:
        CLASS_CACHE[registered.alias] = registered

    CLASS_CACHE[klass] = registered

    return registered
def unregister_class(alias):
    """
    Opposite of L{register_class}: removes the alias (and its class) from
    the cache.

    @raise UnknownClassAlias: Unknown alias.
    @return: The L{ClassAlias} that was removed.
    """
    try:
        found = CLASS_CACHE[alias]
    except KeyError:
        raise UnknownClassAlias('Unknown alias %r' % (alias,))

    if not found.anonymous:
        del CLASS_CACHE[found.alias]

    del CLASS_CACHE[found.klass]

    return found
def get_class_alias(klass_or_alias):
    """
    Finds the L{ClassAlias} that is registered to C{klass_or_alias}.

    If a string is supplied and no related L{ClassAlias} is found, the alias
    is loaded via L{load_class}.

    @raise UnknownClassAlias: Unknown alias
    """
    try:
        return CLASS_CACHE[klass_or_alias]
    except KeyError:
        pass

    if isinstance(klass_or_alias, python.str_types):
        return load_class(klass_or_alias)

    raise UnknownClassAlias('Unknown alias for %r' % (klass_or_alias,))
def register_class_loader(loader):
    """
    Registers a loader that is called to provide the C{class} for a
    specific alias.

    The C{loader} is given one argument: the class alias (as a string). If
    it finds a suitable class it returns that class, otherwise it returns
    C{None}.

    An example::

        def lazy_load_from_my_module(alias):
            if not alias.startswith('foo.bar.'):
                return None

            from foo import bar

            if alias == 'foo.bar.Spam':
                return bar.Spam
            elif alias == 'foo.bar.Eggs':
                return bar.Eggs

        pyamf.register_class_loader(lazy_load_from_my_module)

    @raise TypeError: C{loader} must be callable
    @see: L{unregister_class_loader}
    """
    if not hasattr(loader, '__call__'):
        raise TypeError("loader must be callable")

    CLASS_LOADERS.add(loader)
def unregister_class_loader(loader):
    """
    Unregisters a class loader.

    @param loader: The class loader to be unregistered.
    @raise LookupError: The C{loader} was not registered.
    @see: L{register_class_loader}
    """
    if loader not in CLASS_LOADERS:
        raise LookupError("loader not found")

    CLASS_LOADERS.remove(loader)
def load_class(alias):
    """
    Finds the class registered to the alias.

    The search is done in order:
      1. Checks if the class name has been registered via L{register_class}
         or L{register_package}.
      2. Checks all functions registered via L{register_class_loader}.
      3. Attempts to load the class via standard module loading techniques.

    @param alias: The class name.
    @type alias: C{string}
    @raise UnknownClassAlias: The C{alias} was not found.
    @raise TypeError: Expecting class type or L{ClassAlias} from loader.
    @return: Class registered to the alias.
    @rtype: C{classobj}
    """
    # Try the CLASS_CACHE first
    try:
        return CLASS_CACHE[alias]
    except KeyError:
        pass
    for loader in CLASS_LOADERS:
        klass = loader(alias)
        if klass is None:
            continue
        if isinstance(klass, python.class_types):
            return register_class(klass, alias)
        elif isinstance(klass, ClassAlias):
            CLASS_CACHE[klass.alias] = klass
            CLASS_CACHE[klass.klass] = klass
            # NOTE(review): this branch returns the ClassAlias instance,
            # while the module-loading branch below returns klass.klass -
            # confirm which one callers actually expect.
            return klass
        raise TypeError("Expecting class object or ClassAlias from loader")
    # fall back to importing the module that should contain the class
    mod_class = alias.split('.')
    if mod_class:
        module = '.'.join(mod_class[:-1])
        klass = mod_class[-1]
        try:
            module = util.get_module(module)
        except (ImportError, AttributeError):
            # module not importable - fall through to the final raise
            pass
        else:
            klass = getattr(module, klass)
            if isinstance(klass, python.class_types):
                return register_class(klass, alias)
            elif isinstance(klass, ClassAlias):
                CLASS_CACHE[klass.alias] = klass
                CLASS_CACHE[klass.klass] = klass
                return klass.klass
            else:
                raise TypeError("Expecting class type or ClassAlias from loader")
    # All available methods for finding the class have been exhausted
    raise UnknownClassAlias("Unknown alias for %r" % (alias,))
def decode(stream, *args, **kwargs):
    """
    A generator function to decode a datastream.

    @param stream: AMF data to be decoded.
    @type stream: byte data.
    @kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
    @return: A generator that will decode each element in the stream.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)

    return get_decoder(encoding, stream, *args, **kwargs)
def encode(*args, **kwargs):
    """
    A helper function to encode an element.

    @param args: The python data to be encoded.
    @kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
    @return: A L{util.BufferedByteStream} object that contains the data,
        rewound to position 0.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)

    encoder = get_encoder(encoding, **kwargs)

    # plain loop - the previous list comprehension built a throwaway list
    # purely for its side effects
    for element in args:
        encoder.writeElement(element)

    stream = encoder.stream
    stream.seek(0)

    return stream
def get_decoder(encoding, *args, **kwargs):
    """
    Returns a L{codec.Decoder} capable of decoding AMF[C{encoding}] streams.

    The C extension (C{cpyamf}) is preferred, falling back to the pure
    Python implementation.

    @raise ValueError: Unknown C{encoding}.
    """
    if encoding == AMF0:
        try:
            from cpyamf import amf0
        except ImportError:
            from pyamf import amf0

        decoder_class = amf0.Decoder
    elif encoding == AMF3:
        try:
            from cpyamf import amf3
        except ImportError:
            from pyamf import amf3

        decoder_class = amf3.Decoder
    else:
        raise ValueError("Unknown encoding %r" % (encoding,))

    return decoder_class(*args, **kwargs)
def get_encoder(encoding, *args, **kwargs):
    """
    Returns a L{codec.Encoder} capable of encoding AMF[C{encoding}] streams.

    The C extension (C{cpyamf}) is preferred, falling back to the pure
    Python implementation.

    @raise ValueError: Unknown C{encoding}.
    """
    if encoding == AMF0:
        try:
            from cpyamf import amf0
        except ImportError:
            from pyamf import amf0

        encoder_class = amf0.Encoder
    elif encoding == AMF3:
        try:
            from cpyamf import amf3
        except ImportError:
            from pyamf import amf3

        encoder_class = amf3.Encoder
    else:
        raise ValueError("Unknown encoding %r" % (encoding,))

    return encoder_class(*args, **kwargs)
def blaze_loader(alias):
    """
    Loader for BlazeDS framework compatibility classes, specifically
    implementing C{ISmallMessage}.

    @see: U{BlazeDS<http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>}
    @since: 0.5
    """
    if alias not in ('DSC', 'DSK'):
        return None

    # importing the module registers the aliases as a side effect
    import pyamf.flex.messaging

    return CLASS_CACHE[alias]
def flex_loader(alias):
    """
    Loader for L{Flex<pyamf.flex>} framework compatibility classes.

    Importing the matching compatibility module registers the aliases as a
    side effect, after which the cache lookup succeeds.

    @raise UnknownClassAlias: Trying to load an unknown Flex compatibility
        class.
    """
    if not alias.startswith('flex.'):
        return None

    try:
        if alias.startswith('flex.messaging.messages'):
            import pyamf.flex.messaging
        elif alias.startswith('flex.messaging.io'):
            import pyamf.flex
        elif alias.startswith('flex.data.messages'):
            import pyamf.flex.data

        return CLASS_CACHE[alias]
    except KeyError:
        raise UnknownClassAlias(alias)
def add_type(type_, func=None):
    """
    Adds a custom type to L{TYPE_MAP}. A custom type allows fine grain
    control of what to encode to an AMF data stream.

    @param type_: A class/type, a callable predicate, or a list/tuple of
        them (lists are normalised to tuples so they can be dict keys).
    @param func: Optional callable used to convert matching objects.
    @raise TypeError: Unable to add as a custom type (expected a class or
        callable).
    @raise KeyError: Type already exists.
    @see: L{get_type} and L{remove_type}
    """
    def _check_type(type_):
        if not (isinstance(type_, python.class_types) or
                hasattr(type_, '__call__')):
            # bug fix: the original raise used broken quoting
            # (r'Unable to add '%r' ...') which made the format expression
            # itself raise "TypeError: not all arguments converted"
            raise TypeError('Unable to add %r as a custom type (expected '
                'a class or callable)' % (type_,))

    if isinstance(type_, list):
        type_ = tuple(type_)

    if type_ in TYPE_MAP:
        raise KeyError('Type %r already exists' % (type_,))

    # 'tuple' replaces the deprecated alias types.TupleType (identical object)
    if isinstance(type_, tuple):
        for x in type_:
            _check_type(x)
    else:
        _check_type(type_)

    TYPE_MAP[type_] = func
def get_type(type_):
    """
    Gets the declaration for the corresponding custom type.

    @raise KeyError: Unknown type.
    @see: L{add_type} and L{remove_type}
    """
    if isinstance(type_, list):
        type_ = tuple(type_)

    for key, declaration in TYPE_MAP.iteritems():
        if key == type_:
            return declaration

    raise KeyError("Unknown type %r" % (type_,))
def remove_type(type_):
    """
    Removes the custom type declaration.

    @return: Custom type declaration.
    @raise KeyError: Unknown type (propagated from L{get_type}).
    @see: L{add_type} and L{get_type}
    """
    existing = get_type(type_)

    del TYPE_MAP[type_]

    return existing
def add_error_class(klass, code):
    """
    Maps an exception class to a string code. Used to map remoting C{onStatus}
    objects to an exception class so that an exception can be built to
    represent that error.

    An example::

        >>> class AuthenticationError(Exception):
        ...     pass
        ...
        >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed')

    @param klass: Exception class
    @param code: Exception code
    @type code: C{str}
    @raise TypeError: C{klass} is not a class, or does not subclass
        C{Exception}.
    @raise ValueError: C{code} is already registered.
    @see: L{remove_error_class}
    """
    # normalise the code to a native string for consistent map keys
    # NOTE(review): assumes non-str codes are utf-8 byte strings - confirm
    if not isinstance(code, python.str_types):
        code = code.decode('utf-8')
    if not isinstance(klass, python.class_types):
        raise TypeError("klass must be a class type")
    # only true exception types may be registered
    mro = inspect.getmro(klass)
    if not Exception in mro:
        raise TypeError(
            'Error classes must subclass the __builtin__.Exception class')
    if code in ERROR_CLASS_MAP:
        raise ValueError('Code %s is already registered' % (code,))
    ERROR_CLASS_MAP[code] = klass
def remove_error_class(klass):
    """
    Removes a class from the L{ERROR_CLASS_MAP}.

    Accepts either the registered string code or the exception class
    itself.

    An example::

        >>> class AuthenticationError(Exception):
        ...     pass
        ...
        >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed')
        >>> pyamf.remove_error_class(AuthenticationError)

    @see: L{add_error_class}
    """
    if isinstance(klass, python.str_types):
        if klass not in ERROR_CLASS_MAP:
            raise ValueError('Code %s is not registered' % (klass,))
    elif isinstance(klass, python.class_types):
        # look the code up by value (first match wins)
        code = None

        for registered_code, registered_class in ERROR_CLASS_MAP.items():
            if registered_class == klass:
                code = registered_code

                break

        if code is None:
            raise ValueError('Class %s is not registered' % (klass,))

        klass = code
    else:
        raise TypeError("Invalid type, expected class or string")

    del ERROR_CLASS_MAP[klass]
def register_alias_type(klass, *args):
    """
    This function allows you to map subclasses of L{ClassAlias} to classes
    listed in C{args}.

    When an object is read/written from/to the AMF stream, a paired L{ClassAlias}
    instance is created (or reused), based on the Python class of that object.
    L{ClassAlias} provides important metadata for the class and can also control
    how the equivalent Python object is created, how the attributes are applied
    etc.

    Use this function if you need to do something non-standard.

    @since: 0.4
    @see:
        - L{pyamf.adapters._google_appengine_ext_db.DataStoreClassAlias} for a
          good example.
        - L{unregister_alias_type}
    @raise RuntimeError: alias is already registered
    @raise TypeError: Value supplied to C{klass} is not a class
    @raise ValueError:
        - New aliases must subclass L{pyamf.ClassAlias}
        - At least one type must be supplied
    """
    def check_type_registered(arg):
        # each type/predicate may only map to a single ClassAlias subclass
        for k, v in ALIAS_TYPES.iteritems():
            for kl in v:
                if arg is kl:
                    raise RuntimeError('%r is already registered under %r' % (
                        arg, k))
    if not isinstance(klass, python.class_types):
        raise TypeError('klass must be class')
    if not issubclass(klass, ClassAlias):
        raise ValueError('New aliases must subclass pyamf.ClassAlias')
    if len(args) == 0:
        raise ValueError('At least one type must be supplied')
    # a single callable argument acts as a predicate, not a concrete type
    if len(args) == 1 and hasattr(args[0], '__call__'):
        c = args[0]
        check_type_registered(c)
    else:
        for arg in args:
            if not isinstance(arg, python.class_types):
                raise TypeError('%r must be class' % (arg,))
            check_type_registered(arg)
    ALIAS_TYPES[klass] = args
    # rebuild any cached aliases whose class now maps to the new alias type
    for k, v in CLASS_CACHE.copy().iteritems():
        new_alias = util.get_class_alias(v.klass)
        if new_alias is klass:
            meta = util.get_class_meta(v.klass)
            meta['alias'] = v.alias
            alias_klass = klass(v.klass, **meta)
            CLASS_CACHE[k] = alias_klass
            CLASS_CACHE[v.klass] = alias_klass
def unregister_alias_type(klass):
    """
    Removes the klass from the L{ALIAS_TYPES} register.

    @return: The types previously registered for C{klass}, or C{None} if it
        was not registered.
    @see: L{register_alias_type}
    """
    return ALIAS_TYPES.pop(klass, None)
def register_package(module=None, package=None, separator='.', ignore=None,
                     strict=True):
    """
    This is a helper function that takes the concept of Actionscript packages
    and registers all the classes in the supplied Python module under that
    package. It auto-aliases all classes in C{module} based on the parent
    C{package}.

    @param module: The Python module that will contain all the classes to
        auto alias.
    @type module: C{module} or C{dict}
    @param package: The base package name. e.g. 'com.example.app'. If this
        is C{None} then the value is inferred from C{module.__name__}.
    @type package: C{string} or C{None}
    @param separator: The separator used to append to C{package} to form the
        complete alias.
    @param ignore: To give fine grain control over what gets aliased and what
        doesn't, supply a list of classes that you B{do not} want to be
        aliased. Defaults to no exclusions.
    @type ignore: C{iterable} or C{None}
    @param strict: Whether only classes that originate from C{module} will be
        registered.
    @return: A dict of all the classes that were registered and their
        respective L{ClassAlias} counterparts.
    @since: 0.5
    @raise TypeError: Cannot get a list of classes from C{module}
    """
    # bug fix: 'ignore' previously defaulted to a shared mutable list;
    # behaviour for callers passing their own iterable is unchanged
    if ignore is None:
        ignore = []

    if isinstance(module, python.str_types):
        if module == '':
            raise TypeError('Cannot get list of classes from %r' % (module,))

        # a lone string argument is the package name; the module defaults
        # to the caller's namespace (below)
        package = module
        module = None

    if module is None:
        # default to the caller's local namespace (inspect is imported at
        # module level; the previous shadowing local import was removed)
        prev_frame = inspect.stack()[1][0]
        module = prev_frame.f_locals

    # build uniform has/get accessors over dicts, lists and module objects
    if type(module) is dict:
        has = lambda x: x in module
        get = module.__getitem__
    elif type(module) is list:
        has = lambda x: x in module
        get = module.__getitem__
        # a bare list carries no origin information, so 'strict' is moot
        strict = False
    else:
        has = lambda x: hasattr(module, x)
        get = lambda x: getattr(module, x)

    if package is None:
        if has('__name__'):
            package = get('__name__')
        else:
            raise TypeError('Cannot get list of classes from %r' % (module,))

    if has('__all__'):
        keys = get('__all__')
    elif hasattr(module, '__dict__'):
        keys = module.__dict__.keys()
    elif hasattr(module, 'keys'):
        keys = module.keys()
    elif isinstance(module, list):
        keys = range(len(module))
    else:
        raise TypeError('Cannot get list of classes from %r' % (module,))

    def check_attr(attr):
        # accept only classes, not in the ignore list, and (if strict)
        # defined in this module rather than imported into it
        if not isinstance(attr, python.class_types):
            return False

        if attr.__name__ in ignore:
            return False

        try:
            if strict and attr.__module__ != get('__name__'):
                return False
        except AttributeError:
            return False

        return True

    classes = filter(check_attr, [get(x) for x in keys])

    registered = {}

    for klass in classes:
        alias = '%s%s%s' % (package, separator, klass.__name__)

        registered[klass] = register_class(klass, alias)

    return registered
def set_default_etree(etree):
    """
    Sets the default interface that will be called upon to both de/serialise
    XML entities. This means providing both C{tostring} and C{fromstring}
    functions.

    For testing purposes, will return the previous value for this (if any).
    """
    from pyamf import xml

    return xml.set_default_interface(etree)
#: Setup standard class registrations and class loaders at import time.
register_class(ASObject)
register_class_loader(flex_loader)
register_class_loader(blaze_loader)
register_alias_type(TypedObjectClassAlias, TypedObject)
register_alias_type(ErrorAlias, Exception)
register_adapters()
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Provides the pure Python versions of L{BufferedByteStream}.
Do not reference directly, use L{pyamf.util.BufferedByteStream} instead.
@since: 0.6
"""
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from pyamf import python
# worked out a little further down
SYSTEM_ENDIAN = None
class StringIOProxy(object):
    """
    I am a C{StringIO} type object containing byte data from the AMF stream.

    Tracks its own length lazily (see L{_get_len}/L{__len__}) since the
    underlying buffer may not expose one cheaply.

    @see: U{ByteArray on OSFlash
        <http://osflash.org/documentation/amf3#x0c_-_bytearray>}
    @see: U{Parsing ByteArrays on OSFlash
        <http://osflash.org/documentation/amf3/parsing_byte_arrays>}
    """
    def __init__(self, buf=None):
        """
        Accepts raw bytes, another C{StringIO}-like object, or any
        file-like object (read/seek/tell).

        @raise TypeError: Unable to coerce C{buf} to C{StringIO}.
        """
        self._buffer = StringIO()
        if isinstance(buf, python.str_types):
            self._buffer.write(buf)
        elif hasattr(buf, 'getvalue'):
            self._buffer.write(buf.getvalue())
        elif hasattr(buf, 'read') and hasattr(buf, 'seek') and hasattr(buf, 'tell'):
            # copy a file-like object without disturbing its position
            old_pos = buf.tell()
            buf.seek(0)
            self._buffer.write(buf.read())
            buf.seek(old_pos)
        elif buf is not None:
            raise TypeError("Unable to coerce buf->StringIO got %r" % (buf,))
        self._get_len()
        self._len_changed = False
        self._buffer.seek(0, 0)
    def getvalue(self):
        """
        Get raw data from buffer.
        """
        return self._buffer.getvalue()
    def read(self, n=-1):
        """
        Reads C{n} bytes from the stream (all remaining bytes if C{n} is -1).
        """
        if n < -1:
            raise IOError('Cannot read backwards')
        bytes = self._buffer.read(n)
        return bytes
    def seek(self, pos, mode=0):
        """
        Sets the file-pointer offset, measured from the beginning of this stream,
        at which the next write operation will occur.

        @param pos: byte offset
        @type pos: C{int}
        @param mode: seek mode, as for C{file.seek}
        @type mode: C{int}
        """
        return self._buffer.seek(pos, mode)
    def tell(self):
        """
        Returns the position of the stream pointer.
        """
        return self._buffer.tell()
    def truncate(self, size=0):
        """
        Truncates the stream to the specified length by rebuilding the
        underlying buffer; the stream position is preserved.

        @param size: The length of the stream, in bytes.
        @type size: C{int}
        """
        if size == 0:
            self._buffer = StringIO()
            self._len_changed = True
            return
        cur_pos = self.tell()
        self.seek(0)
        buf = self.read(size)
        # replace the underlying buffer with only the first 'size' bytes
        self._buffer = StringIO()
        self._buffer.write(buf)
        self.seek(cur_pos)
        self._len_changed = True
    def write(self, s, size=None):
        """
        Writes the content of the specified C{s} into this buffer.

        @param s: Raw bytes
        @param size: accepted for API compatibility; unused here
        """
        self._buffer.write(s)
        self._len_changed = True
    def _get_len(self):
        """
        Return total number of bytes in buffer.
        """
        if hasattr(self._buffer, 'len'):
            # cStringIO exposes its length directly
            self._len = self._buffer.len
            return
        # otherwise seek to the end to measure, then restore the position
        old_pos = self._buffer.tell()
        self._buffer.seek(0, 2)
        self._len = self._buffer.tell()
        self._buffer.seek(old_pos)
    def __len__(self):
        # recompute only when a write/truncate invalidated the cached length
        if not self._len_changed:
            return self._len
        self._get_len()
        self._len_changed = False
        return self._len
    def consume(self):
        """
        Chops the tail off the stream starting at 0 and ending at C{tell()}.
        The stream pointer is set to 0 at the end of this function.

        @since: 0.4
        """
        try:
            bytes = self.read()
        except IOError:
            bytes = ''
        self.truncate()
        if len(bytes) > 0:
            self.write(bytes)
            self.seek(0)
class DataTypeMixIn(object):
    """
    Provides methods for reading and writing basic data types for file-like
    objects. The host class must supply C{read}, C{write} and C{seek}.

    All methods use C{self.endian} as the C{struct} byte-order prefix.

    @ivar endian: Byte ordering used to represent the data. Default byte order
        is L{ENDIAN_NETWORK}.
    @type endian: C{str}
    """

    #: Network byte order
    ENDIAN_NETWORK = "!"

    #: Native byte order
    ENDIAN_NATIVE = "@"

    #: Little endian
    ENDIAN_LITTLE = "<"

    #: Big endian
    ENDIAN_BIG = ">"

    endian = ENDIAN_NETWORK

    def _read(self, length):
        """
        Reads C{length} bytes from the stream. If an attempt to read past the
        end of the buffer is made, L{IOError} is raised.
        """
        bytes = self.read(length)

        if len(bytes) != length:
            # rewind the partial read so the stream position is unchanged
            # when the error is raised
            self.seek(0 - len(bytes), 1)

            raise IOError("Tried to read %d byte(s) from the stream" % length)

        return bytes

    def _is_big_endian(self):
        """
        Whether the current endian is big endian.
        """
        if self.endian == DataTypeMixIn.ENDIAN_NATIVE:
            # defer to the platform byte order worked out at module load
            return SYSTEM_ENDIAN == DataTypeMixIn.ENDIAN_BIG

        # network byte order is big endian
        return self.endian in (DataTypeMixIn.ENDIAN_BIG, DataTypeMixIn.ENDIAN_NETWORK)

    def read_uchar(self):
        """
        Reads an C{unsigned char} (1 byte) from the stream.
        """
        return ord(self._read(1))

    def write_uchar(self, c):
        """
        Writes an C{unsigned char} to the stream.

        @param c: Unsigned char
        @type c: C{int}
        @raise TypeError: Unexpected type for int C{c}.
        @raise OverflowError: Not in range.
        """
        if type(c) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % type(c))

        if not 0 <= c <= 255:
            raise OverflowError("Not in range, %d" % c)

        self.write(struct.pack("B", c))

    def read_char(self):
        """
        Reads a signed C{char} (1 byte) from the stream.
        """
        return struct.unpack("b", self._read(1))[0]

    def write_char(self, c):
        """
        Write a C{char} to the stream.

        @param c: char
        @type c: C{int}
        @raise TypeError: Unexpected type for int C{c}.
        @raise OverflowError: Not in range.
        """
        if type(c) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % type(c))

        if not -128 <= c <= 127:
            raise OverflowError("Not in range, %d" % c)

        self.write(struct.pack("b", c))

    def read_ushort(self):
        """
        Reads a 2 byte unsigned integer from the stream.
        """
        return struct.unpack("%sH" % self.endian, self._read(2))[0]

    def write_ushort(self, s):
        """
        Writes a 2 byte unsigned integer to the stream.

        @param s: 2 byte unsigned integer
        @type s: C{int}
        @raise TypeError: Unexpected type for int C{s}.
        @raise OverflowError: Not in range.
        """
        if type(s) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(s),))

        if not 0 <= s <= 65535:
            raise OverflowError("Not in range, %d" % s)

        self.write(struct.pack("%sH" % self.endian, s))

    def read_short(self):
        """
        Reads a 2 byte signed integer from the stream.
        """
        return struct.unpack("%sh" % self.endian, self._read(2))[0]

    def write_short(self, s):
        """
        Writes a 2 byte integer to the stream.

        @param s: 2 byte integer
        @type s: C{int}
        @raise TypeError: Unexpected type for int C{s}.
        @raise OverflowError: Not in range.
        """
        if type(s) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(s),))

        if not -32768 <= s <= 32767:
            raise OverflowError("Not in range, %d" % s)

        self.write(struct.pack("%sh" % self.endian, s))

    def read_ulong(self):
        """
        Reads a 4 byte unsigned integer from the stream.
        """
        return struct.unpack("%sL" % self.endian, self._read(4))[0]

    def write_ulong(self, l):
        """
        Writes a 4 byte unsigned integer to the stream.

        @param l: 4 byte unsigned integer
        @type l: C{int}
        @raise TypeError: Unexpected type for int C{l}.
        @raise OverflowError: Not in range.
        """
        if type(l) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(l),))

        if not 0 <= l <= 4294967295:
            raise OverflowError("Not in range, %d" % l)

        self.write(struct.pack("%sL" % self.endian, l))

    def read_long(self):
        """
        Reads a 4 byte signed integer from the stream.
        """
        return struct.unpack("%sl" % self.endian, self._read(4))[0]

    def write_long(self, l):
        """
        Writes a 4 byte integer to the stream.

        @param l: 4 byte integer
        @type l: C{int}
        @raise TypeError: Unexpected type for int C{l}.
        @raise OverflowError: Not in range.
        """
        if type(l) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(l),))

        if not -2147483648 <= l <= 2147483647:
            raise OverflowError("Not in range, %d" % l)

        self.write(struct.pack("%sl" % self.endian, l))

    def read_24bit_uint(self):
        """
        Reads a 24 bit unsigned integer from the stream.

        struct has no 3-byte format code, so this is assembled byte by byte
        with shifts chosen according to the current endianness.

        @since: 0.4
        """
        order = None

        if not self._is_big_endian():
            # little endian: least significant byte first
            order = [0, 8, 16]
        else:
            # big endian: most significant byte first
            order = [16, 8, 0]

        n = 0

        for x in order:
            n += (self.read_uchar() << x)

        return n

    def write_24bit_uint(self, n):
        """
        Writes a 24 bit unsigned integer to the stream.

        @since: 0.4
        @param n: 24 bit unsigned integer
        @type n: C{int}
        @raise TypeError: Unexpected type for int C{n}.
        @raise OverflowError: Not in range.
        """
        if type(n) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(n),))

        if not 0 <= n <= 0xffffff:
            raise OverflowError("n is out of range")

        order = None

        if not self._is_big_endian():
            order = [0, 8, 16]
        else:
            order = [16, 8, 0]

        # emit one byte at a time, shift order picked by endianness above
        for x in order:
            self.write_uchar((n >> x) & 0xff)

    def read_24bit_int(self):
        """
        Reads a 24 bit signed integer from the stream.

        @since: 0.4
        """
        n = self.read_24bit_uint()

        if n & 0x800000 != 0:
            # the int is signed - apply two's complement for 24 bits
            n -= 0x1000000

        return n

    def write_24bit_int(self, n):
        """
        Writes a 24 bit integer to the stream.

        @since: 0.4
        @param n: 24 bit integer
        @type n: C{int}
        @raise TypeError: Unexpected type for int C{n}.
        @raise OverflowError: Not in range.
        """
        if type(n) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(n),))

        if not -8388608 <= n <= 8388607:
            raise OverflowError("n is out of range")

        order = None

        if not self._is_big_endian():
            order = [0, 8, 16]
        else:
            order = [16, 8, 0]

        if n < 0:
            # two's complement encoding for the 24 bit width
            n += 0x1000000

        for x in order:
            self.write_uchar((n >> x) & 0xff)

    def read_double(self):
        """
        Reads an 8 byte float from the stream.
        """
        return struct.unpack("%sd" % self.endian, self._read(8))[0]

    def write_double(self, d):
        """
        Writes an 8 byte float to the stream.

        @param d: 8 byte float
        @type d: C{float}
        @raise TypeError: Unexpected type for float C{d}.
        """
        if not type(d) is float:
            raise TypeError('expected a float (got:%r)' % (type(d),))

        self.write(struct.pack("%sd" % self.endian, d))

    def read_float(self):
        """
        Reads a 4 byte float from the stream.
        """
        return struct.unpack("%sf" % self.endian, self._read(4))[0]

    def write_float(self, f):
        """
        Writes a 4 byte float to the stream.

        @param f: 4 byte float
        @type f: C{float}
        @raise TypeError: Unexpected type for float C{f}.
        """
        if type(f) is not float:
            raise TypeError('expected a float (got:%r)' % (type(f),))

        self.write(struct.pack("%sf" % self.endian, f))

    def read_utf8_string(self, length):
        """
        Reads a UTF-8 string of C{length} bytes from the stream and decodes
        it.

        @rtype: C{unicode}
        """
        # NOTE(review): this uses self.read() rather than self._read(), so a
        # short read surfaces as a struct/decode error instead of IOError -
        # confirm this is intentional.
        s = struct.unpack("%s%ds" % (self.endian, length), self.read(length))[0]

        return s.decode('utf-8')

    def write_utf8_string(self, u):
        """
        Writes a unicode object to the stream in UTF-8.

        @param u: unicode object
        @raise TypeError: Unexpected type for str C{u}.
        """
        if not isinstance(u, python.str_types):
            raise TypeError('Expected %r, got %r' % (python.str_types, u))

        bytes = u

        # only encode if we were handed a unicode object; byte strings are
        # written through untouched (Python 2 semantics)
        if isinstance(bytes, unicode):
            bytes = u.encode("utf8")

        self.write(struct.pack("%s%ds" % (self.endian, len(bytes)), bytes))
class BufferedByteStream(StringIOProxy, DataTypeMixIn):
    """
    An extension of C{StringIO}.

    Features:
     - Raises L{IOError} if reading past end.
     - Allows you to C{peek()} into the stream.
    """

    def __init__(self, buf=None, min_buf_size=None):
        """
        @param buf: Initial byte stream.
        @type buf: C{str} or C{StringIO} instance
        @param min_buf_size: Ignored in the pure python version.
        """
        StringIOProxy.__init__(self, buf=buf)

    def read(self, length=-1):
        """
        Reads up to the specified number of bytes from the stream into
        the specified byte array of specified length.

        @raise IOError: Attempted to read past the end of the buffer.
        """
        if length == -1:
            if self.at_eof():
                raise IOError(
                    'Attempted to read from the buffer but already at the end')
        elif length > 0 and length > self.remaining():
            raise IOError('Attempted to read %d bytes from the buffer but '
                'only %d remain' % (length, self.remaining()))

        return StringIOProxy.read(self, length)

    def peek(self, size=1):
        """
        Looks C{size} bytes ahead in the stream, returning what it finds,
        returning the stream pointer to its initial position.

        @param size: Default is 1.
        @type size: C{int}
        @raise ValueError: Trying to peek backwards.
        @return: Bytes.
        """
        if size == -1:
            # peek everything that is left
            return self.peek(len(self) - self.tell())

        if size < -1:
            raise ValueError("Cannot peek backwards")

        start = self.tell()
        peeked = ''

        # stop early if the stream runs out before `size` bytes are seen
        while len(peeked) != size and not self.at_eof():
            peeked += self.read(1)

        self.seek(start)

        return peeked

    def remaining(self):
        """
        Returns number of remaining bytes.

        @rtype: C{number}
        @return: Number of remaining bytes.
        """
        return len(self) - self.tell()

    def at_eof(self):
        """
        Returns C{True} if the internal pointer is at the end of the stream.

        @rtype: C{bool}
        """
        return self.remaining() == 0

    def append(self, data):
        """
        Append data to the end of the stream. The pointer will not move if
        this operation is successful.

        @param data: The data to append to the stream.
        @type data: C{str} or C{unicode}
        @raise TypeError: data is not C{str} or C{unicode}
        """
        pointer = self.tell()

        # jump to the end of the stream before writing
        self.seek(0, 2)

        if hasattr(data, 'getvalue'):
            self.write_utf8_string(data.getvalue())
        else:
            self.write_utf8_string(data)

        self.seek(pointer)

    def __add__(self, other):
        # remember both pointers so neither operand is disturbed
        my_pos = self.tell()
        other_pos = other.tell()

        combined = BufferedByteStream(self)

        other.seek(0)
        combined.seek(0, 2)
        combined.write(other.read())

        self.seek(my_pos)
        other.seek(other_pos)

        combined.seek(0)

        return combined
def is_float_broken():
    """
    Older versions of Python (<=2.5) and the Windows platform are renowned for
    mixing up 'special' floats. This function determines whether this is the
    case.

    @since: 0.4
    @rtype: C{bool}
    """
    # unpack a big-endian NaN bit pattern and compare its string form with
    # the platform's canonical NaN representation
    unpacked = struct.unpack("!d", '\xff\xf8\x00\x00\x00\x00\x00\x00')[0]

    return str(unpacked) != str(python.NaN)
# init the module from here ..

if is_float_broken():
    # this platform mangles NaN/Inf round-trips, so patch DataTypeMixIn with
    # versions that recognise the IEEE-754 special bit patterns explicitly

    def read_double_workaround(self):
        """
        Override the L{DataTypeMixIn.read_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.
        """
        bytes = self.read(8)

        if self._is_big_endian():
            if bytes == '\xff\xf8\x00\x00\x00\x00\x00\x00':
                return python.NaN

            if bytes == '\xff\xf0\x00\x00\x00\x00\x00\x00':
                return python.NegInf

            if bytes == '\x7f\xf0\x00\x00\x00\x00\x00\x00':
                return python.PosInf
        else:
            # same bit patterns, byte-reversed for little endian streams
            if bytes == '\x00\x00\x00\x00\x00\x00\xf8\xff':
                return python.NaN

            if bytes == '\x00\x00\x00\x00\x00\x00\xf0\xff':
                return python.NegInf

            if bytes == '\x00\x00\x00\x00\x00\x00\xf0\x7f':
                return python.PosInf

        # an ordinary double - fall back to struct
        return struct.unpack("%sd" % self.endian, bytes)[0]

    DataTypeMixIn.read_double = read_double_workaround

    def write_double_workaround(self, d):
        """
        Override the L{DataTypeMixIn.write_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.
        """
        if type(d) is not float:
            raise TypeError('expected a float (got:%r)' % (type(d),))

        if python.isNaN(d):
            if self._is_big_endian():
                self.write('\xff\xf8\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf8\xff')
        elif python.isNegInf(d):
            if self._is_big_endian():
                self.write('\xff\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\xff')
        elif python.isPosInf(d):
            if self._is_big_endian():
                self.write('\x7f\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\x7f')
        else:
            # ordinary value - delegate to the original implementation,
            # stashed on the function object below
            write_double_workaround.old_func(self, d)

    # keep a reference to the original so the workaround can delegate to it
    x = DataTypeMixIn.write_double
    DataTypeMixIn.write_double = write_double_workaround
    write_double_workaround.old_func = x

# detect the platform byte order by packing a native-endian short and
# checking which byte comes first
if struct.pack('@H', 1)[0] == '\x01':
    SYSTEM_ENDIAN = DataTypeMixIn.ENDIAN_LITTLE
else:
    SYSTEM_ENDIAN = DataTypeMixIn.ENDIAN_BIG
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Utilities.
@since: 0.1.0
"""
import calendar
import datetime
import inspect
import pyamf
from pyamf import python
try:
from cpyamf.util import BufferedByteStream
except ImportError:
from pyamf.util.pure import BufferedByteStream
#: On some Python versions retrieving a negative timestamp, like
#: C{datetime.datetime.utcfromtimestamp(-31536000.0)} is broken.
negative_timestamp_broken = False
def get_timestamp(d):
    """
    Returns a UTC timestamp for a C{datetime.datetime} object.

    @type d: C{datetime.datetime} (a plain C{datetime.date} is promoted to
        midnight of that day)
    @return: UTC timestamp.
    @rtype: C{float}
    @see: Inspiration taken from the U{Intertwingly blog
        <http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
    """
    if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
        # plain dates are treated as midnight on that day
        d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))

    # zero-pad microseconds to 6 digits so e.g. 1500us reads '001500', not
    # '1500' (the original achieved this with rjust().replace())
    msec = '%06d' % d.microsecond

    return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec))
def get_datetime(secs):
    """
    Return a UTC date from a timestamp.

    @type secs: C{long}
    @param secs: Seconds since 1970.
    @return: UTC timestamp.
    @rtype: C{datetime.datetime}
    """
    if not negative_timestamp_broken or secs >= 0:
        return datetime.datetime.utcfromtimestamp(secs)

    # work around platforms where utcfromtimestamp() rejects pre-epoch
    # values: offset from the epoch manually
    return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=secs)
def get_properties(obj):
    """
    Returns a list of properties for L{obj}

    @since: 0.5
    """
    if hasattr(obj, 'keys'):
        # mapping-like object: its keys are the properties
        return obj.keys()

    if hasattr(obj, '__dict__'):
        # plain instance: use the attribute dictionary
        return obj.__dict__.keys()

    # nothing discoverable
    return []
def set_attrs(obj, attrs):
    """
    Applies a collection of attributes C{attrs} to object C{obj} in the most
    generic way possible.

    @param obj: An instance implementing C{__setattr__}, or C{__setitem__}
    @param attrs: A collection implementing the C{iteritems} (or C{items})
        function
    @type attrs: Usually a dict
    """
    # prefer item assignment when the target supports it (dict-like objects)
    assign = setattr

    if hasattr(obj, '__setitem__'):
        assign = type(obj).__setitem__

    # support both Python 2 (iteritems) and Python 3 (items) mappings
    try:
        items = attrs.iteritems()
    except AttributeError:
        items = attrs.items()

    # plain loop instead of the original list comprehension, which built a
    # throwaway list purely for its side effects
    for name, value in items:
        assign(obj, name, value)
def get_class_alias(klass):
    """
    Tries to find a suitable L{pyamf.ClassAlias} subclass for C{klass}.
    """
    for alias_klass, checks in pyamf.ALIAS_TYPES.iteritems():
        for check in checks:
            try:
                if issubclass(klass, check):
                    return alias_klass
            except TypeError:
                # ``check`` is not a class - treat a callable as a predicate
                if hasattr(check, '__call__') and check(klass) is True:
                    return alias_klass
def is_class_sealed(klass):
    """
    Whether or not the supplied class can accept dynamic properties.

    A class is sealed when every class in its MRO declares C{__slots__}
    (and, for new-style classes, none of them re-introduces C{__dict__}).

    @rtype: C{bool}
    @since: 0.5
    """
    mro = list(inspect.getmro(klass))
    new_style = mro[-1] is object

    if new_style:
        # ``object`` itself is exempt from the checks below
        mro.pop()

    for base in mro:
        if new_style and '__dict__' in base.__dict__:
            # a slotless subclass re-introduced the instance dict
            return False

        if not hasattr(base, '__slots__'):
            return False

    return True
def get_class_meta(klass):
    """
    Returns a C{dict} containing meta data based on the supplied class, useful
    for class aliasing.

    The meta data is read from the class' C{__amf__} attribute, which may be
    either a dict or any object exposing the relevant attributes.

    @rtype: C{dict}
    @since: 0.5
    """
    if not isinstance(klass, python.class_types) or klass is object:
        raise TypeError('klass must be a class object, got %r' % type(klass))

    # every key defaults to None
    meta = dict.fromkeys([
        'static_attrs', 'exclude_attrs', 'readonly_attrs', 'proxy_attrs',
        'amf3', 'dynamic', 'alias', 'external', 'synonym_attrs'
    ])

    config = getattr(klass, '__amf__', None)

    if config is None:
        return meta

    if type(config) is dict:
        defines = lambda name: name in config
        lookup = config.__getitem__
    else:
        defines = lambda name: hasattr(config, name)
        lookup = lambda name: getattr(config, name)

    for name in ('alias', 'amf3', 'dynamic', 'external'):
        if defines(name):
            meta[name] = lookup(name)

    # these are stored under a '<name>_attrs' key in the meta dict
    for name in ('static', 'exclude', 'readonly', 'proxy', 'synonym'):
        if defines(name):
            meta[name + '_attrs'] = lookup(name)

    return meta
def get_module(mod_name):
    """
    Load and return a module based on C{mod_name}.

    @param mod_name: Fully qualified (dotted) module name, e.g. C{'foo.bar'}.
    @raise ImportError: Unable to import empty module.
    @return: The right-most module object, e.g. the C{bar} module for
        C{'foo.bar'}.
    """
    # the original used ``mod_name is ''``, an identity comparison that only
    # works through CPython string interning - use equality instead
    if mod_name == '':
        raise ImportError('Unable to import empty module')

    # __import__ returns the top-level package; walk down to the leaf module
    mod = __import__(mod_name)

    for comp in mod_name.split('.')[1:]:
        mod = getattr(mod, comp)

    return mod
# Feature-detect broken negative timestamps: on some platforms
# utcfromtimestamp() raises ValueError for pre-epoch values, in which case
# get_datetime() falls back to timedelta arithmetic.
try:
    datetime.datetime.utcfromtimestamp(-31536000.0)
except ValueError:
    negative_timestamp_broken = True
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tools for doing dynamic imports.
@since: 0.3
"""
import sys
__all__ = ['when_imported']
def when_imported(name, *hooks):
    """
    Call C{hook(module)} when module named C{name} is first imported. C{name}
    must be a fully qualified (i.e. absolute) module name.

    C{hook} must accept one argument: which will be the imported module object.

    If the module has already been imported, 'hook(module)' is called
    immediately, and the module object is returned from this function. If the
    module has not been imported, then the hook is called when the module is
    first imported.
    """
    # note: the original declared ``global finder`` here, which is redundant -
    # ``global`` is only needed for assignment, and this function only reads
    # the module-level finder
    finder.when_imported(name, *hooks)
class ModuleFinder(object):
    """
    This is a special module finder object that executes a collection of
    callables when a specific module has been imported. An instance of this
    is placed in C{sys.meta_path}, which is consulted before C{sys.modules} -
    allowing us to provide this functionality.

    @ivar post_load_hooks: C{dict} of C{full module path -> callable} to be
        executed when the module is imported.
    @ivar loaded_modules: C{list} of modules that this finder has seen. Used
        to stop recursive imports in L{load_module}
    @see: L{when_imported}
    @since: 0.5
    """

    def __init__(self):
        self.post_load_hooks = {}
        self.loaded_modules = []

    def find_module(self, name, path=None):
        """
        Called when an import is made. If there are hooks waiting for this
        module to be imported then we stop the normal import process and
        manually load the module.

        @param name: The name of the module being imported.
        @param path: The root path of the module (if a package). We ignore this.
        @return: If we want to hook this module, we return a C{loader}
            interface (which is this instance again). If not we return C{None}
            to allow the standard import process to continue.
        """
        if name in self.loaded_modules:
            # this module is currently being loaded by us (see load_module);
            # defer to the normal import machinery to avoid recursion
            return None

        hooks = self.post_load_hooks.get(name, None)

        if hooks:
            return self
        # implicit None: no hooks registered, normal import continues

    def load_module(self, name):
        """
        If we get this far, then there are hooks waiting to be called on
        import of this module. We manually load the module and then run the
        hooks.

        @param name: The name of the module to import.
        """
        # mark the module as in-progress so find_module ignores the nested
        # __import__ below
        self.loaded_modules.append(name)

        try:
            __import__(name, {}, {}, [])

            mod = sys.modules[name]

            self._run_hooks(name, mod)
        except:
            # bare except is deliberate here: undo the bookkeeping for ANY
            # failure, then re-raise unchanged
            self.loaded_modules.pop()

            raise

        return mod

    def when_imported(self, name, *hooks):
        """
        @see: L{when_imported}
        """
        if name in sys.modules:
            # already imported - fire the hooks immediately
            for hook in hooks:
                hook(sys.modules[name])

            return

        # queue the hooks for when the module is eventually imported
        h = self.post_load_hooks.setdefault(name, [])
        h.extend(hooks)

    def _run_hooks(self, name, module):
        """
        Run all hooks for a module.
        """
        # pop so each hook runs at most once per registration
        hooks = self.post_load_hooks.pop(name, [])

        for hook in hooks:
            hook(module)

    def __getstate__(self):
        # shallow-copy the mutable state so pickling/copying is isolated
        return (self.post_load_hooks.copy(), self.loaded_modules[:])

    def __setstate__(self, state):
        self.post_load_hooks, self.loaded_modules = state
def _init():
    """
    Internal function to install the module finder.
    """
    global finder

    # create the singleton on first call only
    if finder is None:
        finder = ModuleFinder()

    # install it ahead of the standard import machinery
    if finder not in sys.meta_path:
        sys.meta_path.insert(0, finder)
# module state: the singleton ModuleFinder, created and installed into
# sys.meta_path as a side effect of importing this module
finder = None

_init()
| Python |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Provides basic functionality for all pyamf.amf?.[De|E]ncoder classes.
"""
import types
import datetime
import pyamf
from pyamf import util, python, xml
__all__ = [
'IndexedCollection',
'Context',
'Decoder',
'Encoder'
]
# Python 3 has no ``unicode`` builtin; alias the names so the rest of this
# module can use the Python 2 spellings uniformly.
try:
    unicode
except NameError:
    # py3k support
    unicode = str
    str = bytes
class IndexedCollection(object):
    """
    Store references to objects and provides an api to query references.

    All reference checks are done using the builtin C{id} function unless
    C{use_hash} is specified as C{True} where the slower but more flexible
    C{hash} builtin is used.

    @note: All attributes on the instance are private, use the apis only.
    """

    def __init__(self, use_hash=False):
        # the key function maps an object to its lookup key
        self.func = hash if use_hash is True else id

        self.clear()

    def clear(self):
        """
        Clears the collection.
        """
        self.list = []
        self.dict = {}

    def getByReference(self, ref):
        """
        Returns an object based on the supplied reference. The C{ref} should
        be an C{int}.

        If the reference is not found, C{None} will be returned.
        """
        try:
            return self.list[ref]
        except IndexError:
            return None

    def getReferenceTo(self, obj):
        """
        Returns a reference to C{obj} if it is contained within this index.

        If the object is not contained within the collection, C{-1} will be
        returned.

        @param obj: The object to find the reference to.
        @return: An C{int} representing the reference or C{-1} is the object
            is not contained within the collection.
        """
        key = self.func(obj)

        return self.dict.get(key, -1)

    def append(self, obj):
        """
        Appends C{obj} to this index.

        @note: Uniqueness is not checked
        @return: The reference to C{obj} in this index.
        """
        # the new reference is the current length (i.e. the next slot)
        ref = len(self.list)

        self.list.append(obj)
        self.dict[self.func(obj)] = ref

        return ref

    def __eq__(self, other):
        if not isinstance(other, list):
            raise NotImplementedError("cannot compare %s to %r" % (
                type(other), self))

        return self.list == other

    def __len__(self):
        return len(self.list)

    def __getitem__(self, idx):
        return self.getByReference(idx)

    def __contains__(self, obj):
        return self.getReferenceTo(obj) != -1

    def __repr__(self):
        cls = self.__class__

        return '<%s.%s size=%d 0x%x>' % (
            cls.__module__,
            cls.__name__,
            len(self.list),
            id(self))
class Context(object):
    """
    The base context for all AMF [de|en]coding.

    @ivar extra: The only public attribute. This is a placeholder for any extra
        contextual data that required for different adapters.
    @type extra: C{dict}
    @ivar _objects: A collection of stored references to objects that have
        already been visited by this context.
    @type _objects: L{IndexedCollection}
    @ivar _class_aliases: Lookup of C{class} -> L{pyamf.ClassAlias} as
        determined by L{pyamf.get_class_alias}
    @ivar _unicodes: Lookup of utf-8 encoded byte strings -> string objects
        (aka strings/unicodes).
    """

    def __init__(self):
        self._objects = IndexedCollection()

        self.clear()

    def clear(self):
        """
        Clears the context.
        """
        self._objects.clear()

        self._class_aliases = {}
        self._unicodes = {}
        self.extra = {}

    def getObject(self, ref):
        """
        Gets an object based on a reference.

        @type ref: C{int}
        @return: The referenced object or C{None} if not found.
        """
        return self._objects.getByReference(ref)

    def getObjectReference(self, obj):
        """
        Gets a reference for an already referenced object.

        @return: The reference to the object or C{-1} if the object is not in
            the context.
        """
        return self._objects.getReferenceTo(obj)

    def addObject(self, obj):
        """
        Adds a reference to C{obj}.

        @return: Reference to C{obj}.
        @rtype: C{int}
        """
        return self._objects.append(obj)

    def getClassAlias(self, klass):
        """
        Gets a class alias based on the supplied C{klass}. If one is not found
        in the global context, one is created locally.

        If you supply a string alias and the class is not registered,
        L{pyamf.UnknownClassAlias} will be raised.

        @param klass: A class object or string alias.
        @return: The L{pyamf.ClassAlias} instance that describes C{klass}
        """
        try:
            return self._class_aliases[klass]
        except KeyError:
            pass

        try:
            alias = pyamf.get_class_alias(klass)
        except pyamf.UnknownClassAlias:
            if isinstance(klass, python.str_types):
                # a string alias that is not registered anywhere - nothing
                # sensible can be built locally
                raise

            # no alias has been found yet .. check subclasses
            alias = self._buildDeferredAlias(klass)

        self._class_aliases[klass] = alias

        return alias

    def _buildDeferredAlias(self, klass):
        """
        Builds a deferred alias for a class with no registered alias,
        preferring any alias type matched through subclass checks.
        """
        alias_type = util.get_class_alias(klass) or pyamf.ClassAlias
        meta = util.get_class_meta(klass)

        return alias_type(klass, defer=True, **meta)

    def getStringForBytes(self, s):
        """
        Returns the corresponding string for the supplied utf-8 encoded bytes.
        If there is no string object, one is created.

        @since: 0.6
        """
        key = hash(s)
        cached = self._unicodes.get(key, None)

        if cached is None:
            cached = self._unicodes[key] = s.decode('utf-8')

        return cached

    def getBytesForString(self, u):
        """
        Returns the corresponding utf-8 encoded string for a given unicode
        object. If there is no string, one is encoded.

        @since: 0.6
        """
        key = hash(u)
        cached = self._unicodes.get(key, None)

        if cached is None:
            cached = self._unicodes[key] = u.encode('utf-8')

        return cached
class _Codec(object):
    """
    Base codec.

    @ivar stream: The underlying data stream.
    @type stream: L{util.BufferedByteStream}
    @ivar context: The context for the encoding.
    @ivar strict: Whether the codec should operate in I{strict} mode.
    @type strict: C{bool}, default is C{False}.
    @ivar timezone_offset: The offset from I{UTC} for any C{datetime} objects
        being encoded. Default to C{None} means no offset.
    @type timezone_offset: C{datetime.timedelta} or C{int} or C{None}
    """

    def __init__(self, stream=None, context=None, strict=False,
                 timezone_offset=None):
        # coerce anything that is not already a BufferedByteStream
        if isinstance(stream, util.BufferedByteStream):
            self.stream = stream
        else:
            self.stream = util.BufferedByteStream(stream)

        self.context = context or self.buildContext()
        self.strict = strict
        self.timezone_offset = timezone_offset

        # per-type cache of dispatch callables, see getTypeFunc
        self._func_cache = {}

    def buildContext(self):
        """
        A context factory.
        """
        raise NotImplementedError

    def getTypeFunc(self, data):
        """
        Returns a callable based on C{data}. If no such callable can be found,
        the default must be to return C{None}.
        """
        raise NotImplementedError
class Decoder(_Codec):
    """
    Base AMF decoder.

    Supports an iterator/feed style: push bytes in with L{send} and pull
    decoded elements out via iteration.

    @ivar strict: Defines how strict the decoding should be. For the time
        being this relates to typed objects in the stream that do not have a
        registered alias. Introduced in 0.4.
    @type strict: C{bool}
    """

    def send(self, data):
        """
        Add data for the decoder to work on.
        """
        self.stream.append(data)

    def next(self):
        """
        Part of the iterator protocol.
        """
        try:
            return self.readElement()
        except pyamf.EOStream:
            # all data was successfully decoded from the stream
            raise StopIteration

    def readElement(self):
        """
        Reads an AMF3 element from the data stream.

        @raise DecodeError: The ActionScript type is unsupported.
        @raise EOStream: No more data left to decode.
        """
        # remember where this element starts so a partial decode can be
        # rolled back (see the IOError handler below)
        pos = self.stream.tell()

        try:
            # the first byte is the ActionScript type marker
            t = self.stream.read(1)
        except IOError:
            raise pyamf.EOStream

        try:
            func = self._func_cache[t]
        except KeyError:
            func = self.getTypeFunc(t)

            if not func:
                raise pyamf.DecodeError("Unsupported ActionScript type %s" % (
                    hex(ord(t)),))

            # cache the dispatch so subsequent markers skip getTypeFunc
            self._func_cache[t] = func

        try:
            return func()
        except IOError:
            # incomplete data: rewind to the start of the element so the
            # read can be retried after more data arrives via send()
            self.stream.seek(pos)

            raise

    def __iter__(self):
        return self
class _CustomTypeFunc(object):
    """
    Support for custom type mappings when encoding.

    Wraps a user supplied conversion function so it can be used as an
    encoder dispatch callable.
    """

    def __init__(self, encoder, func):
        self.encoder = encoder
        self.func = func

    def __call__(self, data, **kwargs):
        # the custom func may return None to signal 'nothing to encode'
        converted = self.func(data, encoder=self.encoder)

        if converted is not None:
            self.encoder.writeElement(converted)
class Encoder(_Codec):
    """
    Base AMF encoder.

    Supports a send/next pipeline: elements queued with L{send} are encoded
    lazily and their encoded bytes returned one at a time via iteration.
    """

    def __init__(self, *args, **kwargs):
        _Codec.__init__(self, *args, **kwargs)

        # queue of elements submitted via send() and consumed by next()
        self.bucket = []

    def _write_type(self, obj, **kwargs):
        """
        Subclasses should override this and all write[type] functions
        """
        raise NotImplementedError

    # each concrete encoder overrides these with real implementations; the
    # base class versions all raise NotImplementedError
    writeNull = _write_type
    writeBytes = _write_type
    writeString = _write_type
    writeBoolean = _write_type
    writeNumber = _write_type
    writeList = _write_type
    writeUndefined = _write_type
    writeDate = _write_type
    writeXML = _write_type
    writeObject = _write_type

    def writeSequence(self, iterable):
        """
        Encodes an iterable (a C{list}/C{tuple} subclass). If the iterable's
        class has a registered alias marked C{external} it is encoded as an
        object, otherwise it is written as a plain list.
        """
        try:
            alias = self.context.getClassAlias(iterable.__class__)
        except (AttributeError, pyamf.UnknownClassAlias):
            self.writeList(iterable)

            return

        if alias.external:
            # a is a subclassed list with a registered alias - push to the
            # correct method
            self.writeObject(iterable)

            return

        self.writeList(iterable)

    def writeGenerator(self, gen):
        """
        Iterates over a generator object and encodes all that is returned.
        """
        # NOTE(review): relies on the Python 2 generator protocol
        # (``gen.next``); this attribute does not exist on Python 3
        # generators.
        n = getattr(gen, 'next')

        while True:
            try:
                self.writeElement(n())
            except StopIteration:
                break

    def getTypeFunc(self, data):
        """
        Returns a callable that will encode C{data} to C{self.stream}. If
        C{data} is unencodable, then C{None} is returned.
        """
        if data is None:
            return self.writeNull

        t = type(data)

        # try types that we know will work
        if t is str or issubclass(t, str):
            return self.writeBytes
        if t is unicode or issubclass(t, unicode):
            return self.writeString
        elif t is bool:
            return self.writeBoolean
        elif t is float:
            return self.writeNumber
        elif t in python.int_types:
            return self.writeNumber
        elif t in (list, tuple):
            return self.writeList
        elif isinstance(data, (list, tuple)):
            # a list/tuple *subclass* may carry a class alias - see
            # writeSequence
            return self.writeSequence
        elif t is types.GeneratorType:
            return self.writeGenerator
        elif t is pyamf.UndefinedType:
            return self.writeUndefined
        elif t in (datetime.date, datetime.datetime, datetime.time):
            return self.writeDate
        elif xml.is_xml(data):
            return self.writeXML

        # check for any overridden types
        for type_, func in pyamf.TYPE_MAP.iteritems():
            try:
                if isinstance(data, type_):
                    return _CustomTypeFunc(self, func)
            except TypeError:
                # type_ is not a class - treat a callable as a predicate
                if python.callable(type_) and type_(data):
                    return _CustomTypeFunc(self, func)

        # now try some types that won't encode
        if t in python.class_types:
            # can't encode classes
            return None
        elif isinstance(data, python.func_types):
            # can't encode code objects
            return None
        elif isinstance(t, types.ModuleType):
            # cannot encode module objects
            # NOTE(review): this tests the *type* object ``t`` rather than
            # ``data``, so module instances never match this branch and fall
            # through to writeObject - looks like it should be
            # isinstance(data, types.ModuleType); confirm intent.
            return None

        # well, we tried ..
        return self.writeObject

    def writeElement(self, data):
        """
        Encodes C{data} to AMF. If the data is not able to be matched to an AMF
        type, then L{pyamf.EncodeError} will be raised.
        """
        key = type(data)
        func = None

        try:
            func = self._func_cache[key]
        except KeyError:
            func = self.getTypeFunc(data)

            if func is None:
                raise pyamf.EncodeError('Unable to encode %r (type %r)' % (
                    data, key))

            # cache per concrete type so future elements skip dispatch
            self._func_cache[key] = func

        func(data)

    def send(self, element):
        # queue the element; encoding happens lazily in next()
        self.bucket.append(element)

    def next(self):
        try:
            element = self.bucket.pop(0)
        except IndexError:
            # no queued elements left
            raise StopIteration

        # encode to the stream, then read back exactly the bytes written
        start_pos = self.stream.tell()

        self.writeElement(element)

        end_pos = self.stream.tell()

        self.stream.seek(start_pos)

        return self.stream.read(end_pos - start_pos)

    def __iter__(self):
        return self
| Python |
import logging, os
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Force Django to reload its settings.
from django.conf import settings

# NOTE(review): ``_target`` is a private LazySettings attribute; this reset
# relies on old Django internals - verify against the bundled Django version.
settings._target = None

# Must set this env var before importing any part of Django
# 'project' is the name of the project created with django-admin.py
os.environ['DJANGO_SETTINGS_MODULE'] = 'deduit.settings'
import logging
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
def log_exception(*args, **kwds):
    """
    Signal receiver: log the currently handled exception, including its
    traceback. The signal arguments are accepted but unused.
    """
    logging.exception('Exception in request:')
# Log errors.
# NOTE(review): uses the pre-1.0 ``django.dispatch.dispatcher`` module-level
# connect/disconnect API - verify against the bundled Django version.
django.dispatch.dispatcher.connect(
    log_exception, django.core.signals.got_request_exception)

# Unregister the rollback event handler.
django.dispatch.dispatcher.disconnect(
    django.db._rollback_on_exception,
    django.core.signals.got_request_exception)
def main():
    """
    CGI entry point: hand a freshly created Django WSGI application to
    App Engine's WSGI runner.
    """
    util.run_wsgi_app(django.core.handlers.wsgi.WSGIHandler())
if __name__ == '__main__':
main() | Python |
# Django settings for deduit project.

import os

# NOTE(review): DEBUG is enabled; this should be turned off for production
# deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# All database settings are left blank here.
DATABASE_ENGINE = ''    # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''      # Or path to database file if using sqlite3.
DATABASE_USER = ''      # Not used with sqlite3.
DATABASE_PASSWORD = ''  # Not used with sqlite3.
DATABASE_HOST = ''      # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''      # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): a secret key committed to source control should be rotated
# and loaded from the environment instead.
SECRET_KEY = 'gmx*6w=3d8m021f-_k%00cqh$(-6uif0vioz+(-w5!tm6yi0=g'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'deduit.urls'
#TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#)
INSTALLED_APPS = (
#'django.contrib.auth',
'django.contrib.contenttypes',
#'django.contrib.sessions',
'django.contrib.sites',
'deduit.main',
'deduit.imgserve'
)
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates". Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
ROOT_PATH + '/templates',
) | Python |
from django.conf.urls.defaults import *
# URL table. Ordering matters: Django uses the first matching pattern,
# so the numeric-NSID patterns (\d+@\w+) must stay ahead of the generic
# username patterns ([^\/]+) that would otherwise swallow them.
urlpatterns = patterns('',
    # Image proxy (see deduit.imgserve.views.main).
    (r'^img/(?P<farm>\d+)/(?P<server>\d+)/(?P<id>\d+)/(?P<secret>\w+)/$', 'deduit.imgserve.views.main'),
    (r"^photos/friends/page(?P<page>\d+)/$", "deduit.main.views.friendsphotos"),
    (r"^photos/friends/$", "deduit.main.views.friendsphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/page(?P<page>\d+)/$", "deduit.main.views.userphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/(?P<photoid>\d+)/$", "deduit.main.views.photoByID"),
    (r"^photos/(?P<nsid>\d+@\w+)/$", "deduit.main.views.userphotos"),
    (r"^photos/(?P<username>[^\/]+)/page(?P<page>\d+)/$", "deduit.main.views.usernamephotos"),
    (r"^photos/(?P<username>[^\/]+)/(?P<photoid>\d+)/$", "deduit.main.views.photoByUsernameAndID"),
    (r"^photos/(?P<username>[^\/]+)/$", "deduit.main.views.usernamephotos"),
    (r"^photos/(?P<username>[^\/]+)/popular-interesting/$", "deduit.main.views.popularusernamephotos"),
    (r"^photos/(?P<username>[^\/]+)/popular-interesting/page(?P<page>\d+)/$", "deduit.main.views.popularusernamephotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/popular-interesting/$", "deduit.main.views.popularuserphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/popular-interesting/page(?P<page>\d+)/$", "deduit.main.views.popularuserphotos"),
    (r"^photos/(?P<username>[^\/]+)/favorites/$", "deduit.main.views.favoritesusernamephotos"),
    (r"^photos/(?P<username>[^\/]+)/favorites/page(?P<page>\d+)/$", "deduit.main.views.favoritesusernamephotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/favorites/$", "deduit.main.views.favoritesuserphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/favorites/page(?P<page>\d+)/$", "deduit.main.views.favoritesuserphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/sets/$", "deduit.main.views.usersets"),
    (r"^photos/(?P<username>[^\/]+)/sets/$", "deduit.main.views.usernamesets"),
    (r"^photos/(?P<nsid>\d+@\w+)/sets/(?P<setid>\d+)/$", "deduit.main.views.usersetsphotos"),
    (r"^photos/(?P<username>[^\/]+)/sets/(?P<setid>\d+)/$", "deduit.main.views.usernamesetsphotos"),
    (r"^photos/(?P<nsid>\d+@\w+)/sets/(?P<setid>\d+)/page(?P<page>\d+)/$", "deduit.main.views.usersetsphotos"),
    (r"^photos/(?P<username>[^\/]+)/sets/(?P<setid>\d+)/page(?P<page>\d+)/$", "deduit.main.views.usernamesetsphotos"),
    (r"^groups/(?P<groupid>\d+@\w+)/pool/page(?P<page>\d+)/$", "deduit.main.views.groupphotos"),
    (r"^groups/(?P<groupid>\d+@\w+)/pool/$", "deduit.main.views.groupphotos"),
    (r"^groups/(?P<groupid>\d+@\w+)/$", "deduit.main.views.groupphotos"),
    (r"^groups/(?P<groupname>[^\/]+)/pool/page(?P<page>\d+)/$", "deduit.main.views.groupnamephotos"),
    (r"^groups/(?P<groupname>[^\/]+)/pool/$", "deduit.main.views.groupnamephotos"),
    (r"^groups/(?P<groupname>[^\/]+)/$", "deduit.main.views.groupnamephotos"),
    (r"^groups/(?P<groupname>[^\/]+)/popular-interesting/$", "deduit.main.views.populargroupnamephotos"),
    (r"^groups/(?P<groupname>[^\/]+)/popular-interesting/page(?P<page>\d+)/$", "deduit.main.views.populargroupnamephotos"),
    (r"^groups/(?P<groupid>\d+@\w+)/popular-interesting/$", "deduit.main.views.populargroupphotos"),
    (r"^groups/(?P<groupid>\d+@\w+)/popular-interesting/page(?P<page>\d+)/$", "deduit.main.views.populargroupphotos"),
    (r"^json/photodetails/(?P<photoid>\d+)/$", "deduit.main.json.photodetails"),
    (r"^(?P<page>\d+)/$", 'deduit.main.views.main'),
    (r"^explore/interesting/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$", 'deduit.main.views.main'),
    (r"^explore/interesting/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/page(?P<page>\d+)/$", 'deduit.main.views.main'),
    (r"^login/", 'deduit.main.views.login'),
    (r"^auth/", 'deduit.main.views.auth'),
    (r"^comment/", 'deduit.main.views.comment'),
    (r"^fave/", 'deduit.main.views.fave'),
    (r"^test/", 'deduit.main.views.test'),
    (r"^$", 'deduit.main.views.main'),
    # Example:
    # (r'^deduit/', include('deduit.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Example unit test shipped with the app skeleton."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
# Doctest collection consumed by "manage.py test"; each mapping value is
# executed as a doctest.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
#from google.appengine.api import users
from google.appengine.api import urlfetch
#from google.appengine.api import images
from django.http import HttpResponse
def main(request, farm, server, id, secret):
    """
    Proxy a Flickr photo: fetch the "_z" size JPEG identified by
    farm/server/id/secret and relay the raw bytes to the client.

    @param farm/server/id/secret: path components of the Flickr static
        photo URL, as captured by the URLconf.
    """
    imageUrl = "http://farm%s.static.flickr.com/%s/%s_%s_z.jpg" % (farm, server, id, secret)
    photoResponse = urlfetch.fetch(imageUrl).content
    # Pass the fetched body straight through. The redundant str() wrapper
    # is dropped, and "image/jpg" (not a registered MIME type) is
    # corrected to the standard "image/jpeg".
    response = HttpResponse(photoResponse, mimetype="image/jpeg")
    return response
| Python |
from google.appengine.ext import db
from google.appengine.api import users
class Visitor(db.Model):
    """Datastore record of a site visitor."""
    # Remote address of the visitor.
    ip = db.StringProperty()
    # Set automatically when the entity is first stored.
    added_on = db.DateTimeProperty(auto_now_add=True)
class UserPrefs(db.Model):
    """Per-user Flickr credentials tied to a Google account."""
    added_on = db.DateTimeProperty(auto_now_add=True)
    user = db.UserProperty()
    token = db.StringProperty()     # Flickr auth token
    nsid = db.StringProperty()      # Flickr user id
    username = db.StringProperty()
    fullname = db.StringProperty()
    def getcurrentuser(self):
        """
        Return the stored UserPrefs for the signed-in Google user, or the
        (unsaved) receiver itself when nobody is signed in.

        NOTE(review): returns None when a signed-in user has no stored
        prefs entity - callers must handle both cases.
        """
        user = users.get_current_user()
        if not user:
            return self;
        q = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user)
        userprefs = q.get()
        return userprefs
class Comment(db.Model):
    """Canned comment text, selectable by tag."""
    added_on = db.DateTimeProperty(auto_now_add=True)
    # FIX: nsid was declared twice; the second declaration silently
    # shadowed the first. One declaration is kept.
    nsid = db.StringProperty()
    description = db.StringProperty()
    tags = db.StringProperty()
    photo_id = db.StringProperty()

    def getrandomcomment(self, tags):
        """Return the first stored Comment whose tags match, or None."""
        q = db.GqlQuery("SELECT * FROM Comment WHERE tags = :1", tags)
        comment = q.get()
        return comment
| Python |
from google.appengine.api import urlfetch
from xml.dom import minidom
from datetime import datetime
import hashlib
import urllib
from google.appengine.api import users
from datetime import timedelta
from models import UserPrefs
def getPhotoDetails(photoid='0', node=None):
    """
    Build a small description dict for one photo from a Flickr <photo>
    XML element carrying the url_s/height_s/width_s and
    url_m/height_m/width_m extras attributes.

    The small (240px wide) size is preferred; otherwise the medium size
    is scaled down to a 240px-wide equivalent.

    Changes: a large block of commented-out dead code was removed and
    ``!= None`` became ``is not None``.

    NOTE(review): despite the default, node is effectively required - the
    attribute reads in the dict below are unconditional, so node=None
    raises AttributeError (same as the original behaviour).
    """
    url = ''
    if node is not None:
        url_s = node.getAttribute('url_s')
        height_s = int(node.getAttribute('height_s'))
        width_s = int(node.getAttribute('width_s'))
        url_m = node.getAttribute('url_m')
        height_m = int(node.getAttribute('height_m'))
        width_m = int(node.getAttribute('width_m'))
        if width_s == 240:
            url = url_s
            width = width_s
            height = height_s
        else:
            # Scale the medium size to a 240px-wide thumbnail.
            url = url_m
            width = 240
            height = height_m * 240 / width_m
    photoDetails = {
        'id': photoid,
        'secret': node.getAttribute('secret'),
        'server': node.getAttribute('server'),
        'farm': node.getAttribute('farm'),
        'localurl': '/photos/44861801@N07/%s/' % photoid, #dummy nsid
        'src': url,
        'width': width,
        'height': height
    }
    return photoDetails
def getPhotoDetailsJSON(photoid='0'):
    """
    Assemble the full JSON-serialisable detail dict for one photo from
    the getInfo and getExif API documents.

    FIX: the 'username' entry compared strings with ``is not ''``
    (identity, which only holds by CPython interning accident and is not
    an emptiness test); it now uses ``!=`` like the 'ownername' entry.
    """
    infoDoc = getInfoDoc(photoid)
    exifDoc = getExifDoc(photoid)
    # Timestamps are displayed shifted by +8 hours.
    wita = timedelta(hours=8)
    photoDetails = {
        'id': photoid,
        'secret': infoDoc.getElementsByTagName("photo")[0].getAttribute('secret'),
        'server': infoDoc.getElementsByTagName("photo")[0].getAttribute('server'),
        'farm': infoDoc.getElementsByTagName("photo")[0].getAttribute('farm'),
        'ownername': infoDoc.getElementsByTagName("owner")[0].getAttribute('realname') if infoDoc.getElementsByTagName("owner")[0].getAttribute('realname') != "" else infoDoc.getElementsByTagName("owner")[0].getAttribute('username'),
        'date': (datetime.fromtimestamp(int(infoDoc.getElementsByTagName("dates")[0].getAttribute('posted'))) + wita).strftime("%d %b %Y"),
        'time': (datetime.fromtimestamp(int(infoDoc.getElementsByTagName("dates")[0].getAttribute('posted'))) + wita).strftime("%I:%M%p"),
        'title': getText(infoDoc.getElementsByTagName("title")[0].childNodes).replace("\n", "<br />"),
        'description': getText(infoDoc.getElementsByTagName("description")[0].childNodes).replace("\n", "<br />"),
        'tags': getTags(infoDoc),
        'url': getText(infoDoc.getElementsByTagName("url")[0].childNodes),
        'localurl': getText(infoDoc.getElementsByTagName("url")[0].childNodes).replace('http://www.flickr.com/', '/'),
        'nsid': infoDoc.getElementsByTagName("owner")[0].getAttribute('nsid'),
        'comments': getText(infoDoc.getElementsByTagName("comments")[0].childNodes),
        'views': infoDoc.getElementsByTagName("photo")[0].getAttribute('views'),
        'isfavorite': infoDoc.getElementsByTagName("photo")[0].getAttribute('isfavorite'),
        'exif': exifData(exifDoc),
        'location': getLocation(infoDoc),
        'username': infoDoc.getElementsByTagName("owner")[0].getAttribute('realname') if infoDoc.getElementsByTagName("owner")[0].getAttribute('realname') != '' else infoDoc.getElementsByTagName("owner")[0].getAttribute('username')
    }
    return photoDetails
def getInfoDoc(photoID):
    """
    Fetch and parse the flickr.photos.getInfo response for a photo.

    Anonymous visitors and users without a stored auth token get the
    plain (unsigned) API call; users with a token get a signed call.

    SECURITY NOTE(review): the Flickr API key and signing secret are
    hardcoded in source; they should be moved to configuration.
    """
    user = users.get_current_user()
    if not user:
        infoUrl = "http://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=4ffd08ef36219cd952ccbd153052f833&&photo_id=%s" % photoID
        # NOTE(review): this branch uses deadline=60 while every other
        # fetch in the module uses 33 - confirm which is intended.
        infoDoc = minidom.parseString(urlfetch.fetch(infoUrl, deadline=60).content)
        return infoDoc
    q = UserPrefs.gql("WHERE user = :1 AND token != NULL", user)
    userprefs = q.get()
    if not userprefs:
        infoUrl = "http://api.flickr.com/services/rest/?method=flickr.photos.getInfo&api_key=4ffd08ef36219cd952ccbd153052f833&&photo_id=%s" % photoID
        infoDoc = minidom.parseString(urlfetch.fetch(infoUrl, deadline=33).content)
        return infoDoc
    token = userprefs.token
    # Signature string: shared secret followed by the parameter
    # name/value pairs in alphabetical order, then md5-hashed.
    apiSigString = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%smethodflickr.photos.getInfophoto_id%s" % (token, photoID)
    api_sig = hashlib.md5(apiSigString).hexdigest()
    params = urllib.urlencode({
        'method': 'flickr.photos.getInfo',
        'api_key': '4ffd08ef36219cd952ccbd153052f833',
        'photo_id': photoID,
        'auth_token': token,
        'api_sig': api_sig
    })
    infoUrl = "http://api.flickr.com/services/rest/?%s" % params
    infoDoc = minidom.parseString(urlfetch.fetch(infoUrl, deadline=33).content)
    return infoDoc
def getExifDoc(photoID):
    """
    Fetch the EXIF data for a photo from the Flickr API.

    Returns the list of <exif> elements; on fetch/parse failure an empty
    list (from a stub document) is returned so the page still renders.
    """
    exifUrl = "http://api.flickr.com/services/rest/?method=flickr.photos.getExif&api_key=4ffd08ef36219cd952ccbd153052f833&photo_id=%s" % photoID
    try:
        exifDoc = minidom.parseString(urlfetch.fetch(exifUrl, deadline=33).content)
    except Exception:
        # Narrowed from a bare except: a bare clause would also swallow
        # SystemExit/KeyboardInterrupt. Fetch and parse errors still
        # fall back to an EXIF-less stub document.
        exifDoc = minidom.parseString("<myxml>Some data</myxml>")
    return exifDoc.getElementsByTagName("exif")
def getText(nodelist):
    """Concatenate the character data of every text node in *nodelist*."""
    return ''.join(child.data for child in nodelist
                   if child.nodeType == child.TEXT_NODE)
def exifData(exif):
    """
    Render a one-line camera/exposure summary from EXIF nodes, or ''
    when too little EXIF data is present.
    """
    pieces = (
        getExif(exif, 'Model'),
        ' + ',
        getExif(exif, 'Lens'),
        '| f/',
        getExif(exif, 'FNumber'),
        getExif(exif, 'ExposureTime'),
        'Second ISO',
        getExif(exif, 'ISO'),
        getExif(exif, 'FocalLength'),
    )
    summary = ''.join(pieces)
    # The fixed separator text alone is exactly 17 characters, so a
    # longer string means at least one real EXIF value was found.
    if len(summary) > 17:
        return summary
    return ""
def getExif(nodelist, tag):
    """
    Return the raw value of the EXIF *tag* followed by a trailing space,
    or '' when the tag is absent from *nodelist*.
    """
    matching = next((n for n in nodelist if n.getAttribute('tag') == tag), None)
    if matching is None:
        return ""
    raw_node = matching.getElementsByTagName("raw")[0]
    return getText(raw_node.childNodes) + " "
def getTags(doc):
    """
    Space-join all text found under every <tag> element of *doc*,
    looking at the element itself, its children and its grandchildren.
    """
    collected = []
    for tag_el in doc.getElementsByTagName("tag"):
        if tag_el.nodeType == tag_el.TEXT_NODE:
            collected.append(tag_el.data)
        for child in tag_el.childNodes:
            if child.nodeType == tag_el.TEXT_NODE:
                collected.append(child.data)
            for grandchild in child.childNodes:
                if grandchild.nodeType == tag_el.TEXT_NODE:
                    collected.append(grandchild.data)
    return ' '.join(collected)
def getLocation(doc):
    """
    Collect the grandchild text values under every <location> element
    and join them with ' > ' in reverse document order.
    """
    parts = []
    for location in doc.getElementsByTagName("location"):
        for child in location.childNodes:
            for grandchild in child.childNodes:
                if grandchild.nodeType == location.TEXT_NODE:
                    parts.append(grandchild.data)
    parts.reverse()
    return ' > '.join(parts)
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Example unit test shipped with the app skeleton."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
# Doctest collection consumed by "manage.py test"; each mapping value is
# executed as a doctest.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.http import HttpResponse
import utils
from django.utils import simplejson
def photodetails(request, nsid='me', photoid='0'):
    """
    JSON endpoint returning the detail dict for one photo.

    NOTE(review): nsid is accepted but unused - the photo id alone
    identifies the photo.
    """
    photoDetails = utils.getPhotoDetailsJSON(photoid)
    result = simplejson.dumps(photoDetails)
    # Served as text/plain rather than application/json - presumably for
    # easy in-browser inspection; confirm before changing.
    response = HttpResponse(result, mimetype="text/plain")
    return response
from django.shortcuts import render_to_response
from deduit.main.models import Visitor
from deduit.main.models import UserPrefs
from google.appengine.api import urlfetch
from google.appengine.ext import db
from xml.dom import minidom
from datetime import datetime
from urllib import quote
import hashlib
import urllib
import utils
from django.http import HttpResponse
from google.appengine.api import users
from django.http import HttpResponseRedirect
from datetime import timedelta
from datetime import datetime, date, time
# Offset used to pick "yesterday" in UTC-8 for the explore page.
exploretime = timedelta(hours=-8, days=-1)
def main(request, page='1', day=None, month=None, year=None):
    """
    Render a page of Flickr "interestingness" photos for a given date.

    BUG FIX: the date defaults were previously computed once at module
    import time ('%02d' % (datetime.now()+exploretime).day, ...), so a
    long-lived process kept serving a stale "yesterday". They are now
    computed per request whenever the URL does not supply them.
    """
    if day is None or month is None or year is None:
        explore_date = datetime.now() + exploretime
        if day is None:
            day = '%02d' % explore_date.day
        if month is None:
            month = '%02d' % explore_date.month
        if year is None:
            year = explore_date.year
    url = "http://api.flickr.com/services/rest/?method=flickr.interestingness.getList&api_key=4ffd08ef36219cd952ccbd153052f833&extras=url_m,url_s&per_page=20&page=%s&date=%s-%s-%s" % (page, year, month, day)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'photos':photos, 'api_sig': api_sig, 'nextPage':'/explore/interesting/%s/%s/%s/page%d' % (year, month, day, int(page)+1)})
def friendsphotos(request, page='1'):
    """
    Render the signed-in user's contacts' photos; redirect to the Google
    login page when nobody is signed in.

    NOTE(review): page only affects the next-page link; the API call
    itself does not carry a page parameter.
    """
    user = users.get_current_user()
    if not user:
        return HttpResponseRedirect(users.create_login_url('http://flickrforsalaryman.appspot.com'+request.path))
    userprefs = UserPrefs().getcurrentuser()
    # Signature string: secret + alphabetically ordered parameter
    # name/value pairs, md5-hashed for the api_sig query parameter.
    sig_string = 'c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%scount50extrasdate_upload,date_taken,owner_name,url_m,url_smethodflickr.photos.getContactsPhotossingle_photo1' % userprefs.token
    api_sig = hashlib.md5(sig_string).hexdigest()
    url = "http://api.flickr.com/services/rest/?method=flickr.photos.getContactsPhotos&api_key=4ffd08ef36219cd952ccbd153052f833&extras=date_upload,date_taken,owner_name,url_m,url_s&count=50&auth_token=%s&api_sig=%s&single_photo=1" % (userprefs.token, api_sig)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'photos':photos, 'nextPage':'/photos/friends/page%d' % (int(page)+1)})
def usernamephotos(request, username='me', page=1):
    """
    Resolve *username* to an NSID (URL lookup first, then username
    lookup) and delegate to userphotos.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return userphotos(request, nsid, username, page)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return userphotos(request, nsid, username, page)
    # BUG FIX: the fallback previously called userphotos(request,
    # username, page), which bound page to the *username* parameter.
    # Treat the given name as an NSID and pass page by keyword.
    return userphotos(request, username, page=page)
def userphotos(request, nsid='me', username = '', page=1):
    """
    Render one page of a user's public photos.

    Fixes: the emptiness test used ``is not ''`` (identity, an interning
    accident) and now uses ``!=``; the Visitor query that was built and
    never used has been removed.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.people.getPublicPhotos&api_key=4ffd08ef36219cd952ccbd153052f833&&extras=description,date_upload,path_alias,tags,owner_name,url_m,url_s&per_page=20&user_id=%s&page=%s" % (nsid, page)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'api_sig': api_sig, 'photos':photos, 'username':username if username != '' else nsid, 'nextPage':'/photos/%s/page%d' % (username if username != '' else nsid, int(page)+1)})
def popularusernamephotos(request, username='me', page=1):
    """
    Resolve *username* to an NSID and delegate to popularuserphotos.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return popularuserphotos(request, nsid, username, page)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return popularuserphotos(request, nsid, username, page)
    # BUG FIX: the fallback previously bound page to the username
    # parameter; pass page by keyword instead.
    return popularuserphotos(request, username, page=page)
def popularuserphotos(request, nsid='me', username = '', page=1):
    """
    Render one page of a user's photos sorted by interestingness.

    Fix: ``is not ''`` identity comparisons replaced with ``!=``.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=4ffd08ef36219cd952ccbd153052f833&&extras=description,date_upload,path_alias,tags,owner_name,url_m,url_s&sort=interestingness-desc&per_page=20&user_id=%s&page=%s" % (nsid, page)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'api_sig': api_sig, 'photos':photos, 'username':username if username != '' else nsid, 'nextPage':'/photos/%s/popular-interesting/page%d' % (username if username != '' else nsid, int(page)+1)})
def favoritesusernamephotos(request, username='me', page=1):
    """
    Resolve *username* to an NSID and delegate to favoritesuserphotos.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return favoritesuserphotos(request, nsid, username, page)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return favoritesuserphotos(request, nsid, username, page)
    # BUG FIX: the fallback previously bound page to the username
    # parameter; pass page by keyword instead.
    return favoritesuserphotos(request, username, page=page)
def favoritesuserphotos(request, nsid='me', username = '', page=1):
    """
    Render one page of a user's favorite photos (requires a signed-in
    Google user with a stored Flickr auth token).

    Fixes: the urlencode dict listed the 'page' key twice (the duplicate
    silently overwrote the first); ``is not ''`` identity comparisons
    replaced with ``!=``.
    """
    user = users.get_current_user()
    if not user:
        return HttpResponseRedirect(users.create_login_url('http://flickrforsalaryman.appspot.com'+request.path))
    q = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user)
    userprefs = q.get()
    # NOTE(review): userprefs may be None for a signed-in user with no
    # stored prefs; the attribute read below would then raise.
    token = userprefs.token
    apiSigString = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%sextrasurl_m,url_smethodflickr.favorites.getListpage%sper_page20user_id%s" % (token, page, nsid)
    api_sig = hashlib.md5(apiSigString).hexdigest()
    params = urllib.urlencode({
        'method': 'flickr.favorites.getList',
        'api_key': '4ffd08ef36219cd952ccbd153052f833',
        'user_id': nsid,
        'page': page,
        'per_page': 20,
        'auth_token': token,
        'api_sig': api_sig,
        'extras': 'url_m,url_s'
    })
    url = "http://api.flickr.com/services/rest/?%s" % params
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'api_sig': api_sig, 'photos':photos, 'username':username if username != '' else nsid, 'nextPage':'/photos/%s/favorites/page%d' % (username if username != '' else nsid, int(page)+1)})
def usernamesets(request, username='me', page=1):
    """
    Resolve *username* to an NSID and delegate to usersets.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return usersets(request, nsid, username, page)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return usersets(request, nsid, username, page)
    # BUG FIX: the fallback previously bound page to the username
    # parameter; pass page by keyword instead.
    return usersets(request, username, page=page)
def usersets(request, nsid='me', username = '', page=1):
    """
    Render the list of a user's photosets.

    Fixes: ``is not ''`` identity comparisons replaced with ``!=``; the
    Visitor query that was built and never used has been removed.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.photosets.getList&api_key=4ffd08ef36219cd952ccbd153052f833&user_id=%s" % nsid
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    sets = []
    for node in xmldoc.getElementsByTagName('photoset'):
        sets.append({
            'id': node.getAttribute('id'),
            'primary': node.getAttribute('primary'),
            'secret': node.getAttribute('secret'),
            'server': node.getAttribute('server'),
            'farm': node.getAttribute('farm'),
            'photos': node.getAttribute('photos'),
            'videos': node.getAttribute('videos'),
            'title': utils.getText(node.getElementsByTagName("title")[0].childNodes).replace("\n", "<br />"),
            'description': utils.getText(node.getElementsByTagName("description")[0].childNodes).replace("\n", "<br />"),
            'localurl': '/photos/' + (username if username != '' else nsid) + '/sets/' + node.getAttribute('id')
        })
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'api_sig': api_sig, 'sets':sets, 'username':username if username != '' else nsid, 'nextPage':'/photos/%s/page%d' % (username if username != '' else nsid, int(page)+1)})
def usernamesetsphotos(request, username='me', setid='', page=1):
    """
    Resolve *username* to an NSID and delegate to usersetsphotos.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return usersetsphotos(request, nsid, username, setid, page)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return usersetsphotos(request, nsid, username, setid, page)
    # BUG FIX: the fallback previously shifted the arguments - setid was
    # bound to the username parameter and page to setid. Pass them by
    # keyword so the given name is used as the NSID.
    return usersetsphotos(request, username, setid=setid, page=page)
def usersetsphotos(request, nsid='me', username = '', setid='', page=1):
    """
    Render one page of photos from a photoset.

    Fix: ``is not ''`` identity comparisons replaced with ``!=``.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.photosets.getPhotos&api_key=4ffd08ef36219cd952ccbd153052f833&&extras=description,date_upload,path_alias,tags,owner_name,url_m,url_s&per_page=20&photoset_id=%s&page=%s" % (setid, page)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    return render_to_response('main/index.html', {'userprefs': UserPrefs().getcurrentuser(), 'api_sig': api_sig, 'photos':photos, 'username':username if username != '' else nsid, 'nextPage':'/photos/%s/sets/%s/page%d' % (username if username != '' else nsid, setid, int(page)+1)})
def photoByUsernameAndID(request, username='me', photoid=1):
    """
    Resolve *username* to an NSID (URL lookup first, then username
    lookup) and show the single-photo page via photoByID. When neither
    lookup returns a <user> node, *username* is assumed to already be an
    NSID.
    """
    #find by url
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupUser&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/photos/'+username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('id')
        return photoByID(request, nsid, photoid)
    #find by username
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(username)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('user'):
        nsid = node.getAttribute('nsid')
        return photoByID(request, nsid, photoid)
    return photoByID(request, username, photoid)
def photoByID(request, nsid='me', photoid='0'):
    """Render the detail page (info, EXIF, comments) for a single photo.

    Bug fix: the original used ``is not ''`` identity tests on attribute
    values.  minidom returns unicode strings, for which ``u'' is ''`` is
    False, so the "fallback" branches could never be selected; equality
    tests restore the intended logic.  Repeated ``getElementsByTagName``
    lookups of the same element are also hoisted into locals.
    """
    infoDoc = utils.getInfoDoc(photoid)
    exif = utils.getExifDoc(photoid)
    commentUrl = "http://api.flickr.com/services/rest/?method=flickr.photos.comments.getList&api_key=4ffd08ef36219cd952ccbd153052f833&&photo_id=%s" % photoid
    commentDoc = minidom.parseString(urlfetch.fetch(commentUrl, deadline=33).content)
    # Display timestamps shifted to UTC+8 (WITA).
    wita = timedelta(hours=8)
    owner = infoDoc.getElementsByTagName("owner")[0]
    photoEl = infoDoc.getElementsByTagName("photo")[0]
    posted = datetime.fromtimestamp(int(infoDoc.getElementsByTagName("dates")[0].getAttribute('posted'))) + wita
    photo = {
        'id': photoid,
        'username': owner.getAttribute('username') if owner.getAttribute('username') != '' else owner.getAttribute('owner'),
        'secret': photoEl.getAttribute('secret'),
        'server': photoEl.getAttribute('server'),
        'farm': photoEl.getAttribute('farm'),
        'views': photoEl.getAttribute('views'),
        'title': utils.getText(infoDoc.getElementsByTagName("title")[0].childNodes),
        'tags': utils.getTags(infoDoc),
        'date': posted.strftime("%A, %d %B %Y"),
        'time': posted.strftime("%I:%M%p"),
        'ownername': owner.getAttribute('realname') if owner.getAttribute('realname') != "" else owner.getAttribute('username'),
        'description': utils.getText(infoDoc.getElementsByTagName("description")[0].childNodes).replace("\n", "<br />"),
        'url': utils.getText(infoDoc.getElementsByTagName("url")[0].childNodes),
        'nsid': owner.getAttribute('nsid'),
        'comments': utils.getText(infoDoc.getElementsByTagName("comments")[0].childNodes),
        'exif': utils.exifData(exif),
        'location': utils.getLocation(infoDoc)
    }
    nsid = photo['nsid']
    comments = []
    for comment in commentDoc.getElementsByTagName('comment'):
        comments.append({
            'id': comment.getAttribute('id'),
            'author': comment.getAttribute('author'),
            'authorname': comment.getAttribute('authorname'),
            'datecreate': datetime.fromtimestamp(int(comment.getAttribute('datecreate'))).strftime("%d %B %Y %I:%M%p"),
            'permalink': comment.getAttribute('permalink'),
            'body': utils.getText(comment.childNodes).replace("\n", "<br />")
        })
    # The original always passed username='' here, so the NSID is shown;
    # the original nextPage expression int(1)+1 likewise always meant page 2.
    return render_to_response('main/details.html', {
        'userprefs': UserPrefs().getcurrentuser(),
        'username': nsid,
        'photo': photo,
        'comments': comments,
        'nextPage': '/photos/%s/page%d' % (nsid, 2)
    })
def groupnamephotos(request, groupname='me', page=1):
    """Resolve *groupname* to a Flickr group id and show its photo pool.

    Tries the group-URL lookup, then a name search; as a last resort the
    name itself is passed through as the id.

    Bug fix: the fallthrough call passed ``page`` positionally, where it
    was bound to ``groupphotos``'s ``groupname`` parameter (leaving the
    real page at its default of 1).  It is now bound by keyword.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupGroup&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/groups/' + groupname)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('group'):
        return groupphotos(request, node.getAttribute('id'), groupname, page)
    # Find by group name.
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(groupname)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('group'):
        return groupphotos(request, node.getAttribute('id'), groupname, page)
    return groupphotos(request, groupname, page=page)
def groupphotos(request, groupid='me', groupname='', page=1):
    """Render one page of a Flickr group's photo pool.

    ``groupname`` is preferred for display/links when known, falling back
    to ``groupid``.

    Bug fix: replaced the ``is not ''`` identity comparison (unreliable —
    tests object identity, not equality) with ``!=``.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.groups.pools.getPhotos&api_key=4ffd08ef36219cd952ccbd153052f833&extras=description,date_upload,path_alias,tags,owner_name,url_m,url_s&per_page=20&group_id=%s&page=%s" % (groupid, page)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    display_name = groupname if groupname != '' else groupid
    return render_to_response('main/index.html', {
        'userprefs': UserPrefs().getcurrentuser(),
        'api_sig': api_sig,
        'photos': photos,
        'groupname': display_name,
        'nextPage': '/groups/%s/pool/page%d' % (display_name, int(page) + 1),
    })
def populargroupnamephotos(request, groupname='me', page=1):
    """Resolve *groupname* to a group id and show its most interesting photos.

    Same resolution strategy as groupnamephotos, delegating to
    populargroupphotos.

    Bug fix: the fallthrough call passed ``page`` positionally, where it
    was bound to ``populargroupphotos``'s ``groupname`` parameter; it is
    now bound by keyword.
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.urls.lookupGroup&api_key=4ffd08ef36219cd952ccbd153052f833&url=%s" % quote('flickr.com/groups/' + groupname)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('group'):
        return populargroupphotos(request, node.getAttribute('id'), groupname, page)
    # Find by group name.
    url = "http://api.flickr.com/services/rest/?method=flickr.people.findByUsername&api_key=4ffd08ef36219cd952ccbd153052f833&username=%s" % quote(groupname)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    for node in xmldoc.getElementsByTagName('group'):
        return populargroupphotos(request, node.getAttribute('id'), groupname, page)
    return populargroupphotos(request, groupname, page=page)
def populargroupphotos(request, groupid='me', groupname='', page=1):
    """Render one page of a group's photos sorted by interestingness.

    Bug fix: replaced the ``is not ''`` identity comparison with ``!=``;
    identity tests on strings depend on interning and do not express
    "non-empty".
    """
    url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=4ffd08ef36219cd952ccbd153052f833&&extras=description,date_upload,path_alias,tags,owner_name,url_m,url_s&sort=interestingness-desc&per_page=20&group_id=%s&page=%s" % (groupid, page)
    flickrResponse = urlfetch.fetch(url, deadline=33)
    xmldoc = minidom.parseString(flickrResponse.content)
    photos = []
    for node in xmldoc.getElementsByTagName('photo'):
        photos.append(utils.getPhotoDetails(node.getAttribute('id'), node))
    api_sig = hashlib.md5("c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833permswrite").hexdigest()
    display_name = groupname if groupname != '' else groupid
    return render_to_response('main/index.html', {
        'userprefs': UserPrefs().getcurrentuser(),
        'api_sig': api_sig,
        'photos': photos,
        'groupname': display_name,
        'nextPage': '/groups/%s/popular-interesting/page%d' % (display_name, int(page) + 1),
    })
def login(request):
    """Complete the Flickr auth handshake and persist the user's token.

    Called back with a ``frob`` query parameter; exchanges it for an auth
    token via flickr.auth.getToken and stores token/NSID/names on the
    user's UserPrefs row (creating the row on first login).
    """
    user = users.get_current_user()
    if not user:
        return HttpResponseRedirect(users.create_login_url('http://flickrforsalaryman.appspot.com' + request.path))
    frob = request.GET.get('frob')
    # api_sig = md5(shared secret + parameters concatenated in sorted order).
    apiSigString = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833frob%smethodflickr.auth.getToken" % frob
    tokenUrl = "http://api.flickr.com/services/rest/?method=flickr.auth.getToken&api_key=4ffd08ef36219cd952ccbd153052f833&frob=%s&api_sig=%s" % (frob, hashlib.md5(apiSigString).hexdigest())
    tokenDoc = minidom.parseString(urlfetch.fetch(tokenUrl, deadline=33).content)
    prefs = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user).get()
    if not prefs:
        prefs = UserPrefs()
        prefs.user = user
    # Refresh the stored credentials on every login.
    userNode = tokenDoc.getElementsByTagName("user")[0]
    prefs.token = utils.getText(tokenDoc.getElementsByTagName("token")[0].childNodes)
    prefs.nsid = userNode.getAttribute('nsid')
    prefs.username = userNode.getAttribute('username')
    prefs.fullname = userNode.getAttribute('fullname')
    prefs.put()
    return main(request)
def auth(request):
    """Ensure the current Google user has a UserPrefs row, then go home."""
    user = users.get_current_user()
    if not user:
        return HttpResponseRedirect(users.create_login_url('http://flickrforsalaryman.appspot.com/auth'))
    prefs = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user).get()
    if not prefs:
        # First visit: create an (un-tokened) preferences record.
        prefs = UserPrefs()
        prefs.user = user
        prefs.put()
    return main(request)
def comment(request):
    """POST handler: add a comment to a photo through the Flickr API.

    Expects ``comment`` and ``photoid`` form fields; responds with a short
    plain-text status message for the client-side script.
    """
    user = users.get_current_user()
    if not user:
        return HttpResponse("Must login to comment", mimetype="text/plain")
    userprefs = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user).get()
    text = request.POST['comment']
    photoId = request.POST['photoid']
    token = userprefs.token
    # api_sig = md5(shared secret + parameters concatenated in sorted order).
    sigSource = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%scomment_text%smethodflickr.photos.comments.addCommentphoto_id%s" % (token, text, photoId)
    params = urllib.urlencode({
        'method': 'flickr.photos.comments.addComment',
        'api_key': '4ffd08ef36219cd952ccbd153052f833',
        'photo_id': photoId,
        'comment_text': text,
        'auth_token': token,
        'api_sig': hashlib.md5(sigSource).hexdigest()
    })
    raw = urllib.urlopen("http://api.flickr.com/services/rest/", params).read()
    # Collapse the raw API response into a short user-facing message.
    if 'User is posting comments too fast' in raw:
        message = "User is posting comments too fast"
    elif 'Blank comment' in raw:
        message = "Comment text can not be blank"
    else:
        message = "Comment posted"
    return HttpResponse(message, mimetype="text/plain")
def fave(request):
    """POST handler: toggle the favourite state of a photo for this user.

    Tries flickr.favorites.add first; if the API reports an error
    (typically "already in favorites") the photo is removed instead, so
    repeated posts behave as a toggle.  Responds 'fave' or 'unfave'.
    """
    user = users.get_current_user()
    if not user:
        return HttpResponseRedirect(users.create_login_url('http://flickrforsalaryman.appspot.com' + request.path))
    userprefs = db.GqlQuery("SELECT * FROM UserPrefs WHERE user = :1", user).get()
    photoId = request.POST['photoid']
    token = userprefs.token
    # api_sig = md5(shared secret + parameters concatenated in sorted order).
    sigSource = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%smethodflickr.favorites.addphoto_id%s" % (token, photoId)
    params = urllib.urlencode({
        'method': 'flickr.favorites.add',
        'api_key': '4ffd08ef36219cd952ccbd153052f833',
        'photo_id': photoId,
        'auth_token': token,
        'api_sig': hashlib.md5(sigSource).hexdigest()
    })
    addResult = urllib.urlopen("http://api.flickr.com/services/rest/", params).read()
    addDoc = minidom.parseString(addResult)
    if addDoc.getElementsByTagName("err").length > 0:
        # Adding failed — assume it was already faved and remove it instead.
        sigSource = "c9a1cf81402f8253api_key4ffd08ef36219cd952ccbd153052f833auth_token%smethodflickr.favorites.removephoto_id%s" % (token, photoId)
        params = urllib.urlencode({
            'method': 'flickr.favorites.remove',
            'api_key': '4ffd08ef36219cd952ccbd153052f833',
            'photo_id': photoId,
            'auth_token': token,
            'api_sig': hashlib.md5(sigSource).hexdigest()
        })
        urllib.urlopen("http://api.flickr.com/services/rest/", params).read()
        return HttpResponse('unfave', mimetype="text/plain")
    return HttpResponse('fave', mimetype="text/plain")
| Python |
#!/usr/bin/env python
# Standard pre-Django-1.4 management entry point: import the project's
# settings module from the current directory and hand control to the
# django-admin machinery via execute_manager.
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 65524 $" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
    """Root of the pickle exception hierarchy; both PicklingError and
    UnpicklingError derive from it."""
class PicklingError(PickleError):
    """Raised when an object handed to Pickler.dump() cannot be
    serialized."""
class UnpicklingError(PickleError):
    """Raised on a malformed or unsafe pickle stream.

    Other exceptions (AttributeError, EOFError, ImportError, IndexError,
    among others) may also escape during unpickling; this class only
    covers problems detected by the unpickler itself.
    """
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
    """Writes a pickle data stream for arbitrary Python objects.

    The emitted opcode sequence and memo numbering depend on the exact
    order of save() calls, so the serialization logic below is kept
    byte-for-byte; only documentation has been added.
    """

    def __init__(self, file, protocol=None):
        """This takes a file-like object for writing a pickle data stream.

        The optional protocol argument tells the pickler to use the
        given protocol; supported protocols are 0, 1, 2.  The default
        protocol is 0, to be backwards compatible.  (Protocol 0 is the
        only protocol that can be written to a file opened in text
        mode and read back successfully.  When using a protocol higher
        than 0, make sure the file is opened in binary mode, both when
        pickling and unpickling.)

        Protocol 1 is more efficient than protocol 0; protocol 2 is
        more efficient than protocol 1.

        Specifying a negative protocol version selects the highest
        protocol version supported.  The higher the protocol used, the
        more recent the version of Python needed to read the pickle
        produced.

        The file parameter must have a write() method that accepts a single
        string argument.  It can thus be an open file object, a StringIO
        object, or any other custom object that meets this interface.
        """
        if protocol is None:
            protocol = 0
        if protocol < 0:
            protocol = HIGHEST_PROTOCOL
        elif not 0 <= protocol <= HIGHEST_PROTOCOL:
            raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
        self.write = file.write
        self.memo = {}
        self.proto = int(protocol)
        self.bin = protocol >= 1
        self.fast = 0

    def clear_memo(self):
        """Clears the pickler's "memo".

        The memo is the data structure that remembers which objects the
        pickler has already seen, so that shared or recursive objects are
        pickled by reference and not by value.  This method is useful when
        re-using picklers.
        """
        self.memo.clear()

    def dump(self, obj):
        """Write a pickled representation of obj to the open file."""
        if self.proto >= 2:
            self.write(PROTO + chr(self.proto))
        self.save(obj)
        self.write(STOP)

    def memoize(self, obj):
        """Store an object in the memo."""

        # The Pickler memo is a dictionary mapping object ids to 2-tuples
        # that contain the Unpickler memo key and the object being memoized.
        # The memo key is written to the pickle and will become
        # the key in the Unpickler's memo.  The object is stored in the
        # Pickler memo so that transient objects are kept alive during
        # pickling.

        # The use of the Unpickler memo length as the memo key is just a
        # convention.  The only requirement is that the memo values be unique.
        # But there appears no advantage to any other scheme, and this
        # scheme allows the Unpickler memo to be implemented as a plain (but
        # growable) array, indexed by memo key.
        if self.fast:
            return
        assert id(obj) not in self.memo
        memo_len = len(self.memo)
        self.write(self.put(memo_len))
        self.memo[id(obj)] = memo_len, obj

    # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
    def put(self, i, pack=struct.pack):
        if self.bin:
            if i < 256:
                return BINPUT + chr(i)
            else:
                return LONG_BINPUT + pack("<i", i)
        return PUT + repr(i) + '\n'

    # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
    def get(self, i, pack=struct.pack):
        if self.bin:
            if i < 256:
                return BINGET + chr(i)
            else:
                return LONG_BINGET + pack("<i", i)
        return GET + repr(i) + '\n'

    def save(self, obj):
        # Central dispatcher: tries, in order, persistent ids, the memo,
        # the per-type dispatch table, class objects, and finally the
        # copy_reg / __reduce__ protocol.

        # Check for persistent id (defined by a subclass)
        pid = self.persistent_id(obj)
        if pid:
            self.save_pers(pid)
            return

        # Check the memo
        x = self.memo.get(id(obj))
        if x:
            self.write(self.get(x[0]))
            return

        # Check the type dispatch table
        t = type(obj)
        f = self.dispatch.get(t)
        if f:
            f(self, obj)  # Call unbound method with explicit self
            return

        # Check for a class with a custom metaclass; treat as regular class
        try:
            issc = issubclass(t, TypeType)
        except TypeError:  # t is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            self.save_global(obj)
            return

        # Check copy_reg.dispatch_table
        reduce = dispatch_table.get(t)
        if reduce:
            rv = reduce(obj)
        else:
            # Check for a __reduce_ex__ method, fall back to __reduce__
            reduce = getattr(obj, "__reduce_ex__", None)
            if reduce:
                rv = reduce(self.proto)
            else:
                reduce = getattr(obj, "__reduce__", None)
                if reduce:
                    rv = reduce()
                else:
                    raise PicklingError("Can't pickle %r object: %r" %
                                        (t.__name__, obj))

        # Check for string returned by reduce(), meaning "save as global"
        if type(rv) is StringType:
            self.save_global(obj, rv)
            return

        # Assert that reduce() returned a tuple
        if type(rv) is not TupleType:
            raise PicklingError("%s must return string or tuple" % reduce)

        # Assert that it returned an appropriately sized tuple
        l = len(rv)
        if not (2 <= l <= 5):
            raise PicklingError("Tuple returned by %s must have "
                                "two to five elements" % reduce)

        # Save the reduce() output and finally memoize the object
        self.save_reduce(obj=obj, *rv)

    def persistent_id(self, obj):
        # This exists so a subclass can override it
        return None

    def save_pers(self, pid):
        # Save a persistent id reference
        if self.bin:
            self.save(pid)
            self.write(BINPERSID)
        else:
            self.write(PERSID + str(pid) + '\n')

    def save_reduce(self, func, args, state=None,
                    listitems=None, dictitems=None, obj=None):
        # This API is called by some subclasses

        # Assert that args is a tuple or None
        if not isinstance(args, TupleType):
            raise PicklingError("args from reduce() should be a tuple")

        # Assert that func is callable
        if not hasattr(func, '__call__'):
            raise PicklingError("func from reduce should be callable")

        save = self.save
        write = self.write

        # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
        if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
            # A __reduce__ implementation can direct protocol 2 to
            # use the more efficient NEWOBJ opcode, while still
            # allowing protocol 0 and 1 to work normally.  For this to
            # work, the function returned by __reduce__ should be
            # called __newobj__, and its first argument should be a
            # new-style class.  The implementation for __newobj__
            # should be as follows, although pickle has no way to
            # verify this:
            #
            # def __newobj__(cls, *args):
            #     return cls.__new__(cls, *args)
            #
            # Protocols 0 and 1 will pickle a reference to __newobj__,
            # while protocol 2 (and above) will pickle a reference to
            # cls, the remaining args tuple, and the NEWOBJ code,
            # which calls cls.__new__(cls, *args) at unpickling time
            # (see load_newobj below).  If __reduce__ returns a
            # three-tuple, the state from the third tuple item will be
            # pickled regardless of the protocol, calling __setstate__
            # at unpickling time (see load_build below).
            #
            # Note that no standard __newobj__ implementation exists;
            # you have to provide your own.  This is to enforce
            # compatibility with Python 2.2 (pickles written using
            # protocol 0 or 1 in Python 2.3 should be unpicklable by
            # Python 2.2).
            cls = args[0]
            if not hasattr(cls, "__new__"):
                raise PicklingError(
                    "args[0] from __newobj__ args has no __new__")
            if obj is not None and cls is not obj.__class__:
                raise PicklingError(
                    "args[0] from __newobj__ args has the wrong class")
            args = args[1:]
            save(cls)
            save(args)
            write(NEWOBJ)
        else:
            save(func)
            save(args)
            write(REDUCE)

        if obj is not None:
            self.memoize(obj)

        # More new special cases (that work with older protocols as
        # well): when __reduce__ returns a tuple with 4 or 5 items,
        # the 4th and 5th item should be iterators that provide list
        # items and dict items (as (key, value) tuples), or None.

        if listitems is not None:
            self._batch_appends(listitems)

        if dictitems is not None:
            self._batch_setitems(dictitems)

        if state is not None:
            save(state)
            write(BUILD)

    # Methods below this point are dispatched through the dispatch table

    dispatch = {}

    def save_none(self, obj):
        self.write(NONE)
    dispatch[NoneType] = save_none

    def save_bool(self, obj):
        if self.proto >= 2:
            self.write(obj and NEWTRUE or NEWFALSE)
        else:
            self.write(obj and TRUE or FALSE)
    dispatch[bool] = save_bool

    def save_int(self, obj, pack=struct.pack):
        if self.bin:
            # If the int is small enough to fit in a signed 4-byte 2's-comp
            # format, we can store it more efficiently than the general
            # case.
            # First one- and two-byte unsigned ints:
            if obj >= 0:
                if obj <= 0xff:
                    self.write(BININT1 + chr(obj))
                    return
                if obj <= 0xffff:
                    self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
                    return
            # Next check for 4-byte signed ints:
            high_bits = obj >> 31  # note that Python shift sign-extends
            if high_bits == 0 or high_bits == -1:
                # All high bits are copies of bit 2**31, so the value
                # fits in a 4-byte signed int.
                self.write(BININT + pack("<i", obj))
                return
        # Text pickle, or int too big to fit in signed 4-byte format.
        self.write(INT + repr(obj) + '\n')
    dispatch[IntType] = save_int

    def save_long(self, obj, pack=struct.pack):
        if self.proto >= 2:
            bytes = encode_long(obj)
            n = len(bytes)
            if n < 256:
                self.write(LONG1 + chr(n) + bytes)
            else:
                self.write(LONG4 + pack("<i", n) + bytes)
            return
        self.write(LONG + repr(obj) + '\n')
    dispatch[LongType] = save_long

    def save_float(self, obj, pack=struct.pack):
        if self.bin:
            self.write(BINFLOAT + pack('>d', obj))
        else:
            self.write(FLOAT + repr(obj) + '\n')
    dispatch[FloatType] = save_float

    def save_string(self, obj, pack=struct.pack):
        if self.bin:
            n = len(obj)
            if n < 256:
                self.write(SHORT_BINSTRING + chr(n) + obj)
            else:
                self.write(BINSTRING + pack("<i", n) + obj)
        else:
            self.write(STRING + repr(obj) + '\n')
        self.memoize(obj)
    dispatch[StringType] = save_string

    def save_unicode(self, obj, pack=struct.pack):
        if self.bin:
            encoding = obj.encode('utf-8')
            n = len(encoding)
            self.write(BINUNICODE + pack("<i", n) + encoding)
        else:
            # Protocol 0 streams are newline-delimited, so escape
            # backslashes and newlines before raw-unicode-escape encoding.
            obj = obj.replace("\\", "\\u005c")
            obj = obj.replace("\n", "\\u000a")
            self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
        self.memoize(obj)
    dispatch[UnicodeType] = save_unicode

    if StringType == UnicodeType:
        # This is true for Jython
        def save_string(self, obj, pack=struct.pack):
            unicode = obj.isunicode()

            if self.bin:
                if unicode:
                    obj = obj.encode("utf-8")
                l = len(obj)
                if l < 256 and not unicode:
                    self.write(SHORT_BINSTRING + chr(l) + obj)
                else:
                    s = pack("<i", l)
                    if unicode:
                        self.write(BINUNICODE + s + obj)
                    else:
                        self.write(BINSTRING + s + obj)
            else:
                if unicode:
                    obj = obj.replace("\\", "\\u005c")
                    obj = obj.replace("\n", "\\u000a")
                    obj = obj.encode('raw-unicode-escape')
                    self.write(UNICODE + obj + '\n')
                else:
                    self.write(STRING + repr(obj) + '\n')
            self.memoize(obj)
        dispatch[StringType] = save_string

    def save_tuple(self, obj):
        write = self.write
        proto = self.proto

        n = len(obj)
        if n == 0:
            if proto:
                write(EMPTY_TUPLE)
            else:
                write(MARK + TUPLE)
            return

        save = self.save
        memo = self.memo
        if n <= 3 and proto >= 2:
            for element in obj:
                save(element)
            # Subtle.  Same as in the big comment below.
            if id(obj) in memo:
                get = self.get(memo[id(obj)][0])
                write(POP * n + get)
            else:
                write(_tuplesize2code[n])
                self.memoize(obj)
            return

        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
        # has more than 3 elements.
        write(MARK)
        for element in obj:
            save(element)

        if id(obj) in memo:
            # Subtle.  d was not in memo when we entered save_tuple(), so
            # the process of saving the tuple's elements must have saved
            # the tuple itself:  the tuple is recursive.  The proper action
            # now is to throw away everything we put on the stack, and
            # simply GET the tuple (it's already constructed).  This check
            # could have been done in the "for element" loop instead, but
            # recursive tuples are a rare thing.
            get = self.get(memo[id(obj)][0])
            if proto:
                write(POP_MARK + get)
            else:   # proto 0 -- POP_MARK not available
                write(POP * (n+1) + get)
            return

        # No recursion.
        self.write(TUPLE)
        self.memoize(obj)

    dispatch[TupleType] = save_tuple

    # save_empty_tuple() isn't used by anything in Python 2.3.  However, I
    # found a Pickler subclass in Zope3 that calls it, so it's not harmless
    # to remove it.
    def save_empty_tuple(self, obj):
        self.write(EMPTY_TUPLE)

    def save_list(self, obj):
        write = self.write

        if self.bin:
            write(EMPTY_LIST)
        else:   # proto 0 -- can't use EMPTY_LIST
            write(MARK + LIST)

        self.memoize(obj)
        self._batch_appends(iter(obj))

    dispatch[ListType] = save_list

    # Keep in synch with cPickle's BATCHSIZE.  Nothing will break if it gets
    # out of synch, though.
    _BATCHSIZE = 1000

    def _batch_appends(self, items):
        # Helper to batch up APPENDS sequences
        save = self.save
        write = self.write

        if not self.bin:
            for x in items:
                save(x)
                write(APPEND)
            return

        r = xrange(self._BATCHSIZE)
        while items is not None:
            tmp = []
            for i in r:
                try:
                    x = items.next()
                    tmp.append(x)
                except StopIteration:
                    items = None
                    break
            n = len(tmp)
            if n > 1:
                write(MARK)
                for x in tmp:
                    save(x)
                write(APPENDS)
            elif n:
                save(tmp[0])
                write(APPEND)
            # else tmp is empty, and we're done

    def save_dict(self, obj):
        write = self.write

        if self.bin:
            write(EMPTY_DICT)
        else:   # proto 0 -- can't use EMPTY_DICT
            write(MARK + DICT)

        self.memoize(obj)
        self._batch_setitems(obj.iteritems())

    dispatch[DictionaryType] = save_dict
    if not PyStringMap is None:
        dispatch[PyStringMap] = save_dict

    def _batch_setitems(self, items):
        # Helper to batch up SETITEMS sequences; proto >= 1 only
        save = self.save
        write = self.write

        if not self.bin:
            for k, v in items:
                save(k)
                save(v)
                write(SETITEM)
            return

        r = xrange(self._BATCHSIZE)
        while items is not None:
            tmp = []
            for i in r:
                try:
                    tmp.append(items.next())
                except StopIteration:
                    items = None
                    break
            n = len(tmp)
            if n > 1:
                write(MARK)
                for k, v in tmp:
                    save(k)
                    save(v)
                write(SETITEMS)
            elif n:
                k, v = tmp[0]
                save(k)
                save(v)
                write(SETITEM)
            # else tmp is empty, and we're done

    def save_inst(self, obj):
        # Pickle an old-style (classic) class instance.
        cls = obj.__class__

        memo = self.memo
        write = self.write
        save = self.save

        if hasattr(obj, '__getinitargs__'):
            args = obj.__getinitargs__()
            len(args)  # XXX Assert it's a sequence
            _keep_alive(args, memo)
        else:
            args = ()

        write(MARK)

        if self.bin:
            save(cls)
            for arg in args:
                save(arg)
            write(OBJ)
        else:
            for arg in args:
                save(arg)
            write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')

        self.memoize(obj)

        try:
            getstate = obj.__getstate__
        except AttributeError:
            stuff = obj.__dict__
        else:
            stuff = getstate()
            _keep_alive(stuff, memo)
        save(stuff)
        write(BUILD)

    dispatch[InstanceType] = save_inst

    def save_global(self, obj, name=None, pack=struct.pack):
        # Pickle a class/function/type by reference (module + name),
        # verifying that the name actually resolves back to the object.
        write = self.write
        memo = self.memo

        if name is None:
            name = obj.__name__

        module = getattr(obj, "__module__", None)
        if module is None:
            module = whichmodule(obj, name)

        try:
            __import__(module)
            mod = sys.modules[module]
            klass = getattr(mod, name)
        except (ImportError, KeyError, AttributeError):
            raise PicklingError(
                "Can't pickle %r: it's not found as %s.%s" %
                (obj, module, name))
        else:
            if klass is not obj:
                raise PicklingError(
                    "Can't pickle %r: it's not the same object as %s.%s" %
                    (obj, module, name))

        if self.proto >= 2:
            code = _extension_registry.get((module, name))
            if code:
                assert code > 0
                if code <= 0xff:
                    write(EXT1 + chr(code))
                elif code <= 0xffff:
                    write("%c%c%c" % (EXT2, code&0xff, code>>8))
                else:
                    write(EXT4 + pack("<i", code))
                return

        write(GLOBAL + module + '\n' + name + '\n')
        self.memoize(obj)

    dispatch[ClassType] = save_global
    dispatch[FunctionType] = save_global
    dispatch[BuiltinFunctionType] = save_global
    dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Because we remember objects by their id, we have to assure that
    possibly temporary objects are kept alive by referencing them.
    We store a reference at the id of the memo, which should normally
    not be used unless someone tries to deepcopy the memo itself...
    """
    # The list is created lazily on first use, then appended to.
    memo.setdefault(id(memo), []).append(x)
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {}  # called classmap for backwards compatibility


def whichmodule(func, funcname):
    """Figure out the module in which a function occurs.

    Search sys.modules for the module, cache the answer in classmap,
    and return the module name.  If the function cannot be found,
    return "__main__".
    """
    # Python functions should always get an __module__ from their globals.
    modname = getattr(func, "__module__", None)
    if modname is not None:
        return modname
    cached = classmap.get(func)
    if cached is not None:
        return cached
    found = '__main__'
    for modname, module in sys.modules.items():
        if module is None:
            continue  # skip dummy package entries
        if modname != '__main__' and getattr(module, funcname, None) is func:
            found = modname
            break
    classmap[func] = found
    return found
# Unpickling machinery
class Unpickler:
    """Rebuilds an object hierarchy from a pickle data stream.

    Each one-character opcode read from the stream is looked up in the
    class-level `dispatch` table and handled by the matching load_*
    method, which manipulates self.stack; the STOP opcode raises _Stop
    carrying the final result.
    """

    def __init__(self, file):
        """This takes a file-like object for reading a pickle data stream.

        The protocol version of the pickle is detected automatically, so no
        proto argument is needed.

        The file-like object must have two methods, a read() method that
        takes an integer argument, and a readline() method that requires no
        arguments.  Both methods should return a string.  Thus file-like
        object can be a file object opened for reading, a StringIO object,
        or any other custom object that meets this interface.
        """
        self.readline = file.readline
        self.read = file.read
        self.memo = {}

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        self.mark = object()  # any new unique object
        self.stack = []
        self.append = self.stack.append
        read = self.read
        dispatch = self.dispatch
        try:
            while 1:
                key = read(1)
                dispatch[key](self)
        except _Stop, stopinst:
            # STOP opcode reached: the value is the unpickled object.
            return stopinst.value

    # Return largest index k such that self.stack[k] is self.mark.
    # If the stack doesn't contain a mark, eventually raises IndexError.
    # This could be sped by maintaining another stack, of indices at which
    # the mark appears.  For that matter, the latter stack would suffice,
    # and we wouldn't need to push mark objects on self.stack at all.
    # Doing so is probably a good thing, though, since if the pickle is
    # corrupt (or hostile) we may get a clue from finding self.mark embedded
    # in unpickled objects.
    def marker(self):
        stack = self.stack
        mark = self.mark
        k = len(stack)-1
        while stack[k] is not mark: k = k-1
        return k

    # Opcode -> handler table, populated as each handler is defined.
    dispatch = {}

    def load_eof(self):
        # read(1) returned '' -- stream ended without a STOP opcode.
        raise EOFError
    dispatch[''] = load_eof

    def load_proto(self):
        proto = ord(self.read(1))
        if not 0 <= proto <= 2:
            raise ValueError, "unsupported pickle protocol: %d" % proto
    dispatch[PROTO] = load_proto

    def load_persid(self):
        # Text-mode persistent id: one line, newline stripped.
        pid = self.readline()[:-1]
        self.append(self.persistent_load(pid))
    dispatch[PERSID] = load_persid

    def load_binpersid(self):
        # Binary persistent id: the id itself was pickled onto the stack.
        pid = self.stack.pop()
        self.append(self.persistent_load(pid))
    dispatch[BINPERSID] = load_binpersid

    def load_none(self):
        self.append(None)
    dispatch[NONE] = load_none

    def load_false(self):
        self.append(False)
    dispatch[NEWFALSE] = load_false

    def load_true(self):
        self.append(True)
    dispatch[NEWTRUE] = load_true

    def load_int(self):
        data = self.readline()
        # Protocol 1 encoded booleans as the special INT lines "01"/"00".
        if data == FALSE[1:]:
            val = False
        elif data == TRUE[1:]:
            val = True
        else:
            try:
                val = int(data)
            except ValueError:
                # Doesn't fit in a plain int; fall back to long.
                val = long(data)
        self.append(val)
    dispatch[INT] = load_int

    def load_binint(self):
        # 4-byte signed little-endian int.
        self.append(mloads('i' + self.read(4)))
    dispatch[BININT] = load_binint

    def load_binint1(self):
        # 1-byte unsigned int.
        self.append(ord(self.read(1)))
    dispatch[BININT1] = load_binint1

    def load_binint2(self):
        # 2-byte unsigned little-endian int, zero-extended to 4 bytes.
        self.append(mloads('i' + self.read(2) + '\000\000'))
    dispatch[BININT2] = load_binint2

    def load_long(self):
        # Base 0 so the "0x..."/"0..." repr forms are accepted.
        self.append(long(self.readline()[:-1], 0))
    dispatch[LONG] = load_long

    def load_long1(self):
        # Length byte followed by that many two's-complement bytes.
        n = ord(self.read(1))
        bytes = self.read(n)
        self.append(decode_long(bytes))
    dispatch[LONG1] = load_long1

    def load_long4(self):
        # 4-byte length followed by that many two's-complement bytes.
        n = mloads('i' + self.read(4))
        bytes = self.read(n)
        self.append(decode_long(bytes))
    dispatch[LONG4] = load_long4

    def load_float(self):
        self.append(float(self.readline()[:-1]))
    dispatch[FLOAT] = load_float

    def load_binfloat(self, unpack=struct.unpack):
        # 8-byte big-endian IEEE double.
        self.append(unpack('>d', self.read(8))[0])
    dispatch[BINFLOAT] = load_binfloat

    def load_string(self):
        rep = self.readline()[:-1]
        # The repr must be properly quoted, otherwise eval-style parsing
        # of hostile data could be dangerous -- hence "insecure".
        for q in "\"'":  # double or single quote
            if rep.startswith(q):
                if not rep.endswith(q):
                    raise ValueError, "insecure string pickle"
                rep = rep[len(q):-len(q)]
                break
        else:
            raise ValueError, "insecure string pickle"
        self.append(rep.decode("string-escape"))
    dispatch[STRING] = load_string

    def load_binstring(self):
        # NOTE: `len` shadows the builtin here; kept for byte-compatibility.
        len = mloads('i' + self.read(4))
        self.append(self.read(len))
    dispatch[BINSTRING] = load_binstring

    def load_unicode(self):
        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
    dispatch[UNICODE] = load_unicode

    def load_binunicode(self):
        len = mloads('i' + self.read(4))
        self.append(unicode(self.read(len),'utf-8'))
    dispatch[BINUNICODE] = load_binunicode

    def load_short_binstring(self):
        len = ord(self.read(1))
        self.append(self.read(len))
    dispatch[SHORT_BINSTRING] = load_short_binstring

    def load_tuple(self):
        # Replace everything above the topmost mark with one tuple.
        k = self.marker()
        self.stack[k:] = [tuple(self.stack[k+1:])]
    dispatch[TUPLE] = load_tuple

    def load_empty_tuple(self):
        self.stack.append(())
    dispatch[EMPTY_TUPLE] = load_empty_tuple

    def load_tuple1(self):
        self.stack[-1] = (self.stack[-1],)
    dispatch[TUPLE1] = load_tuple1

    def load_tuple2(self):
        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
    dispatch[TUPLE2] = load_tuple2

    def load_tuple3(self):
        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
    dispatch[TUPLE3] = load_tuple3

    def load_empty_list(self):
        self.stack.append([])
    dispatch[EMPTY_LIST] = load_empty_list

    def load_empty_dictionary(self):
        self.stack.append({})
    dispatch[EMPTY_DICT] = load_empty_dictionary

    def load_list(self):
        k = self.marker()
        self.stack[k:] = [self.stack[k+1:]]
    dispatch[LIST] = load_list

    def load_dict(self):
        # Items above the mark alternate key, value, key, value, ...
        k = self.marker()
        d = {}
        items = self.stack[k+1:]
        for i in range(0, len(items), 2):
            key = items[i]
            value = items[i+1]
            d[key] = value
        self.stack[k:] = [d]
    dispatch[DICT] = load_dict

    # INST and OBJ differ only in how they get a class object.  It's not
    # only sensible to do the rest in a common routine, the two routines
    # previously diverged and grew different bugs.
    #
    # klass is the class to instantiate, and k points to the topmost mark
    # object, following which are the arguments for klass.__init__.
    def _instantiate(self, klass, k):
        args = tuple(self.stack[k+1:])
        del self.stack[k:]
        instantiated = 0
        if (not args and
            type(klass) is ClassType and
            not hasattr(klass, "__getinitargs__")):
            try:
                # Fast path: make a blank instance and patch its class,
                # skipping __init__ entirely.
                value = _EmptyClass()
                value.__class__ = klass
                instantiated = 1
            except RuntimeError:
                # In restricted execution, assignment to inst.__class__ is
                # prohibited
                pass
        if not instantiated:
            try:
                value = klass(*args)
            except TypeError, err:
                # Re-raise with the class name and the original traceback.
                raise TypeError, "in constructor for %s: %s" % (
                    klass.__name__, str(err)), sys.exc_info()[2]
        self.append(value)

    def load_inst(self):
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self._instantiate(klass, self.marker())
    dispatch[INST] = load_inst

    def load_obj(self):
        # Stack is ... markobject classobject arg1 arg2 ...
        k = self.marker()
        klass = self.stack.pop(k+1)
        self._instantiate(klass, k)
    dispatch[OBJ] = load_obj

    def load_newobj(self):
        # Protocol 2: cls.__new__(cls, *args), bypassing __init__.
        args = self.stack.pop()
        cls = self.stack[-1]
        obj = cls.__new__(cls, *args)
        self.stack[-1] = obj
    dispatch[NEWOBJ] = load_newobj

    def load_global(self):
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self.append(klass)
    dispatch[GLOBAL] = load_global

    def load_ext1(self):
        code = ord(self.read(1))
        self.get_extension(code)
    dispatch[EXT1] = load_ext1

    def load_ext2(self):
        # 2-byte little-endian extension code, zero-extended.
        code = mloads('i' + self.read(2) + '\000\000')
        self.get_extension(code)
    dispatch[EXT2] = load_ext2

    def load_ext4(self):
        code = mloads('i' + self.read(4))
        self.get_extension(code)
    dispatch[EXT4] = load_ext4

    def get_extension(self, code):
        """Resolve an extension-registry code to an object and push it."""
        # `nil` sentinel distinguishes "cached None" from "not cached".
        nil = []
        obj = _extension_cache.get(code, nil)
        if obj is not nil:
            self.append(obj)
            return
        key = _inverted_registry.get(code)
        if not key:
            raise ValueError("unregistered extension code %d" % code)
        obj = self.find_class(*key)
        _extension_cache[code] = obj
        self.append(obj)

    def find_class(self, module, name):
        # Subclasses may override this
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        return klass

    def load_reduce(self):
        # Apply the callable below the top of stack to the args on top.
        stack = self.stack
        args = stack.pop()
        func = stack[-1]
        value = func(*args)
        stack[-1] = value
    dispatch[REDUCE] = load_reduce

    def load_pop(self):
        del self.stack[-1]
    dispatch[POP] = load_pop

    def load_pop_mark(self):
        k = self.marker()
        del self.stack[k:]
    dispatch[POP_MARK] = load_pop_mark

    def load_dup(self):
        self.append(self.stack[-1])
    dispatch[DUP] = load_dup

    def load_get(self):
        # Memo keys are the decimal string forms of the memo indices.
        self.append(self.memo[self.readline()[:-1]])
    dispatch[GET] = load_get

    def load_binget(self):
        i = ord(self.read(1))
        self.append(self.memo[repr(i)])
    dispatch[BINGET] = load_binget

    def load_long_binget(self):
        i = mloads('i' + self.read(4))
        self.append(self.memo[repr(i)])
    dispatch[LONG_BINGET] = load_long_binget

    def load_put(self):
        self.memo[self.readline()[:-1]] = self.stack[-1]
    dispatch[PUT] = load_put

    def load_binput(self):
        i = ord(self.read(1))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[BINPUT] = load_binput

    def load_long_binput(self):
        i = mloads('i' + self.read(4))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[LONG_BINPUT] = load_long_binput

    def load_append(self):
        stack = self.stack
        value = stack.pop()
        list = stack[-1]
        list.append(value)
    dispatch[APPEND] = load_append

    def load_appends(self):
        # Extend the list below the mark with everything above the mark.
        stack = self.stack
        mark = self.marker()
        list = stack[mark - 1]
        list.extend(stack[mark + 1:])
        del stack[mark:]
    dispatch[APPENDS] = load_appends

    def load_setitem(self):
        stack = self.stack
        value = stack.pop()
        key = stack.pop()
        dict = stack[-1]
        dict[key] = value
    dispatch[SETITEM] = load_setitem

    def load_setitems(self):
        # Key/value pairs above the mark go into the dict below the mark.
        stack = self.stack
        mark = self.marker()
        dict = stack[mark - 1]
        for i in range(mark + 1, len(stack), 2):
            dict[stack[i]] = stack[i + 1]
        del stack[mark:]
    dispatch[SETITEMS] = load_setitems

    def load_build(self):
        """Apply pickled state to the instance below the top of stack."""
        stack = self.stack
        state = stack.pop()
        inst = stack[-1]
        setstate = getattr(inst, "__setstate__", None)
        if setstate:
            setstate(state)
            return
        slotstate = None
        # Protocol 2 may encode state as (dict_state, slot_state).
        if isinstance(state, tuple) and len(state) == 2:
            state, slotstate = state
        if state:
            try:
                inst.__dict__.update(state)
            except RuntimeError:
                # XXX In restricted execution, the instance's __dict__
                # is not accessible.  Use the old way of unpickling
                # the instance variables.  This is a semantic
                # difference when unpickling in restricted
                # vs. unrestricted modes.
                # Note, however, that cPickle has never tried to do the
                # .update() business, and always uses
                #     PyObject_SetItem(inst.__dict__, key, value) in a
                # loop over state.items().
                for k, v in state.items():
                    setattr(inst, k, v)
        if slotstate:
            for k, v in slotstate.items():
                setattr(inst, k, v)
    dispatch[BUILD] = load_build

    def load_mark(self):
        self.append(self.mark)
    dispatch[MARK] = load_mark

    def load_stop(self):
        # End of pickle: unwind out of load() with the result.
        value = self.stack.pop()
        raise _Stop(value)
    dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
    """Blank old-style class; _instantiate re-assigns __class__ on instances."""
    pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
    r"""Encode a long to a two's complement little-endian binary string.

    Note that 0L is a special case, returning an empty string, to save a
    byte in the LONG1 pickling context.

    >>> encode_long(0L)
    ''
    >>> encode_long(255L)
    '\xff\x00'
    >>> encode_long(32767L)
    '\xff\x7f'
    >>> encode_long(-256L)
    '\x00\xff'
    >>> encode_long(-32768L)
    '\x00\x80'
    >>> encode_long(-128L)
    '\x80'
    >>> encode_long(127L)
    '\x7f'
    >>>
    """
    if x == 0:
        return ''
    if x > 0:
        ashex = hex(x)
        assert ashex.startswith("0x")
        # hex() may append a trailing 'L'; count it along with the "0x".
        njunkchars = 2 + ashex.endswith('L')
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # need an even # of nibbles for unhexlify
            ashex = "0x0" + ashex[2:]
        elif int(ashex[2], 16) >= 8:
            # "looks negative", so need a byte of sign bits
            ashex = "0x00" + ashex[2:]
    else:
        # Build the 256's-complement:  (1L << nbytes) + x.  The trick is
        # to find the number of bytes in linear time (although that should
        # really be a constant-time task).
        ashex = hex(-x)
        assert ashex.startswith("0x")
        njunkchars = 2 + ashex.endswith('L')
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # Extend to a full byte.
            nibbles += 1
        nbits = nibbles * 4
        x += 1L << nbits
        assert x > 0
        ashex = hex(x)
        njunkchars = 2 + ashex.endswith('L')
        newnibbles = len(ashex) - njunkchars
        if newnibbles < nibbles:
            # Adding the complement shrank the hex repr; left-pad with 0s.
            ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
        if int(ashex[2], 16) < 8:
            # "looks positive", so need a byte of sign bits
            ashex = "0xff" + ashex[2:]
    if ashex.endswith('L'):
        ashex = ashex[2:-1]
    else:
        ashex = ashex[2:]
    assert len(ashex) & 1 == 0, (x, ashex)
    binary = _binascii.unhexlify(ashex)
    # Reverse to little-endian byte order.
    return binary[::-1]
def decode_long(data):
    r"""Decode a long from a two's complement little-endian binary string.

    >>> decode_long('')
    0L
    >>> decode_long("\xff\x00")
    255L
    >>> decode_long("\xff\x7f")
    32767L
    >>> decode_long("\x00\xff")
    -256L
    >>> decode_long("\x00\x80")
    -32768L
    >>> decode_long("\x80")
    -128L
    >>> decode_long("\x7f")
    127L
    """
    nbytes = len(data)
    if nbytes == 0:
        return 0L
    # Reverse to big-endian and parse as hex.
    ashex = _binascii.hexlify(data[::-1])
    n = long(ashex, 16)  # quadratic time before Python 2.3; linear now
    if data[-1] >= '\x80':
        # Sign bit set: subtract 2**(8*nbytes) for two's complement.
        n -= 1L << (nbytes * 8)
    return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
    """Pickle obj to the open file object, using the given protocol."""
    Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
    """Return the pickled representation of obj as a string."""
    buf = StringIO()
    Pickler(buf, protocol).dump(obj)
    return buf.getvalue()
def load(file):
    """Unpickle and return the next object from the open file object."""
    return Unpickler(file).load()
def loads(str):
    """Unpickle and return the object encoded in the given string."""
    return Unpickler(StringIO(str)).load()
# Doctest
def _test():
    """Run this module's doctests (the encode_long/decode_long examples)."""
    import doctest
    return doctest.testmod()

if __name__ == "__main__":
    _test()
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pickle
import os
def writeOpt(opt, user):
    """Persist the user's options to ~/.config/.flickrmsntemp/<user>.fkl.

    The configuration directory is created on first use; the file is
    written with `with` so it is closed even if pickling fails.
    """
    directory = os.path.join(os.path.expanduser("~"), ".config", ".flickrmsntemp")
    # First run: the plugin's config directory may not exist yet.
    if not os.path.isdir(directory):
        os.makedirs(directory)
    with open(os.path.join(directory, user + ".fkl"), 'wb') as output:
        pickle.dump(opt, output)
def readOpt(user):
    """Load the user's options from ~/.config/.flickrmsntemp/<user>.fkl.

    If the file cannot be opened (typically the first run), default
    options are created, persisted via writeOpt, and returned.
    """
    path = os.path.join(os.path.expanduser("~"), ".config", ".flickrmsntemp", user + ".fkl")
    try:
        # `with` guarantees the file is closed even if unpickling fails;
        # the original leaked the handle on a pickle error.
        with open(path, 'rb') as output:
            opt = pickle.load(output)
    except IOError:
        # Missing/unreadable file: fall back to defaults and persist them.
        # (The original used the Python-2-only "except IOError as (errno,
        # strerror)" form and never used the unpacked values.)
        opt = FlickrOpt(True, True, 0, 0)
        writeOpt(opt, user)
    return opt
class FlickrOpt:
    """Plain value object holding the per-user FlickrAvatar settings."""

    def __init__(self, enabled, random, album, speed):
        # Whether avatar rotation is active at all.
        self.enabled = enabled
        # Pick pictures randomly instead of sequentially.
        self.random = random
        # Index of the chosen Flickr set.
        self.album = album
        # Index into the rotation-speed combo box.
        self.speed = speed
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#from PIL import Image
import urllib
def download_file(url, filename):
    """Download `url` and write the raw response bytes to `filename`.

    Both the HTTP response and the local file are closed even when the
    transfer fails; the original leaked both on error.
    """
    u = urllib.urlopen(url)
    try:
        with open(filename, 'wb') as localFile:
            localFile.write(u.read())
    finally:
        u.close()
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import gtk
import sys
import os
class ModalBox:
    """Preferences window of the FlickrAvatar plugin (PyGTK).

    All widgets are built once in __init__; client code registers
    callbacks with the set_*_callback methods and shows the modal
    window with show().  Closing the window only hides it so it can
    be re-shown later.
    """

    def __init__(self):
        # attribs
        self.user = gtk.Entry()
        self.time_change = gtk.Entry()
        self.logoutmess = gtk.Label("")
        self.logo = gtk.Image()
        self.preview = gtk.Image()
        self.combobox = gtk.ComboBoxEntry()
        self.combospeed = gtk.ComboBoxEntry()
        self.check_enable = gtk.CheckButton("Enable")
        self.check_random = gtk.CheckButton("Get Randomly")
        # Initialise every callback slot so the *_cb handlers can test
        # them safely even when a callback was never registered (the
        # original only initialised combo_change_callback, so e.g.
        # pressing "Done" before set_done_callback raised AttributeError).
        self.combo_change_callback = None
        self.done_callback = None
        self.get_album_callback = None
        self.logout_callback = None
        self.random_callback = None
        self.btn_get_album = gtk.Button("Refresh your Sets from Flickr")
        self.btn_logout = gtk.Button("Logout")
        self.btn_ok = gtk.Button("Done")
        # gui layout
        self.main_boxv = gtk.VBox()
        self.main_box = gtk.HBox()
        self.main_boxv.pack_start(self.main_box)
        time_layout = gtk.HBox()
        time_layout.pack_start(gtk.Label("Rotation speed:"))
        time_layout.pack_start(self.combospeed)
        desc = gtk.VBox()
        desc.pack_start(self.logo)
        desc.pack_start(gtk.HSeparator())
        desc.pack_start(gtk.Label("Preview"))
        # NOTE: a duplicate `self.window = gtk.Window(...)` used to sit
        # here; it was dead (immediately overwritten below) and leaked a
        # toplevel window, so it has been removed.
        desc.pack_start(self.preview)
        desc.pack_start(self.btn_logout)
        fields = gtk.VBox()
        fields.pack_start(gtk.Label(""))
        fields.pack_start(self.logoutmess)
        fields.pack_start(self.btn_get_album)
        fields.pack_start(gtk.Label(""))
        fields.pack_start(gtk.Label("Choose your set to display:"))
        fields.pack_start(self.combobox)
        fields.pack_start(gtk.Label(""))
        fields.pack_start(self.check_enable)
        fields.pack_start(self.check_random)
        fields.pack_start(time_layout)
        self.main_box.pack_start(desc)
        self.main_box.pack_start(gtk.VSeparator())
        self.main_box.pack_start(fields)
        self.main_boxv.pack_start(self.btn_ok)
        # window
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.add(self.main_boxv)
        self.window.set_title("Flickr Avatar for emesene (ver. 1.0)")
        self.window.set_modal(True)
        # init
        self.set_model_from_list(self.combospeed, ["Very Slow", "Slow", "Medium", "Fast"])
        self.combospeed.set_active(0)
        # signal connection
        self.btn_ok.connect("clicked", self.done_cb)
        self.btn_get_album.connect("clicked", self.get_album_cb)
        self.btn_logout.connect("clicked", self.logout_cb)
        self.combobox.connect("changed", self.combo_change_cb)
        self.window.set_position(gtk.WIN_POS_CENTER_ALWAYS)
        self.window.connect("delete-event", self.delete_event)
        self.window.set_resizable(False)

    # callbacks
    def set_app_logo(self, path):
        """Remember the logo path and display the image."""
        self.logo_path = path
        self.set_logo(self.logo_path)

    def set_on_logout(self):
        """Show the post-logout instructions in the message label."""
        self.logoutmess.set_text("You have just been logged out.\nNow go on Flickr, login again,\nand press 'Refresh your Sets from Flickr'")

    def restore_on_logout(self):
        """Clear the logout message label."""
        self.logoutmess.set_text("")

    def done_cb(self, event):
        if self.done_callback:
            self.done_callback()

    def get_album_cb(self, event):
        if self.get_album_callback:
            self.get_album_callback()

    def logout_cb(self, event):
        if self.logout_callback:
            self.logout_callback()

    def random_cb(self, event):
        if self.random_callback:
            self.random_callback()

    def combo_change_cb(self, event):
        if self.combo_change_callback:
            self.combo_change_callback()

    # callback setter
    def set_album_callback(self, cb):
        self.get_album_callback = cb

    def set_logout_callback(self, cb):
        self.logout_callback = cb

    def set_done_callback(self, cb):
        self.done_callback = cb

    def set_random_callback(self, cb):
        self.random_callback = cb

    def set_combo_change_callback(self, cb):
        self.combo_change_callback = cb

    # window callback
    def self_destroy(self, e, w):
        self.window.hide_all()

    def hide_event(self, e, w):
        self.window.hide()

    def show(self, e=None, w=None):
        self.window.show_all()

    def delete_event(self, event, widget):
        # Hide instead of destroying so the window can be re-shown.
        self.window.hide()
        return True

    def set_logo(self, img):
        self.logo.set_from_file(img)

    def set_preview(self, img):
        self.preview.set_from_file(img)

    def set_albums(self, albums):
        """Fill the set-chooser combo with the given album names."""
        self.albums = albums
        self.set_model_from_list(self.combobox, albums)
        self.combobox.set_active(0)

    def get_selected_album(self):
        return self.combobox.get_active_text()

    def set_model_from_list(self, cb, items):
        """Load `items` (strings) into a ComboBox/ComboBoxEntry model."""
        model = gtk.ListStore(str)
        for i in items:
            model.append([i])
        cb.set_model(model)
        if type(cb) == gtk.ComboBoxEntry:
            cb.set_text_column(0)
        elif type(cb) == gtk.ComboBox:
            cell = gtk.CellRendererText()
            cb.pack_start(cell, True)
            cb.add_attribute(cell, 'text', 0)

    # states
    def is_enabled(self):
        return self.check_enable.get_active()

    def is_random(self):
        return self.check_random.get_active()

    def get_time_text(self, text):
        # NOTE(review): despite its name this method *sets* the entry text
        # (and set_time_text below *gets* it); names kept for API stability.
        self.time_change.set_text(text)

    def set_time_text(self):
        return self.time_change.get_text()

    def get_combo_index(self):
        return self.combobox.get_active()

    def get_combo_speed_index(self):
        return self.combospeed.get_active()

    def set_combo_index(self, n):
        return self.combobox.set_active(n)

    def set_combo_speed_index(self, n):
        return self.combospeed.set_active(n)
| Python |
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
    """Interface for reading an XML document using callbacks.

    XMLReader is the interface that an XML parser's SAX2 driver must
    implement. This interface allows an application to set and query
    features and properties in the parser, to register event handlers
    for document processing, and to initiate a document parse.

    All SAX interfaces are assumed to be synchronous: the parse
    methods must not return until parsing is complete, and readers
    must wait for an event-handler callback to return before reporting
    the next event.

    Note: subclasses rely on the _cont_handler/_dtd_handler/_ent_handler/
    _err_handler attribute names, so they are part of the de-facto
    interface and must not be renamed.
    """

    def __init__(self):
        # Default no-op handlers so a reader is usable before any
        # set*Handler call is made.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        "Parse an XML document from a system identifier or an InputSource."
        raise NotImplementedError("This method must be implemented!")

    def getContentHandler(self):
        "Returns the current ContentHandler."
        return self._cont_handler

    def setContentHandler(self, handler):
        "Registers a new object to receive document content events."
        self._cont_handler = handler

    def getDTDHandler(self):
        "Returns the current DTD handler."
        return self._dtd_handler

    def setDTDHandler(self, handler):
        "Register an object to receive basic DTD-related events."
        self._dtd_handler = handler

    def getEntityResolver(self):
        "Returns the current EntityResolver."
        return self._ent_handler

    def setEntityResolver(self, resolver):
        "Register an object to resolve external entities."
        self._ent_handler = resolver

    def getErrorHandler(self):
        "Returns the current ErrorHandler."
        return self._err_handler

    def setErrorHandler(self, handler):
        "Register an object to receive error-message events."
        self._err_handler = handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        SAX parsers are not required to provide localization for errors
        and warnings; if they cannot support the requested locale,
        however, they must throw a SAX exception. Applications may
        request a locale change in the middle of a parse."""
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        "Looks up and returns the state of a SAX2 feature."
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        "Sets the state of a SAX2 feature."
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        "Looks up and returns the value of a SAX2 property."
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        "Sets the value of a SAX2 property."
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
    """This interface adds three extra methods to the XMLReader
    interface that allow XML parsers to support incremental
    parsing. Support for this interface is optional, since not all
    underlying XML parsers support this functionality.

    When the parser is instantiated it is ready to begin accepting
    data from the feed method immediately. After parsing has been
    finished with a call to close the reset method must be called to
    make the parser ready to accept new data, either from feed or
    using the parse method.

    Note that these methods must _not_ be called during parsing, that
    is, after parse has been called and before it returns.

    By default, the class also implements the parse method of the XMLReader
    interface using the feed, close and reset methods of the
    IncrementalParser interface as a convenience to SAX 2.0 driver
    writers."""

    def __init__(self, bufsize=2**16):
        # bufsize: number of bytes read per feed() chunk in parse().
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        """Drive feed()/close() over the source's byte stream in
        bufsize-sized chunks."""
        # Imported lazily to avoid a circular import at module load time.
        import saxutils
        source = saxutils.prepare_input_source(source)
        self.prepareParser(source)
        file = source.getByteStream()
        buffer = file.read(self._bufsize)
        while buffer != "":
            self.feed(buffer)
            buffer = file.read(self._bufsize)
        self.close()

    def feed(self, data):
        """This method gives the raw XML data in the data parameter to
        the parser and makes it parse the data, emitting the
        corresponding events. It is allowed for XML constructs to be
        split across several calls to feed.

        feed may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """This method is called by the parse implementation to allow
        the SAX 2.0 driver to prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """This method is called when the entire XML document has been
        passed to the parser through the feed method, to notify the
        parser that there are no more data. This allows the parser to
        do the final checks on the document and empty the internal
        data buffer.

        The parser will not be ready to parse another document until
        the reset method has been called.

        close may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """This method is called after close has been called to reset
        the parser so that it is ready to parse new documents. The
        results of calling parse or feed after close without calling
        reset are undefined."""
        raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
    """Associates a SAX event with a document location.

    A locator's results are valid only during calls to DocumentHandler
    methods; at any other time they are unpredictable.  This base class
    reports "unknown" for every query.
    """

    def getColumnNumber(self):
        """Column number where the current event ends, or -1 if unknown."""
        return -1

    def getLineNumber(self):
        """Line number where the current event ends, or -1 if unknown."""
        return -1

    def getPublicId(self):
        """Public identifier for the current event, or None if unknown."""
        return None

    def getSystemId(self):
        """System identifier for the current event, or None if unknown."""
        return None
# ===== INPUTSOURCE =====
class InputSource:
    """Bundle of the information an XMLReader needs to read an entity.

    An InputSource can carry a public identifier, a system identifier,
    a byte stream (optionally tagged with a character encoding) and/or
    a character stream.  Applications pass instances to XMLReader.parse
    and return them from EntityResolver.resolveEntity; a reader must
    treat the objects it receives as read-only and may only modify
    copies it makes itself.
    """

    def __init__(self, system_id=None):
        self.__system_id = system_id
        self.__public_id = None
        self.__encoding = None
        self.__bytefile = None
        self.__charfile = None

    def setPublicId(self, public_id):
        """Set the public identifier of this InputSource."""
        self.__public_id = public_id

    def getPublicId(self):
        """Return the public identifier of this InputSource."""
        return self.__public_id

    def setSystemId(self, system_id):
        """Set the system identifier of this InputSource."""
        self.__system_id = system_id

    def getSystemId(self):
        """Return the system identifier of this InputSource."""
        return self.__system_id

    def setEncoding(self, encoding):
        """Set the character encoding of this InputSource.

        The string must be acceptable for an XML encoding declaration
        (XML recommendation section 4.3.3).  The encoding is ignored
        whenever a character stream is also present.
        """
        self.__encoding = encoding

    def getEncoding(self):
        """Return the character encoding of this InputSource."""
        return self.__encoding

    def setByteStream(self, bytefile):
        """Set the byte stream (a file-like object that performs no
        byte-to-character conversion) for this input source.

        A parser ignores it when a character stream is also set, but
        prefers it over opening a URI connection itself.  If the
        encoding of the bytes is known, record it with setEncoding.
        """
        self.__bytefile = bytefile

    def getByteStream(self):
        """Return the byte stream for this input source, if any.

        getEncoding reports the stream's character encoding, or None
        when it is unknown.
        """
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Set the character stream (a file-like object yielding Unicode
        strings) for this input source.

        When present, a parser ignores any byte stream and never opens
        a URI connection to the system identifier.
        """
        self.__charfile = charfile

    def getCharacterStream(self):
        """Return the character stream for this input source, if any."""
        return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
    """Non-NS-aware SAX Attributes implementation.

    attrs should be of the form {name : value}.  In this flat
    implementation a qualified name and a plain name are the same
    thing, so the *ByQName accessors simply validate and delegate.
    """

    def __init__(self, attrs):
        self._attrs = attrs

    def getLength(self):
        """Return the number of attributes."""
        return len(self._attrs)

    def getType(self, name):
        """All attributes are reported as CDATA."""
        return "CDATA"

    def getValue(self, name):
        """Return the value of the named attribute (KeyError if absent)."""
        return self._attrs[name]

    def getValueByQName(self, name):
        return self._attrs[name]

    def getNameByQName(self, name):
        if not name in self._attrs:
            # Portable call form; original used Python-2-only
            # "raise KeyError, name".
            raise KeyError(name)
        return name

    def getQNameByName(self, name):
        if not name in self._attrs:
            raise KeyError(name)
        return name

    def getNames(self):
        return self._attrs.keys()

    def getQNames(self):
        return self._attrs.keys()

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return self._attrs.keys()

    def has_key(self, name):
        return name in self._attrs

    def __contains__(self, name):
        # Was self._attrs.has_key(name): deprecated in 2.x, removed in 3,
        # and inconsistent with has_key() above.
        return name in self._attrs

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        return self.__class__(self._attrs)

    def items(self):
        return self._attrs.items()

    def values(self):
        return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
    """NS-aware SAX Attributes implementation.

    attrs should be of the form {(ns_uri, lname): value, ...},
    qnames of the form {(ns_uri, lname): qname, ...}.  The *ByQName
    accessors do a linear scan of the qname table.
    """

    def __init__(self, attrs, qnames):
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return self._attrs[nsname]
        # Portable call form; original used Python-2-only
        # "raise KeyError, name".
        raise KeyError(name)

    def getNameByQName(self, name):
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return nsname
        raise KeyError(name)

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return self._qnames.values()

    def copy(self):
        return self.__class__(self._attrs, self._qnames)
def _test():
    """Smoke test: instantiate the public base classes."""
    for cls in (XMLReader, IncrementalParser, Locator):
        cls()
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| Python |
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urlparse, urllib, types
import handler
import xmlreader
# On very old Pythons types.UnicodeType may be missing; fall back to
# byte strings only.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]

# See whether the xmlcharrefreplace error handler is
# supported
try:
    from codecs import xmlcharrefreplace_errors
    _error_handling = "xmlcharrefreplace"
    del xmlcharrefreplace_errors
except ImportError:
    _error_handling = "strict"
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary."""
    for old, new in d.items():
        s = s.replace(old, new)
    return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # The replacement targets had been entity-decoded into no-op
    # identity replacements; restored to the real XML entities.
    # Ampersand must be done first, or the '&' introduced by the other
    # replacements would itself get escaped.
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # The entity strings had been decoded into no-op identity
    # replacements; restored to the real XML entities.
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so "&amp;lt;" yields "&lt;" not "<"
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Whitespace is escaped numerically so it survives attribute-value
    # normalization.  (These replacements had been entity-decoded into
    # plain spaces/tab in the corrupted source; restored.)
    entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Both quote kinds present: quote with '"' and escape it.
            # (Was the syntactically broken replace('"', """).)
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
class XMLGenerator(handler.ContentHandler):
    """ContentHandler that writes the events it receives back out as an
    XML document on the given stream (default sys.stdout)."""

    def __init__(self, out=None, encoding="iso-8859-1"):
        if out is None:
            import sys
            out = sys.stdout
        handler.ContentHandler.__init__(self)
        self._out = out
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []
        self._encoding = encoding

    def _write(self, text):
        # Byte strings pass through unchanged; anything else is encoded
        # with the configured encoding (char-ref-replacing if available).
        if isinstance(text, str):
            self._out.write(text)
        else:
            self._out.write(text.encode(self._encoding, _error_handling))

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def startPrefixMapping(self, prefix, uri):
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._write('<' + name)
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        self._write('>')

    def endElement(self, name):
        self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        self._write('<' + self._qname(name))

        # BUG FIX: xmlns declarations used to go through self._out.write
        # directly, bypassing _write's encoding handling for non-str
        # prefixes/URIs; route them through _write like everything else.
        for prefix, uri in self._undeclared_ns_maps:
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        self._write('>')

    def endElementNS(self, name, qname):
        self._write('</%s>' % self._qname(name))

    def characters(self, content):
        self._write(escape(content))

    def ignorableWhitespace(self, content):
        self._write(content)

    def processingInstruction(self, target, data):
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        # parent is the wrapped XMLReader; it may also be supplied later
        # through setParent, before parse() is called.
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods -- forward to the registered error handler

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods -- forward to the registered content handler

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods -- forward to the registered DTD handler

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods -- forward to the registered entity resolver

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods -- delegate to the parent reader

    def parse(self, source):
        # Register this filter as all of the parent's handlers so every
        # event passes through here before reaching the client handlers.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent

    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base = ""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading."""
    if type(source) in _StringTypes:
        # A bare system identifier was passed in.
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # A file-like object was passed in; wrap it up.
        stream = source
        source = xmlreader.InputSource()
        source.setByteStream(stream)
        if hasattr(stream, "name"):
            source.setSystemId(stream.name)

    if source.getByteStream() is not None:
        return source

    # No byte stream yet: resolve the system id against the base, first
    # as a local file, otherwise as a URL.
    sysid = source.getSystemId()
    basehead = os.path.dirname(os.path.normpath(base))
    candidate = os.path.join(basehead, sysid)
    if os.path.isfile(candidate):
        source.setSystemId(candidate)
        stream = open(candidate, "rb")
    else:
        source.setSystemId(urlparse.urljoin(base, sysid))
        stream = urllib.urlopen(source.getSystemId())
    source.setByteStream(stream)

    return source
| Python |
"""Different kinds of SAX Exceptions"""
import sys
if sys.platform[:4] == "java":
    # On Jython, base SAX exceptions on java.lang.Exception so they
    # integrate with Java exception handling.
    from java.lang import Exception
del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
    """Encapsulate an XML error or warning.

    Carries basic error or warning information from either the XML
    parser or the application; subclass it to add functionality or
    localization.  Although a SAXException is handed to the
    ErrorHandler methods, the handler is never obliged to raise it --
    it may simply read the information out of it."""

    def __init__(self, msg, exception=None):
        """Creates an exception. The message is required, but the exception
        is optional."""
        self._msg = msg
        self._exception = exception
        Exception.__init__(self, msg)

    def getMessage(self):
        """Return a message for this exception."""
        return self._msg

    def getException(self):
        """Return the embedded exception, or None if there was none."""
        return self._exception

    def __str__(self):
        """Create a string representation of the exception."""
        return self._msg

    def __getitem__(self, ix):
        """Avoids weird error messages if someone does exception[ix] by
        mistake, since Exception has __getitem__ defined."""
        raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
    """Encapsulate an XML parse error or warning.

    Includes enough information to locate the error in the original
    XML document.  As with SAXException, receiving one does not oblige
    the application to raise it; it can simply read the information
    and act differently.  Being a SAXException subclass, it can also
    wrap another exception."""

    def __init__(self, msg, exception, locator):
        "Creates the exception. The exception parameter is allowed to be None."
        SAXException.__init__(self, msg, exception)
        self._locator = locator

        # Snapshot the locator's state right away: by the time this
        # exception is caught, the objects the locator consults may
        # already have been torn down.
        self._systemId = self._locator.getSystemId()
        self._colnum = self._locator.getColumnNumber()
        self._linenum = self._locator.getLineNumber()

    def getColumnNumber(self):
        """The column number of the end of the text where the exception
        occurred."""
        return self._colnum

    def getLineNumber(self):
        "The line number of the end of the text where the exception occurred."
        return self._linenum

    def getPublicId(self):
        "Get the public identifier of the entity where the exception occurred."
        return self._locator.getPublicId()

    def getSystemId(self):
        "Get the system identifier of the entity where the exception occurred."
        return self._systemId

    def __str__(self):
        "Create a string representation of the exception."
        sysid = self.getSystemId()
        if sysid is None:
            sysid = "<unknown>"
        linenum = self.getLineNumber()
        if linenum is None:
            linenum = "?"
        colnum = self.getColumnNumber()
        if colnum is None:
            colnum = "?"
        return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
    """Exception class for an unrecognized identifier.

    An XMLReader will raise this exception when it is confronted with an
    unrecognized feature or property. SAX applications and extensions may
    use this class for similar purposes."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
    """Exception class for an unsupported operation.

    An XMLReader will raise this exception when a service it cannot
    perform is requested (specifically setting a state or value). SAX
    applications and extensions may use this class for similar
    purposes."""
# ===== SAXREADERNOTAVAILABLE =====
class SAXReaderNotAvailable(SAXNotSupportedException):
    """Exception class for a missing driver.

    An XMLReader module (driver) should raise this exception when it
    is first imported, e.g. when a support module cannot be imported.
    It also may be raised during parsing, e.g. if executing an external
    program is not permitted."""
| Python |
"""
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id: handler.py 35816 2004-05-06 03:47:48Z fdrake $
"""
# Version of the SAX API provided by this module.
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # Default policy: escalate by raising the reported exception.
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # print() as a function works on Python 2 and 3; the bare Py2
        # print statement does not compile on Python 3.
        print(exception)
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Interface for receiving logical document content events.

    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""

    def __init__(self):
        # Locator supplied by the parser through setDocumentLocator;
        # None until (and unless) the parser provides one.
        self._locator = None

    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.

        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.

        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.

        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator

    # The event methods below are deliberate no-ops; subclasses override
    # the ones they care about.

    def startDocument(self):
        """Receive notification of the beginning of a document.

        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""

    def endDocument(self):
        """Receive notification of the end of a document.

        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.

        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).

        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.

        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""

    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.

        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""

    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElement event."""

    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.

        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element.

        The uri part of the name tuple is None for elements which have
        no namespace."""

    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElementNS event."""

    def characters(self, content):
        """Receive notification of character data.

        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""

    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.

        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.

        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information."""

    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.

        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.

        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""

    def skippedEntity(self, name):
        """Receive notification of a skipped entity.

        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
# ===== DTDHandler =====
class DTDHandler:
    """Handle DTD events.

    This interface specifies only those DTD events required for basic
    parsing (unparsed entities and attributes)."""

    # Both methods are deliberate no-ops; subclasses override as needed.

    def notationDecl(self, name, publicId, systemId):
        "Handle a notation declaration event."

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        "Handle an unparsed entity declaration event."
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving entities.

    Register an object implementing this interface with your Parser
    and the parser will call its method to resolve all external
    entities.  Note that DefaultHandler implements this interface with
    the default behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve the system identifier of an entity and return either
        the system identifier to read from as a string, or an InputSource
        to read from."""
        # Default behaviour: read straight from the given system id.
        return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================
# Standard SAX 2 feature names; pass these to XMLReader.get/setFeature.

feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]


#============================================================================
#
# CORE PROPERTIES
#
#============================================================================

# Standard SAX 2 property names; pass these to XMLReader.get/setProperty.

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#

all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| Python |
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from xmlreader import InputSource
from handler import ContentHandler, ErrorHandler
from _exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=None):
    """Parse an XML document from a URL or an InputSource, sending
    content events to handler and errors to errorHandler (a fresh
    ErrorHandler if none is given)."""
    # A None sentinel replaces the old "errorHandler=ErrorHandler()"
    # default, which created a single instance at def time shared by
    # every call; this also matches parseString's handling of None.
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
    """Parse an XML document held in a string buffer."""
    # Prefer the C implementation of StringIO when it is available.
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    if errorHandler is None:
        errorHandler = ErrorHandler()

    reader = make_parser()
    reader.setContentHandler(handler)
    reader.setErrorHandler(errorHandler)

    # Wrap the buffer in an InputSource so the reader can consume it.
    src = InputSource()
    src.setByteStream(StringIO(string))
    reader.parse(src)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function

default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
# "key in dict" instead of dict.has_key (deprecated in Py2, removed in
# Py3); behavior is identical.
if "PY_SAX_PARSER" in os.environ:
    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os

_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    # Jython: allow the parser list to come from the Java registry.
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
    """Creates and returns a SAX parser.

    Creates the first parser it is able to instantiate of the ones
    given in the list created by doing parser_list +
    default_parser_list.  The lists must contain the names of Python
    modules containing both a SAX parser and a create_parser function."""
    for parser_name in parser_list + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError:
            # Py3-compatible except clause (was "except ImportError,e";
            # the bound exception object was never used).
            import sys
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass

    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[ : 4] == "java":
    # Jython: load the driver module through the Java-aware import
    # machinery.
    def _create_parser(parser_name):
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    # CPython: __import__ with a fromlist so the submodule itself is
    # returned rather than the top-level package.
    def _create_parser(parser_name):
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

# sys was only needed for the platform check above.
del sys
| Python |
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
# Version of this driver (independent of the wrapped pyexpat version).
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
    raise SAXReaderNotAvailable("expat not available in Java", None)
del sys

try:
    from xml.parsers import expat
except ImportError:
    raise SAXReaderNotAvailable("expat not supported", None)
else:
    if not hasattr(expat, "ParserCreate"):
        # A module named expat without ParserCreate is not the parser
        # we need; treat it the same as a missing module.
        raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
# Short local aliases for the attribute implementations used below.
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl

# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
    import _weakref
except ImportError:
    def _mkproxy(o):
        # Weak references unavailable: hand back the object itself.
        return o
else:
    import weakref
    _mkproxy = weakref.proxy
    del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
    """Locator for use with the ExpatParser class.

    Holds only a weak proxy to the parser, so handing a locator to the
    content handler does not create a reference cycle between the
    parser and the handler.
    """

    def __init__(self, parser):
        self._ref = _mkproxy(parser)

    def getColumnNumber(self):
        owner = self._ref
        if owner._parser is None:
            return None
        return owner._parser.ErrorColumnNumber

    def getLineNumber(self):
        owner = self._ref
        if owner._parser is None:
            return 1
        return owner._parser.ErrorLineNumber

    def getPublicId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getPublicId()

    def getSystemId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
    def __init__(self, namespaceHandling=0, bufsize=2**16-20):
        xmlreader.IncrementalParser.__init__(self, bufsize)
        self._source = xmlreader.InputSource()
        self._parser = None                   # pyexpat parser; None until a parse starts
        self._namespaces = namespaceHandling  # feature_namespaces setting
        self._lex_handler_prop = None         # property_lexical_handler value
        self._parsing = 0                     # true while inside a parse
        self._entity_stack = []
        self._external_ges = 1                # feature_external_ges setting
        self._interning = None                # string-interning dict, or None
# XMLReader methods
    def parse(self, source):
        "Parse an XML document from a URL or an InputSource."
        source = saxutils.prepare_input_source(source)

        self._source = source
        # reset() runs before the locator is handed out, so the locator's
        # proxy observes the parser state for this parse.
        self.reset()
        self._cont_handler.setDocumentLocator(ExpatLocator(self))
        xmlreader.IncrementalParser.parse(self, source)
    def prepareParser(self, source):
        # Hand the document's system id to expat as its base (SetBase).
        if source.getSystemId() is not None:
            self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
    def setContentHandler(self, handler):
        """Redefined from IncrementalParser so the content handler can be
        swapped while a parse is in progress."""
        xmlreader.IncrementalParser.setContentHandler(self, handler)
        if self._parsing:
            # presumably rebinds expat callbacks to the new handler --
            # _reset_cont_handler is defined elsewhere in this class.
            self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error, e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
    """Return a new ExpatParser instance (module-level SAX driver factory)."""
    return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
    # Smoke test: parse a local sample file and echo it back through
    # XMLGenerator (writes the regenerated XML to stdout).
    import xml.sax
    p = create_parser()
    p.setContentHandler(xml.sax.XMLGenerator())
    p.setErrorHandler(xml.sax.ErrorHandler())
    p.parse("../../../hamlet.xml")
| Python |
"""Core XML support for Python.
This package contains four sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
etree -- The ElementTree XML library. This is a subset of the full
ElementTree XML release.
"""
__all__ = ["dom", "parsers", "sax", "etree"]
# When being checked-out without options, this has the form
# "<dollar>Revision: x.y </dollar>"
# When exported using -kv, it is "x.y".
__version__ = "$Revision: 41660 $".split()[-2:][0]
_MINIMUM_XMLPLUS_VERSION = (0, 8, 4)
try:
    import _xmlplus
except ImportError:
    # PyXML not installed: use the bundled subpackages as-is.
    pass
else:
    try:
        v = _xmlplus.version_info
    except AttributeError:
        # _xmlplus is too old; ignore it
        pass
    else:
        if v >= _MINIMUM_XMLPLUS_VERSION:
            # Recent-enough PyXML: graft this package's search path onto it
            # and substitute it for this package in sys.modules, so
            # "import xml" transparently yields PyXML.
            import sys
            _xmlplus.__path__.extend(__path__)
            sys.modules[__name__] = _xmlplus
        else:
            del v
| Python |
#
# ElementTree
# $Id: ElementInclude.py 1862 2004-06-18 07:31:02Z Fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
    """Fatal XInclude error: a resource could not be included, or the
    tree contains malformed XInclude elements."""
    pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
    """Default XInclude resource loader: read *href* from disk.

    @param href Resource reference (a file path).
    @param parse Parse mode.  Either "xml" or "text".
    @param encoding Optional text encoding (text mode only).
    @return An element (the parsed root) in "xml" mode, otherwise the
        file contents as a string (decoded if *encoding* was given).
    @throws IOError If the resource cannot be opened.
    """
    file = open(href)
    # close the handle even if parsing or decoding raises, so a failed
    # include does not leak an open file descriptor
    try:
        if parse == "xml":
            data = ElementTree.parse(file).getroot()
        else:
            data = file.read()
            if encoding:
                data = data.decode(encoding)
    finally:
        file.close()
    return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
    """Expand XInclude directives below *elem*, in place.

    Children are scanned by explicit index because the list is mutated
    while scanning: "xml" includes replace the child at the same index,
    while "text" includes delete the child and merge its text into the
    preceding sibling's tail (or the parent's text).
    """
    if loader is None:
        loader = default_loader
    # look for xinclude elements
    i = 0
    while i < len(elem):
        e = elem[i]
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = e.get("href")
            parse = e.get("parse", "xml")
            if parse == "xml":
                node = loader(href, parse)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                # shallow copy so the tail can be adjusted without
                # mutating the loader's (possibly cached) tree
                node = copy.copy(node)
                if e.tail:
                    node.tail = (node.tail or "") + e.tail
                elem[i] = node
            elif parse == "text":
                text = loader(href, parse, e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                if i:
                    # merge into the previous sibling's tail
                    node = elem[i-1]
                    node.tail = (node.tail or "") + text
                else:
                    # first child: merge into the parent's text
                    elem.text = (elem.text or "") + text + (e.tail or "")
                del elem[i]
                # do not advance i: the next child shifted into slot i
                continue
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                )
        elif e.tag == XINCLUDE_FALLBACK:
            raise FatalIncludeError(
                "xi:fallback tag must be child of xi:include (%r)" % e.tag
                )
        else:
            # recurse into ordinary children
            include(e, loader)
        i = i + 1
| Python |
# Wrapper module for _elementtree
from _elementtree import *
| Python |
#
# ElementTree
# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
# Tokenizer for the supported XPath subset.  findall() yields
# (operator, tag) pairs: group 1 matches operators ("/", ".", "*", "::",
# "..", "()", brackets, "@", "="), group 2 matches element names with an
# optional "{uri}" prefix; whitespace is skipped (matches neither group).
# Raw string so the regex escapes do not depend on Python passing
# unrecognized string escapes through unchanged.
xpath_tokenizer = re.compile(
    r"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
    ).findall
class xpath_descendant_or_self:
    # Marker class: an instance of this in Path.path represents a "//"
    # (descendant-or-self) step.
    pass
##
# Wrapper for a compiled XPath.
class Path:
    ##
    # Create an Path instance from an XPath expression.
    # Compiles the token stream into self.path: a list of tag strings,
    # "*" entries, and xpath_descendant_or_self markers for "//".
    # self.tag is set (as a fast-path) when the whole expression is a
    # single plain tag name.
    def __init__(self, path):
        tokens = xpath_tokenizer(path)
        # the current version supports 'path/path'-style expressions only
        self.path = []
        self.tag = None
        if tokens and tokens[0][0] == "/":
            raise SyntaxError("cannot use absolute path on element")
        while tokens:
            op, tag = tokens.pop(0)
            if tag or op == "*":
                self.path.append(tag or op)
            elif op == ".":
                # "." selects the context node; nothing to append
                pass
            elif op == "/":
                # a "/" in step position means "//" (descendant-or-self);
                # skip the separator check below, the next token is a step
                self.path.append(xpath_descendant_or_self())
                continue
            else:
                raise SyntaxError("unsupported path syntax (%s)" % op)
            if tokens:
                # between steps, the only legal token is the "/" separator
                op, tag = tokens.pop(0)
                if op != "/":
                    raise SyntaxError(
                        "expected path separator (%s)" % (op or tag)
                        )
        if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
            raise SyntaxError("path cannot end with //")
        if len(self.path) == 1 and isinstance(self.path[0], type("")):
            self.tag = self.path[0]
    ##
    # Find first matching object.
    def find(self, element):
        tag = self.tag
        if tag is None:
            # general case: evaluate the full path
            nodeset = self.findall(element)
            if not nodeset:
                return None
            return nodeset[0]
        # fast path: single tag, scan direct children only
        for elem in element:
            if elem.tag == tag:
                return elem
        return None
    ##
    # Find text for first matching object.
    def findtext(self, element, default=None):
        tag = self.tag
        if tag is None:
            nodeset = self.findall(element)
            if not nodeset:
                return default
            return nodeset[0].text or ""
        for elem in element:
            if elem.tag == tag:
                return elem.text or ""
        return default
    ##
    # Find all matching objects.
    # Evaluates the compiled steps left to right, mapping the current
    # nodeset through each step.
    def findall(self, element):
        nodeset = [element]
        index = 0
        while 1:
            try:
                path = self.path[index]
                index = index + 1
            except IndexError:
                # all steps consumed; nodeset is the result
                return nodeset
            set = []
            if isinstance(path, xpath_descendant_or_self):
                # "//" step: expand to all descendants, optionally
                # filtered by the next step's tag (consumed here)
                try:
                    tag = self.path[index]
                    if not isinstance(tag, type("")):
                        tag = None
                    else:
                        index = index + 1
                except IndexError:
                    tag = None # invalid path
                for node in nodeset:
                    new = list(node.getiterator(tag))
                    # getiterator includes the node itself; drop it so
                    # "//" yields descendants only
                    if new and new[0] is node:
                        set.extend(new[1:])
                    else:
                        set.extend(new)
            else:
                # plain tag or "*" step: collect matching direct children
                for node in nodeset:
                    for node in node:
                        if path == "*" or node.tag == path:
                            set.append(node)
            if not set:
                return []
            nodeset = set
_cache = {}
##
# (Internal) Compile path, memoizing up to 100 compiled expressions.
def _compile(path):
    try:
        return _cache[path]
    except KeyError:
        pass
    compiled = Path(path)
    if len(_cache) >= 100:
        # crude bound on cache growth: drop everything when full
        _cache.clear()
    _cache[path] = compiled
    return compiled
##
# Find first matching object.
def find(element, path):
    """Return the first element matching *path* below *element*, or None."""
    compiled = _compile(path)
    return compiled.find(element)
##
# Find text for first matching object.
def findtext(element, path, default=None):
    """Return the text of the first element matching *path*, or *default*."""
    compiled = _compile(path)
    return compiled.findtext(element, default)
##
# Find all matching objects.
def findall(element, path):
    """Return all elements matching *path* below *element*, in document order."""
    compiled = _compile(path)
    return compiled.findall(element)
| Python |
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring",
"iselement", "iterparse",
"parse",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring",
"TreeBuilder",
"VERSION", "XML",
"XMLParser", "XMLTreeBuilder",
]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} or {@link
# #SubElement} factory functions.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import string, sys, re
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
try:
import ElementPath
except ImportError:
# FIXME: issue warning in this case?
ElementPath = _SimpleElementPath()
# TODO: add support for custom namespace resolvers/default namespaces
# TODO: add improved support for incremental parsing
VERSION = "1.2.6"
##
# Internal element class. This class defines the Element interface,
# and provides a reference implementation of this interface.
# <p>
# You should not create instances of this class directly. Use the
# appropriate factory functions instead, such as {@link #Element}
# and {@link #SubElement}.
#
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class _ElementInterface:
    # <tag attrib>text<child/>...</tag>tail
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary.  Where possible, use
    # {@link #_ElementInterface.get},
    # {@link #_ElementInterface.set},
    # {@link #_ElementInterface.keys}, and
    # {@link #_ElementInterface.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement.  This is either a
    # string or the value None, if there was no text.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag.  This is either a string or
    # the value None, if there was no text.
    tail = None # text after end tag, if any
    def __init__(self, tag, attrib):
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at %x>" % (self.tag, id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        return Element(tag, attrib)
    ##
    # Returns the number of subelements.
    #
    # @return The number of subelements.
    def __len__(self):
        return len(self._children)
    ##
    # Returns the given subelement.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        return self._children[index]
    ##
    # Replaces the given subelement.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    # @exception AssertionError If element is not a valid object.
    # NOTE: assert-based validation disappears under "python -O".
    def __setitem__(self, index, element):
        assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        del self._children[index]
    ##
    # Returns a list containing subelements in the given range.
    # (Python 2 sequence slicing protocol.)
    #
    # @param start The first subelement to return.
    # @param stop The first subelement that shouldn't be returned.
    # @return A sequence object containing subelements.
    def __getslice__(self, start, stop):
        return self._children[start:stop]
    ##
    # Replaces a number of subelements with elements from a sequence.
    #
    # @param start The first subelement to replace.
    # @param stop The first subelement that shouldn't be replaced.
    # @param elements A sequence object with zero or more elements.
    # @exception AssertionError If a sequence member is not a valid object.
    def __setslice__(self, start, stop, elements):
        for element in elements:
            assert iselement(element)
        self._children[start:stop] = list(elements)
    ##
    # Deletes a number of subelements.
    #
    # @param start The first subelement to delete.
    # @param stop The first subelement to leave in there.
    def __delslice__(self, start, stop):
        del self._children[start:stop]
    ##
    # Adds a subelement to the end of this element.
    #
    # @param element The element to add.
    # @exception AssertionError If a sequence member is not a valid object.
    def append(self, element):
        assert iselement(element)
        self._children.append(element)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    # @exception AssertionError If the element is not a valid object.
    def insert(self, index, element):
        assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement.  Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    # @exception AssertionError If the element is not a valid object.
    def remove(self, element):
        assert iselement(element)
        self._children.remove(element)
    ##
    # Returns all subelements.  The elements are returned in document
    # order.  Note: this returns the live child list, not a copy.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path):
        return ElementPath.find(self, path)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #     default value if no element was found.  Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string
    def findtext(self, path, default=None):
        return ElementPath.findtext(self, path, default)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path):
        return ElementPath.findall(self, path)
    ##
    # Resets an element.  This function removes all subelements, clears
    # all attributes, and sets the text and tail attributes to None.
    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        self.attrib[key] = value
    ##
    # Gets a list of attribute names.  The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence.  The attributes are
    # returned in an arbitrary order.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        return self.attrib.items()
    ##
    # Creates a tree iterator.  The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, the result
    # is undefined.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return A list or iterator containing all the matching elements.
    # @defreturn list or iterator
    def getiterator(self, tag=None):
        nodes = []
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            nodes.append(self)
        # depth-first recursion keeps document order
        for node in self._children:
            nodes.extend(node.getiterator(tag))
        return nodes
# compatibility alias: older code refers to the implementation class
# as _Element
_Element = _ElementInterface
##
# Element factory. This function returns an object implementing the
# standard Element interface. The exact class or type of that object
# is implementation dependent, but it will always be compatible with
# the {@link #_ElementInterface} class in this module.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def Element(tag, attrib={}, **extra):
    """Element factory: build an element with the given tag and the
    merged attributes from *attrib* and the keyword arguments.  The
    incoming dictionary is copied, never mutated."""
    merged = dict(attrib)
    merged.update(extra)
    return _ElementInterface(tag, merged)
##
# Subelement factory. This function creates an element instance with
# the given tag and attributes, appends it to *parent*, and returns it.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    # copy before merging so the caller's dictionary is never mutated
    merged = attrib.copy()
    merged.update(extra)
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child
##
# Comment element factory. The element returned here is serialized
# as an XML comment when the tree is written out.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    # the Comment function object itself is used as the sentinel tag
    comment = Element(Comment)
    comment.text = text
    return comment
##
# PI element factory. The element returned here is serialized as an
# XML processing instruction when the tree is written out.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    # the function object itself is used as the sentinel tag
    pi = Element(ProcessingInstruction)
    content = target
    if text:
        content = content + " " + text
    pi.text = content
    return pi
PI = ProcessingInstruction
##
# QName wrapper. Wraps a QName attribute value so that it gets
# proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
#     or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
#     an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
    def __init__(self, text_or_uri, tag=None):
        if not tag:
            # already a universal name of the form {uri}local
            self.text = text_or_uri
        else:
            # build the universal name from uri + local part
            self.text = "{%s}%s" % (text_or_uri, tag)
    def __str__(self):
        return self.text
    def __hash__(self):
        return hash(self.text)
    def __cmp__(self, other):
        # compare against either another QName or a plain string
        if isinstance(other, QName):
            other = other.text
        return cmp(self.text, other)
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
    def __init__(self, element=None, file=None):
        assert element is None or iselement(element)
        self._root = element # first node
        if file:
            # initialize the tree from the given file (name or handle)
            self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
    def getroot(self):
        return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
    def _setroot(self, element):
        assert iselement(element)
        self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return The document root element.
# @defreturn Element
    def parse(self, source, parser=None):
        if not hasattr(source, "read"):
            # treat source as a file name
            source = open(source, "rb")
        if not parser:
            parser = XMLTreeBuilder()
        # feed the parser in fixed-size chunks to bound memory use
        while 1:
            data = source.read(32768)
            if not data:
                break
            parser.feed(data)
        self._root = parser.close()
        return self._root
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
    def getiterator(self, tag=None):
        assert self._root is not None
        return self._root.getiterator(tag)
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
    def find(self, path):
        assert self._root is not None
        if path[:1] == "/":
            # absolute paths are interpreted relative to the root
            path = "." + path
        return self._root.find(path)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# has is found, but has no text content, this method returns an
# empty string.
# @defreturn string
    def findtext(self, path, default=None):
        assert self._root is not None
        if path[:1] == "/":
            path = "." + path
        return self._root.findtext(path, default)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
    def findall(self, path):
        assert self._root is not None
        if path[:1] == "/":
            path = "." + path
        return self._root.findall(path)
##
# Writes the element tree to a file, as XML.
#
# @param file A file name, or a file object opened for writing.
# @param encoding Optional output encoding (default is US-ASCII).
    def write(self, file, encoding="us-ascii"):
        assert self._root is not None
        if not hasattr(file, "write"):
            file = open(file, "wb")
        if not encoding:
            encoding = "us-ascii"
        elif encoding != "utf-8" and encoding != "us-ascii":
            # only non-default encodings get an explicit XML declaration
            file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        self._write(file, self._root, encoding, {})
    def _write(self, file, node, encoding, namespaces):
        # write XML to file
        # *namespaces* maps namespace URI -> prefix for the current
        # scope; entries added while writing this node are removed
        # again before returning.
        tag = node.tag
        if tag is Comment:
            file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
        elif tag is ProcessingInstruction:
            file.write("<?%s?>" % _escape_cdata(node.text, encoding))
        else:
            items = node.items()
            xmlns_items = [] # new namespaces in this scope
            try:
                # universal names ({uri}tag) and QNames need a prefix
                if isinstance(tag, QName) or tag[:1] == "{":
                    tag, xmlns = fixtag(tag, namespaces)
                    if xmlns: xmlns_items.append(xmlns)
            except TypeError:
                _raise_serialization_error(tag)
            file.write("<" + _encode(tag, encoding))
            if items or xmlns_items:
                items.sort() # lexical order
                for k, v in items:
                    try:
                        # attribute names may also be universal names
                        if isinstance(k, QName) or k[:1] == "{":
                            k, xmlns = fixtag(k, namespaces)
                            if xmlns: xmlns_items.append(xmlns)
                    except TypeError:
                        _raise_serialization_error(k)
                    try:
                        # QName attribute values get prefixed too
                        if isinstance(v, QName):
                            v, xmlns = fixtag(v, namespaces)
                            if xmlns: xmlns_items.append(xmlns)
                    except TypeError:
                        _raise_serialization_error(v)
                    file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                               _escape_attrib(v, encoding)))
                for k, v in xmlns_items:
                    file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                               _escape_attrib(v, encoding)))
            if node.text or len(node):
                file.write(">")
                if node.text:
                    file.write(_escape_cdata(node.text, encoding))
                for n in node:
                    self._write(file, n, encoding, namespaces)
                file.write("</" + _encode(tag, encoding) + ">")
            else:
                # no content: use an empty-element tag
                file.write(" />")
            for k, v in xmlns_items:
                # pop namespaces declared in this scope
                del namespaces[v]
        if node.tail:
            file.write(_escape_cdata(node.tail, encoding))
# --------------------------------------------------------------------
# helpers
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, _ElementInterface):
        return True
    return hasattr(element, "tag")
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    # debugging helper only
    tree = elem
    if not isinstance(tree, ElementTree):
        tree = ElementTree(tree)
    tree.write(sys.stdout)
    # make sure the dump ends with a newline
    tail = tree.getroot().tail
    if not (tail and tail[-1] == "\n"):
        sys.stdout.write("\n")
def _encode(s, encoding):
try:
return s.encode(encoding)
except AttributeError:
return s # 1.5.2: assume the string uses the right encoding
if sys.version[:3] == "1.5":
_escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
_escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
_escape_map = {
"&": "&",
"<": "<",
">": ">",
'"': """,
}
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode_entity(text, pattern=_escape):
    # map reserved and non-ascii characters to numerical entities
    def escape_entities(m, map=_escape_map):
        chunks = []
        for char in m.group():
            replacement = map.get(char)
            if replacement is None:
                # no predefined entity: use a numeric character reference
                replacement = "&#%d;" % ord(char)
            chunks.append(replacement)
        return "".join(chunks)
    try:
        return _encode(pattern.sub(escape_entities, text), "ascii")
    except TypeError:
        _raise_serialization_error(text)
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def _escape_cdata(text, encoding=None, replace=string.replace):
    # Escape character data: the markup characters & < > must be
    # replaced by entity references.
    # BUG FIX: the replacements below were previously no-ops (each
    # character was "replaced" by itself), so text containing markup
    # characters was written out as malformed XML.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # cannot represent in target encoding: use entities
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding=None, replace=string.replace):
    # Escape an attribute value: in addition to & < >, double quotes
    # (and, for robustness, apostrophes) must be replaced.
    # BUG FIX: the replacements below were previously no-ops/garbled,
    # so attribute values were written out as malformed XML.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # cannot represent in target encoding: use entities
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "'", "&apos;") # FIXME: overkill
        text = replace(text, "\"", "&quot;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def fixtag(tag, namespaces):
    # Given a decorated tag (of the form {uri}tag), return the prefixed
    # tag and a namespace declaration tuple, if a new one is needed.
    # *namespaces* maps namespace URI -> prefix and is updated in place.
    if isinstance(tag, QName):
        tag = tag.text
    namespace_uri, local = string.split(tag[1:], "}", 1)
    xmlns = None
    prefix = namespaces.get(namespace_uri)
    if prefix is None:
        prefix = _namespace_map.get(namespace_uri)
        if prefix is None:
            # invent a new prefix for this document
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        if prefix != "xml":
            # the xml prefix is implicit and never declared
            xmlns = ("xmlns:%s" % prefix, namespace_uri)
    return "%s:%s" % (prefix, local), xmlns
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
#    standard {@link XMLTreeBuilder} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    # delegate to ElementTree.parse and return the enclosing tree
    document = ElementTree()
    document.parse(source, parser)
    return document
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @return A (event, elem) iterator.
class iterparse:
    def __init__(self, source, events=None):
        if not hasattr(source, "read"):
            # treat source as a file name
            source = open(source, "rb")
        self._file = source
        self._events = []
        self._index = 0
        self.root = self._root = None
        self._parser = XMLTreeBuilder()
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer new-style (ordered) attribute reporting
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    # older expat: fall back to dict-based attributes
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = _encode(uri, "ascii")
                    except UnicodeError:
                        # keep the uri as unicode if it is not ascii
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
    def next(self):
        # Return the next (event, elem) pair, feeding the parser more
        # data from the file as required.
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # parsing finished: publish the root and stop
                    self.root = self._root
                    try:
                        raise StopIteration
                    except NameError:
                        # 1.5.2 has no StopIteration; IndexError ends
                        # the __getitem__ iteration protocol instead
                        raise IndexError
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    # end of input: finish the parse
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item
    try:
        iter
        def __iter__(self):
            return self
    except NameError:
        # no iterator protocol (pre-2.2): emulate it via __getitem__
        def __getitem__(self, index):
            return self.next()
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
def XML(text):
    builder = XMLTreeBuilder()
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text):
    builder = XMLTreeBuilder()
    builder.feed(text)
    tree = builder.close()
    ids = {}
    for elem in tree.getiterator():
        # index every element that carries an "id" attribute
        elem_id = elem.get("id")
        if elem_id:
            ids[elem_id] = elem
    return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
# (alias kept for symmetry with tostring())
fromstring = XML
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None):
    # collect the output fragments in a list, via a minimal
    # write-compatible shim object
    class dummy:
        pass
    fragments = []
    sink = dummy()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding)
    return "".join(fragments)
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = _ElementInterface
        self._factory = element_factory
##
# Flushes the parser buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        assert self._last != None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Attach collected character data to the last element, either
        # as .text (inside a tag) or .tail (after an end tag).
        if self._data:
            if self._last is not None:
                text = string.join(self._data, "")
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
    def data(self, data):
        self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            # append to the current parent, if any
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @see #ElementTree
# @see #TreeBuilder
class XMLTreeBuilder:
    def __init__(self, html=0, target=None):
        try:
            from xml.parsers import expat
        except ImportError:
            raise ImportError(
                "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" makes expat report universal names as "uri}local"
        self._parser = parser = expat.ParserCreate(None, "}")
        if target is None:
            target = TreeBuilder()
        self._target = target
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        # NOTE(review): this local is computed but never used; looks
        # like a leftover from a "target.xml(...)" call (see below)
        encoding = None
        if not parser.returns_unicode:
            encoding = "utf-8"
        # target.xml(encoding, None)
        self._doctype = None
        self.entity = {}
    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return _encode(text, "ascii")
        except UnicodeError:
            return text
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat reported "uri}local"; restore the leading brace
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name
    def _start(self, tag, attrib_in):
        # expat start-tag callback (dict-style attributes)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = self._fixtext(value)
        return self._target.start(tag, attrib)
    def _start_list(self, tag, attrib_in):
        # expat start-tag callback (ordered [name, value, ...] list)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
        return self._target.start(tag, attrib)
    def _data(self, text):
        # expat character-data callback
        return self._target.data(self._fixtext(text))
    def _end(self, tag):
        # expat end-tag callback
        return self._target.end(self._fixname(tag))
    def _default(self, text):
        # expat default callback: handles entity references and
        # scans doctype declarations for the public/system ids
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self._target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                raise expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = string.strip(text)
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quotes
                    pubid = pubid[1:-1]
                self.doctype(name, pubid, system[1:-1])
                self._doctype = None
##
# Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
    def doctype(self, name, pubid, system):
        # subclass hook; the default implementation does nothing
        pass
##
# Feeds data to the parser.
#
# @param data Encoded data.
    def feed(self, data):
        self._parser.Parse(data, 0)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
    def close(self):
        self._parser.Parse("", 1) # end of data
        tree = self._target.close()
        del self._target, self._parser # get rid of circular references
        return tree
# compatibility alias
XMLParser = XMLTreeBuilder
| Python |
# $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
| Python |
"""Interface to the Expat non-validating XML parser."""
__version__ = '$Revision: 17640 $'
from pyexpat import *
| Python |
"""Python interfaces to XML parsers.
This package contains one module:
expat -- Python wrapper for James Clark's Expat parser, with namespace
support.
"""
| Python |
# This is the Python mapping for interface NodeFilter from
# DOM2-Traversal-Range. It contains only constants.
class NodeFilter:
    """
    This is the DOM2 NodeFilter interface. It contains only constants.
    """
    # acceptNode() return values
    FILTER_ACCEPT = 1
    FILTER_REJECT = 2
    FILTER_SKIP = 3

    # whatToShow bit flags (NOTE: the L suffix is a Python 2 long
    # literal; this module is Python 2 only)
    SHOW_ALL = 0xFFFFFFFFL
    SHOW_ELEMENT = 0x00000001
    SHOW_ATTRIBUTE = 0x00000002
    SHOW_TEXT = 0x00000004
    SHOW_CDATA_SECTION = 0x00000008
    SHOW_ENTITY_REFERENCE = 0x00000010
    SHOW_ENTITY = 0x00000020
    SHOW_PROCESSING_INSTRUCTION = 0x00000040
    SHOW_COMMENT = 0x00000080
    SHOW_DOCUMENT = 0x00000100
    SHOW_DOCUMENT_TYPE = 0x00000200
    SHOW_DOCUMENT_FRAGMENT = 0x00000400
    SHOW_NOTATION = 0x00000800

    def acceptNode(self, node):
        """Subclasses must override this to accept/reject/skip *node*."""
        raise NotImplementedError
| Python |
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
    """Features object that has variables set for each DOMBuilder feature.

    The DOMBuilder class uses an instance of this class to pass settings to
    the ExpatBuilder class.
    """

    # Note that the DOMBuilder class in LoadSave constrains which of these
    # values can be set using the DOM Level 3 LoadSave feature.

    namespaces = 1
    namespace_declarations = True
    validation = False
    external_parameter_entities = True
    external_general_entities = True
    external_dtd_subset = True
    validate_if_schema = False
    validate = False
    datatype_normalization = False
    create_entity_ref_nodes = True
    entities = True
    whitespace_in_element_content = True
    cdata_sections = True
    comments = True
    charset_overrides_xml_encoding = True
    infoset = False
    supported_mediatypes_only = False

    # per-parse hooks (see DOMBuilder.parse, which copies them onto
    # the options object it hands to the builder)
    errorHandler = None
    filter = None
class DOMBuilder:
    """Synchronous DOM Level 3 'LS-Load' document builder."""

    entityResolver = None
    errorHandler = None
    filter = None

    ACTION_REPLACE = 1
    ACTION_APPEND_AS_CHILDREN = 2
    ACTION_INSERT_AFTER = 3
    ACTION_INSERT_BEFORE = 4

    _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
                      ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)

    def __init__(self):
        self._options = Options()

    def _get_entityResolver(self):
        return self.entityResolver
    def _set_entityResolver(self, entityResolver):
        self.entityResolver = entityResolver

    def _get_errorHandler(self):
        return self.errorHandler
    def _set_errorHandler(self, errorHandler):
        self.errorHandler = errorHandler

    def _get_filter(self):
        return self.filter
    def _set_filter(self, filter):
        self.filter = filter

    def setFeature(self, name, state):
        """Turn a named LS feature on or off.

        Raises NotSupportedErr for known-but-unsettable combinations
        and NotFoundErr for unknown feature names.
        """
        if self.supportsFeature(name):
            state = state and 1 or 0
            try:
                settings = self._settings[(_name_xform(name), state)]
            except KeyError:
                raise xml.dom.NotSupportedErr(
                    "unsupported feature: %r" % (name,))
            else:
                # apply every option implied by this feature setting
                # (loop variables renamed so they no longer shadow the
                # *name* parameter)
                for option, value in settings:
                    setattr(self._options, option, value)
        else:
            raise xml.dom.NotFoundErr("unknown feature: " + repr(name))

    def supportsFeature(self, name):
        """Return true if *name* is a known feature."""
        return hasattr(self._options, _name_xform(name))

    def canSetFeature(self, name, state):
        """Return true if the (feature, state) pair can be set."""
        key = (_name_xform(name), state and 1 or 0)
        # "in" instead of dict.has_key: identical semantics, and also
        # valid on modern Python
        return key in self._settings

    # This dictionary maps from (feature,value) to a list of
    # (option,value) pairs that should be set on the Options object.
    # If a (feature,value) setting is not in this dictionary, it is
    # not supported by the DOMBuilder.
    #
    _settings = {
        ("namespace_declarations", 0): [
            ("namespace_declarations", 0)],
        ("namespace_declarations", 1): [
            ("namespace_declarations", 1)],
        ("validation", 0): [
            ("validation", 0)],
        ("external_general_entities", 0): [
            ("external_general_entities", 0)],
        ("external_general_entities", 1): [
            ("external_general_entities", 1)],
        ("external_parameter_entities", 0): [
            ("external_parameter_entities", 0)],
        ("external_parameter_entities", 1): [
            ("external_parameter_entities", 1)],
        ("validate_if_schema", 0): [
            ("validate_if_schema", 0)],
        ("create_entity_ref_nodes", 0): [
            ("create_entity_ref_nodes", 0)],
        ("create_entity_ref_nodes", 1): [
            ("create_entity_ref_nodes", 1)],
        ("entities", 0): [
            ("create_entity_ref_nodes", 0),
            ("entities", 0)],
        ("entities", 1): [
            ("entities", 1)],
        ("whitespace_in_element_content", 0): [
            ("whitespace_in_element_content", 0)],
        ("whitespace_in_element_content", 1): [
            ("whitespace_in_element_content", 1)],
        ("cdata_sections", 0): [
            ("cdata_sections", 0)],
        ("cdata_sections", 1): [
            ("cdata_sections", 1)],
        ("comments", 0): [
            ("comments", 0)],
        ("comments", 1): [
            ("comments", 1)],
        ("charset_overrides_xml_encoding", 0): [
            ("charset_overrides_xml_encoding", 0)],
        ("charset_overrides_xml_encoding", 1): [
            ("charset_overrides_xml_encoding", 1)],
        ("infoset", 0): [],
        ("infoset", 1): [
            ("namespace_declarations", 0),
            ("validate_if_schema", 0),
            ("create_entity_ref_nodes", 0),
            ("entities", 0),
            ("cdata_sections", 0),
            ("datatype_normalization", 1),
            ("whitespace_in_element_content", 1),
            ("comments", 1),
            ("charset_overrides_xml_encoding", 1)],
        ("supported_mediatypes_only", 0): [
            ("supported_mediatypes_only", 0)],
        ("namespaces", 0): [
            ("namespaces", 0)],
        ("namespaces", 1): [
            ("namespaces", 1)],
    }

    def getFeature(self, name):
        """Return the current value of feature *name*.

        "infoset" is computed from its component options.
        """
        xname = _name_xform(name)
        try:
            return getattr(self._options, xname)
        except AttributeError:
            if name == "infoset":
                options = self._options
                return (options.datatype_normalization
                        and options.whitespace_in_element_content
                        and options.comments
                        and options.charset_overrides_xml_encoding
                        and not (options.namespace_declarations
                                 or options.validate_if_schema
                                 or options.create_entity_ref_nodes
                                 or options.entities
                                 or options.cdata_sections))
            raise xml.dom.NotFoundErr("feature %s not known" % repr(name))

    def parseURI(self, uri):
        """Resolve *uri* to an input source and parse it."""
        if self.entityResolver:
            input = self.entityResolver.resolveEntity(None, uri)
        else:
            input = DOMEntityResolver().resolveEntity(None, uri)
        return self.parse(input)

    def parse(self, input):
        """Parse a DOMInputSource and return the resulting document."""
        options = copy.copy(self._options)
        options.filter = self.filter
        options.errorHandler = self.errorHandler
        fp = input.byteStream
        # BUG FIX: this previously tested options.systemId, but the
        # Options class (above) has no systemId attribute, so this
        # branch always raised AttributeError.  The system id lives on
        # the input source, as the urlopen() call below already assumed.
        if fp is None and input.systemId:
            import urllib2
            fp = urllib2.urlopen(input.systemId)
        return self._parse_bytestream(fp, options)

    def parseWithContext(self, input, cnode, action):
        """Parse into an existing node context (not implemented)."""
        if action not in self._legal_actions:
            raise ValueError("not a legal action")
        raise NotImplementedError("Haven't written this yet...")

    def _parse_bytestream(self, stream, options):
        import xml.dom.expatbuilder
        builder = xml.dom.expatbuilder.makeBuilder(options)
        return builder.parseFile(stream)
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
    """Resolve a system id to a DOMInputSource via urllib2 (Python 2)."""
    __slots__ = '_opener',

    def resolveEntity(self, publicId, systemId):
        """Open *systemId* and return a populated DOMInputSource."""
        assert systemId is not None
        source = DOMInputSource()
        source.publicId = publicId
        source.systemId = systemId
        source.byteStream = self._get_opener().open(systemId)

        # determine the encoding if the transport provided it
        source.encoding = self._guess_media_encoding(source)

        # determine the base URI is we can
        import posixpath, urlparse
        parts = urlparse.urlparse(systemId)
        scheme, netloc, path, params, query, fragment = parts
        # XXX should we check the scheme here as well?
        if path and not path.endswith("/"):
            # base URI is the directory containing the document
            path = posixpath.dirname(path) + "/"
            parts = scheme, netloc, path, params, query, fragment
            source.baseURI = urlparse.urlunparse(parts)

        return source

    def _get_opener(self):
        # lazily create and cache the urllib2 opener on first use
        try:
            return self._opener
        except AttributeError:
            self._opener = self._create_opener()
            return self._opener

    def _create_opener(self):
        import urllib2
        return urllib2.build_opener()

    def _guess_media_encoding(self, source):
        # returns the charset from the Content-Type header, if any;
        # falls through (returning None) when it is absent
        info = source.byteStream.info()
        if info.has_key("Content-Type"):
            for param in info.getplist():
                if param.startswith("charset="):
                    return param.split("=", 1)[1].lower()
class DOMInputSource(object):
    """Plain value object describing a parser input (DOM LS InputSource).

    All attributes default to None; the _get_*/_set_* accessors exist
    for the DOM property protocol and simply mirror the attributes.
    """
    __slots__ = ('byteStream', 'characterStream', 'stringData',
                 'encoding', 'publicId', 'systemId', 'baseURI')

    def __init__(self):
        self.byteStream = None
        self.characterStream = None
        self.stringData = None
        self.encoding = None
        self.publicId = None
        self.systemId = None
        self.baseURI = None

    def _get_byteStream(self):
        return self.byteStream
    def _set_byteStream(self, byteStream):
        self.byteStream = byteStream

    def _get_characterStream(self):
        return self.characterStream
    def _set_characterStream(self, characterStream):
        self.characterStream = characterStream

    def _get_stringData(self):
        return self.stringData
    def _set_stringData(self, data):
        self.stringData = data

    def _get_encoding(self):
        return self.encoding
    def _set_encoding(self, encoding):
        self.encoding = encoding

    def _get_publicId(self):
        return self.publicId
    def _set_publicId(self, publicId):
        self.publicId = publicId

    def _get_systemId(self):
        return self.systemId
    def _set_systemId(self, systemId):
        self.systemId = systemId

    def _get_baseURI(self):
        return self.baseURI
    def _set_baseURI(self, uri):
        self.baseURI = uri
class DOMBuilderFilter:
    """Element filter which can be used to tailor construction of
    a DOM instance.
    """

    # There's really no need for this class; concrete implementations
    # should just implement the endElement() and startElement()
    # methods as appropriate. Using this makes it easy to only
    # implement one of them.

    # return values for acceptNode()/startContainer()
    FILTER_ACCEPT = 1
    FILTER_REJECT = 2
    FILTER_SKIP = 3
    FILTER_INTERRUPT = 4

    # by default, every node type is presented to the filter
    whatToShow = NodeFilter.SHOW_ALL

    def _get_whatToShow(self):
        return self.whatToShow

    def acceptNode(self, element):
        # default: keep every node
        return self.FILTER_ACCEPT

    def startContainer(self, element):
        # default: descend into every container
        return self.FILTER_ACCEPT

# NodeFilter was only needed to define whatToShow above
del NodeFilter
class DocumentLS:
    """Mixin to create documents that conform to the load/save spec."""

    # NOTE(review): "async" became a reserved word in Python 3.7, so
    # this attribute (and the _set_async parameter below) are Python 2
    # only; kept as-is for DOM LS compatibility.
    async = False

    def _get_async(self):
        # asynchronous loading is never supported by this implementation
        return False
    def _set_async(self, async):
        if async:
            raise xml.dom.NotSupportedErr(
                "asynchronous document loading is not supported")

    def abort(self):
        # What does it mean to "clear" a document? Does the
        # documentElement disappear?
        raise NotImplementedError(
            "haven't figured out what this means yet")

    def load(self, uri):
        raise NotImplementedError("haven't written this yet")

    def loadXML(self, source):
        raise NotImplementedError("haven't written this yet")

    def saveXML(self, snode):
        """Serialize *snode* (or the whole document) back to XML."""
        if snode is None:
            snode = self
        elif snode.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr()
        return snode.toxml()
class DOMImplementationLS:
    """Mixin supplying the DOM Level 3 Load/Save factory methods."""

    MODE_SYNCHRONOUS = 1
    MODE_ASYNCHRONOUS = 2

    def createDOMBuilder(self, mode, schemaType):
        """Return a synchronous DOMBuilder; other configurations are
        unsupported and raise."""
        if schemaType is not None:
            raise xml.dom.NotSupportedErr(
                "schemaType not yet supported")
        if mode == self.MODE_ASYNCHRONOUS:
            raise xml.dom.NotSupportedErr(
                "asynchronous builders are not supported")
        if mode != self.MODE_SYNCHRONOUS:
            raise ValueError("unknown value for mode")
        return DOMBuilder()

    def createDOMWriter(self):
        """Writers were never implemented for this module."""
        raise NotImplementedError(
            "the writer interface hasn't been written yet!")

    def createDOMInputSource(self):
        """Return a fresh, empty DOMInputSource for the caller to fill in."""
        return DOMInputSource()
| Python |
import xml.sax
import xml.sax.handler
import types
# Acceptable types for a filename argument to parse(); builds without a
# separate unicode type only provide StringType.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]
# Event-type tokens produced by PullDOM and consumed via DOMEventStream.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX content handler that builds DOM nodes and queues them, paired
    with an event token, on a singly linked list of [(event, node), next]
    cells; DOMEventStream consumes the list from the head end.
    """
    _locator = None
    document = None
    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # firstEvent is a dummy head cell; lastEvent always points at the
        # final cell of the linked event list.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Comments/PIs seen before the document node exists are parked
        # here and replayed by buildDocument().
        self.pending_events = []
    def pop(self):
        # Class-level fallback used when the instance could not bind
        # elementStack.pop directly in __init__.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result
    def setDocumentLocator(self, locator):
        self._locator = locator
    def startPrefixMapping(self, prefix, uri):
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        # Save a copy of the current context so endPrefixMapping() can
        # restore the pre-mapping state.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None
    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # Namespace declaration attribute: re-create the xmlns /
                # xmlns:prefix qualified name.
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS().
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)
        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: keep the raw text until
            # buildDocument() can create the Comment node.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)
    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # Same deferral as in comment() above.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]
    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation
    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        return node.firstChild
    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()
    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Minimal SAX error handler: warnings are printed, recoverable and
    fatal errors are re-raised to abort parsing."""
    def warning(self, exception):
        print exception
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Pull-style stream of (event, node) pairs obtained by feeding a SAX
    parser's output through a PullDOM content handler."""
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Parser lacks the incremental (feed) interface; fall back to
            # parsing everything up front via _slurp().
            self.getEvent = self._slurp
        self.reset()
    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
    def __getitem__(self, pos):
        # Legacy sequence-iteration protocol support.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError
    def next(self):
        # Iterator protocol (Python 2 spelling).
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration
    def __iter__(self):
        return self
    def expandNode(self, node):
        # Consume events until *node* is closed, attaching everything
        # encountered as descendants of it.
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()
    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        rc = self.pulldom.firstEvent[1][0]
        # Unlink the consumed cell from PullDOM's event list.
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def _slurp(self):
        """ Fallback replacement for getEvent() using the
            standard SAX2 interface, which means we slurp the
            SAX events into memory (no performance gain, but
            we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()
    def _emit(self):
        """ Fallback replacement for getEvent() that emits
            the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM subclass that additionally wires every created node into
    its parent as SAX events arrive, producing a fully connected tree."""

    def _attach_top_to_parent(self):
        # After a start-element event the new element is on top of the
        # stack; hang it off the element just below it.
        stack = self.elementStack
        stack[-2].appendChild(stack[-1])

    def _attach_event_node(self):
        # For leaf events the freshly queued node sits in lastEvent;
        # attach it to the currently open element.
        self.elementStack[-1].appendChild(self.lastEvent[0][1])

    def startElementNS(self, name, tagName, attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        self._attach_top_to_parent()

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        self._attach_top_to_parent()

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        self._attach_event_node()

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        self._attach_event_node()

    def characters(self, chars):
        PullDOM.characters(self, chars)
        self._attach_event_node()
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Parse a file name or a readable stream into a DOMEventStream.

    A string argument is treated as a file name and opened; anything
    else is assumed to already be a readable stream object.
    """
    if not parser:
        parser = xml.sax.make_parser()
    if bufsize is None:
        bufsize = default_bufsize
    if type(stream_or_string) in _StringTypes:
        stream = open(stream_or_string)
    else:
        stream = stream_or_string
    return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
    """Parse XML held in *string*, returning a DOMEventStream whose
    buffer size covers the whole input."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    if not parser:
        parser = xml.sax.make_parser()
    buf = StringIO(string)
    return DOMEventStream(buf, parser, len(string))
| Python |
"""Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations.  Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
    'minidom':'xml.dom.minidom',
    '4DOM': 'xml.dom.DOMImplementation',
    }
# DOM implementations not officially registered should register
# themselves with their own name at runtime via
# registerDOMImplementation(); maps name -> factory callable.
registered = {}
def registerDOMImplementation(name, factory):
    """registerDOMImplementation(name, factory)

    Register the factory function with the name. The factory function
    should return an object which implements the DOMImplementation
    interface. The factory function can either return the same object,
    or a new one (e.g. if that implementation supports some
    customization)."""
    # Later registrations under the same name silently replace earlier
    # ones.
    registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name = None, features = ()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.

    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.

    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature."""
    import os
    creator = None
    mod = well_known_implementations.get(name)
    if mod:
        # Well-known name: import its module and delegate to it.
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        # Otherwise the name must have been registered at runtime.
        return registered[name]()
    elif os.environ.has_key("PYTHON_DOM"):
        # The environment may select an implementation by name.
        return getDOMImplementation(name = os.environ["PYTHON_DOM"])
    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, StringTypes):
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom
    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name = creator)
        except StandardError: # typically ImportError, or AttributeError
            continue
        if _good_enough(dom, features):
            return dom
    raise ImportError,"no suitable DOM implementation found"
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError, "bad feature name: %r" % (feature,)
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
| Python |
"""Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The later is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
from xml.dom.minicompat import *
# Module-level aliases for frequently consulted node-type codes.
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
# Local names for the DOMBuilderFilter decision codes.
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
# Shared minidom DOMImplementation used to create new documents.
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo.  Maps DTD attribute type names reported by
# expat to shared minidom TypeInfo objects; enumerated "(a|b)" types are
# mapped to "ENUM" by ElementInfo.getAttributeType().
_typeinfo_map = {
    "CDATA": minidom.TypeInfo(None, "cdata"),
    "ENUM": minidom.TypeInfo(None, "enumeration"),
    "ENTITY": minidom.TypeInfo(None, "entity"),
    "ENTITIES": minidom.TypeInfo(None, "entities"),
    "ID": minidom.TypeInfo(None, "id"),
    "IDREF": minidom.TypeInfo(None, "idref"),
    "IDREFS": minidom.TypeInfo(None, "idrefs"),
    "NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
    "NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
    }
class ElementInfo(object):
    """Per-element-type DTD information: attribute declarations and the
    expat content model, accumulated from the DTD handlers."""

    __slots__ = '_attr_info', '_model', 'tagName'

    def __init__(self, tagName, model=None):
        self.tagName = tagName
        self._attr_info = []
        self._model = model

    def __getstate__(self):
        # __slots__ classes need explicit pickle support.
        return self._attr_info, self._model, self.tagName

    def __setstate__(self, state):
        self._attr_info, self._model, self.tagName = state

    def getAttributeType(self, aname):
        """Return the TypeInfo for attribute *aname*; enumerated types
        (those whose declaration starts with "(") map to ENUM."""
        for info in self._attr_info:
            if info[1] != aname:
                continue
            typename = info[-2]
            if typename[0] == "(":
                return _typeinfo_map["ENUM"]
            return _typeinfo_map[typename]
        return minidom._no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        # Namespaced attribute types are not recorded by the DTD.
        return minidom._no_type

    def isElementContent(self):
        """True when the content model allows only element children."""
        model = self._model
        if not model:
            return False
        return model[0] not in (expat.model.XML_CTYPE_ANY,
                                expat.model.XML_CTYPE_MIXED)

    def isEmpty(self):
        """True when the content model is declared EMPTY."""
        model = self._model
        return bool(model) and model[0] == expat.model.XML_CTYPE_EMPTY

    def isId(self, aname):
        """True when attribute *aname* is declared with type ID."""
        for info in self._attr_info:
            if info[1] == aname:
                return info[-2] == "ID"
        return False

    def isIdNS(self, euri, ename, auri, aname):
        # not sure this is meaningful
        return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
    """Document builder that uses Expat to build a ParsedXML.DOM document
    instance."""

    def __init__(self, options=None):
        if options is None:
            options = xmlbuilder.Options()
        self._options = options
        if self._options.filter is not None:
            self._filter = FilterVisibilityController(self._options.filter)
        else:
            self._filter = None
            # This *really* doesn't do anything in this case, so
            # override it with something fast & minimal.
            self._finish_start_element = id
        self._parser = None
        self.reset()

    def createParser(self):
        """Create a new parser object."""
        return expat.ParserCreate()

    def getParser(self):
        """Return the parser object, creating a new one if needed."""
        if not self._parser:
            self._parser = self.createParser()
            self._intern_setdefault = self._parser.intern.setdefault
            self._parser.buffer_text = True
            self._parser.ordered_attributes = True
            self._parser.specified_attributes = True
            self.install(self._parser)
        return self._parser

    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False

    def install(self, parser):
        """Install the callbacks needed to build the DOM into the parser."""
        # This creates circular references!
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.first_element_handler
        parser.EndElementHandler = self.end_element_handler
        parser.ProcessingInstructionHandler = self.pi_handler
        if self._options.entities:
            parser.EntityDeclHandler = self.entity_decl_handler
            parser.NotationDeclHandler = self.notation_decl_handler
        if self._options.comments:
            parser.CommentHandler = self.comment_handler
        if self._options.cdata_sections:
            parser.StartCdataSectionHandler = self.start_cdata_section_handler
            parser.EndCdataSectionHandler = self.end_cdata_section_handler
            parser.CharacterDataHandler = self.character_data_handler_cdata
        else:
            parser.CharacterDataHandler = self.character_data_handler
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
        parser.XmlDeclHandler = self.xml_decl_handler
        parser.ElementDeclHandler = self.element_decl_handler
        parser.AttlistDeclHandler = self.attlist_decl_handler

    def parseFile(self, file):
        """Parse a document from a file object, returning the document
        node."""
        parser = self.getParser()
        first_buffer = True
        try:
            while 1:
                buffer = file.read(16*1024)
                if not buffer:
                    break
                parser.Parse(buffer, 0)
                if first_buffer and self.document.documentElement:
                    self._setup_subset(buffer)
                first_buffer = False
            parser.Parse("", True)
        except ParseEscape:
            # Raised by the filter machinery to abort parsing early.
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def parseString(self, string):
        """Parse a document from a string, returning the document node."""
        parser = self.getParser()
        try:
            parser.Parse(string, True)
            self._setup_subset(string)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def _setup_subset(self, buffer):
        """Load the internal subset if there might be one."""
        if self.document.doctype:
            extractor = InternalSubsetExtractor()
            extractor.parseString(buffer)
            subset = extractor.getSubset()
            self.document.doctype.internalSubset = subset

    def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
                                   has_internal_subset):
        """Create the DocumentType node, subject to the filter's verdict."""
        doctype = self.document.implementation.createDocumentType(
            doctypeName, publicId, systemId)
        doctype.ownerDocument = self.document
        self.document.childNodes.append(doctype)
        self.document.doctype = doctype
        if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
            # Doctype was rejected: drop it and silence DTD handlers.
            self.document.doctype = None
            del self.document.childNodes[-1]
            doctype = None
            self._parser.EntityDeclHandler = None
            self._parser.NotationDeclHandler = None
        if has_internal_subset:
            if doctype is not None:
                doctype.entities._seq = []
                doctype.notations._seq = []
            # Comments and PIs inside the internal subset must not end
            # up in the document tree.
            self._parser.CommentHandler = None
            self._parser.ProcessingInstructionHandler = None
            self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler

    def end_doctype_decl_handler(self):
        """Restore handlers suppressed while reading the internal subset."""
        if self._options.comments:
            self._parser.CommentHandler = self.comment_handler
        self._parser.ProcessingInstructionHandler = self.pi_handler
        if not (self._elem_info or self._filter):
            # No DTD info and no filter: the end-element finish step is
            # a no-op, so replace it with the cheap builtin id().
            self._finish_end_element = id

    def pi_handler(self, target, data):
        node = self.document.createProcessingInstruction(target, data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def character_data_handler_cdata(self, data):
        """Character-data callback used when CDATA sections are kept."""
        childNodes = self.curNode.childNodes
        if self._cdata:
            if (self._cdata_continue
                and childNodes[-1].nodeType == CDATA_SECTION_NODE):
                childNodes[-1].appendData(data)
                return
            node = self.document.createCDATASection(data)
            self._cdata_continue = True
        elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
            # Expat may split one run of character data over several
            # callbacks; merge into the trailing text node directly via
            # __dict__ for speed (see module header).
            node = childNodes[-1]
            value = node.data + data
            d = node.__dict__
            d['data'] = d['nodeValue'] = value
            return
        else:
            node = minidom.Text()
            d = node.__dict__
            d['data'] = d['nodeValue'] = data
            d['ownerDocument'] = self.document
        _append_child(self.curNode, node)

    def character_data_handler(self, data):
        """Character-data callback used when CDATA sections are merged
        into plain text."""
        childNodes = self.curNode.childNodes
        if childNodes and childNodes[-1].nodeType == TEXT_NODE:
            node = childNodes[-1]
            d = node.__dict__
            d['data'] = d['nodeValue'] = node.data + data
            return
        node = minidom.Text()
        d = node.__dict__
        d['data'] = d['nodeValue'] = node.data + data
        d['ownerDocument'] = self.document
        _append_child(self.curNode, node)

    def entity_decl_handler(self, entityName, is_parameter_entity, value,
                            base, systemId, publicId, notationName):
        if is_parameter_entity:
            # we don't care about parameter entities for the DOM
            return
        if not self._options.entities:
            return
        node = self.document._create_entity(entityName, publicId,
                                            systemId, notationName)
        if value is not None:
            # internal entity
            # node *should* be readonly, but we'll cheat
            child = self.document.createTextNode(value)
            node.childNodes.append(child)
        self.document.doctype.entities._seq.append(node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.entities._seq[-1]

    def notation_decl_handler(self, notationName, base, systemId, publicId):
        node = self.document._create_notation(notationName, publicId, systemId)
        self.document.doctype.notations._seq.append(node)
        # Bug fix: this previously tested FILTER_ACCEPT, which removed
        # exactly the notations the filter wanted to keep (and kept the
        # rejected ones).  Test FILTER_REJECT, consistent with
        # entity_decl_handler() above.
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.notations._seq[-1]

    def comment_handler(self, data):
        node = self.document.createComment(data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def start_cdata_section_handler(self):
        self._cdata = True
        self._cdata_continue = False

    def end_cdata_section_handler(self):
        self._cdata = False
        self._cdata_continue = False

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        # External entities are not fetched; tell expat to carry on.
        return 1

    def first_element_handler(self, name, attributes):
        """One-shot handler for the first element: decides whether the
        per-element finish step can be skipped, then installs the
        regular start handler."""
        if self._filter is None and not self._elem_info:
            self._finish_end_element = id
        self.getParser().StartElementHandler = self.start_element_handler
        self.start_element_handler(name, attributes)

    def start_element_handler(self, name, attributes):
        node = self.document.createElement(name)
        _append_child(self.curNode, node)
        self.curNode = node
        if attributes:
            # Attributes arrive as a flat [name, value, ...] list
            # because ordered_attributes is set on the parser; the
            # __dict__ pokes bypass minidom setters for speed.
            for i in range(0, len(attributes), 2):
                a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
                                 None, EMPTY_PREFIX)
                value = attributes[i+1]
                d = a.childNodes[0].__dict__
                d['data'] = d['nodeValue'] = value
                d = a.__dict__
                d['value'] = d['nodeValue'] = value
                d['ownerDocument'] = self.document
                _set_attribute_node(node, a)
        if node is not self.document.documentElement:
            self._finish_start_element(node)

    def _finish_start_element(self, node):
        if self._filter:
            # To be general, we'd have to call isSameNode(), but this
            # is sufficient for minidom:
            if node is self.document.documentElement:
                return
            filt = self._filter.startContainer(node)
            if filt == FILTER_REJECT:
                # ignore this node & all descendents
                Rejecter(self)
            elif filt == FILTER_SKIP:
                # ignore this node, but make it's children become
                # children of the parent node
                Skipper(self)
            else:
                return
            self.curNode = node.parentNode
            node.parentNode.removeChild(node)
            node.unlink()

    # If this ever changes, Namespaces.end_element_handler() needs to
    # be changed to match.
    #
    def end_element_handler(self, name):
        curNode = self.curNode
        self.curNode = curNode.parentNode
        self._finish_end_element(curNode)

    def _finish_end_element(self, curNode):
        info = self._elem_info.get(curNode.tagName)
        if info:
            self._handle_white_text_nodes(curNode, info)
        if self._filter:
            if curNode is self.document.documentElement:
                return
            if self._filter.acceptNode(curNode) == FILTER_REJECT:
                self.curNode.removeChild(curNode)
                curNode.unlink()

    def _handle_white_text_nodes(self, node, info):
        if (self._options.whitespace_in_element_content
            or not info.isElementContent()):
            return
        # We have element type information and should remove ignorable
        # whitespace; identify for text nodes which contain only
        # whitespace.
        L = []
        for child in node.childNodes:
            if child.nodeType == TEXT_NODE and not child.data.strip():
                L.append(child)
        # Remove ignorable whitespace from the tree.
        for child in L:
            node.removeChild(child)

    def element_decl_handler(self, name, model):
        info = self._elem_info.get(name)
        if info is None:
            self._elem_info[name] = ElementInfo(name, model)
        else:
            assert info._model is None
            info._model = model

    def attlist_decl_handler(self, elem, name, type, default, required):
        info = self._elem_info.get(elem)
        if info is None:
            info = ElementInfo(elem)
            self._elem_info[elem] = info
        info._attr_info.append(
            [None, name, None, None, default, 0, type, required])

    def xml_decl_handler(self, version, encoding, standalone):
        self.document.version = version
        self.document.encoding = encoding
        # This is still a little ugly, thanks to the pyexpat API. ;-(
        if standalone >= 0:
            if standalone:
                self.document.standalone = True
            else:
                self.document.standalone = False
# Legal return values from user-supplied filter callbacks.
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
    """Wrapper around a DOMBuilderFilter which implements the checks
    to make the whatToShow filter attribute work."""
    __slots__ = 'filter',

    def __init__(self, filter):
        self.filter = filter

    def startContainer(self, node):
        """Ask the wrapped filter's startContainer() about *node*.

        Node types outside the filter's whatToShow mask are accepted
        implicitly; FILTER_INTERRUPT aborts parsing via ParseEscape.
        """
        mask = self._nodetype_mask[node.nodeType]
        if self.filter.whatToShow & mask:
            val = self.filter.startContainer(node)
            if val == FILTER_INTERRUPT:
                raise ParseEscape
            if val not in _ALLOWED_FILTER_RETURNS:
                # Call-style raise (fixes the Python2-only
                # "raise E, msg" statement syntax).
                raise ValueError(
                    "startContainer() returned illegal value: " + repr(val))
            return val
        else:
            return FILTER_ACCEPT

    def acceptNode(self, node):
        """Ask the wrapped filter's acceptNode() about *node*.

        FILTER_SKIP is implemented here by reparenting the node's
        children and then reporting FILTER_REJECT, so the caller drops
        the (now childless) node itself.
        """
        mask = self._nodetype_mask[node.nodeType]
        if self.filter.whatToShow & mask:
            val = self.filter.acceptNode(node)
            if val == FILTER_INTERRUPT:
                raise ParseEscape
            if val == FILTER_SKIP:
                # move all child nodes to the parent, and remove this node
                parent = node.parentNode
                for child in node.childNodes[:]:
                    parent.appendChild(child)
                # node is handled by the caller
                return FILTER_REJECT
            if val not in _ALLOWED_FILTER_RETURNS:
                raise ValueError(
                    "acceptNode() returned illegal value: " + repr(val))
            return val
        else:
            return FILTER_ACCEPT

    # DOM node type -> NodeFilter.SHOW_* bit used against whatToShow.
    _nodetype_mask = {
        Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
        Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
        Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
        Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
        Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
        Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
        Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
        Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
        Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
        Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
        Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
        Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
        }
class FilterCrutch(object):
    """Base helper that temporarily hijacks a builder's element handlers.

    On construction it saves the parser's current start/end element
    handlers and installs its own, so a subclass can count element
    nesting and restore the originals when its job is done.
    """
    __slots__ = '_builder', '_level', '_old_start', '_old_end'

    def __init__(self, builder):
        self._builder = builder
        self._level = 0
        parser = builder._parser
        self._old_start = parser.StartElementHandler
        self._old_end = parser.EndElementHandler
        parser.StartElementHandler = self.start_element_handler
        parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
    """Silently discards the rejected element and everything inside it."""
    __slots__ = ()

    def __init__(self, builder):
        FilterCrutch.__init__(self, builder)
        # Silence every other callback while the subtree is skipped.
        parser = builder._parser
        for name in ("ProcessingInstructionHandler",
                     "CommentHandler",
                     "CharacterDataHandler",
                     "StartCdataSectionHandler",
                     "EndCdataSectionHandler",
                     "ExternalEntityRefHandler",
                     ):
            setattr(parser, name, None)

    def start_element_handler(self, *args):
        # Track how deep we are inside the rejected subtree.
        self._level = self._level + 1

    def end_element_handler(self, *args):
        if self._level:
            self._level = self._level - 1
        else:
            # Leaving the rejected element: reinstall the builder's
            # normal handler set, then the saved element handlers.
            parser = self._builder._parser
            self._builder.install(parser)
            parser.StartElementHandler = self._old_start
            parser.EndElementHandler = self._old_end
class Skipper(FilterCrutch):
    """Drops one element while splicing its children into the parent."""
    __slots__ = ()

    def start_element_handler(self, *args):
        previous_top = self._builder.curNode
        self._old_start(*args)
        if self._builder.curNode is not previous_top:
            # The delegated handler opened a new element: one level deeper.
            self._level = self._level + 1

    def end_element_handler(self, *args):
        if self._level == 0:
            # We're popping back out of the node we're skipping, so we
            # shouldn't need to do anything but reset the handlers.
            self._builder._parser.StartElementHandler = self._old_start
            self._builder._parser.EndElementHandler = self._old_end
            self._builder = None
        else:
            self._level = self._level - 1
            self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
    """Builder which constructs document fragments given XML source
    text and a context node.

    The context node is expected to provide information about the
    namespace declarations which are in scope at the start of the
    fragment.
    """
    def __init__(self, context, options=None):
        if context.nodeType == DOCUMENT_NODE:
            self.originalDocument = context
            self.context = context
        else:
            self.originalDocument = context.ownerDocument
            self.context = context
        ExpatBuilder.__init__(self, options)
    def reset(self):
        ExpatBuilder.reset(self)
        self.fragment = None
    def parseFile(self, file):
        """Parse a document fragment from a file object, returning the
        fragment node."""
        return self.parseString(file.read())
    def parseString(self, string):
        """Parse a document fragment from a string, returning the
        fragment node."""
        # The fragment source is wrapped in a framework document whose
        # internal entity reference triggers
        # external_entity_ref_handler() below, which parses *string*
        # into a DocumentFragment of the original document.
        self._source = string
        parser = self.getParser()
        doctype = self.originalDocument.doctype
        ident = ""
        if doctype:
            subset = doctype.internalSubset or self._getDeclarations()
            if doctype.publicId:
                ident = ('PUBLIC "%s" "%s"'
                         % (doctype.publicId, doctype.systemId))
            elif doctype.systemId:
                ident = 'SYSTEM "%s"' % doctype.systemId
        else:
            subset = ""
        nsattrs = self._getNSattrs() # get ns decls from node's ancestors
        document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
        try:
            parser.Parse(document, 1)
        except:
            # Leave the builder reusable even when parsing fails.
            self.reset()
            raise
        fragment = self.fragment
        self.reset()
##         self._parser = None
        return fragment
    def _getDeclarations(self):
        """Re-create the internal subset from the DocumentType node.

        This is only needed if we don't already have the
        internalSubset as a string.
        """
        doctype = self.context.ownerDocument.doctype
        s = ""
        if doctype:
            for i in range(doctype.notations.length):
                notation = doctype.notations.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!NOTATION %s" % (s, notation.nodeName)
                if notation.publicId:
                    s = '%s PUBLIC "%s"\n             "%s">' \
                        % (s, notation.publicId, notation.systemId)
                else:
                    s = '%s SYSTEM "%s">' % (s, notation.systemId)
            for i in range(doctype.entities.length):
                entity = doctype.entities.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!ENTITY %s" % (s, entity.nodeName)
                if entity.publicId:
                    s = '%s PUBLIC "%s"\n           "%s"' \
                        % (s, entity.publicId, entity.systemId)
                elif entity.systemId:
                    s = '%s SYSTEM "%s"' % (s, entity.systemId)
                else:
                    s = '%s "%s"' % (s, entity.firstChild.data)
                if entity.notationName:
                    s = "%s NOTATION %s" % (s, entity.notationName)
                s = s + ">"
        return s
    def _getNSattrs(self):
        # Subclasses (e.g. the namespace-aware fragment builder) supply
        # the in-scope namespace declaration attributes here.
        return ""
    def external_entity_ref_handler(self, context, base, systemId, publicId):
        if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
            # this entref is the one that we made to put the subtree
            # in; all of our given input is parsed in here.
            old_document = self.document
            old_cur_node = self.curNode
            parser = self._parser.ExternalEntityParserCreate(context)
            # put the real document back, parse into the fragment to return
            self.document = self.originalDocument
            self.fragment = self.document.createDocumentFragment()
            self.curNode = self.fragment
            try:
                parser.Parse(self._source, 1)
            finally:
                # Always restore the framework-document parsing state.
                self.curNode = old_cur_node
                self.document = old_document
                self._source = None
            return -1
        else:
            return ExpatBuilder.external_entity_ref_handler(
                self, context, base, systemId, publicId)
class Namespaces:
    """Mix-in class for builders; adds support for namespaces."""

    def _initNamespaces(self):
        # list of (prefix, uri) ns declarations.  Namespace attrs are
        # constructed from this and added to the element's attrs.
        self._ns_ordered_prefixes = []

    def createParser(self):
        """Create a new namespace-handling parser."""
        # A space separator makes expat report names as "uri localname
        # prefix", which _parse_ns_name() splits apart.
        parser = expat.ParserCreate(namespace_separator=" ")
        parser.namespace_prefixes = True
        return parser

    def install(self, parser):
        """Insert the namespace-handlers onto the parser."""
        ExpatBuilder.install(self, parser)
        if self._options.namespace_declarations:
            parser.StartNamespaceDeclHandler = (
                self.start_namespace_decl_handler)

    def start_namespace_decl_handler(self, prefix, uri):
        """Push this namespace declaration on our storage."""
        self._ns_ordered_prefixes.append((prefix, uri))

    def start_element_handler(self, name, attributes):
        # A space in the expat-reported name means it was namespaced.
        if ' ' in name:
            uri, localname, prefix, qname = _parse_ns_name(self, name)
        else:
            uri = EMPTY_NAMESPACE
            qname = name
            localname = None
            prefix = EMPTY_PREFIX
        node = minidom.Element(qname, uri, prefix, localname)
        node.ownerDocument = self.document
        _append_child(self.curNode, node)
        self.curNode = node

        # Synthesize xmlns/xmlns:prefix attribute nodes for declarations
        # queued by start_namespace_decl_handler().  Attribute internals
        # are poked through __dict__ deliberately to bypass the slower
        # Attr.__setattr__ machinery.
        if self._ns_ordered_prefixes:
            for prefix, uri in self._ns_ordered_prefixes:
                if prefix:
                    a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
                                     XMLNS_NAMESPACE, prefix, "xmlns")
                else:
                    a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
                                     "xmlns", EMPTY_PREFIX)
                d = a.childNodes[0].__dict__
                d['data'] = d['nodeValue'] = uri
                d = a.__dict__
                d['value'] = d['nodeValue'] = uri
                d['ownerDocument'] = self.document
                _set_attribute_node(node, a)
            del self._ns_ordered_prefixes[:]

        if attributes:
            # expat delivers attributes as a flat [name, value, ...] list.
            _attrs = node._attrs
            _attrsNS = node._attrsNS
            for i in range(0, len(attributes), 2):
                aname = attributes[i]
                value = attributes[i+1]
                if ' ' in aname:
                    uri, localname, prefix, qname = _parse_ns_name(self, aname)
                    a = minidom.Attr(qname, uri, localname, prefix)
                    _attrs[qname] = a
                    _attrsNS[(uri, localname)] = a
                else:
                    a = minidom.Attr(aname, EMPTY_NAMESPACE,
                                     aname, EMPTY_PREFIX)
                    _attrs[aname] = a
                    _attrsNS[(EMPTY_NAMESPACE, aname)] = a
                d = a.childNodes[0].__dict__
                d['data'] = d['nodeValue'] = value
                d = a.__dict__
                d['ownerDocument'] = self.document
                d['value'] = d['nodeValue'] = value
                d['ownerElement'] = node

    if __debug__:
        # This only adds some asserts to the original
        # end_element_handler(), so we only define this when -O is not
        # used.  If changing one, be sure to check the other to see if
        # it needs to be changed as well.
        #
        def end_element_handler(self, name):
            curNode = self.curNode
            if ' ' in name:
                uri, localname, prefix, qname = _parse_ns_name(self, name)
                assert (curNode.namespaceURI == uri
                        and curNode.localName == localname
                        and curNode.prefix == prefix), \
                        "element stack messed up! (namespace)"
            else:
                assert curNode.nodeName == name, \
                       "element stack messed up - bad nodeName"
                assert curNode.namespaceURI == EMPTY_NAMESPACE, \
                       "element stack messed up - bad namespaceURI"
            self.curNode = curNode.parentNode
            self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
    """Document builder that supports namespaces."""

    def reset(self):
        ExpatBuilder.reset(self)
        # Also clear the queued (prefix, uri) namespace declarations.
        self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
    """Fragment builder that supports namespaces."""

    def reset(self):
        FragmentBuilder.reset(self)
        self._initNamespaces()

    def _getNSattrs(self):
        """Return string of namespace attributes from this element and
        ancestors."""
        # XXX This needs to be re-written to walk the ancestors of the
        # context to build up the namespace information from
        # declarations, elements, and attributes found in context.
        # Otherwise we have to store a bunch more data on the DOM
        # (though that *might* be more reliable -- not clear).
        attrs = ""
        context = self.context
        L = []
        while context:
            if hasattr(context, '_ns_prefix_uri'):
                for prefix, uri in context._ns_prefix_uri.items():
                    # add every new NS decl from context to L and attrs string
                    if prefix in L:
                        # An inner declaration shadows outer ones: first
                        # (innermost) occurrence of a prefix wins.
                        continue
                    L.append(prefix)
                    if prefix:
                        declname = "xmlns:" + prefix
                    else:
                        declname = "xmlns"
                    if attrs:
                        attrs = "%s\n    %s='%s'" % (attrs, declname, uri)
                    else:
                        attrs = " %s='%s'" % (declname, uri)
            context = context.parentNode
        return attrs
class ParseEscape(Exception):
    """Exception raised to short-circuit parsing in InternalSubsetExtractor."""
class InternalSubsetExtractor(ExpatBuilder):
    """XML processor which can rip out the internal document type subset."""

    # Filled in by the doctype handlers; stays None when the document has
    # no internal subset.
    subset = None

    def getSubset(self):
        """Return the internal subset as a string."""
        return self.subset

    def parseFile(self, file):
        # ParseEscape is raised by the handlers below as soon as the
        # subset (or its absence) is known; swallow it to stop early.
        try:
            ExpatBuilder.parseFile(self, file)
        except ParseEscape:
            pass

    def parseString(self, string):
        try:
            ExpatBuilder.parseString(self, string)
        except ParseEscape:
            pass

    def install(self, parser):
        # Only these two handlers are needed; no DOM tree is built.
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.start_element_handler

    def start_doctype_decl_handler(self, name, publicId, systemId,
                                   has_internal_subset):
        if has_internal_subset:
            # Collect the raw subset text: expat routes unrecognized
            # doctype-internal data through DefaultHandler.
            parser = self.getParser()
            self.subset = []
            parser.DefaultHandler = self.subset.append
            parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
        else:
            raise ParseEscape()

    def end_doctype_decl_handler(self):
        # Normalize line endings to '\n' and stop parsing.
        s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
        self.subset = s
        raise ParseEscape()

    def start_element_handler(self, name, attrs):
        # Reaching the root element means there was no doctype at all.
        raise ParseEscape()
def parse(file, namespaces=True):
    """Parse a document, returning the resulting Document node.

    'file' may be either a file name or an open file object.
    """
    if namespaces:
        builder = ExpatBuilderNS()
    else:
        builder = ExpatBuilder()

    if isinstance(file, StringTypes):
        # Open in binary mode so expat sees the raw bytes and can honor
        # the document's own encoding declaration.
        fp = open(file, 'rb')
        try:
            result = builder.parseFile(fp)
        finally:
            fp.close()
    else:
        result = builder.parseFile(file)
    return result
def parseString(string, namespaces=True):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    # Pick the builder class up front, then build and parse in one go.
    if namespaces:
        builder_class = ExpatBuilderNS
    else:
        builder_class = ExpatBuilder
    return builder_class().parseString(string)
def parseFragment(file, context, namespaces=True):
    """Parse a fragment of a document, given the context from which it
    was originally extracted.  context should be the parent of the
    node(s) which are in the fragment.

    'file' may be either a file name or an open file object.
    """
    if namespaces:
        builder = FragmentBuilderNS(context)
    else:
        builder = FragmentBuilder(context)

    if isinstance(file, StringTypes):
        # Binary mode: let expat handle the encoding declaration itself.
        fp = open(file, 'rb')
        try:
            result = builder.parseFile(fp)
        finally:
            fp.close()
    else:
        result = builder.parseFile(file)
    return result
def parseFragmentString(string, context, namespaces=True):
    """Parse a fragment of a document from a string, given the context
    from which it was originally extracted.  context should be the
    parent of the node(s) which are in the fragment.
    """
    # Choose the builder class, then construct and parse in one step.
    if namespaces:
        builder_class = FragmentBuilderNS
    else:
        builder_class = FragmentBuilder
    return builder_class(context).parseString(string)
def makeBuilder(options):
    """Create a builder based on an Options object."""
    # Namespace support is the only thing the options decide here.
    if options.namespaces:
        builder_class = ExpatBuilderNS
    else:
        builder_class = ExpatBuilder
    return builder_class(options)
| Python |
"""Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
# Determine which built-in types count as "strings" on this interpreter:
# on interpreters without a ``unicode`` builtin only ``str`` qualifies;
# on Python 2 both ``str`` and ``unicode`` do.
try:
    unicode
except NameError:
    StringTypes = type(''),
else:
    StringTypes = type(''), type(unicode(''))
class NodeList(list):
    """Minimal DOM NodeList: a plain list plus the DOM accessors."""

    __slots__ = ()

    def item(self, index):
        """Return the node at *index*, or None when out of range."""
        if not (0 <= index < len(self)):
            return None
        return self[index]

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    # Pickle as a plain list; __slots__ leaves nothing else to save.
    def __getstate__(self):
        return list(self)

    def __setstate__(self, state):
        self[:] = state
class EmptyNodeList(tuple):
    """Immutable, always-empty NodeList (shared by childless nodes)."""

    __slots__ = ()

    def __add__(self, other):
        # Adding anything produces a real, mutable NodeList.
        combined = NodeList()
        combined.extend(other)
        return combined

    # Either operand order yields the same result, so share the code.
    __radd__ = __add__

    def item(self, index):
        """There is nothing at any index."""
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Install a read-only property *name* on *klass*.

    The getter is the class's existing ``_get_<name>`` method; any
    assignment raises xml.dom.NoModificationAllowedErr.
    """
    get = getattr(klass, ("_get_" + name))
    # On Python 2, getattr() on a class returns an unbound method whose
    # underlying function is ``im_func``; on Python 3 it is already a
    # plain function and has no ``im_func``.  The fallback keeps this
    # helper working on both without changing Python 2 behavior.
    get = getattr(get, "im_func", get)
    def set(self, value, name=name):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))
    assert not hasattr(klass, "_set_" + name), \
           "expected not to find _set_" + name
    prop = property(get, set, doc=doc)
    setattr(klass, name, prop)
| Python |
"""W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""

    # DOM implementations may use this as a base class for their own
    # Node implementations.  If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation.  Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.

    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
#ExceptionCode
# Numeric DOM exception codes from the W3C recommendation; each is
# mirrored as the ``code`` attribute of the matching DOMException
# subclass defined below.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Exceptions with specific codes are specializations of this class."""

    def __init__(self, *args, **kw):
        # Abstract: concrete subclasses (which define ``code``) may be
        # instantiated, the base class itself may not.
        if self.__class__ is not DOMException:
            Exception.__init__(self, *args, **kw)
        else:
            raise RuntimeError(
                "DOMException should not be instantiated directly")

    def _get_code(self):
        # Accessor for the class-level ``code`` set by each subclass.
        return self.code
# One concrete DOMException subclass per DOM exception code; the
# ``code`` attribute is the value exposed through DOMException._get_code().
class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR

class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR

class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR

class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR

class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR

class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR

class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR

class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR

class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR

class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR

class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR

class SyntaxErr(DOMException):
    code = SYNTAX_ERR

class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR

class NamespaceErr(DOMException):
    code = NAMESPACE_ERR

class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR

class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002)

    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs (DOM Level 2).
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"

# Placeholders meaning "no namespace" / "no prefix".
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from domreg import getDOMImplementation,registerDOMImplementation
| Python |
"""\
minidom.py -- a lightweight DOM implementation.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE.  (The node being checked is
# the node being added or removed, not the node being modified.)
# Only these node types can carry elements (hence ID attributes) in
# their subtree, so only they force a cache flush.
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
                            xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
    """Base class for all minidom nodes; implements the shared tree
    manipulation machinery (child insertion/removal, normalization,
    cloning, user data) and the serialization entry points."""

    namespaceURI = None # this is non-null only for elements and attributes
    parentNode = None
    ownerDocument = None
    nextSibling = None
    previousSibling = None

    prefix = EMPTY_PREFIX # non-null only for NS elements and attributes

    def __nonzero__(self):
        # Every node is true, even with no children (Python 2 truth hook).
        return True

    def toxml(self, encoding = None):
        """Return the XML for this subtree as a string, unindented."""
        return self.toprettyxml("", "", encoding)

    def toprettyxml(self, indent="\t", newl="\n", encoding = None):
        # indent = the indentation string to prepend, per level
        # newl = the newline string to append
        writer = _get_StringIO()
        if encoding is not None:
            import codecs
            # Can't use codecs.getwriter to preserve 2.0 compatibility
            writer = codecs.lookup(encoding)[3](writer)
        if self.nodeType == Node.DOCUMENT_NODE:
            # Can pass encoding only to document, to put it into XML header
            self.writexml(writer, "", indent, newl, encoding)
        else:
            self.writexml(writer, "", indent, newl)
        return writer.getvalue()

    def hasChildNodes(self):
        if self.childNodes:
            return True
        else:
            return False

    def _get_childNodes(self):
        return self.childNodes

    def _get_firstChild(self):
        # Returns None implicitly when there are no children.
        if self.childNodes:
            return self.childNodes[0]

    def _get_lastChild(self):
        if self.childNodes:
            return self.childNodes[-1]

    def insertBefore(self, newChild, refChild):
        """Insert newChild immediately before refChild (append when
        refChild is None), fixing up sibling links.  Document fragments
        are expanded into their children."""
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(newChild.childNodes):
                self.insertBefore(c, refChild)
            ### The DOM does not clearly specify what to return in this case
            return newChild
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild.parentNode is not None:
            # Re-parenting: detach from the old parent first.
            newChild.parentNode.removeChild(newChild)
        if refChild is None:
            self.appendChild(newChild)
        else:
            try:
                index = self.childNodes.index(refChild)
            except ValueError:
                raise xml.dom.NotFoundErr()
            if newChild.nodeType in _nodeTypes_with_children:
                _clear_id_cache(self)
            self.childNodes.insert(index, newChild)
            newChild.nextSibling = refChild
            refChild.previousSibling = newChild
            if index:
                node = self.childNodes[index-1]
                node.nextSibling = newChild
                newChild.previousSibling = node
            else:
                newChild.previousSibling = None
            newChild.parentNode = self
        return newChild

    def appendChild(self, node):
        """Append node as the last child, expanding document fragments."""
        if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(node.childNodes):
                self.appendChild(c)
            ### The DOM does not clearly specify what to return in this case
            return node
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        elif node.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)
        if node.parentNode is not None:
            node.parentNode.removeChild(node)
        _append_child(self, node)
        node.nextSibling = None
        return node

    def replaceChild(self, newChild, oldChild):
        """Replace oldChild with newChild in place, returning oldChild."""
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            # Expand the fragment: remove oldChild, then splice the
            # fragment's children in before oldChild's old successor.
            refChild = oldChild.nextSibling
            self.removeChild(oldChild)
            return self.insertBefore(newChild, refChild)
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild is oldChild:
            return
        if newChild.parentNode is not None:
            newChild.parentNode.removeChild(newChild)
        try:
            index = self.childNodes.index(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        self.childNodes[index] = newChild
        newChild.parentNode = self
        oldChild.parentNode = None
        if (newChild.nodeType in _nodeTypes_with_children
            or oldChild.nodeType in _nodeTypes_with_children):
            _clear_id_cache(self)
        # Transfer the sibling links from oldChild to newChild, then
        # sever oldChild's links entirely.
        newChild.nextSibling = oldChild.nextSibling
        newChild.previousSibling = oldChild.previousSibling
        oldChild.nextSibling = None
        oldChild.previousSibling = None
        if newChild.previousSibling:
            newChild.previousSibling.nextSibling = newChild
        if newChild.nextSibling:
            newChild.nextSibling.previousSibling = newChild
        return oldChild

    def removeChild(self, oldChild):
        """Detach oldChild from this node and return it."""
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        if oldChild.nextSibling is not None:
            oldChild.nextSibling.previousSibling = oldChild.previousSibling
        if oldChild.previousSibling is not None:
            oldChild.previousSibling.nextSibling = oldChild.nextSibling
        oldChild.nextSibling = oldChild.previousSibling = None
        if oldChild.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)

        oldChild.parentNode = None
        return oldChild

    def normalize(self):
        """Merge adjacent Text children, drop empty ones, and recurse
        into element children; rebuilds sibling links as it goes."""
        L = []
        for child in self.childNodes:
            if child.nodeType == Node.TEXT_NODE:
                data = child.data
                if data and L and L[-1].nodeType == child.nodeType:
                    # collapse text node
                    node = L[-1]
                    node.data = node.data + child.data
                    node.nextSibling = child.nextSibling
                    child.unlink()
                elif data:
                    if L:
                        L[-1].nextSibling = child
                        child.previousSibling = L[-1]
                    else:
                        child.previousSibling = None
                    L.append(child)
                else:
                    # empty text node; discard
                    child.unlink()
            else:
                if L:
                    L[-1].nextSibling = child
                    child.previousSibling = L[-1]
                else:
                    child.previousSibling = None
                L.append(child)
                if child.nodeType == Node.ELEMENT_NODE:
                    child.normalize()
        if L:
            L[-1].nextSibling = None
        self.childNodes[:] = L

    def cloneNode(self, deep):
        return _clone_node(self, deep, self.ownerDocument or self)

    def isSupported(self, feature, version):
        return self.ownerDocument.implementation.hasFeature(feature, version)

    def _get_localName(self):
        # Overridden in Element and Attr where localName can be Non-Null
        return None

    # Node interfaces from Level 3 (WD 9 April 2002)

    def isSameNode(self, other):
        return self is other

    def getInterface(self, feature):
        if self.isSupported(feature, None):
            return self
        else:
            return None

    # The "user data" functions use a dictionary that is only present
    # if some user data has been set, so be careful not to assume it
    # exists.

    def getUserData(self, key):
        try:
            return self._user_data[key][0]
        except (AttributeError, KeyError):
            return None

    def setUserData(self, key, data, handler):
        """Associate (data, handler) with key; returns the previous
        data for that key, or None.  Setting data=None removes the key."""
        old = None
        try:
            d = self._user_data
        except AttributeError:
            d = {}
            self._user_data = d
        if key in d:
            old = d[key][0]
        if data is None:
            # ignore handlers passed for None
            handler = None
            if old is not None:
                del d[key]
        else:
            d[key] = (data, handler)
        return old

    def _call_user_data_handler(self, operation, src, dst):
        # Notify registered UserDataHandlers of clone/import/etc. events.
        if hasattr(self, "_user_data"):
            for key, (data, handler) in self._user_data.items():
                if handler is not None:
                    handler.handle(operation, key, data, src, dst)

    # minidom-specific API:

    def unlink(self):
        """Break reference cycles so the subtree can be collected by
        refcounting (pre-GC Python); recurses through all children."""
        self.parentNode = self.ownerDocument = None
        if self.childNodes:
            for child in self.childNodes:
                child.unlink()
        self.childNodes = NodeList()
        self.previousSibling = None
        self.nextSibling = None
# Read-only convenience properties backed by the _get_* accessors above.
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.__dict__["previousSibling"] = last
last.__dict__["nextSibling"] = node
childNodes.append(node)
node.__dict__["parentNode"] = self
def _in_document(node):
    # return True iff node is part of a document tree
    # Walk parent links until we either hit a Document or fall off the top.
    current = node
    while current is not None:
        if current.nodeType == Node.DOCUMENT_NODE:
            return True
        current = current.parentNode
    return False
def _write_data(writer, data):
"Writes datachars to writer."
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
    # Depth-first walk of the whole subtree; matching elements are
    # appended to rc in document order.  "*" matches every element.
    for child in parent.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            if name == "*" or child.tagName == name:
                rc.append(child)
        # Recurse into every child, element or not.
        _get_elements_by_tagName_helper(child, name, rc)
    return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
    # Namespace-aware variant: both the local name and the namespace
    # URI must match (either may be the "*" wildcard).  Recursion only
    # descends through element children, matching the original.
    for child in parent.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            name_ok = (localName == "*" or child.localName == localName)
            ns_ok = (nsURI == "*" or child.namespaceURI == nsURI)
            if name_ok and ns_ok:
                rc.append(child)
            _get_elements_by_tagName_ns_helper(child, nsURI, localName, rc)
    return rc
class DocumentFragment(Node):
    """Lightweight container for a group of sibling nodes; expands into
    its children when inserted into a tree (see Node.insertBefore)."""

    nodeType = Node.DOCUMENT_FRAGMENT_NODE
    nodeName = "#document-fragment"
    nodeValue = None
    attributes = None
    parentNode = None
    _child_node_types = (Node.ELEMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.NOTATION_NODE)

    def __init__(self):
        self.childNodes = NodeList()
class Attr(Node):
    """Attribute node.  The value is mirrored in a single Text child;
    __setattr__ keeps ``value``/``nodeValue`` and the child's data in
    sync."""

    nodeType = Node.ATTRIBUTE_NODE
    attributes = None
    ownerElement = None
    specified = False
    _is_id = False

    _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)

    def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
                 prefix=None):
        # skip setattr for performance
        d = self.__dict__
        d["nodeName"] = d["name"] = qName
        d["namespaceURI"] = namespaceURI
        d["prefix"] = prefix
        d['childNodes'] = NodeList()

        # Add the single child node that represents the value of the attr
        self.childNodes.append(Text())

        # nodeValue and value are set elsewhere

    def _get_localName(self):
        return self.nodeName.split(":", 1)[-1]

    def _get_name(self):
        return self.name

    def _get_specified(self):
        return self.specified

    def __setattr__(self, name, value):
        # Keep the paired attributes (value/nodeValue, name/nodeName)
        # and the Text child consistent, and flush the owner element's
        # ID cache when identity-relevant fields change.
        d = self.__dict__
        if name in ("value", "nodeValue"):
            d["value"] = d["nodeValue"] = value
            d2 = self.childNodes[0].__dict__
            d2["data"] = d2["nodeValue"] = value
            if self.ownerElement is not None:
                _clear_id_cache(self.ownerElement)
        elif name in ("name", "nodeName"):
            d["name"] = d["nodeName"] = value
            if self.ownerElement is not None:
                _clear_id_cache(self.ownerElement)
        else:
            d[name] = value

    def _set_prefix(self, prefix):
        nsuri = self.namespaceURI
        if prefix == "xmlns":
            # 'xmlns' is reserved for namespace declarations only.
            if nsuri and nsuri != XMLNS_NAMESPACE:
                raise xml.dom.NamespaceErr(
                    "illegal use of 'xmlns' prefix for the wrong namespace")
        d = self.__dict__
        d['prefix'] = prefix
        if prefix is None:
            newName = self.localName
        else:
            newName = "%s:%s" % (prefix, self.localName)
        if self.ownerElement:
            _clear_id_cache(self.ownerElement)
        d['nodeName'] = d['name'] = newName

    def _set_value(self, value):
        d = self.__dict__
        d['value'] = d['nodeValue'] = value
        if self.ownerElement:
            _clear_id_cache(self.ownerElement)
        self.childNodes[0].data = value

    def unlink(self):
        # This implementation does not call the base implementation
        # since most of that is not needed, and the expense of the
        # method call is not warranted.  We duplicate the removal of
        # children, but that's all we needed from the base class.
        elem = self.ownerElement
        if elem is not None:
            del elem._attrs[self.nodeName]
            del elem._attrsNS[(self.namespaceURI, self.localName)]
            if self._is_id:
                self._is_id = False
                elem._magic_id_nodes -= 1
                self.ownerDocument._magic_id_count -= 1
        for child in self.childNodes:
            child.unlink()
        del self.childNodes[:]

    def _get_isId(self):
        # Either explicitly marked via setIdAttribute*, or declared as
        # an ID in the document's schema/DTD information.
        if self._is_id:
            return True
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return False

        info = doc._get_elem_info(elem)
        if info is None:
            return False
        if self.namespaceURI:
            return info.isIdNS(self.namespaceURI, self.localName)
        else:
            return info.isId(self.nodeName)

    def _get_schemaType(self):
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return _no_type

        info = doc._get_elem_info(elem)
        if info is None:
            return _no_type
        if self.namespaceURI:
            return info.getAttributeTypeNS(self.namespaceURI, self.localName)
        else:
            return info.getAttributeType(self.nodeName)
# Read-only properties backed by the _get_* accessors above.
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
    """The attribute list is a transient interface to the underlying
    dictionaries.  Mutations here will change the underlying element's
    dictionary.

    Ordering is imposed artificially and does not reflect the order of
    attributes as found in an input document.
    """

    __slots__ = ('_attrs', '_attrsNS', '_ownerElement')

    def __init__(self, attrs, attrsNS, ownerElement):
        # Both dicts are the element's own; no copies are made.
        self._attrs = attrs
        self._attrsNS = attrsNS
        self._ownerElement = ownerElement

    def _get_length(self):
        return len(self._attrs)

    def item(self, index):
        # Index into the (arbitrary) dict key order; Python 2 keys()
        # returns a list, so integer indexing works here.
        try:
            return self[self._attrs.keys()[index]]
        except IndexError:
            return None

    def items(self):
        L = []
        for node in self._attrs.values():
            L.append((node.nodeName, node.value))
        return L

    def itemsNS(self):
        L = []
        for node in self._attrs.values():
            L.append(((node.namespaceURI, node.localName), node.value))
        return L

    def has_key(self, key):
        # String key -> qualified-name dict; tuple key -> (uri, localName).
        if isinstance(key, StringTypes):
            return self._attrs.has_key(key)
        else:
            return self._attrsNS.has_key(key)

    def keys(self):
        return self._attrs.keys()

    def keysNS(self):
        return self._attrsNS.keys()

    def values(self):
        return self._attrs.values()

    def get(self, name, value=None):
        return self._attrs.get(name, value)

    __len__ = _get_length

    __hash__ = None # Mutable type can't be correctly hashed

    def __cmp__(self, other):
        # Equal iff both maps wrap the very same attribute dict.
        if self._attrs is getattr(other, "_attrs", None):
            return 0
        else:
            return cmp(id(self), id(other))

    def __getitem__(self, attname_or_tuple):
        if isinstance(attname_or_tuple, tuple):
            return self._attrsNS[attname_or_tuple]
        else:
            return self._attrs[attname_or_tuple]

    # same as set
    def __setitem__(self, attname, value):
        if isinstance(value, StringTypes):
            try:
                node = self._attrs[attname]
            except KeyError:
                node = Attr(attname)
                node.ownerDocument = self._ownerElement.ownerDocument
                self.setNamedItem(node)
            node.value = value
        else:
            if not isinstance(value, Attr):
                raise TypeError, "value must be a string or Attr object"
            node = value
            self.setNamedItem(node)

    def getNamedItem(self, name):
        try:
            return self._attrs[name]
        except KeyError:
            return None

    def getNamedItemNS(self, namespaceURI, localName):
        try:
            return self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            return None

    def removeNamedItem(self, name):
        n = self.getNamedItem(name)
        if n is not None:
            # Remove from both indexes and detach from the element.
            _clear_id_cache(self._ownerElement)
            del self._attrs[n.nodeName]
            del self._attrsNS[(n.namespaceURI, n.localName)]
            if 'ownerElement' in n.__dict__:
                n.__dict__['ownerElement'] = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def removeNamedItemNS(self, namespaceURI, localName):
        n = self.getNamedItemNS(namespaceURI, localName)
        if n is not None:
            _clear_id_cache(self._ownerElement)
            del self._attrsNS[(n.namespaceURI, n.localName)]
            del self._attrs[n.nodeName]
            if 'ownerElement' in n.__dict__:
                n.__dict__['ownerElement'] = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def setNamedItem(self, node):
        if not isinstance(node, Attr):
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        old = self._attrs.get(node.name)
        if old:
            old.unlink()
        self._attrs[node.name] = node
        self._attrsNS[(node.namespaceURI, node.localName)] = node
        node.ownerElement = self._ownerElement
        _clear_id_cache(node.ownerElement)
        return old

    def setNamedItemNS(self, node):
        return self.setNamedItem(node)

    def __delitem__(self, attname_or_tuple):
        node = self[attname_or_tuple]
        _clear_id_cache(node.ownerElement)
        node.unlink()

    def __getstate__(self):
        return self._attrs, self._attrsNS, self._ownerElement

    def __setstate__(self, state):
        self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
            doc="Number of nodes in the NamedNodeMap.")

# Historical alias kept for backward compatibility.
AttributeList = NamedNodeMap
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
    """A DOM element node: tag name, namespace information, children and
    a double-indexed attribute collection (_attrs keyed by name,
    _attrsNS keyed by (namespaceURI, localName))."""

    nodeType = Node.ELEMENT_NODE
    nodeValue = None
    schemaType = _no_type

    # Count of attributes flagged as IDs via setIdAttributeNode() rather
    # than via DTD/schema element info.
    _magic_id_nodes = 0

    _child_node_types = (Node.ELEMENT_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE)

    def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
                 localName=None):
        self.tagName = self.nodeName = tagName
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.childNodes = NodeList()

        self._attrs = {}    # attributes are double-indexed:
        self._attrsNS = {}  #    tagName -> Attribute
                            #    URI,localName -> Attribute
        # In the future: consider lazy generation of attribute objects.
        # This is too tricky for now because of headaches with
        # namespaces.

    def _get_localName(self):
        # Everything after the first ":", or the full name if unprefixed.
        return self.tagName.split(":", 1)[-1]

    def _get_tagName(self):
        return self.tagName

    def unlink(self):
        """Break reference cycles so this subtree can be collected."""
        for attr in self._attrs.values():
            attr.unlink()
        self._attrs = None
        self._attrsNS = None
        Node.unlink(self)

    def getAttribute(self, attname):
        """Return the value of the named attribute, or "" if absent."""
        try:
            return self._attrs[attname].value
        except KeyError:
            return ""

    def getAttributeNS(self, namespaceURI, localName):
        """Namespace-aware getAttribute(); "" when absent."""
        try:
            return self._attrsNS[(namespaceURI, localName)].value
        except KeyError:
            return ""

    def setAttribute(self, attname, value):
        """Set (creating if necessary) the named attribute to *value*."""
        attr = self.getAttributeNode(attname)
        if attr is None:
            attr = Attr(attname)
            # for performance: write through __dict__ to bypass the
            # Attr.__setattr__ machinery
            d = attr.__dict__
            d["value"] = d["nodeValue"] = value
            d["ownerDocument"] = self.ownerDocument
            self.setAttributeNode(attr)
        elif value != attr.value:
            d = attr.__dict__
            d["value"] = d["nodeValue"] = value
            if attr.isId:
                _clear_id_cache(self)

    def setAttributeNS(self, namespaceURI, qualifiedName, value):
        """Namespace-aware setAttribute()."""
        prefix, localname = _nssplit(qualifiedName)
        attr = self.getAttributeNodeNS(namespaceURI, localname)
        if attr is None:
            # for performance
            attr = Attr(qualifiedName, namespaceURI, localname, prefix)
            d = attr.__dict__
            d["prefix"] = prefix
            d["nodeName"] = qualifiedName
            d["value"] = d["nodeValue"] = value
            d["ownerDocument"] = self.ownerDocument
            self.setAttributeNode(attr)
        else:
            d = attr.__dict__
            if value != attr.value:
                d["value"] = d["nodeValue"] = value
                if attr.isId:
                    _clear_id_cache(self)
            if attr.prefix != prefix:
                d["prefix"] = prefix
                d["nodeName"] = qualifiedName

    def getAttributeNode(self, attrname):
        return self._attrs.get(attrname)

    def getAttributeNodeNS(self, namespaceURI, localName):
        return self._attrsNS.get((namespaceURI, localName))

    def setAttributeNode(self, attr):
        """Attach *attr*, displacing any attribute it shadows in either
        index; returns the displaced attribute, if any."""
        if attr.ownerElement not in (None, self):
            raise xml.dom.InuseAttributeErr("attribute node already owned")
        old1 = self._attrs.get(attr.name, None)
        if old1 is not None:
            self.removeAttributeNode(old1)
        old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
        if old2 is not None and old2 is not old1:
            self.removeAttributeNode(old2)
        _set_attribute_node(self, attr)

        if old1 is not attr:
            # It might have already been part of this node, in which case
            # it doesn't represent a change, and should not be returned.
            return old1
        if old2 is not attr:
            return old2

    setAttributeNodeNS = setAttributeNode

    def removeAttribute(self, name):
        """Remove the named attribute; NotFoundErr when absent."""
        try:
            attr = self._attrs[name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)

    def removeAttributeNS(self, namespaceURI, localName):
        try:
            attr = self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)

    def removeAttributeNode(self, node):
        if node is None:
            raise xml.dom.NotFoundErr()
        try:
            self._attrs[node.name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        _clear_id_cache(self)
        # unlink() detaches the node from both attribute indexes.
        node.unlink()
        # Restore this since the node is still useful and otherwise
        # unlinked
        node.ownerDocument = self.ownerDocument

    removeAttributeNodeNS = removeAttributeNode

    def hasAttribute(self, name):
        # Bug fix: dict.has_key() is deprecated (and removed in
        # Python 3); the "in" operator is equivalent in both 2 and 3.
        return name in self._attrs

    def hasAttributeNS(self, namespaceURI, localName):
        return (namespaceURI, localName) in self._attrsNS

    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())

    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())

    def __repr__(self):
        return "<DOM Element: %s at %#x>" % (self.tagName, id(self))

    def writexml(self, writer, indent="", addindent="", newl=""):
        # indent = current indentation
        # addindent = indentation to add to higher levels
        # newl = newline string
        writer.write(indent+"<" + self.tagName)

        attrs = self._get_attributes()
        a_names = attrs.keys()
        a_names.sort()

        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            _write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">%s"%(newl))
            for node in self.childNodes:
                node.writexml(writer,indent+addindent,addindent,newl)
            writer.write("%s</%s>%s" % (indent,self.tagName,newl))
        else:
            writer.write("/>%s"%(newl))

    def _get_attributes(self):
        # A fresh NamedNodeMap view over the live attribute indexes.
        return NamedNodeMap(self._attrs, self._attrsNS, self)

    def hasAttributes(self):
        if self._attrs:
            return True
        else:
            return False

    # DOM Level 3 attributes, based on the 22 Oct 2002 draft

    def setIdAttribute(self, name):
        idAttr = self.getAttributeNode(name)
        self.setIdAttributeNode(idAttr)

    def setIdAttributeNS(self, namespaceURI, localName):
        idAttr = self.getAttributeNodeNS(namespaceURI, localName)
        self.setIdAttributeNode(idAttr)

    def setIdAttributeNode(self, idAttr):
        """Mark *idAttr* as a user-defined ID attribute of this element."""
        if idAttr is None or not self.isSameNode(idAttr.ownerElement):
            raise xml.dom.NotFoundErr()
        if _get_containing_entref(self) is not None:
            raise xml.dom.NoModificationAllowedErr()
        if not idAttr._is_id:
            idAttr.__dict__['_is_id'] = True
            self._magic_id_nodes += 1
            self.ownerDocument._magic_id_count += 1
            _clear_id_cache(self)
# Read-only property views over Element accessors.
defproperty(Element, "attributes",
            doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
            doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
    # Insert *attr* into both of *element*'s attribute indexes and make
    # the element its owner.
    _clear_id_cache(element)
    element._attrs[attr.name] = attr
    element._attrsNS[(attr.namespaceURI, attr.localName)] = attr

    # This creates a circular reference, but Element.unlink()
    # breaks the cycle since the references to the attribute
    # dictionaries are tossed.
    # Written via __dict__ to bypass Attr's attribute-setting hooks.
    attr.__dict__['ownerElement'] = element
class Childless:
    """Mixin that makes childless-ness easy to implement and avoids
    the complexity of the Node methods that deal with children.
    """

    # A childless node never has children, so the child-related
    # attributes are fixed class-level constants.
    attributes = None
    childNodes = EmptyNodeList()
    firstChild = None
    lastChild = None

    def _get_firstChild(self):
        return None

    def _get_lastChild(self):
        return None

    def appendChild(self, node):
        # Any attempt to add a child is a hierarchy violation.
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes cannot have children")

    def hasChildNodes(self):
        return False

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")

    def removeChild(self, oldChild):
        raise xml.dom.NotFoundErr(
            self.nodeName + " nodes do not have children")

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
    """A processing-instruction node: ``<?target data?>``."""

    nodeType = Node.PROCESSING_INSTRUCTION_NODE

    def __init__(self, target, data):
        self.target = self.nodeName = target
        self.data = self.nodeValue = data

    # "data"/"nodeValue" and "target"/"nodeName" are synchronized pairs;
    # __setattr__ below maintains that invariant on every assignment.
    def _get_data(self):
        return self.data

    def _set_data(self, value):
        d = self.__dict__
        d['data'] = d['nodeValue'] = value

    def _get_target(self):
        return self.target

    def _set_target(self, value):
        d = self.__dict__
        d['target'] = d['nodeName'] = value

    def __setattr__(self, name, value):
        d = self.__dict__
        if name in ("data", "nodeValue"):
            d['data'] = d['nodeValue'] = value
        elif name in ("target", "nodeName"):
            d['target'] = d['nodeName'] = value
        else:
            d[name] = value

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write("%s<?%s %s?>%s" % (indent, self.target, self.data, newl))
class CharacterData(Childless, Node):
    # Base class for nodes wrapping a string payload (Text, Comment,
    # CDATASection); keeps "data" and "nodeValue" in lock-step.
    def _get_length(self):
        return len(self.data)
    __len__ = _get_length

    def _get_data(self):
        return self.__dict__['data']
    def _set_data(self, data):
        d = self.__dict__
        d['data'] = d['nodeValue'] = data

    _get_nodeValue = _get_data
    _set_nodeValue = _set_data

    def __setattr__(self, name, value):
        # Assigning either "data" or "nodeValue" updates both keys;
        # written via __dict__ to avoid recursing into __setattr__.
        if name == "data" or name == "nodeValue":
            self.__dict__['data'] = self.__dict__['nodeValue'] = value
        else:
            self.__dict__[name] = value

    def __repr__(self):
        # Truncate long data to the first 10 characters in the repr.
        data = self.data
        if len(data) > 10:
            dotdotdot = "..."
        else:
            dotdotdot = ""
        return '<DOM %s node "%r%s">' % (
            self.__class__.__name__, data[0:10], dotdotdot)

    def substringData(self, offset, count):
        # Raises IndexSizeErr for negative offset/count, or when offset
        # is at/past the end of the data.
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        return self.data[offset:offset+count]

    def appendData(self, arg):
        self.data = self.data + arg

    def insertData(self, offset, arg):
        # Insert *arg* before position *offset* (no-op for empty arg).
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if arg:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset:])

    def deleteData(self, offset, count):
        # Remove *count* characters starting at *offset*.
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = self.data[:offset] + self.data[offset+count:]

    def replaceData(self, offset, count, arg):
        # Equivalent to deleteData(offset, count) + insertData(offset, arg).
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
    # Make sure we don't add an instance __dict__ if we don't already
    # have one, at least when that's possible:
    # XXX this does not work, CharacterData is an old-style class
    # __slots__ = ()

    nodeType = Node.TEXT_NODE
    nodeName = "#text"
    attributes = None

    def splitText(self, offset):
        # Split this node at *offset*; the tail becomes a new sibling
        # text node, which is returned.
        if offset < 0 or offset > len(self.data):
            raise xml.dom.IndexSizeErr("illegal offset value")
        newText = self.__class__()
        newText.data = self.data[offset:]
        newText.ownerDocument = self.ownerDocument
        next = self.nextSibling
        if self.parentNode and self in self.parentNode.childNodes:
            if next is None:
                self.parentNode.appendChild(newText)
            else:
                self.parentNode.insertBefore(newText, next)
        self.data = self.data[:offset]
        return newText

    def writexml(self, writer, indent="", addindent="", newl=""):
        _write_data(writer, "%s%s%s"%(indent, self.data, newl))

    # DOM Level 3 (WD 9 April 2002)

    def _get_wholeText(self):
        # Concatenate this node's data with all logically adjacent
        # Text/CDATA siblings on both sides.
        L = [self.data]
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.insert(0, n.data)
                n = n.previousSibling
            else:
                break
        n = self.nextSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.append(n.data)
                n = n.nextSibling
            else:
                break
        return ''.join(L)

    def replaceWholeText(self, content):
        # Replace this node plus all adjacent Text/CDATA siblings with a
        # single node holding *content*; returns that node, or None when
        # *content* is empty (everything is removed).
        # XXX This needs to be seriously changed if minidom ever
        # supports EntityReference nodes.
        parent = self.parentNode
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.previousSibling
                parent.removeChild(n)
                n = next
            else:
                break
        n = self.nextSibling
        if not content:
            parent.removeChild(self)
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.nextSibling
                parent.removeChild(n)
                n = next
            else:
                break
        if content:
            d = self.__dict__
            d['data'] = content
            d['nodeValue'] = content
            return self
        else:
            return None

    def _get_isWhitespaceInElementContent(self):
        # True only when the data is all-whitespace AND the containing
        # element's content model (per ElementInfo) is element-only.
        if self.data.strip():
            return False
        elem = _get_containing_element(self)
        if elem is None:
            return False
        info = self.ownerDocument._get_elem_info(elem)
        if info is None:
            return False
        else:
            return info.isElementContent()
# Read-only DOM Level 3 property views over the Text accessors above.
defproperty(Text, "isWhitespaceInElementContent",
            doc="True iff this text node contains only whitespace"
                " and is in element content.")
defproperty(Text, "wholeText",
            doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
    """Return the nearest Element ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ELEMENT_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
def _get_containing_entref(node):
    """Return the nearest EntityReference ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ENTITY_REFERENCE_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
class Comment(Childless, CharacterData):
    """An XML comment node: ``<!--data-->``."""

    nodeType = Node.COMMENT_NODE
    nodeName = "#comment"

    def __init__(self, data):
        self.data = self.nodeValue = data

    def writexml(self, writer, indent="", addindent="", newl=""):
        # XML forbids the sequence "--" inside a comment.
        if self.data.find("--") >= 0:
            raise ValueError("'--' is not allowed in a comment node")
        writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
    """A CDATA section node: ``<![CDATA[data]]>``."""
    # NOTE: Text is an old-style class in this file, so adding
    # __slots__ here would not suppress the instance __dict__.

    nodeType = Node.CDATA_SECTION_NODE
    nodeName = "#cdata-section"

    def writexml(self, writer, indent="", addindent="", newl=""):
        # A literal "]]>" would terminate the section prematurely.
        if "]]>" in self.data:
            raise ValueError("']]>' not allowed in a CDATA section")
        writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
    """Immutable NamedNodeMap backed by a plain sequence of nodes.

    Used for DocumentType.entities and .notations, which the DOM
    requires to be read-only: every mutator raises
    NoModificationAllowedErr.
    """

    __slots__ = '_seq',

    def __init__(self, seq=()):
        # seq should be a list or tuple
        self._seq = seq

    def __len__(self):
        return len(self._seq)

    def _get_length(self):
        return len(self._seq)

    def getNamedItem(self, name):
        """Return the node whose nodeName is *name*, or None."""
        for n in self._seq:
            if n.nodeName == name:
                return n

    def getNamedItemNS(self, namespaceURI, localName):
        """Return the node matching (namespaceURI, localName), or None."""
        for n in self._seq:
            if n.namespaceURI == namespaceURI and n.localName == localName:
                return n

    def __getitem__(self, name_or_tuple):
        """Index by nodeName, or by an (namespaceURI, localName) tuple."""
        if isinstance(name_or_tuple, tuple):
            node = self.getNamedItemNS(*name_or_tuple)
        else:
            node = self.getNamedItem(name_or_tuple)
        if node is None:
            # Bug fix: was the Python 2-only "raise KeyError, x" syntax;
            # the call form is valid in both Python 2 and 3.
            raise KeyError(name_or_tuple)
        return node

    def item(self, index):
        """Return the index-th node, or None when out of range."""
        if index < 0:
            return None
        try:
            return self._seq[index]
        except IndexError:
            return None

    def removeNamedItem(self, name):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def removeNamedItemNS(self, namespaceURI, localName):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItem(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItemNS(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def __getstate__(self):
        # Pickle support for a __slots__ class.
        return [self._seq]

    def __setstate__(self, state):
        self._seq = state[0]
# Read-only "length" property backed by _get_length above.
defproperty(ReadOnlySequentialNamedNodeMap, "length",
            doc="Number of entries in the NamedNodeMap.")
class Identified:
    """Mix-in providing the publicId and systemId attributes."""
    # XXX old-style class, so __slots__ would have no effect here.

    def _identified_mixin_init(self, publicId, systemId):
        self.publicId, self.systemId = publicId, systemId

    def _get_publicId(self):
        return self.publicId

    def _get_systemId(self):
        return self.systemId
class DocumentType(Identified, Childless, Node):
    """A ``<!DOCTYPE ...>`` node, with read-only entity/notation maps."""
    nodeType = Node.DOCUMENT_TYPE_NODE
    nodeValue = None
    name = None
    publicId = None
    systemId = None
    internalSubset = None

    def __init__(self, qualifiedName):
        self.entities = ReadOnlySequentialNamedNodeMap()
        self.notations = ReadOnlySequentialNamedNodeMap()
        if qualifiedName:
            prefix, localname = _nssplit(qualifiedName)
            self.name = localname
        self.nodeName = self.name

    def _get_internalSubset(self):
        return self.internalSubset

    def cloneNode(self, deep):
        """Clone this doctype; only supported when it is not yet owned
        by a document (returns None otherwise)."""
        if self.ownerDocument is None:
            # it's ok
            clone = DocumentType(None)
            clone.name = self.name
            clone.nodeName = self.name
            operation = xml.dom.UserDataHandler.NODE_CLONED
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in self.notations._seq:
                    notation = Notation(n.nodeName, n.publicId, n.systemId)
                    clone.notations._seq.append(notation)
                    n._call_user_data_handler(operation, n, notation)
                for e in self.entities._seq:
                    entity = Entity(e.nodeName, e.publicId, e.systemId,
                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    clone.entities._seq.append(entity)
                    # Bug fix: the handler's source node was passed as
                    # "n" (the notations loop variable -- a NameError
                    # when there are no notations); the source is "e".
                    e._call_user_data_handler(operation, e, entity)
            self._call_user_data_handler(operation, self, clone)
            return clone
        else:
            return None

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write("<!DOCTYPE ")
        writer.write(self.name)
        if self.publicId:
            writer.write("%s PUBLIC '%s'%s '%s'"
                         % (newl, self.publicId, newl, self.systemId))
        elif self.systemId:
            writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
        if self.internalSubset is not None:
            writer.write(" [")
            writer.write(self.internalSubset)
            writer.write("]")
        writer.write(">"+newl)
class Entity(Identified, Node):
    """An entity declared in the DTD.  Its children cannot be modified
    through the DOM API: all four mutators below raise."""
    attributes = None
    nodeType = Node.ENTITY_NODE
    nodeValue = None

    actualEncoding = None
    encoding = None
    version = None

    def __init__(self, name, publicId, systemId, notation):
        self.nodeName = name
        self.notationName = notation
        self.childNodes = NodeList()
        self._identified_mixin_init(publicId, systemId)

    def _get_actualEncoding(self):
        return self.actualEncoding

    def _get_encoding(self):
        return self.encoding

    def _get_version(self):
        return self.version

    def appendChild(self, newChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot append children to an entity node")

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot insert children below an entity node")

    def removeChild(self, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot remove children from an entity node")

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
    """A notation declared in the DTD, identified by public/system IDs."""
    nodeType = Node.NOTATION_NODE
    nodeValue = None

    def __init__(self, name, publicId, systemId):
        self.nodeName = name
        self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
    """Factory for Document/DocumentType objects plus feature queries."""

    # (feature, version) pairs this implementation claims to support;
    # a None version matches "any version".
    _features = [("core", "1.0"),
                 ("core", "2.0"),
                 ("core", "3.0"),
                 ("core", None),
                 ("xml", "1.0"),
                 ("xml", "2.0"),
                 ("xml", "3.0"),
                 ("xml", None),
                 ("ls-load", "3.0"),
                 ("ls-load", None),
                ]

    def hasFeature(self, feature, version):
        # Feature names compare case-insensitively; "" means any version.
        if version == "":
            version = None
        return (feature.lower(), version) in self._features

    def createDocument(self, namespaceURI, qualifiedName, doctype):
        # Create a new Document, optionally with a namespaced root
        # element and an (unowned) doctype attached.
        if doctype and doctype.parentNode is not None:
            raise xml.dom.WrongDocumentErr(
                "doctype object owned by another DOM tree")
        doc = self._create_document()

        add_root_element = not (namespaceURI is None
                                and qualifiedName is None
                                and doctype is None)

        if not qualifiedName and add_root_element:
            # The spec is unclear what to raise here; SyntaxErr
            # would be the other obvious candidate. Since Xerces raises
            # InvalidCharacterErr, and since SyntaxErr is not listed
            # for createDocument, that seems to be the better choice.
            # XXX: need to check for illegal characters here and in
            # createElement.

            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and DocType are
            # Null the document is returned without a document element
            # Otherwise if doctype or namespaceURI are not None
            # Then we go back to the above problem
            raise xml.dom.InvalidCharacterErr("Element with no name")

        if add_root_element:
            prefix, localname = _nssplit(qualifiedName)
            if prefix == "xml" \
               and namespaceURI != "http://www.w3.org/XML/1998/namespace":
                raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
            if prefix and not namespaceURI:
                raise xml.dom.NamespaceErr(
                    "illegal use of prefix without namespaces")
            element = doc.createElementNS(namespaceURI, qualifiedName)
            if doctype:
                doc.appendChild(doctype)
            doc.appendChild(element)

        if doctype:
            doctype.parentNode = doctype.ownerDocument = doc

        doc.doctype = doctype
        doc.implementation = self
        return doc

    def createDocumentType(self, qualifiedName, publicId, systemId):
        doctype = DocumentType(qualifiedName)
        doctype.publicId = publicId
        doctype.systemId = systemId
        return doctype

    # DOM Level 3 (WD 9 April 2002)

    def getInterface(self, feature):
        # Return this object itself when it supports *feature*.
        if self.hasFeature(feature, None):
            return self
        else:
            return None

    # internal
    def _create_document(self):
        return Document()
class ElementInfo(object):
    """Object that represents content-model information for an element.

    This implementation is not expected to be used in practice; DOM
    builders should provide implementations which do the right thing
    using information available to it.
    """

    __slots__ = 'tagName',

    def __init__(self, name):
        self.tagName = name

    def getAttributeType(self, aname):
        # Default: no schema/DTD type information available.
        return _no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        return _no_type

    def isElementContent(self):
        return False

    def isEmpty(self):
        """Returns true iff this element is declared to have an EMPTY
        content model."""
        return False

    def isId(self, aname):
        """Returns true iff the named attribute is a DTD-style ID."""
        return False

    def isIdNS(self, namespaceURI, localName):
        """Returns true iff the identified attribute is a DTD-style ID."""
        return False

    def __getstate__(self):
        # Pickle support for a __slots__ class.
        return self.tagName

    def __setstate__(self, state):
        self.tagName = state
def _clear_id_cache(node):
    """Invalidate the owning document's getElementById() cache."""
    if node.nodeType == Node.DOCUMENT_NODE:
        doc = node
    elif _in_document(node):
        doc = node.ownerDocument
    else:
        # Detached node: there is no document cache to invalidate.
        return
    doc._id_cache.clear()
    doc._id_search_stack = None
class Document(Node, DocumentLS):
    """A DOM document node: the tree root, the factory for every other
    node type, and the owner of the getElementById() cache."""

    _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)

    nodeType = Node.DOCUMENT_NODE
    nodeName = "#document"
    nodeValue = None
    attributes = None
    doctype = None
    parentNode = None
    previousSibling = nextSibling = None

    implementation = DOMImplementation()

    # Document attributes from Level 3 (WD 9 April 2002)
    actualEncoding = None
    encoding = None
    standalone = None
    version = None
    strictErrorChecking = False
    errorHandler = None
    documentURI = None

    # Count of attributes anywhere in the tree marked as IDs via
    # Element.setIdAttribute*().
    _magic_id_count = 0

    def __init__(self):
        self.childNodes = NodeList()
        # mapping of (namespaceURI, localName) -> ElementInfo
        #        and tagName -> ElementInfo
        self._elem_info = {}
        self._id_cache = {}
        self._id_search_stack = None

    def _get_elem_info(self, element):
        # Look up content-model info, preferring the namespaced key.
        if element.namespaceURI:
            key = element.namespaceURI, element.localName
        else:
            key = element.tagName
        return self._elem_info.get(key)

    def _get_actualEncoding(self):
        return self.actualEncoding

    def _get_doctype(self):
        return self.doctype

    def _get_documentURI(self):
        return self.documentURI

    def _get_encoding(self):
        return self.encoding

    def _get_errorHandler(self):
        return self.errorHandler

    def _get_standalone(self):
        return self.standalone

    def _get_strictErrorChecking(self):
        return self.strictErrorChecking

    def _get_version(self):
        return self.version

    def appendChild(self, node):
        """Append *node*; at most one element child (the document
        element) is permitted."""
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        if node.parentNode is not None:
            # This needs to be done before the next test since this
            # may *be* the document element, in which case it should
            # end up re-ordered to the end.
            node.parentNode.removeChild(node)

        if node.nodeType == Node.ELEMENT_NODE \
           and self._get_documentElement():
            raise xml.dom.HierarchyRequestErr(
                "two document elements disallowed")
        return Node.appendChild(self, node)

    def removeChild(self, oldChild):
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        oldChild.nextSibling = oldChild.previousSibling = None
        oldChild.parentNode = None
        if self.documentElement is oldChild:
            self.documentElement = None

        return oldChild

    def _get_documentElement(self):
        # The first (and only) element child, or None.
        for node in self.childNodes:
            if node.nodeType == Node.ELEMENT_NODE:
                return node

    def unlink(self):
        if self.doctype is not None:
            self.doctype.unlink()
            self.doctype = None
        Node.unlink(self)

    def cloneNode(self, deep):
        """Deep-clone the document; shallow clones are not supported."""
        if not deep:
            return None
        clone = self.implementation.createDocument(None, None, None)
        clone.encoding = self.encoding
        clone.standalone = self.standalone
        clone.version = self.version
        for n in self.childNodes:
            childclone = _clone_node(n, deep, clone)
            assert childclone.ownerDocument.isSameNode(clone)
            clone.childNodes.append(childclone)
            if childclone.nodeType == Node.DOCUMENT_NODE:
                assert clone.documentElement is None
            elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
                assert clone.doctype is None
                clone.doctype = childclone
            childclone.parentNode = clone
        self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
                                     self, clone)
        return clone

    def createDocumentFragment(self):
        d = DocumentFragment()
        d.ownerDocument = self
        return d

    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e

    def createTextNode(self, data):
        if not isinstance(data, StringTypes):
            # Bug fix: was the Python 2-only "raise TypeError, msg"
            # syntax; the call form is valid in both Python 2 and 3.
            raise TypeError("node contents must be a string")
        t = Text()
        t.data = data
        t.ownerDocument = self
        return t

    def createCDATASection(self, data):
        if not isinstance(data, StringTypes):
            # Bug fix: same raise-syntax modernization as above.
            raise TypeError("node contents must be a string")
        c = CDATASection()
        c.data = data
        c.ownerDocument = self
        return c

    def createComment(self, data):
        c = Comment(data)
        c.ownerDocument = self
        return c

    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p.ownerDocument = self
        return p

    def createAttribute(self, qName):
        a = Attr(qName)
        a.ownerDocument = self
        a.value = ""
        return a

    def createElementNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix)
        e.ownerDocument = self
        return e

    def createAttributeNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        a = Attr(qualifiedName, namespaceURI, localName, prefix)
        a.ownerDocument = self
        a.value = ""
        return a

    # A couple of implementation-specific helpers to create node types
    # not supported by the W3C DOM specs:

    def _create_entity(self, name, publicId, systemId, notationName):
        e = Entity(name, publicId, systemId, notationName)
        e.ownerDocument = self
        return e

    def _create_notation(self, name, publicId, systemId):
        n = Notation(name, publicId, systemId)
        n.ownerDocument = self
        return n

    def getElementById(self, id):
        """Return the element whose ID is *id*, using (and refreshing)
        the document-level ID cache; None when there is no match."""
        if id in self._id_cache:
            return self._id_cache[id]
        if not (self._elem_info or self._magic_id_count):
            return None

        stack = self._id_search_stack
        if stack is None:
            # we never searched before, or the cache has been cleared
            stack = [self.documentElement]
            self._id_search_stack = stack
        elif not stack:
            # Previous search was completed and cache is still valid;
            # no matching node.
            return None

        result = None
        while stack:
            node = stack.pop()
            # add child elements to stack for continued searching
            stack.extend([child for child in node.childNodes
                          if child.nodeType in _nodeTypes_with_children])
            # check this node
            info = self._get_elem_info(node)
            if info:
                # We have to process all ID attributes before
                # returning in order to get all the attributes set to
                # be IDs using Element.setIdAttribute*().
                for attr in node.attributes.values():
                    if attr.namespaceURI:
                        if info.isIdNS(attr.namespaceURI, attr.localName):
                            self._id_cache[attr.value] = node
                            if attr.value == id:
                                result = node
                            elif not node._magic_id_nodes:
                                break
                    elif info.isId(attr.name):
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif not node._magic_id_nodes:
                            break
                    elif attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif node._magic_id_nodes == 1:
                            break
            elif node._magic_id_nodes:
                for attr in node.attributes.values():
                    if attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
            if result is not None:
                break
        return result

    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())

    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())

    def isSupported(self, feature, version):
        return self.implementation.hasFeature(feature, version)

    def importNode(self, node, deep):
        """Copy *node* from another document into this one."""
        if node.nodeType == Node.DOCUMENT_NODE:
            raise xml.dom.NotSupportedErr("cannot import document nodes")
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            raise xml.dom.NotSupportedErr("cannot import document type nodes")
        return _clone_node(node, deep, self)

    def writexml(self, writer, indent="", addindent="", newl="",
                 encoding=None):
        if encoding is None:
            writer.write('<?xml version="1.0" ?>'+newl)
        else:
            writer.write('<?xml version="1.0" encoding="%s"?>%s' % (encoding, newl))
        for node in self.childNodes:
            node.writexml(writer, indent, addindent, newl)

    # DOM Level 3 (WD 9 April 2002)

    def renameNode(self, n, namespaceURI, name):
        """Rename an element or attribute node of this document in
        place; other node types are not supported."""
        if n.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr(
                "cannot rename nodes from other documents;\n"
                "expected %s,\nfound %s" % (self, n.ownerDocument))
        if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
            raise xml.dom.NotSupportedErr(
                "renameNode() only applies to element and attribute nodes")
        if namespaceURI != EMPTY_NAMESPACE:
            if ':' in name:
                prefix, localName = name.split(':', 1)
                if (prefix == "xmlns"
                    and namespaceURI != xml.dom.XMLNS_NAMESPACE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of 'xmlns' prefix")
            else:
                if (name == "xmlns"
                    and namespaceURI != xml.dom.XMLNS_NAMESPACE
                    and n.nodeType == Node.ATTRIBUTE_NODE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of the 'xmlns' attribute")
                prefix = None
                localName = name
        else:
            prefix = None
            localName = None
        if n.nodeType == Node.ATTRIBUTE_NODE:
            element = n.ownerElement
            if element is not None:
                is_id = n._is_id
                element.removeAttributeNode(n)
        else:
            element = None
        # avoid __setattr__
        d = n.__dict__
        d['prefix'] = prefix
        d['localName'] = localName
        d['namespaceURI'] = namespaceURI
        d['nodeName'] = name
        if n.nodeType == Node.ELEMENT_NODE:
            d['tagName'] = name
        else:
            # attribute node
            d['name'] = name
            if element is not None:
                element.setAttributeNode(n)
                if is_id:
                    element.setIdAttributeNode(n)
        # It's not clear from a semantic perspective whether we should
        # call the user data handlers for the NODE_RENAMED event since
        # we're re-using the existing node.  The draft spec has been
        # interpreted as meaning "no, don't call the handler unless a
        # new node is created."
        return n
# Read-only property backed by Document._get_documentElement.
defproperty(Document, "documentElement",
            doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
    """
    Clone a node and give it the new owner document.
    Called by Node.cloneNode and Document.importNode
    """
    # Same document -> this is a clone; different document -> an import.
    if node.ownerDocument.isSameNode(newOwnerDocument):
        operation = xml.dom.UserDataHandler.NODE_CLONED
    else:
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
    if node.nodeType == Node.ELEMENT_NODE:
        clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                 node.nodeName)
        for attr in node.attributes.values():
            clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
            a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
            a.specified = attr.specified

        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)

    elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
        clone = newOwnerDocument.createDocumentFragment()
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)

    elif node.nodeType == Node.TEXT_NODE:
        clone = newOwnerDocument.createTextNode(node.data)
    elif node.nodeType == Node.CDATA_SECTION_NODE:
        clone = newOwnerDocument.createCDATASection(node.data)
    elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
        clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                             node.data)
    elif node.nodeType == Node.COMMENT_NODE:
        clone = newOwnerDocument.createComment(node.data)
    elif node.nodeType == Node.ATTRIBUTE_NODE:
        clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                   node.nodeName)
        clone.specified = True
        clone.value = node.value
    elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
        assert node.ownerDocument is not newOwnerDocument
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
        clone = newOwnerDocument.implementation.createDocumentType(
            node.name, node.publicId, node.systemId)
        clone.ownerDocument = newOwnerDocument
        if deep:
            clone.entities._seq = []
            clone.notations._seq = []
            for n in node.notations._seq:
                notation = Notation(n.nodeName, n.publicId, n.systemId)
                notation.ownerDocument = newOwnerDocument
                clone.notations._seq.append(notation)
                if hasattr(n, '_call_user_data_handler'):
                    n._call_user_data_handler(operation, n, notation)
            for e in node.entities._seq:
                entity = Entity(e.nodeName, e.publicId, e.systemId,
                                e.notationName)
                entity.actualEncoding = e.actualEncoding
                entity.encoding = e.encoding
                entity.version = e.version
                entity.ownerDocument = newOwnerDocument
                clone.entities._seq.append(entity)
                if hasattr(e, '_call_user_data_handler'):
                    # Bug fix: the handler's source node was passed as
                    # "n", the variable left over from the notations
                    # loop above (a NameError when there are no
                    # notations); the source of this copy is "e".
                    e._call_user_data_handler(operation, e, entity)
    else:
        # Note the cloning of Document and DocumentType nodes is
        # implementation specific.  minidom handles those cases
        # directly in the cloneNode() methods.
        raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))

    # Check for _call_user_data_handler() since this could conceivably
    # used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
    if hasattr(node, '_call_user_data_handler'):
        node._call_user_data_handler(operation, node, clone)
    return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _get_StringIO():
    """Return a fresh StringIO instance for serializing DOM trees."""
    # we can't use cStringIO since it doesn't support Unicode strings
    from StringIO import StringIO
    return StringIO()
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
    """Parse a file into a DOM by filename or file object.

    The fast expat builder is used unless a custom parser or buffer size
    is requested, in which case parsing goes through pulldom.
    """
    use_expat = parser is None and not bufsize
    if use_expat:
        from xml.dom import expatbuilder
        return expatbuilder.parse(file)
    from xml.dom import pulldom
    options = {'parser': parser, 'bufsize': bufsize}
    return _do_pulldom_parse(pulldom.parse, (file,), options)
def parseString(string, parser=None):
    """Parse a DOM from an XML string, optionally with a custom parser."""
    if parser is not None:
        from xml.dom import pulldom
        return _do_pulldom_parse(pulldom.parseString, (string,),
                                 {'parser': parser})
    from xml.dom import expatbuilder
    return expatbuilder.parseString(string)
def getDOMImplementation(features=None):
    """Return minidom's DOMImplementation, or None if a requested
    feature is not supported.

    features may be a feature string or an iterable of (feature,
    version) pairs.
    """
    impl = Document.implementation
    if features:
        if isinstance(features, StringTypes):
            features = domreg._parse_feature_string(features)
        for feature, version in features:
            if not impl.hasFeature(feature, version):
                return None
    return impl
| Python |
'''FlickrAPI uses its own in-memory XML representation, to be able to easily
use the info returned from Flickr.
There is no need to use this module directly, you'll get XMLNode instances
from the FlickrAPI method calls.
'''
import xml.dom.minidom
__all__ = ('XMLNode', )
class XMLNode:
    """Generic container for one parsed XML element.

    Instance attributes:
      name   -- the element name (but see the 'name' caveat below)
      text   -- concatenated text content of the element
      attrib -- dict of the element's XML attributes
      xml    -- the raw XML string when parse() was asked to store it

    Child elements appear as instance attributes holding lists of
    XMLNode objects; a child element literally called <name> shadows
    the element-name attribute.
    """
    def __init__(self):
        """Construct an empty XML node."""
        self.name = ""
        self.text = ""
        self.attrib = {}
        self.xml = None
    def __setitem__(self, key, item):
        """Store an XML attribute in the attrib dict."""
        self.attrib[key] = item
    def __getitem__(self, key):
        """Look up an XML attribute in the attrib dict."""
        return self.attrib[key]
    @classmethod
    def __parse_element(cls, element, this_node):
        """Recursively populate this_node from a DOM element."""
        this_node.name = element.nodeName
        # Copy the DOM element's attributes into the attrib dict.
        attrs = element.attributes
        for index in range(attrs.length):
            attr = attrs.item(index)
            this_node[attr.name] = attr.nodeValue
        for child in element.childNodes:
            if child.nodeType == xml.dom.Node.ELEMENT_NODE:
                node = cls()
                # Ugly fix for an ugly bug: a child element called <name>
                # must overwrite the 'name' attribute that stores the
                # XML element name, instead of appending to it.
                if child.nodeName == 'name' or \
                        not hasattr(this_node, child.nodeName):
                    setattr(this_node, child.nodeName, [])
                getattr(this_node, child.nodeName).append(node)
                cls.__parse_element(child, node)
            elif child.nodeType == xml.dom.Node.TEXT_NODE:
                this_node.text += child.nodeValue
        return this_node
    @classmethod
    def parse(cls, xml_str, store_xml=False):
        """Convert an XML string into a tree of XMLNodes; return the root.

        xml_str -- the XML to parse
        store_xml -- if True, the raw XML is kept in the root node's .xml
        """
        dom = xml.dom.minidom.parseString(xml_str)
        root_node = cls()
        if store_xml:
            root_node.xml = xml_str
        return cls.__parse_element(dom.firstChild, root_node)
| Python |
# -*- coding: utf-8 -*-
'''Helper functions for the short http://fli.kr/p/... URL notation.
Photo IDs can be converted to and from Base58 short IDs, and a short
URL can be generated from a photo ID.
The implementation of the encoding and decoding functions is based on
the posts by stevefaeembra and Kohichi on
http://www.flickr.com/groups/api/discuss/72157616713786392/
'''
__all__ = ['encode', 'decode', 'url', 'SHORT_URL']
# Base58 alphabet used by Flickr: digits and letters minus the
# easily-confused characters 0, O, I and l.
ALPHABET = u'123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
# Number of symbols in the alphabet (the encoding base, 58).
ALPHALEN = len(ALPHABET)
# Template for short photo URLs; %s is the Base58 short ID.
SHORT_URL = u'http://flic.kr/p/%s'
def encode(photo_id):
    '''encode(photo_id) -> short id

    Convert a numeric photo ID to its Base58 short-URL form.

    >>> encode(u'4325695128')
    u'7Afjsu'
    >>> encode(u'2811466321')
    u'5hruZg'
    '''
    remaining = int(photo_id)
    # Collect Base58 digits least-significant first, then reverse.
    digits = []
    while remaining >= ALPHALEN:
        remaining, digit = divmod(remaining, ALPHALEN)
        digits.append(ALPHABET[digit])
    digits.append(ALPHABET[remaining])
    digits.reverse()
    return u''.join(digits)
def decode(short_id):
    '''decode(short id) -> photo id

    Convert a Base58 short ID back to the numeric photo ID (as a
    unicode string).

    >>> decode(u'7Afjsu')
    u'4325695128'
    >>> decode(u'5hruZg')
    u'2811466321'
    '''
    # Horner's rule: fold the digits left-to-right instead of summing
    # positional powers from the right.
    value = 0
    for char in short_id:
        value = value * ALPHALEN + ALPHABET.index(char)
    return unicode(value)
def url(photo_id):
    '''url(photo id) -> short url

    >>> url(u'4325695128')
    u'http://flic.kr/p/7Afjsu'
    >>> url(u'2811466321')
    u'http://flic.kr/p/5hruZg'
    '''
    return SHORT_URL % encode(photo_id)
| Python |
# -*- encoding: utf-8 -*-
'''Call result cache.
Designed to have the same interface as the `Django low-level cache API`_.
Heavily inspired (read: mostly copied-and-pasted) from the Django framework -
thanks to those guys for designing a simple and effective cache!
.. _`Django low-level cache API`: http://www.djangoproject.com/documentation/cache/#the-low-level-cache-api
'''
import threading
import time
class SimpleCache(object):
    '''Simple in-memory response cache for FlickrAPI calls.

    Holds at most ``max_entries`` items; each entry expires ``timeout``
    seconds after it was stored:

    >>> cache = SimpleCache(timeout=120, max_entries=50)
    '''
    def __init__(self, timeout=300, max_entries=200):
        self.storage = {}
        self.expire_info = {}
        self.lock = threading.RLock()
        self.default_timeout = timeout
        self.max_entries = max_entries
        # When culling, every cull_frequency-th key is evicted.
        self.cull_frequency = 3
    def locking(method):
        '''Method decorator, ensures the method call is locked'''
        def locked(self, *args, **kwargs):
            self.lock.acquire()
            try:
                return method(self, *args, **kwargs)
            finally:
                self.lock.release()
        return locked
    @locking
    def get(self, key, default=None):
        '''Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        '''
        expiry = self.expire_info.get(key)
        if expiry is None:
            # never stored
            return default
        if expiry < time.time():
            # stored but stale; evict it now
            self.delete(key)
            return default
        return self.storage[key]
    @locking
    def set(self, key, value, timeout=None):
        '''Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        '''
        if len(self.storage) >= self.max_entries:
            self.cull()
        if timeout is None:
            timeout = self.default_timeout
        self.storage[key] = value
        self.expire_info[key] = time.time() + timeout
    @locking
    def delete(self, key):
        '''Deletes a key from the cache, failing silently if it doesn't exist.'''
        self.storage.pop(key, None)
        self.expire_info.pop(key, None)
    @locking
    def has_key(self, key):
        '''Returns True if the key is in the cache and has not expired.'''
        return self.get(key) is not None
    @locking
    def __contains__(self, key):
        '''Returns True if the key is in the cache and has not expired.'''
        return self.has_key(key)
    @locking
    def cull(self):
        '''Reduces the number of cached items'''
        victims = [key for index, key in enumerate(self.storage)
                   if index % self.cull_frequency == 0]
        for key in victims:
            self.delete(key)
    @locking
    def __len__(self):
        '''Returns the number of cached items -- they might be expired
        though.
        '''
        return len(self.storage)
| Python |
# -*- encoding: utf-8 -*-
'''Module for encoding data as form-data/multipart'''
import os
import base64
class Part(object):
    '''A single part of multipart/form-data content.

    parameters   -- dict rendered into the Content-Disposition line,
                    e.g. {'name': 'headline'}
    payload      -- the body of the part; unicode payloads are encoded
                    as UTF-8 when rendered
    content_type -- optional MIME type, emitted as a Content-Type line
    '''
    def __init__(self, parameters, payload, content_type=None):
        self.content_type = content_type
        self.parameters = parameters
        self.payload = payload
    def render(self):
        '''Renders this part -> List of Strings'''
        rendered = ['%s="%s"' % pair for pair in self.parameters.iteritems()]
        lines = ['Content-Disposition: form-data; %s' % '; '.join(rendered)]
        if self.content_type:
            lines.append("Content-Type: %s" % self.content_type)
        # Blank line separates the headers from the payload.
        lines.append('')
        body = self.payload
        if isinstance(body, unicode):
            body = body.encode('utf-8')
        lines.append(body)
        return lines
class FilePart(Part):
    '''A single part with a file as the payload.

    This example has the same semantics as the second Part example:

    >>> FilePart({'name': 'photo'}, 'tests/photo.jpg', 'image/jpeg')
    ... #doctest: +ELLIPSIS
    <flickrapi.multipart.FilePart object at 0x...>
    '''
    def __init__(self, parameters, filename, content_type):
        parameters['filename'] = filename
        imagefile = open(filename, 'rb')
        try:
            # read inside try/finally: the original leaked the file
            # handle when read() raised (e.g. on an I/O error).
            payload = imagefile.read()
        finally:
            imagefile.close()
        Part.__init__(self, parameters, payload, content_type)
def boundary():
    """Generate a random boundary, a bit like Python 2.5's uuid module."""
    # 16 random bytes, base64-encoded with '+' and '/' mapped to 'a'/'b'
    # so the boundary stays alphanumeric; '=' padding is stripped.
    # (Local renamed so it no longer shadows the builtin 'bytes'.)
    random_bytes = os.urandom(16)
    return base64.b64encode(random_bytes, 'ab').strip('=')
class Multipart(object):
    '''Container for multipart data'''
    def __init__(self):
        '''Creates a new, empty Multipart with a random boundary.'''
        self.parts = []
        self.content_type = 'form-data/multipart'
        self.boundary = boundary()
    def attach(self, part):
        '''Attaches a part'''
        self.parts.append(part)
    def __str__(self):
        '''Renders the Multipart'''
        rendered = []
        for part in self.parts:
            rendered.append('--' + self.boundary)
            rendered.extend(part.render())
        rendered.append('--' + self.boundary + "--")
        return '\r\n'.join(rendered)
    def header(self):
        '''Returns the top-level HTTP header of this multipart'''
        return ("Content-Type",
                "multipart/form-data; boundary=%s" % self.boundary)
| Python |
# -*- encoding: utf-8 -*-
'''HTTPHandler that supports a callback method for progress reports.
'''
import urllib2
import httplib
import logging
__all__ = ['urlopen']
# Configure a default handler so LOG output is visible even when the
# application has not configured logging itself.
logging.basicConfig()
LOG = logging.getLogger(__name__)
# Module-global progress callback; installed via set_callback()/urlopen().
progress_callback = None
class ReportingSocket(object):
    '''Wrapper around a socket. Gives progress report through a
    callback function.
    '''
    # Never send chunks smaller than this many bytes.
    min_chunksize = 10240
    def __init__(self, socket):
        self.socket = socket
    def sendall(self, bits):
        '''Sends all data, calling the callback function for every
        sent chunk.
        '''
        LOG.debug("SENDING: %s..." % bits[0:30])
        total = len(bits)
        sent = 0
        # Aim for ~100 progress reports, but never tiny chunks.
        chunksize = max(self.min_chunksize, total // 100)
        while bits:
            chunk, bits = bits[:chunksize], bits[chunksize:]
            self.socket.sendall(chunk)
            sent += len(chunk)
            if progress_callback:
                percentage = float(sent) / total * 100
                progress_callback(percentage, sent == total)
    def makefile(self, mode, bufsize):
        '''Returns a file-like object for the socket.'''
        return self.socket.makefile(mode, bufsize)
    def close(self):
        '''Closes the socket.'''
        return self.socket.close()
class ProgressHTTPConnection(httplib.HTTPConnection):
    '''HTTPConnection that gives regular progress reports during
    sending of data.
    '''
    def connect(self):
        '''Connects to a HTTP server.'''
        httplib.HTTPConnection.connect(self)
        # Wrap the raw socket so every sendall() reports progress
        # through the module-level progress_callback.
        self.sock = ReportingSocket(self.sock)
class ProgressHTTPHandler(urllib2.HTTPHandler):
    '''HTTPHandler that gives regular progress reports during sending
    of data.
    '''
    def http_open(self, req):
        # Substitute the progress-reporting connection class for the
        # default httplib.HTTPConnection.
        return self.do_open(ProgressHTTPConnection, req)
def set_callback(method):
    '''Sets the callback function to use for progress reports.

    Raises ValueError when the given object is not callable.
    '''
    global progress_callback # IGNORE:W0603
    if hasattr(method, '__call__'):
        progress_callback = method
    else:
        raise ValueError('Callback method must be callable')
def urlopen(url_or_request, callback, body=None):
    '''Opens an URL using the ProgressHTTPHandler.

    callback is installed as the module-wide progress callback before
    the request is issued.
    '''
    set_callback(callback)
    return urllib2.build_opener(ProgressHTTPHandler).open(url_or_request, body)
if __name__ == '__main__':
def upload(progress, finished):
'''Upload progress demo'''
LOG.info("%3.0f - %s" % (progress, finished))
conn = urlopen("http://www.flickr.com/", 'x' * 10245, upload)
data = conn.read()
LOG.info("Read data")
print data[:100].split('\n')[0]
| Python |
'''Exceptions used by the FlickrAPI module.'''
class IllegalArgumentException(ValueError):
    '''Raised when a method is passed an illegal argument.

    Subclasses ValueError, so existing ``except ValueError`` handlers
    also catch it. More specific details will be included in the
    exception message when thrown.
    '''
class FlickrError(Exception):
    '''Raised when a Flickr method fails.

    More specific details will be included in the exception message
    when thrown.
    '''
class CancelUpload(Exception):
    '''Raise this exception in an upload/replace callback function to
    abort the upload.

    This is a control-flow signal rather than an error condition.
    '''
class LockingError(Exception):
    '''Raised when TokenCache cannot acquire a lock within the timeout
    period, or when a lock release is attempted when the lock does not
    belong to this process.
    '''
    # NOTE(review): TokenCache presumably lives elsewhere in this
    # package -- verify against the tokencache module.
| Python |
# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, types, os, string, cPickle, struct, time, re
from stat import ST_DEV, ST_INO
try:
import codecs
except ImportError:
codecs = None
#
# Some constants...
#
# Default ports for the network logging handlers below; the receiving
# end must listen on the matching port.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Well-known UDP port for syslog.
SYSLOG_UDP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.

    Subclasses must provide shouldRollover(record) and doRollover().
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        # Without the codecs module an encoding cannot be honoured.
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # Remembered so subclasses can reopen the file after a rollover.
        self.mode = mode
        self.encoding = encoding
    def emit(self, record):
        """
        Emit a record.
        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler that logs to a file and rolls over to a new file once the
    current one would exceed a size limit.

    With maxBytes > 0 and backupCount >= 1, the active file is always
    the base filename; on rollover it is renamed to ".1" and older
    backups shift to ".2", ".3", ... up to ".backupCount" (the oldest
    is discarded). With maxBytes == 0, rollover never occurs.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        maxBytes -- approximate size limit that triggers rollover;
                    0 disables rotation entirely
        backupCount -- number of numbered backup files to keep
        """
        if maxBytes > 0:
            # Rotation only makes sense in append mode.
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        self.stream.close()
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ... highest index first so
            # nothing is overwritten before it has been moved.
            for index in range(self.backupCount - 1, 0, -1):
                newer = "%s.%d" % (self.baseFilename, index)
                older = "%s.%d" % (self.baseFilename, index + 1)
                if os.path.exists(newer):
                    if os.path.exists(older):
                        os.remove(older)
                    os.rename(newer, older)
            first_backup = self.baseFilename + ".1"
            if os.path.exists(first_backup):
                os.remove(first_backup)
            os.rename(self.baseFilename, first_backup)
        # Truncate and reopen the base file.
        self.mode = 'w'
        self.stream = self._open()
    def shouldRollover(self, record):
        """
        Determine if rollover should occur: would writing this record
        push the file past maxBytes?
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:
            msg = "%s\n" % self.format(record)
            # Seek to the end explicitly: Windows does not honour
            # append-mode positioning the way POSIX does.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0):
        """
        Open *filename* for appending and compute the rollover schedule.

        when -- rollover unit: 'S', 'M', 'H', 'D', 'MIDNIGHT' or
                'W0'-'W6' (case-insensitive; see comment below)
        interval -- number of *when* units between rollovers
        backupCount -- if > 0, keep at most this many rotated files
        utc -- if true, schedule using UTC instead of local time
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = string.upper(when)
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        currentTime = int(time.time())
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)
        # extMatch is used by getFilesToDelete() to recognise rotated files.
        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        self.rolloverAt = self.computeRollover(int(time.time()))
        #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime)
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0
    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.
        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            # Keep the newest backupCount files; everything older goes.
            result = result[:len(result) - self.backupCount]
        return result
    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A FileHandler that watches its file and reopens it when an external
    program (newsyslog, logrotate, ...) moves or removes it.

    A change is detected by comparing the file's device and inode
    numbers with the values seen at the last emit; when they differ (or
    the file is gone) the old stream is closed and the file reopened.
    Intended for Unix: on Windows open log files cannot be moved, and
    stat's ST_INO is always zero, so this handler is not useful there.
    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if os.path.exists(self.baseFilename):
            info = os.stat(self.baseFilename)
            self.dev, self.ino = info[ST_DEV], info[ST_INO]
        else:
            # Sentinel values: force a re-stat on the first emit.
            self.dev, self.ino = -1, -1
    def emit(self, record):
        """
        Emit a record, first reopening the underlying file if it has
        been moved or replaced since the last emit.
        """
        info = None
        if os.path.exists(self.baseFilename):
            info = os.stat(self.baseFilename)
            reopen = (info[ST_DEV] != self.dev) or (info[ST_INO] != self.ino)
        else:
            reopen = 1
        if reopen and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if info is None:
                # The file did not exist a moment ago; _open() created it.
                info = os.stat(self.baseFilename)
            self.dev, self.ino = info[ST_DEV], info[ST_INO]
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    Writes logging records, pickled, to a streaming TCP socket that is
    kept open across calls. If the peer resets the connection, a
    reconnect is attempted on the next emit, with exponential backoff.

    What travels over the wire is the pickle of the LogRecord's
    __dict__, so the receiver does not need the logging module to
    process the event; use makeLogRecord to rebuild a LogRecord there.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        closeOnError starts at 0; when set to 1, a socket error during
        logging silently closes the socket so it is reopened on the
        next call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        # Exponential backoff parameters for reconnect attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0
    def makeSocket(self, timeout=1):
        """
        Factory method so subclasses can choose the socket type; here a
        connected TCP socket.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(sock, 'settimeout'):
            sock.settimeout(timeout)
        sock.connect((self.host, self.port))
        return sock
    def createSocket(self):
        """
        Try to create a socket, backing off exponentially (up to
        retryMax seconds) after each failed attempt. Thanks to Robert
        Olson for the original patch (SF #815911).
        """
        now = time.time()
        # retryTime is None right after a disconnect (try immediately);
        # otherwise wait until the backoff window has elapsed.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None # next time, no delay before trying
        except socket.error:
            # Creation failed: compute the next backoff window.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod
    def send(self, s):
        """
        Send a pickled string to the socket, coping with partial sends
        on a busy network.
        """
        if self.sock is None:
            self.createSocket()
        if not self.sock:
            # Still disconnected: either inside the retry window, or the
            # reconnect attempt failed. Drop the record silently.
            return
        try:
            if hasattr(self.sock, "sendall"):
                self.sock.sendall(s)
            else:
                offset = 0
                while offset < len(s):
                    offset += self.sock.send(s[offset:])
        except socket.error:
            self.sock.close()
            self.sock = None # so we can call createSocket next time
    def makePickle(self, record):
        """
        Pickle the record's __dict__ with a 4-byte big-endian length
        prefix, ready for transmission.
        """
        ei = record.exc_info
        if ei:
            # format() fills record.exc_text with the traceback text;
            # the traceback object itself cannot be pickled.
            dummy = self.format(record)
            record.exc_info = None
        data = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei # for next handler
        return struct.pack(">L", len(data)) + data
    def handleError(self, record):
        """
        Handle an error during logging (most likely a lost connection):
        either quietly close the socket for a later retry, or defer to
        the default error handling.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)
    def emit(self, record):
        """
        Pickle the record and write it to the socket; on socket trouble
        the packet is silently dropped and the connection re-established
        later.
        """
        try:
            self.send(self.makePickle(record))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
class DatagramHandler(SocketHandler):
    """
    Writes pickled LogRecord dictionaries to a UDP socket. As with
    SocketHandler, the receiver only needs makeLogRecord, not the
    logging module, to rebuild the event.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0
    def makeSocket(self):
        """
        Override the base factory to produce a UDP (SOCK_DGRAM) socket
        instead of TCP.
        """
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def send(self, s):
        """
        Send the pickled string as a single datagram. No partial-send
        handling here: UDP neither guarantees delivery nor ordering, so
        splitting would only make things worse.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """
    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG = 0 # system is unusable
    LOG_ALERT = 1 # action must be taken immediately
    LOG_CRIT = 2 # critical conditions
    LOG_ERR = 3 # error conditions
    LOG_WARNING = 4 # warning conditions
    LOG_NOTICE = 5 # normal but significant condition
    LOG_INFO = 6 # informational
    LOG_DEBUG = 7 # debug-level messages
    # facility codes
    LOG_KERN = 0 # kernel messages
    LOG_USER = 1 # random user-level messages
    LOG_MAIL = 2 # mail system
    LOG_DAEMON = 3 # system daemons
    LOG_AUTH = 4 # security/authorization messages
    LOG_SYSLOG = 5 # messages generated internally by syslogd
    LOG_LPR = 6 # line printer subsystem
    LOG_NEWS = 7 # network news subsystem
    LOG_UUCP = 8 # UUCP subsystem
    LOG_CRON = 9 # clock daemon
    LOG_AUTHPRIV = 10 # security/authorization messages (private)
    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16 # reserved for local use
    LOG_LOCAL1 = 17 # reserved for local use
    LOG_LOCAL2 = 18 # reserved for local use
    LOG_LOCAL3 = 19 # reserved for local use
    LOG_LOCAL4 = 20 # reserved for local use
    LOG_LOCAL5 = 21 # reserved for local use
    LOG_LOCAL6 = 22 # reserved for local use
    LOG_LOCAL7 = 23 # reserved for local use
    # string name -> numeric priority, accepted by encodePriority()
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR, # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG, # DEPRECATED
        "warn": LOG_WARNING, # DEPRECATED
        "warning": LOG_WARNING,
        }
    # string name -> numeric facility, accepted by encodePriority()
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH, # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
        }
    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }
    def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
        """
        Initialize a handler.
        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        # A plain string address means a UNIX domain socket path;
        # anything else is treated as a (host, port) pair for UDP.
        if type(address) == types.StringType:
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.formatter = None
    def _connect_unixsocket(self, address):
        # Try a datagram socket first, then fall back to a stream
        # socket: syslog implementations vary in which one they accept.
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)
    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required. this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'
    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if type(facility) == types.StringType:
            facility = self.facility_names[facility]
        if type(priority) == types.StringType:
            priority = self.priority_names[priority]
        # bottom 3 bits carry the priority, the rest the facility
        return (facility << 3) | priority
    def close (self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)
    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")
    def emit(self, record):
        """
        Emit a record.
        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        # NOTE(review): the triple-quoted text below is a bare string
        # literal, i.e. a no-op statement rather than a comment.
        """
        We need to convert record level to lowercase, maybe this will
        change in the future.
        """
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    # connection may have gone stale (e.g. syslogd was
                    # restarted); reconnect once and retry the send
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            else:
                self.socket.sendto(msg, self.address)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None):
        """
        Initialize the handler.
        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument.
        """
        logging.Handler.__init__(self)
        if type(mailhost) == types.TupleType:
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if type(credentials) == types.TupleType:
            self.username, self.password = credentials
        else:
            # NOTE(review): self.password is left unset on this branch;
            # emit() only reads it after checking self.username, so this
            # is safe as long as username is never assigned separately.
            self.username = None
        self.fromaddr = fromaddr
        # allow a single recipient to be passed as a bare string
        if type(toaddrs) == types.StringType:
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
    def getSubject(self, record):
        """
        Determine the subject for the email.
        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject
    # tables used by date_time() to build an RFC-style date header
    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    def date_time(self):
        """
        Return the current date and time formatted for a MIME header.
        Needed for Python 1.5.2 (no email package available)
        """
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)
        return s
    def emit(self, record):
        """
        Emit a record.
        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            try:
                from email.utils import formatdate
            except ImportError:
                # fall back to our own formatter on old Pythons
                formatdate = self.date_time
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            string.join(self.toaddrs, ","),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            # pywin32 is an optional dependency; without it this handler
            # degrades to a no-op (see the ImportError branch below).
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # default to win32service.pyd, located relative to the
                # win32evtlogutil module's own directory
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # logging level -> NT event type used by getEventType()
            self.typemap = {
                logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print "The Python Win32 extensions for NT (service, event "\
                  "logging) appear not to be available."
            self._welu = None
    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1
    def getEventCategory(self, record):
        """
        Return the event category for the record.
        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0
    def getEventType(self, record):
        """
        Return the event type for the record.
        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)
    def emit(self, record):
        """
        Emit a record.
        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                # (the local names 'id' and 'type' shadow builtins here;
                # kept as-is for historical reasons)
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)
    def close(self):
        """
        Clean up this handler.
        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = string.upper(method)
        if method not in ["GET", "POST"]:
            raise ValueError, "method must be GET or POST"
        self.host = host
        self.url = url
        self.method = method
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__
    def emit(self, record):
        """
        Emit a record.
        Send the record to the Web server as an URL-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # GET: append the encoded record to the query string,
                # honouring any '?' already present in the URL
                if (string.find(url, '?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = string.find(host, ":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders()
            if self.method == "POST":
                # POST: the encoded record travels in the request body
                h.send(data)
            h.getreply() #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler that collects logging records in an in-memory list.
    After every append, shouldFlush() is consulted; when it says so,
    flush() is invoked to dispose of the buffered records.
    """
    def __init__(self, capacity):
        """
        Create the handler with the given buffer capacity.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []
    def shouldFlush(self, record):
        """
        Decide whether the buffer needs flushing: true once it has
        reached capacity. Subclasses may override this to implement
        other strategies.
        """
        return len(self.buffer) >= self.capacity
    def emit(self, record):
        """
        Append the record to the buffer, then flush if shouldFlush()
        says it is time.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()
    def flush(self):
        """
        Discard the buffered records. Subclasses override this to do
        something more useful with them.
        """
        self.buffer = []
    def close(self):
        """
        Flush any remaining records, then perform base class cleanup.
        """
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    Buffers records in memory and periodically forwards them to a
    target handler: whenever the buffer fills up, or when a record at
    or above a configured severity arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Set up with a buffer size, the severity that forces a flush,
        and an optional target handler.
        Without a target - given here or later via setTarget() - flushed
        records go nowhere, so a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or when the record's level is at
        or above flushLevel.
        """
        return (len(self.buffer) >= self.capacity or
                record.levelno >= self.flushLevel)
    def setTarget(self, target):
        """
        Point this handler at the given target handler.
        """
        self.target = target
    def flush(self):
        """
        Hand each buffered record to the target, if there is one, and
        clear the buffer. Override for different behaviour.
        """
        if self.target:
            for record in self.buffer:
                self.target.handle(record)
            self.buffer = []
    def close(self):
        """
        Flush, detach the target, and let the base class drop the buffer.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)
| Python |
# Copyright 2001-2009 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python, and influenced by Apache's log4j system.
Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
# Public API of the package: the names exported by "from logging import *".
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
           'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
           'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET',
           'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
           'critical', 'debug', 'disable', 'error',
           'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
           'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
import sys, os, types, time, string, cStringIO, traceback
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
# Module metadata.
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.0.5"
__date__ = "17 February 2009"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
    # point at the .py source so comparisons with frame filenames work
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
# normalize case so the comparison is reliable on case-insensitive systems
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        # sys.exc_traceback is the ancient (Python-2-only) way to reach
        # the current traceback; only used when sys._getframe is absent.
        return sys.exc_traceback.tb_frame.f_back
# Prefer the much cheaper sys._getframe when the interpreter provides it.
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()  # import time; baseline for LogRecord.relativeCreated
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1  # consulted by LogRecord.__init__
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1  # consulted by LogRecord.__init__
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1  # consulted by LogRecord.__init__
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL  # alias
ERROR = 40
WARNING = 30
WARN = WARNING  # alias
INFO = 20
DEBUG = 10
NOTSET = 0
# Bidirectional table: numeric level -> name AND name -> numeric level.
# getLevelName() and addLevelName() rely on both directions being present.
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """
    Map a logging level to its textual representation.
    Thanks to the bidirectional _levelNames table this works both ways
    round: pass one of the predefined numeric levels (or one registered
    via addLevelName) to get its name, or pass a registered name to get
    the number back. Anything unknown yields "Level %s" % level.
    """
    fallback = "Level %s" % level
    return _levelNames.get(level, fallback)
def addLevelName(level, levelName):
    """
    Register 'levelName' as the text for 'level' (and vice versa) in the
    module's level table, used when converting levels to text during
    message formatting.
    """
    _acquireLock()
    try:
        # the table maps in both directions; keep the two entries in sync
        _levelNames.update({level: levelName, levelName: level})
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates Handlers and so
#might arbitrary user threads. Since Handler.__init__() updates the shared
#dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = None  # created lazily by _acquireLock(); an RLock once threading is available
def _acquireLock():
    """
    Acquire the module-level lock that serializes access to shared
    module state, creating it on first use. Pair every call with a
    _releaseLock().
    """
    global _lock
    if _lock is None and thread:
        _lock = threading.RLock()
    if _lock is not None:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock taken by _acquireLock(), if one exists.
    """
    if _lock is not None:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord:
    """
    A LogRecord instance represents an event being logged.
    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and (
            type(args[0]) == types.DictType
        ) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may be None or otherwise unusable; degrade gracefully
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - long(ct)) * 1000  # fractional second, in ms
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.current_thread().name
        else:
            self.thread = None
            self.threadName = None
        if not logMultiprocessing:
            self.processName = None
        elif 'multiprocessing' not in sys.modules:
            # avoid importing multiprocessing just to name the process
            self.processName = 'MainProcess'
        else:
            self.processName = sys.modules['multiprocessing'].current_process().name
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.
        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not hasattr(types, "UnicodeType"): #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if type(msg) not in (types.UnicodeType, types.StringType):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Build a LogRecord whose attributes come from the given dictionary.
    Handy for turning a logging event received over a socket (which
    travels as a dictionary) back into a LogRecord: a blank record is
    created and the dictionary is overlaid onto its attributes.
    """
    record = LogRecord(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter:
    """
    Formatter instances are used to convert a LogRecord to text.
    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%s(message)\\n" is used.
    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:
    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """
    # user-configurable creation-time converter; see formatTime()
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None):
        """
        Initialize the formatter with specified format strings.
        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).
        """
        if fmt:
            self._fmt = fmt
        else:
            self._fmt = "%(message)s"
        self.datefmt = datefmt
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.
        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # ISO8601 date/time with a comma-separated milliseconds suffix
            t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
            s = "%s,%03d" % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.
        This default implementation just uses
        traceback.print_exception()
        """
        sio = cStringIO.StringIO()
        traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def format(self, record):
        """
        Format the specified record as text.
        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string contains
        "%(asctime)", formatTime() is called to format the event time.
        If there is exception information, it is formatted using
        formatException() and appended to the message.
        """
        record.message = record.getMessage()
        # Use the str method rather than the deprecated string-module
        # function; behaviour is identical and it also works for unicode
        # format strings.
        if self._fmt.find("%(asctime)") >= 0:
            record.asctime = self.formatTime(record, self.datefmt)
        s = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                s = s + record.exc_text
        return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()  # plain "%(message)s" output; shared module-wide
class BufferingFormatter:
    """
    Formats a batch of records: a header, one formatted line per
    record, and a footer.
    """
    def __init__(self, linefmt=None):
        """
        Remember the per-record formatter; fall back to the module
        default when none is supplied.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter
    def formatHeader(self, records):
        """
        Text emitted before the records; empty by default.
        """
        return ""
    def formatFooter(self, records):
        """
        Text emitted after the records; empty by default.
        """
        return ""
    def format(self, records):
        """
        Concatenate the header, each formatted record, and the footer.
        An empty batch yields an empty string.
        """
        if not len(records) > 0:
            return ""
        parts = [self.formatHeader(records)]
        for record in records:
            parts.append(self.linefmt.format(record))
        parts.append(self.formatFooter(records))
        return "".join(parts)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter:
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.
    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.
        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)
    def filter(self, record):
        """
        Determine if the specified record is to be logged.
        Is the specified record to be logged? Returns 0 for no, nonzero for
        yes. If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return 1
        elif self.name == record.name:
            return 1
        # Use the str method rather than the deprecated string-module
        # function; behaviour is identical and it also works for unicode
        # logger names.
        elif record.name.find(self.name, 0, self.nlen) != 0:
            return 0
        # the name is a prefix: accept only a true child ("A.B.C"), not a
        # sibling with a longer segment ("A.BB")
        return (record.name[self.nlen] == ".")
class Filterer:
    """
    A base class for loggers and handlers which allows them to share
    common filter-management code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler (at most once).
        """
        if filter not in self.filters:
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler, if present.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns a zero value if a record
        is to be dropped, else non-zero.
        """
        for f in self.filters:
            if not f.filter(record):
                # First veto wins - no need to consult remaining filters.
                return 0
        return 1
#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------
# Module-level registry of all live handlers, flushed/closed by shutdown().
_handlers = {}  #repository of handlers (for flushing when shutdown called)
# Parallel list kept in reverse creation order so shutdown() closes the most
# recently created handlers first.
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty, and registering the handler in the
        module-level handler registry.
        """
        Filterer.__init__(self)
        self.level = level
        self.formatter = None
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            _handlers[self] = 1
            # Insert at the front so shutdown() closes newest handlers first.
            _handlerList.insert(0, self)
        finally:
            _releaseLock()
        self.createLock()
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        # Reentrant lock: emit() may be re-entered on the same thread.
        if thread:
            self.lock = threading.RLock()
        else:
            # No threading support compiled in: locking is a no-op.
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler.
        """
        self.level = level
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError, 'emit must be implemented '\
                                    'by Handler subclasses'
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from the internal registries that
        are flushed/closed when shutdown() is called. Subclasses should
        ensure that this gets called from overridden close() methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            del _handlers[self]
            _handlerList.remove(self)
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions:
            ei = sys.exc_info()
            try:
                traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
            except IOError:
                # stderr itself may be unwritable; give up quietly.
                pass    # see issue 5971
            finally:
                # Break the traceback reference cycle promptly.
                del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    def __init__(self, strm=None):
        """
        Initialize the handler.

        If strm is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if strm is None:
            strm = sys.stderr
        self.stream = strm
    def flush(self):
        """
        Flushes the stream.
        """
        # The stream may have been closed/cleared, or may not support flush.
        if self.stream and hasattr(self.stream, "flush"):
            self.stream.flush()
    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to encode the message before
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            fs = "%s\n"
            if not hasattr(types, "UnicodeType"): #if no unicode support...
                stream.write(fs % msg)
            else:
                try:
                    if (isinstance(msg, unicode) and
                        getattr(stream, 'encoding', None)):
                        # Make the format string unicode too, so the %-join
                        # doesn't coerce msg through the default codec.
                        fs = fs.decode(stream.encoding)
                        try:
                            stream.write(fs % msg)
                        except UnicodeEncodeError:
                            #Printing to terminals sometimes fails. For example,
                            #with an encoding of 'cp1251', the above write will
                            #work if written to a stream opened or wrapped by
                            #the codecs module, but fail when writing to a
                            #terminal even when the codepage is set to cp1251.
                            #An extra encoding step seems to be needed.
                            stream.write((fs % msg).encode(stream.encoding))
                    else:
                        stream.write(fs % msg)
                except UnicodeError:
                    # Last resort: force UTF-8 bytes.
                    stream.write(fs % msg.encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter-exit requests.
            raise
        except:
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        If 'delay' is true, the file is not opened until the first record
        is emitted.
        """
        # An explicit encoding is only honoured when codecs is available.
        if codecs is None:
            encoding = None
        # Keep the absolute path so later re-opens survive a chdir() by the
        # application (derived classes re-open via _open()).
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if not delay:
            StreamHandler.__init__(self, self._open())
        else:
            # Defer opening the file, but still run the Handler constructor
            # to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None

    def close(self):
        """
        Closes the stream.
        """
        stream = self.stream
        if stream:
            self.flush()
            if hasattr(stream, "close"):
                stream.close()
            StreamHandler.close(self)
            self.stream = None

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        if self.encoding is not None:
            return codecs.open(self.baseFilename, self.mode, self.encoding)
        return open(self.baseFilename, self.mode)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder:
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        # A dict is used purely as an ordered-less set of child loggers.
        self.loggerMap = {alogger: None}

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        # setdefault only inserts when the logger is not already a child.
        self.loggerMap.setdefault(alogger, None)
#
# Determine which class to use when instantiating loggers.  This is set to
# Logger later in the module (once the class exists) and can be replaced
# by applications via setLoggerClass().
#
_loggerClass = None
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()

    Raises TypeError if klass is not Logger itself or a subclass of it.
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError, "logger not derived from logging.Logger: " + \
                            klass.__name__
    global _loggerClass
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger (the class
    most recently passed to setLoggerClass(), or Logger by default).
    """
    return _loggerClass
class Manager:
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        # Global "disable everything below this level" threshold.
        self.disable = 0
        self.emittedNoHandlerWarning = 0
        # Maps dotted name -> Logger or PlaceHolder.
        self.loggerDict = {}
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # Promote the placeholder to a real logger.
                    ph = rv
                    rv = _loggerClass(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = _loggerClass(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        # Use str.rfind rather than the deprecated string.rfind function;
        # the semantics are identical.
        i = name.rfind(".")
        rv = None
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    # Found the nearest existing ancestor logger.
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            # Re-parent only loggers whose current parent is outside the
            # new logger's subtree (i.e. not already below alogger).
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = level
        # parent is filled in later by Manager._fixupParents().
        self.parent = None
        self.propagate = 1
        self.handlers = []
        self.disabled = 0
    def setLevel(self, level):
        """
        Set the logging level of this logger.
        """
        self.level = level
    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)
    # Backwards-compatible alias.
    warn = warning
    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)
    def exception(self, msg, *args):
        """
        Convenience method for logging an ERROR with exception information.
        """
        self.error(*((msg,) + args), **{'exc_info': 1})
    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)
    # Backwards-compatible alias.
    fatal = critical
    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if type(level) != types.IntType:
            if raiseExceptions:
                raise TypeError, "level must be an integer"
            else:
                # Silently drop the call when exceptions are suppressed.
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # Skip frames belonging to this logging module itself.
            if filename == _srcfile:
                f = f.f_back
                continue
            rv = (filename, f.f_lineno, co.co_name)
            break
        return rv
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra is not None:
            for key in extra:
                # Refuse to clobber attributes the formatting machinery owns.
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller throws an
            #exception. We trap it here so that IronPython can use logging.
            try:
                fn, lno, func = self.findCaller()
            except ValueError:
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else:
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            # A true non-tuple value (e.g. exc_info=1) means "use the
            # currently handled exception".
            if type(exc_info) != types.TupleType:
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
        self.handle(record)
    def handle(self, record):
        """
        Call the handlers for the specified record.

        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if (not self.disabled) and self.filter(record):
            self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.
        """
        if not (hdlr in self.handlers):
            self.handlers.append(hdlr)
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        if hdlr in self.handlers:
            #hdlr.close()
            # Hold the handler's I/O lock so we don't unhook it mid-emit.
            hdlr.acquire()
            try:
                self.handlers.remove(hdlr)
            finally:
                hdlr.release()
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        # Warn (once per manager) when a record had nowhere to go.
        if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.

        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        # The manager-wide disable threshold overrides everything.
        if self.manager.disable >= level:
            return 0
        return level >= self.getEffectiveLevel()
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        Logger.__init__(self, "root", level)
# Default concrete class used by Manager.getLogger(); replaceable via
# setLoggerClass().
_loggerClass = Logger
class LoggerAdapter:
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """
    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra

    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        kwargs["extra"] = self.extra
        return msg, kwargs

    def _forward(self, method, msg, args, kwargs):
        # Shared delegation path: inject context via process(), then hand
        # off to the bound method on the underlying logger.
        msg, kwargs = self.process(msg, kwargs)
        method(msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        self._forward(self.logger.debug, msg, args, kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        self._forward(self.logger.info, msg, args, kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        self._forward(self.logger.warning, msg, args, kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        self._forward(self.logger.error, msg, args, kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        Delegate an exception call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        # Force inclusion of the active exception information.
        kwargs["exc_info"] = 1
        self.logger.error(msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        self._forward(self.logger.critical, msg, args, kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.log(level, msg, *args, **kwargs)
# The single module-level root logger, defaulting to WARNING, wired into the
# one Manager instance that owns the logger hierarchy.
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
# Format string used by basicConfig() when the caller supplies none.
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.

    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.

    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.
    """
    if root.handlers:
        # Already configured - leave everything untouched.
        return
    filename = kwargs.get("filename")
    if filename:
        hdlr = FileHandler(filename, kwargs.get("filemode", 'a'))
    else:
        # 'stream' is honoured only when no filename was given.
        hdlr = StreamHandler(kwargs.get("stream"))
    hdlr.setFormatter(Formatter(kwargs.get("format", BASIC_FORMAT),
                                kwargs.get("datefmt", None)))
    root.addHandler(hdlr)
    level = kwargs.get("level")
    if level is not None:
        root.setLevel(level)
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name:
        return root
    return Logger.manager.getLogger(name)
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

# Backwards-compatible alias.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args):
    """
    Log a message with severity 'ERROR' on the root logger, with the
    currently handled exception information attached.
    """
    error(msg, *args, **{'exc_info': 1})
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)

# Backwards-compatible alias.
warn = warning
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger.
    """
    # Lazily configure a default handler on first use.
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls less severe than 'level'.
    """
    # Stored on the root manager; Logger.isEnabledFor() consults this value
    # before the logger's own effective level.
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.
    """
    # NOTE: binding the module-level _handlerList as a default argument is
    # deliberate - it keeps the list reachable even during interpreter
    # teardown, when module globals may already have been cleared.
    for h in handlerList[:]:    # iterate a copy; h.close() mutates the list
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h.flush()
            h.close()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
try:
    import atexit
    atexit.register(shutdown)
except ImportError: # for Python versions < 2.0
    # No atexit module available: wrap sys.exit so that shutdown() still
    # runs whenever the application exits via sys.exit().
    def exithook(status, old_exit=sys.exit):
        try:
            shutdown()
        finally:
            old_exit(status)
    sys.exit = exithook
# ---------------------------------------------------------------------------
# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2008 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, string, socket, struct, os, traceback, types
try:
import thread
import threading
except ImportError:
thread = None
from SocketServer import ThreadingTCPServer, StreamRequestHandler
# Port on which listen() accepts new logging configurations.
DEFAULT_LOGGING_CONFIG_PORT = 9030
# errno value indicating that the peer reset the connection; differs by OS.
if sys.platform == "win32":
    RESET_ERROR = 10054 #WSAECONNRESET
else:
    RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=1):
    """
    Read the logging configuration from a ConfigParser-format file.

    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).

    In versions of ConfigParser which have the readfp method [typically
    shipped in 2.x versions of Python], you can pass in a file-like object
    rather than a filename, in which case the file-like object will be read
    using readfp.
    """
    import ConfigParser

    cp = ConfigParser.ConfigParser(defaults)
    # Accept a file-like object when the parser supports reading from one.
    if hasattr(fname, 'readline') and hasattr(cp, 'readfp'):
        cp.readfp(fname)
    else:
        cp.read(fname)

    formatters = _create_formatters(cp)

    # Critical section: swap in the new handler/logger configuration
    # atomically with respect to other threads using the logging module.
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = string.split(name, '.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: string.strip(x), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp.get("formatters", "keys")
if not len(flist):
return {}
flist = string.split(flist, ",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
opts = cp.options(sectname)
if "format" in opts:
fs = cp.get(sectname, "format", 1)
else:
fs = None
if "datefmt" in opts:
dfs = cp.get(sectname, "datefmt", 1)
else:
dfs = None
c = logging.Formatter
if "class" in opts:
class_name = cp.get(sectname, "class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
    """Install and return handlers.

    Reads the "handlers" section of *cp* for the comma-separated handler
    names, instantiates each from its "handler_<name>" section and returns
    a dict mapping handler name -> handler instance.  *formatters* is the
    dict produced by _create_formatters().
    """
    hlist = cp.get("handlers", "keys")
    if not len(hlist):
        return {}
    hlist = string.split(hlist, ",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        sectname = "handler_%s" % hand
        klass = cp.get(sectname, "class")
        opts = cp.options(sectname)
        if "formatter" in opts:
            fmt = cp.get(sectname, "formatter")
        else:
            fmt = ""
        # SECURITY NOTE: the "class" and "args" config values are passed to
        # eval() below, so the configuration file must come from a trusted
        # source - arbitrary code can be executed via a crafted config.
        try:
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            # Not addressable inside the logging namespace; resolve as a
            # dotted import path instead.
            klass = _resolve(klass)
        args = cp.get(sectname, "args")
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in opts:
            level = cp.get(sectname, "level")
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            if "target" in opts:
                target = cp.get(sectname,"target")
            else:
                target = ""
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _install_loggers(cp, handlers, disable_existing_loggers):
    """Create and install loggers"""
    # configure the root first
    llist = cp.get("loggers", "keys")
    llist = string.split(llist, ",")
    llist = map(lambda x: string.strip(x), llist)
    # "root" is configured separately below, via the [logger_root] section.
    llist.remove("root")
    sectname = "logger_root"
    root = logging.root
    log = root
    opts = cp.options(sectname)
    if "level" in opts:
        level = cp.get(sectname, "level")
        log.setLevel(logging._levelNames[level])
    # Drop the root logger's current handlers before attaching new ones
    # (iterate over a copy, since removeHandler mutates the list).
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = cp.get(sectname, "handlers")
    if len(hlist):
        hlist = string.split(hlist, ",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])
    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = root.manager.loggerDict.keys()
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort()
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        sectname = "logger_%s" % log
        qn = cp.get(sectname, "qualname")
        opts = cp.options(sectname)
        if "propagate" in opts:
            propagate = cp.getint(sectname, "propagate")
        else:
            propagate = 1
        logger = logging.getLogger(qn)
        if qn in existing:
            # Because 'existing' is sorted, children of qn (names starting
            # with "qn.") immediately follow it; remember them so they are
            # reset rather than disabled in the cleanup loop below.
            i = existing.index(qn)
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            i = i + 1 # look at the entry after qn
            while (i < num_existing) and (existing[i][:pflen] == prefixed):
                child_loggers.append(existing[i])
                i = i + 1
            existing.remove(qn)
        if "level" in opts:
            level = cp.get(sectname, "level")
            logger.setLevel(logging._levelNames[level])
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = cp.get(sectname, "handlers")
        if len(hlist):
            hlist = string.split(hlist, ",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])
    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    for log in existing:
        logger = root.manager.loggerDict[log]
        if log in child_loggers:
            # Child of a configured logger: reset it so it inherits from
            # its (re)configured parent instead of being silenced.
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = 1
        elif disable_existing_loggers:
            logger.disabled = 1
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.
    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().

    NOTE(review): the received configuration is applied without any
    authentication, so this should only be exposed on trusted networks.
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"
    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.
        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.
            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                # Read the 4-byte big-endian length prefix first.
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    # recv may return fewer bytes than asked; loop until the
                    # whole payload has arrived.
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    try:
                        fileConfig(file)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        # A broken config should not kill the listener.
                        traceback.print_exc()
                    os.remove(file)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    # Connection resets are expected when clients disconnect;
                    # anything else is re-raised.
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise
    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """
        allow_reuse_address = 1
        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            # self.abort is read/written from different threads; guard it
            # with the logging module lock.
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                # Poll with a timeout so the abort flag is re-checked at
                # least once per second even with no incoming requests.
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
    def serve(rcvr, hdlr, port):
        # Thread body: publish the server via the module-global _listener
        # (used by stopListening()), then block serving requests.
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()
    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    # Acquire the lock BEFORE examining _listener so a concurrent serve()
    # thread cannot race the check, and use try/finally so the module
    # lock is always released even if an exception occurs while aborting.
    # (The original checked _listener unlocked and leaked the lock on error.)
    logging._acquireLock()
    try:
        if _listener:
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''A FlickrAPI interface.
The main functionality can be found in the `flickrapi.FlickrAPI`
class.
See `the FlickrAPI homepage`_ for more info.
.. _`the FlickrAPI homepage`: http://stuvel.eu/projects/flickrapi
'''
__version__ = '1.4.2'
__all__ = ('FlickrAPI', 'IllegalArgumentException', 'FlickrError',
'CancelUpload', 'XMLNode', 'set_log_level', '__version__')
__author__ = u'Sybren St\u00fcvel'.encode('utf-8')
# Copyright (c) 2007 by the respective coders, see
# http://www.stuvel.eu/projects/flickrapi
#
# This code is subject to the Python licence, as can be read on
# http://www.python.org/download/releases/2.5.2/license/
#
# For those without an internet connection, here is a summary. When this
# summary clashes with the Python licence, the latter will be applied.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import urllib
import urllib2
import os.path
import logging
import copy
import webbrowser
# Smartly import hashlib and fall back on md5
try: from hashlib import md5
except ImportError: from md5 import md5
from tokencache import TokenCache, SimpleTokenCache, LockingTokenCache
from xmlnode import XMLNode
from multipart import Part, Multipart, FilePart
from exceptions import *
from cache import SimpleCache
import reportinghttp
# Module-wide logger. basicConfig() attaches a default stderr handler so
# library log output is visible even when the embedding application has
# not configured logging itself.
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def make_utf8(dictionary):
    '''Returns a copy of ``dictionary`` in which every Unicode value is
    encoded to a UTF-8 byte string and every other value is converted
    with str(). The original dictionary is left untouched.
    '''
    encoded = {}
    for key, value in dictionary.iteritems():
        if isinstance(value, unicode):
            encoded[key] = value.encode('utf-8')
        else:
            encoded[key] = str(value)
    return encoded
def debug(method):
    '''Decorator that logs each call to ``method`` together with its
    arguments and result.
    Applying it switches the module logger to DEBUG level.
    '''
    LOG.setLevel(logging.DEBUG)
    def wrapper(*args, **kwargs):
        LOG.debug("Call: %s(%s, %s)" % (method.__name__, args,
            kwargs))
        outcome = method(*args, **kwargs)
        LOG.debug("\tResult: %s" % outcome)
        return outcome
    return wrapper
# REST parsers, {format: parser_method, ...}. Fill by using the
# @rest_parser(format) function decorator
rest_parsers = {}
def rest_parser(format):
    '''Decorator factory: registers the decorated function as the parser
    for the given REST response ``format`` returned by Flickr.
    '''
    def register(parser_method):
        rest_parsers[format] = parser_method
        return parser_method
    return register
def require_format(required_format):
    '''Decorator factory: the wrapped method raises ValueError when called
    while the instance's ``default_format`` differs from ``required_format``.
    '''
    def decorator(method):
        def decorated(self, *args, **kwargs):
            # Guard clause: refuse to run under the wrong format.
            if self.default_format != required_format:
                msg = 'Function %s requires that you use ' \
                      'ElementTree ("etree") as the communication format, ' \
                      'while the current format is set to "%s".'
                raise ValueError(msg % (method.func_name, self.default_format))
            return method(self, *args, **kwargs)
        return decorated
    return decorator
class FlickrAPI(object):
"""Encapsulates Flickr functionality.
Example usage::
flickr = flickrapi.FlickrAPI(api_key)
photos = flickr.photos_search(user_id='73509078@N00', per_page='10')
sets = flickr.photosets_getList(user_id='73509078@N00')
"""
flickr_host = "api.flickr.com"
flickr_rest_form = "/services/rest/"
flickr_auth_form = "/services/auth/"
flickr_upload_form = "/services/upload/"
flickr_replace_form = "/services/replace/"
def __init__(self, api_key, secret=None, username=None,
token=None, format='etree', store_token=True,
cache=False):
"""Construct a new FlickrAPI instance for a given API key
and secret.
api_key
The API key as obtained from Flickr.
secret
The secret belonging to the API key.
username
Used to identify the appropriate authentication token for a
certain user.
token
If you already have an authentication token, you can give
it here. It won't be stored on disk by the FlickrAPI instance.
format
The response format. Use either "xmlnode" or "etree" to get a parsed
response, or use any response format supported by Flickr to get an
unparsed response from method calls. It's also possible to pass the
``format`` parameter on individual calls.
store_token
Disables the on-disk token cache if set to False (default is True).
Use this to ensure that tokens aren't read nor written to disk, for
example in web applications that store tokens in cookies.
cache
Enables in-memory caching of FlickrAPI calls - set to ``True`` to
use. If you don't want to use the default settings, you can
instantiate a cache yourself too:
>>> f = FlickrAPI(api_key='123')
>>> f.cache = SimpleCache(timeout=5, max_entries=100)
"""
self.api_key = api_key
self.secret = secret
self.default_format = format
self.__handler_cache = {}
if token:
# Use a memory-only token cache
self.token_cache = SimpleTokenCache()
self.token_cache.token = token
elif not store_token:
# Use an empty memory-only token cache
self.token_cache = SimpleTokenCache()
else:
# Use a real token cache
self.token_cache = TokenCache(api_key, username)
if cache:
self.cache = SimpleCache()
else:
self.cache = None
def __repr__(self):
'''Returns a string representation of this object.'''
return '[FlickrAPI for key "%s"]' % self.api_key
__str__ = __repr__
def trait_names(self):
'''Returns a list of method names as supported by the Flickr
API. Used for tab completion in IPython.
'''
try:
rsp = self.reflection_getMethods(format='etree')
except FlickrError:
return None
def tr(name):
'''Translates Flickr names to something that can be called
here.
>>> tr(u'flickr.photos.getInfo')
u'photos_getInfo'
'''
return name[7:].replace('.', '_')
return [tr(m.text) for m in rsp.getiterator('method')]
@rest_parser('xmlnode')
def parse_xmlnode(self, rest_xml):
'''Parses a REST XML response from Flickr into an XMLNode object.'''
rsp = XMLNode.parse(rest_xml, store_xml=True)
if rsp['stat'] == 'ok':
return rsp
err = rsp.err[0]
raise FlickrError(u'Error: %(code)s: %(msg)s' % err)
@rest_parser('etree')
def parse_etree(self, rest_xml):
'''Parses a REST XML response from Flickr into an ElementTree object.'''
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
# For Python 2.4 compatibility:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
raise ImportError("You need to install "
"ElementTree for using the etree format")
rsp = ElementTree.fromstring(rest_xml)
if rsp.attrib['stat'] == 'ok':
return rsp
err = rsp.find('err')
raise FlickrError(u'Error: %(code)s: %(msg)s' % err.attrib)
def sign(self, dictionary):
"""Calculate the flickr signature for a set of params.
data
a hash of all the params and values to be hashed, e.g.
``{"api_key":"AAAA", "auth_token":"TTTT", "key":
u"value".encode('utf-8')}``
"""
data = [self.secret]
for key in sorted(dictionary.keys()):
data.append(key)
datum = dictionary[key]
if isinstance(datum, unicode):
raise IllegalArgumentException("No Unicode allowed, "
"argument %s (%r) should have been UTF-8 by now"
% (key, datum))
data.append(datum)
md5_hash = md5(''.join(data))
return md5_hash.hexdigest()
def encode_and_sign(self, dictionary):
'''URL encodes the data in the dictionary, and signs it using the
given secret, if a secret was given.
'''
dictionary = make_utf8(dictionary)
if self.secret:
dictionary['api_sig'] = self.sign(dictionary)
return urllib.urlencode(dictionary)
    def __getattr__(self, attrib):
        """Handle all the regular Flickr API calls.
        Example::
            flickr.auth_getFrob(api_key="AAAAAA")
            etree = flickr.photos_getInfo(photo_id='1234')
            etree = flickr.photos_getInfo(photo_id='1234', format='etree')
            xmlnode = flickr.photos_getInfo(photo_id='1234', format='xmlnode')
            json = flickr.photos_getInfo(photo_id='1234', format='json')
        """
        # Refuse to act as a proxy for unimplemented special methods
        if attrib.startswith('_'):
            raise AttributeError("No such attribute '%s'" % attrib)
        # Construct the method name and see if it's cached
        method = "flickr." + attrib.replace("_", ".")
        if method in self.__handler_cache:
            return self.__handler_cache[method]
        def handler(**args):
            '''Dynamically created handler for a Flickr API call'''
            # A token without a secret cannot produce a valid signature.
            if self.token_cache.token and not self.secret:
                raise ValueError("Auth tokens cannot be used without "
                                 "API secret")
            # Set some defaults
            defaults = {'method': method,
                        'auth_token': self.token_cache.token,
                        'api_key': self.api_key,
                        'format': self.default_format}
            args = self.__supply_defaults(args, defaults)
            # Dispatch through the parser wrapper so the response comes
            # back in the requested format.
            return self.__wrap_in_parser(self.__flickr_call,
                    parse_format=args['format'], **args)
        # Cache the closure so repeated attribute lookups reuse it.
        handler.method = method
        self.__handler_cache[method] = handler
        return handler
def __supply_defaults(self, args, defaults):
'''Returns a new dictionary containing ``args``, augmented with defaults
from ``defaults``.
Defaults can be overridden, or completely removed by setting the
appropriate value in ``args`` to ``None``.
>>> f = FlickrAPI('123')
>>> f._FlickrAPI__supply_defaults(
... {'foo': 'bar', 'baz': None, 'token': None},
... {'baz': 'foobar', 'room': 'door'})
{'foo': 'bar', 'room': 'door'}
'''
result = args.copy()
for key, default_value in defaults.iteritems():
# Set the default if the parameter wasn't passed
if key not in args:
result[key] = default_value
for key, value in result.copy().iteritems():
# You are able to remove a default by assigning None, and we can't
# pass None to Flickr anyway.
if result[key] is None:
del result[key]
return result
def __flickr_call(self, **kwargs):
'''Performs a Flickr API call with the given arguments. The method name
itself should be passed as the 'method' parameter.
Returns the unparsed data from Flickr::
data = self.__flickr_call(method='flickr.photos.getInfo',
photo_id='123', format='rest')
'''
LOG.debug("Calling %s" % kwargs)
post_data = self.encode_and_sign(kwargs)
# Return value from cache if available
if self.cache and self.cache.get(post_data):
return self.cache.get(post_data)
url = "http://" + self.flickr_host + self.flickr_rest_form
flicksocket = urllib2.urlopen(url, post_data)
reply = flicksocket.read()
flicksocket.close()
# Store in cache, if we have one
if self.cache is not None:
self.cache.set(post_data, reply)
return reply
def __wrap_in_parser(self, wrapped_method, parse_format, *args, **kwargs):
'''Wraps a method call in a parser.
The parser will be looked up by the ``parse_format`` specifier. If there
is a parser and ``kwargs['format']`` is set, it's set to ``rest``, and
the response of the method is parsed before it's returned.
'''
# Find the parser, and set the format to rest if we're supposed to
# parse it.
if parse_format in rest_parsers and 'format' in kwargs:
kwargs['format'] = 'rest'
LOG.debug('Wrapping call %s(self, %s, %s)' % (wrapped_method, args,
kwargs))
data = wrapped_method(*args, **kwargs)
# Just return if we have no parser
if parse_format not in rest_parsers:
return data
# Return the parsed data
parser = rest_parsers[parse_format]
return parser(self, data)
def auth_url(self, perms, frob):
"""Return the authorization URL to get a token.
This is the URL the app will launch a browser toward if it
needs a new token.
perms
"read", "write", or "delete"
frob
picked up from an earlier call to FlickrAPI.auth_getFrob()
"""
encoded = self.encode_and_sign({
"api_key": self.api_key,
"frob": frob,
"perms": perms})
return "http://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def web_login_url(self, perms):
'''Returns the web login URL to forward web users to.
perms
"read", "write", or "delete"
'''
encoded = self.encode_and_sign({
"api_key": self.api_key,
"perms": perms})
return "http://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def __extract_upload_response_format(self, kwargs):
'''Returns the response format given in kwargs['format'], or
the default format if there is no such key.
If kwargs contains 'format', it is removed from kwargs.
If the format isn't compatible with Flickr's upload response
type, a FlickrError exception is raised.
'''
# Figure out the response format
format = kwargs.get('format', self.default_format)
if format not in rest_parsers and format != 'rest':
raise FlickrError('Format %s not supported for uploading '
'photos' % format)
# The format shouldn't be used in the request to Flickr.
if 'format' in kwargs:
del kwargs['format']
return format
    def upload(self, filename, callback=None, **kwargs):
        """Upload a file to flickr.
        Be extra careful you spell the parameters correctly, or you will
        get a rather cryptic "Invalid Signature" error on the upload!
        Supported parameters:
        filename
            name of a file to upload
        callback
            method that gets progress reports
        title
            title of the photo
        description
            description a.k.a. caption of the photo
        tags
            space-delimited list of tags, ``'''tag1 tag2 "long
            tag"'''``
        is_public
            "1" or "0" for a public resp. private photo
        is_friend
            "1" or "0" whether friends can see the photo while it's
            marked as private
        is_family
            "1" or "0" whether family can see the photo while it's
            marked as private
        content_type
            Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
        hidden
            Set to "1" to keep the photo in global search results, "2"
            to hide from public searches.
        format
            The response format. You can only choose between the
            parsed responses or 'rest' for plain REST.
        The callback method should take two parameters:
        ``def callback(progress, done)``
        Progress is a number between 0 and 100, and done is a boolean
        that's true only when the upload is done.
        """
        # Defer to the shared form-upload helper, which signs the request
        # and streams the multipart body.
        return self.__upload_to_form(self.flickr_upload_form,
                filename, callback, **kwargs)
    def replace(self, filename, photo_id, callback=None, **kwargs):
        """Replace an existing photo.
        Supported parameters:
        filename
            name of a file to upload
        photo_id
            the ID of the photo to replace
        callback
            method that gets progress reports
        format
            The response format. You can only choose between the
            parsed responses or 'rest' for plain REST. Defaults to the
            format passed to the constructor.
        The callback parameter has the same semantics as described in the
        ``upload`` function.
        """
        if not photo_id:
            raise IllegalArgumentException("photo_id must be specified")
        # The photo_id travels as an ordinary form field of the upload.
        kwargs['photo_id'] = photo_id
        return self.__upload_to_form(self.flickr_replace_form,
                filename, callback, **kwargs)
    def __upload_to_form(self, form_url, filename, callback, **kwargs):
        '''Uploads a photo - can be used to either upload a new photo
        or replace an existing one.
        form_url must be either ``FlickrAPI.flickr_replace_form`` or
        ``FlickrAPI.flickr_upload_form``.
        '''
        if not filename:
            raise IllegalArgumentException("filename must be specified")
        # Uploading always requires an authenticated user.
        if not self.token_cache.token:
            raise IllegalArgumentException("Authentication is required")
        # Figure out the response format
        format = self.__extract_upload_response_format(kwargs)
        # Update the arguments with the ones the user won't have to supply
        arguments = {'auth_token': self.token_cache.token,
                     'api_key': self.api_key}
        arguments.update(kwargs)
        # Convert to UTF-8 if an argument is an Unicode string
        kwargs = make_utf8(arguments)
        # Sign AFTER the UTF-8 conversion, so the signature matches the
        # bytes that are actually sent.
        if self.secret:
            kwargs["api_sig"] = self.sign(kwargs)
        url = "http://%s%s" % (self.flickr_host, form_url)
        # construct POST data: one Part per form field, plus the file itself.
        body = Multipart()
        for arg, value in kwargs.iteritems():
            part = Part({'name': arg}, value)
            body.attach(part)
        filepart = FilePart({'name': 'photo'}, filename, 'image/jpeg')
        body.attach(filepart)
        return self.__wrap_in_parser(self.__send_multipart, format,
                url, body, callback)
    def __send_multipart(self, url, body, progress_callback=None):
        '''Sends a Multipart object to an URL.
        Returns the resulting unparsed XML from Flickr.
        '''
        LOG.debug("Uploading to %s" % url)
        request = urllib2.Request(url)
        request.add_data(str(body))
        (header, value) = body.header()
        request.add_header(header, value)
        if not progress_callback:
            # Just use urllib2 if there is no progress callback
            # function
            response = urllib2.urlopen(request)
            return response.read()
        def __upload_callback(percentage, done, seen_header=[False]):
            '''Filters out the progress report on the HTTP header'''
            # The mutable default argument is deliberate: the single shared
            # list acts as per-upload state across callback invocations.
            # Call the user's progress callback when we've filtered
            # out the HTTP header
            if seen_header[0]:
                return progress_callback(percentage, done)
            # Remember the first time we hit 'done'.
            if done:
                seen_header[0] = True
        response = reportinghttp.urlopen(request, __upload_callback)
        return response.read()
def validate_frob(self, frob, perms):
'''Lets the user validate the frob by launching a browser to
the Flickr website.
'''
auth_url = self.auth_url(perms, frob)
try:
browser = webbrowser.get()
except webbrowser.Error:
if 'BROWSER' not in os.environ:
raise
browser = webbrowser.GenericBrowser(os.environ['BROWSER'])
browser.open(auth_url, True, True)
    def get_token_part_one(self, perms="read", auth_callback=None):
        """Get a token either from the cache, or make a new one from
        the frob.
        This first attempts to find a token in the user's token cache
        on disk. If that token is present and valid, it is returned by
        the method.
        If that fails (or if the token is no longer valid based on
        flickr.auth.checkToken) a new frob is acquired. If an auth_callback
        method has been specified it will be called. Otherwise the frob is
        validated by having the user log into flickr (with a browser).
        To get a proper token, follow these steps:
        - Store the result value of this method call
        - Give the user a way to signal the program that he/she
        has authorized it, for example show a button that can be
        pressed.
        - Wait for the user to signal the program that the
        authorization was performed, but only if there was no
        cached token.
        - Call flickrapi.get_token_part_two(...) and pass it the
        result value you stored.
        The newly minted token is then cached locally for the next
        run.
        perms
            "read", "write", or "delete"
        auth_callback
            method to be called if authorization is needed. When not
            passed, ``self.validate_frob(...)`` is called. You can
            call this method yourself from the callback method too.
            If authorization should be blocked, pass
            ``auth_callback=False``.
            The auth_callback method should take ``(frob, perms)`` as
            parameters.
        An example::
        (token, frob) = flickr.get_token_part_one(perms='write')
        if not token: raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
        Also take a look at ``authenticate_console(perms)``.
        """
        # Check our auth_callback parameter for correctness before we
        # do anything
        authenticate = self.validate_frob
        if auth_callback is not None:
            if hasattr(auth_callback, '__call__'):
                # use the provided callback function
                authenticate = auth_callback
            elif auth_callback is False:
                # False explicitly forbids interactive authentication.
                authenticate = None
            else:
                # Any non-callable non-False value is invalid
                raise ValueError('Invalid value for auth_callback: %s'
                        % auth_callback)
        # see if we have a saved token
        token = self.token_cache.token
        frob = None
        # see if it's valid
        if token:
            LOG.debug("Trying cached token '%s'" % token)
            try:
                rsp = self.auth_checkToken(auth_token=token, format='xmlnode')
                # see if we have enough permissions; the permission levels
                # form an ordering read < write < delete.
                tokenPerms = rsp.auth[0].perms[0].text
                if tokenPerms == "read" and perms != "read": token = None
                elif tokenPerms == "write" and perms == "delete": token = None
            except FlickrError:
                # Flickr rejected the token: drop it from the cache too.
                LOG.debug("Cached token invalid")
                self.token_cache.forget()
                token = None
        # get a new token if we need one
        if not token:
            # If we can't authenticate, it's all over.
            if not authenticate:
                raise FlickrError('Authentication required but '
                        'blocked using auth_callback=False')
            # get the frob
            LOG.debug("Getting frob for new token")
            rsp = self.auth_getFrob(auth_token=None, format='xmlnode')
            frob = rsp.frob[0].text
            authenticate(frob, perms)
        # Either (token, None) for a valid cached token, or (None, frob)
        # when the user still has to authorize the application.
        return (token, frob)
    def get_token_part_two(self, (token, frob)):
        """Part two of getting a token, see ``get_token_part_one(...)`` for details."""
        # NOTE: the tuple parameter "(token, frob)" is Python 2-only syntax;
        # callers pass the pair returned by get_token_part_one().
        # If a valid token was obtained in the past, we're done
        if token:
            LOG.debug("get_token_part_two: no need, token already there")
            self.token_cache.token = token
            return token
        LOG.debug("get_token_part_two: getting a new token for frob '%s'" % frob)
        return self.get_token(frob)
def get_token(self, frob):
'''Gets the token given a certain frob. Used by ``get_token_part_two`` and
by the web authentication method.
'''
# get a token
rsp = self.auth_getToken(frob=frob, auth_token=None, format='xmlnode')
token = rsp.auth[0].token[0].text
LOG.debug("get_token: new token '%s'" % token)
# store the auth info for next time
self.token_cache.token = token
return token
    def authenticate_console(self, perms='read', auth_callback=None):
        '''Performs the authentication, assuming a console program.
        Gets the token, if needed starts the browser and waits for the user to
        press ENTER before continuing.
        See ``get_token_part_one(...)`` for an explanation of the
        parameters.
        '''
        (token, frob) = self.get_token_part_one(perms, auth_callback)
        # Block on stdin until the user confirms the browser authorization
        # (raw_input is the Python 2 builtin).
        if not token: raw_input("Press ENTER after you authorized this program")
        self.get_token_part_two((token, frob))
    @require_format('etree')
    def __data_walker(self, method, **params):
        '''Calls 'method' with page=0, page=1 etc. until the total
        number of pages has been visited. Yields the photos
        returned.
        Assumes that ``method(page=n, **params).findall('*/photos')``
        results in a list of photos, and that the toplevel element of
        the result contains a 'pages' attribute with the total number
        of pages.
        This is a generator: pages are fetched lazily, one Flickr call
        per page, as the caller iterates.
        '''
        page = 1
        total = 1 # We don't know that yet, update when needed
        while page <= total:
            # Fetch a single page of photos
            LOG.debug('Calling %s(page=%i of %i, %s)' %
                    (method.func_name, page, total, params))
            rsp = method(page=page, **params)
            # The first child (e.g. <photos> or <photoset>) carries the
            # 'pages' attribute with the real page count.
            photoset = rsp.getchildren()[0]
            total = int(photoset.get('pages'))
            photos = rsp.findall('*/photo')
            # Yield each photo
            for photo in photos:
                yield photo
            # Ready to get the next page
            page += 1
    @require_format('etree')
    def walk_set(self, photoset_id, per_page=50, **kwargs):
        '''walk_set(self, photoset_id, per_page=50, ...) -> \
        generator, yields each photo in a single set.
        :Parameters:
        photoset_id
            the photoset ID
        per_page
            the number of photos that are fetched in one call to
            Flickr.
        Other arguments can be passed, as documented in the
        flickr.photosets.getPhotos_ API call in the Flickr API
        documentation, except for ``page`` because all pages will be
        returned eventually.
        .. _flickr.photosets.getPhotos:
        http://www.flickr.com/services/api/flickr.photosets.getPhotos.html
        Uses the ElementTree format, incompatible with other formats.
        '''
        # All pagination is handled by the shared data walker.
        return self.__data_walker(self.photosets_getPhotos,
                photoset_id=photoset_id, per_page=per_page, **kwargs)
    @require_format('etree')
    def walk(self, per_page=50, **kwargs):
        '''walk(self, user_id=..., tags=..., ...) -> generator, \
        yields each photo in a search query result
        Accepts the same parameters as flickr.photos.search_ API call,
        except for ``page`` because all pages will be returned
        eventually.
        .. _flickr.photos.search:
        http://www.flickr.com/services/api/flickr.photos.search.html
        Also see `walk_set`.
        '''
        # All pagination is handled by the shared data walker.
        return self.__data_walker(self.photos_search,
                per_page=per_page, **kwargs)
def set_log_level(level):
    '''Sets the log level of the logger used by the FlickrAPI module.
    >>> import flickrapi
    >>> import logging
    >>> flickrapi.set_log_level(logging.INFO)
    '''
    # Imported here rather than at module level — presumably to avoid a
    # circular import with the flickrapi package; confirm before moving.
    import flickrapi.tokencache
    LOG.setLevel(level)
    flickrapi.tokencache.LOG.setLevel(level)
if __name__ == "__main__":
    # Running this module directly executes the doctests embedded in the
    # docstrings above.
    print "Running doctests"
    import doctest
    doctest.testmod()
    print "Tests OK"
| Python |
'''Persistent token cache management for the Flickr API'''
import os.path
import logging
import time
from exceptions import LockingError
# Make sure log output is visible even when the embedding application has
# not configured logging itself.
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
# NOTE(review): LockingTokenCache (defined below) is not exported here —
# confirm whether that omission is intentional.
__all__ = ('TokenCache', 'SimpleTokenCache')
class SimpleTokenCache(object):
    '''Token cache that lives purely in memory.'''
    def __init__(self):
        # No token known yet.
        self.token = None
    def forget(self):
        '''Discards the remembered token, if any.'''
        self.token = None
class TokenCache(object):
    '''On-disk persistent token cache for a single application.
    The application is identified by the API key used. Per
    application multiple users are supported, with a single
    token per user.
    '''
    def __init__(self, api_key, username=None):
        '''Creates a new token cache instance'''
        self.api_key = api_key
        self.username = username
        # In-memory copy of tokens already read or written, keyed by
        # username, so the file is touched at most once per user.
        self.memory = {}
        self.path = os.path.join("~", ".flickr")
    def get_cached_token_path(self):
        """Return the directory holding the app data."""
        return os.path.expanduser(os.path.join(self.path, self.api_key))
    def get_cached_token_filename(self):
        """Return the full pathname of the cached token file."""
        name = 'auth.token'
        if self.username:
            name = 'auth-%s.token' % self.username
        return os.path.join(self.get_cached_token_path(), name)
    def get_cached_token(self):
        """Read and return a cached token, or None if not found.
        The token is read from the cached token file.
        """
        # Serve from memory when we already know the token.
        try:
            return self.memory[self.username]
        except KeyError:
            pass
        try:
            handle = open(self.get_cached_token_filename(), "r")
            contents = handle.read()
            handle.close()
            return contents.strip()
        except IOError:
            # Missing or unreadable file simply means "no token".
            return None
    def set_cached_token(self, token):
        """Cache a token for later use."""
        # Remember for later use
        self.memory[self.username] = token
        directory = self.get_cached_token_path()
        if not os.path.exists(directory):
            os.makedirs(directory)
        handle = open(self.get_cached_token_filename(), "w")
        handle.write(token)
        handle.close()
    def forget(self):
        '''Removes the cached token'''
        if self.username in self.memory:
            del self.memory[self.username]
        token_file = self.get_cached_token_filename()
        if os.path.exists(token_file):
            os.unlink(token_file)
    token = property(get_cached_token, set_cached_token, forget, "The cached token")
class LockingTokenCache(TokenCache):
    '''Locks the token cache when reading or updating it, so that
    multiple processes can safely use the same API key.
    '''

    def get_lock_name(self):
        '''Returns the filename of the lock.'''
        token_name = self.get_cached_token_filename()
        return '%s-lock' % token_name
    # The lock is a *directory* next to the token file: os.makedirs()
    # is atomic, which makes it usable as a cross-process mutex.
    lock = property(get_lock_name)

    def get_pidfile_name(self):
        '''Returns the name of the pidfile in the lock directory.'''
        return os.path.join(self.lock, 'pid')
    pidfile_name = property(get_pidfile_name)

    def get_lock_pid(self):
        '''Returns the PID that is stored in the lock directory, or
        None if there is no such file.
        '''
        filename = self.pidfile_name
        if not os.path.exists(filename):
            return None
        pidfile = open(filename)
        try:
            pid = pidfile.read()
            if pid:
                return int(pid)
        finally:
            pidfile.close()
        # Pidfile exists but is empty: holder unknown.
        return None

    def acquire(self, timeout=60):
        '''Locks the token cache for this key and username.
        If the token cache is already locked, waits until it is
        released. Throws an exception when the lock cannot be acquired
        after ``timeout`` seconds.
        '''
        # Check whether there is a PID file already with our PID in
        # it.
        lockpid = self.get_lock_pid()
        if lockpid == os.getpid():
            LOG.debug('The lock is ours, continuing')
            return
        # Figure out the lock filename
        lock = self.get_lock_name()
        LOG.debug('Acquiring lock %s' % lock)
        # Try to obtain the lock
        start_time = time.time()
        while True:
            try:
                # makedirs() fails with OSError if the directory
                # already exists, i.e. another process holds the lock.
                os.makedirs(lock)
                break
            except OSError:
                # If the path doesn't exist, the error isn't that it
                # can't be created because someone else has got the
                # lock. Just bail out then.
                if not os.path.exists(lock):
                    LOG.error('Unable to acquire lock %s, aborting' %
                            lock)
                    raise
                if time.time() - start_time >= timeout:
                    # Timeout has passed, bail out
                    raise LockingError('Unable to acquire lock ' +
                            '%s, aborting' % lock)
                # Wait for a bit, then try again
                LOG.debug('Unable to acquire lock, waiting')
                time.sleep(0.1)
        # Write the PID file
        LOG.debug('Lock acquired, writing our PID')
        pidfile = open(self.pidfile_name, 'w')
        try:
            pidfile.write('%s' % os.getpid())
        finally:
            pidfile.close()

    def release(self):
        '''Unlocks the token cache for this key.'''
        # Figure out the lock filename
        lock = self.get_lock_name()
        if not os.path.exists(lock):
            LOG.warn('Trying to release non-existing lock %s' % lock)
            return
        # If the PID file isn't ours, abort.
        lockpid = self.get_lock_pid()
        if lockpid and lockpid != os.getpid():
            raise LockingError(('Lock %s is NOT ours, but belongs ' +
                    'to PID %i, unable to release.') % (lock, lockpid))
        LOG.debug('Releasing lock %s' % lock)
        # Remove the PID file and the lock directory
        pidfile = self.pidfile_name
        if os.path.exists(pidfile):
            os.remove(pidfile)
        os.removedirs(lock)

    def __del__(self):
        '''Cleans up any existing lock.'''
        # Figure out the lock filename
        lock = self.get_lock_name()
        if not os.path.exists(lock):
            return
        # If the PID file isn't ours, we're done
        lockpid = self.get_lock_pid()
        if lockpid and lockpid != os.getpid():
            return
        # Release the lock
        self.release()

    def locked(method):
        '''Decorator, ensures the method runs in a locked cache.'''
        def locker(self, *args, **kwargs):
            self.acquire()
            try:
                return method(self, *args, **kwargs)
            finally:
                self.release()
        return locker

    @locked
    def get_cached_token(self):
        """Read and return a cached token, or None if not found.
        The token is read from the cached token file.
        """
        return TokenCache.get_cached_token(self)

    @locked
    def set_cached_token(self, token):
        """Cache a token for later use."""
        TokenCache.set_cached_token(self, token)

    @locked
    def forget(self):
        '''Removes the cached token'''
        TokenCache.forget(self)

    # Re-declare the property so it points at the *locked* accessors
    # rather than the base class ones.
    token = property(get_cached_token, set_cached_token, forget, "The cached token")
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#! /usr/bin/python
from flickrutils import *
from album import *
from timers import *
from options import *
from modalbox import *
from utility import *
from flickrapi import *
import urllib
import random
import os
import tempfile
gtk.gdk.threads_init()
class App:
    """Core plugin object: drives the Flickr login, the options
    dialog and the avatar slideshow for one emesene user."""

    def __init__(self,controller,user):
        self.options=readOpt(user)      # persisted plugin options
        self.flickr=None                # FlickrAPI handle, set on login
        self.albums=None                # list of album objects
        self.mbox=None                  # options dialog (ModalBox)
        self.ht=None                    # preview-loader thread
        self.sld=None                   # slideshow thread
        self.ldn=None                   # album-refresh thread
        self.cur_photo=0                # next index in sequential mode
        self.loaded=0                   # 1 once the albums are loaded
        self.controller=None
        self.tmpdir=tempfile.mkdtemp(suffix="flickrmsn")
        self.setemesene=0               # 1 while an avatar update runs
        self.rnd=-1                     # index of the photo last shown
        self.logout=True
        self.user=user
        self.curralbum=0                # index of the selected album
        self.speed=0
        self.needref=1
        self.isclean=False              # True once the plugin shuts down
        # Image resources live in a different place depending on how
        # emesene was started (from source tree vs. installed).
        if os.path.exists(os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","wait.gif")):
            self.wait_path=os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","wait.gif")
            self.noimg_path=os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","noimg.png")
            self.logo_path=os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","logo.png")
        else:
            self.wait_path=os.path.join(os.getcwd(),"plugins_base","flickrAvatar","img","wait.gif")
            self.noimg_path=os.path.join(os.getcwd(),"plugins_base","flickrAvatar","img","noimg.png")
            self.logo_path=os.path.join(os.getcwd(),"plugins_base","flickrAvatar","img","logo.png")
        self.controller=controller

    def cleanAll(self):
        '''Mark the plugin as shutting down.'''
        self.isclean=True

    def cleanT(self):
        '''Return True when the plugin is shutting down.'''
        return self.isclean

    def setOptions(self):
        '''Push the persisted options into the dialog widgets.'''
        self.mbox.check_enable.set_active(self.options.enabled)
        self.mbox.check_random.set_active(self.options.random)
        self.speed=self.options.speed
        self.mbox.set_combo_speed_index(self.options.speed)

    def isLoaded(self):
        '''Return 1 once the album list has been loaded.'''
        return self.loaded

    def isSettingEmesene(self):
        '''Return 1 while an avatar update is in progress.'''
        return self.setemesene

    def on_get_album(self):
        '''Dialog callback: (re)load the user's photo sets.'''
        self.mbox.restore_on_logout()
        if(os.name=="nt"):
            # On Windows the refresh runs synchronously.
            self.refreshSets()
        else:
            self.ldn=refresh(self)
            self.ldn.start()

    def on_logout(self):
        '''Dialog callback: forget the stored Flickr token.'''
        self.mbox.set_on_logout()
        try:
            destroyService(self.user)
        except:
            # Best effort: the token file may not exist.
            pass
        else:
            self.logout=True

    def on_dove(self):
        '''Dialog OK callback: persist options and restart the show.'''
        self.curralbum=self.mbox.get_combo_index()
        self.speed=self.mbox.get_combo_speed_index()
        self.stopSlide()
        self.options=FlickrOpt(self.mbox.is_enabled(),self.mbox.is_random(),self.albums[self.mbox.get_combo_index()].title,self.mbox.get_combo_speed_index())
        writeOpt(self.options,self.user)
        if(self.mbox.is_enabled()):
            self.startSlideShow()
        self.mbox.window.hide_all()

    def startSlideShow(self):
        '''Spawn the slideshow thread.'''
        self.sld=slideshow(self)
        self.sld.start()

    def on_change(self):
        '''Combo-change callback: reload the preview in a thread.'''
        if(self.loaded!=0):
            self.ht=loadtim(self)
            self.ht.start()

    def disableAll(self):
        '''Grey out the dialog buttons while loading.'''
        self.mbox.btn_ok.set_sensitive(0)
        self.mbox.btn_get_album.set_sensitive(0)

    def enableAll(self):
        '''Re-enable the dialog buttons.'''
        self.mbox.btn_ok.set_sensitive(1)
        self.mbox.btn_get_album.set_sensitive(1)

    def setLoading(self):
        '''Show the wait animation in the preview slot.'''
        self.mbox.set_preview(self.wait_path)

    def refreshNow(self):
        '''Reload the album list without touching the dialog.'''
        self.loaded=0
        if(self.cleanT()):
            return
        self.albums=getAlbums(self.flickr)
        titles=[]
        count=0
        for album in self.albums:
            titles.append(album.title + " (" + str(album.getPhotoCount()) + ")")
            if(album.title==self.options.album):
                self.curralbum=count
            count+=1
        self.loaded=1

    def refreshSets(self):
        '''Log in (if needed) and (re)populate the album combo box.'''
        self.loaded=0
        self.flickr=loginService(self.user,self)
        if(self.flickr!=None):
            self.logout=False
        else:
            self.logout=True
            return
        if(self.cleanT()):
            return
        self.albums=getAlbums(self.flickr)
        titles=[]
        count=0
        for album in self.albums:
            titles.append(album.title + " (" + str(album.getPhotoCount()) + ")")
            if(album.title==self.options.album):
                # Restore the previously selected album.
                self.curralbum=count
            count+=1
        self.mbox.set_albums(titles)
        self.mbox.combobox.set_active(self.curralbum)
        self.setPreview()
        self.loaded=1

    def setPreview(self):
        '''Download the first photo of the selected album as preview.'''
        self.albums[self.mbox.get_combo_index()].refreshPhotos(0)
        photo_url= self.albums[self.mbox.get_combo_index()].getPhotos()[0]
        filename=os.path.join(self.tmpdir,"imgprev.jpeg")
        download_file(photo_url,filename)
        self.mbox.set_preview(filename)

    def getSpeed(self):
        '''Return the current slideshow speed setting.'''
        return self.speed

    def setEmesene(self):
        '''Pick the next photo (random or sequential), download it and
        set it as the emesene avatar.  Returns 1 when done.'''
        self.setemesene=1
        if(self.mbox.is_random()):
            # Avoid showing the same photo twice in a row.
            newrnd=random.randint(0,self.albums[self.curralbum].getPhotoCount()-1)
            if(self.rnd==newrnd):
                newrnd=(newrnd+1)%self.albums[self.curralbum].getPhotoCount()
            self.rnd=newrnd
        else:
            self.cur_photo=self.cur_photo%self.albums[self.curralbum].getPhotoCount()
            self.rnd=self.cur_photo
            self.cur_photo+=1
        self.albums[self.curralbum].refreshPhotos(self.rnd)
        photo_url= self.albums[self.curralbum].getPhotos()[self.rnd]
        filename=os.path.join(self.tmpdir,"imgemesene.jpeg")
        download_file(photo_url,filename)
        self.controller.changeAvatar(filename)
        self.needref=(self.needref+1)%self.albums[self.curralbum].getPhotoCount()
        self.setemesene=0
        if(self.needref==0 and self.logout==False):
            # Once per full cycle, re-read the album in case it
            # changed online.
            self.refreshNow()
        return 1

    def stopSlide(self):
        '''Stop the slideshow thread, if one is running.'''
        if(self.sld!=None):
            self.sld.stop()

    def main(self):
        '''Build the dialog, hook up callbacks and start loading.'''
        self.mbox=ModalBox()
        self.mbox.set_album_callback(self.on_get_album)
        self.mbox.set_done_callback(self.on_dove)
        self.mbox.set_logout_callback(self.on_logout)
        self.mbox.set_combo_change_callback(self.on_change)
        self.mbox.set_preview(self.noimg_path)
        self.mbox.set_app_logo(self.logo_path)
        self.setOptions()
        self.ldn=refresh(self)
        self.ldn.start()
        if(self.options.enabled):
            self.startSlideShow()
        return 0
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import time
import sys
from threading import Thread
class refresh(Thread):
    """Background thread that (re)loads the user's photo sets."""

    def __init__(self, app):
        Thread.__init__(self)
        self.app = app

    def run(self):
        app = self.app
        app.disableAll()
        app.setLoading()
        # Wait for any avatar update in progress to finish first.
        while app.isSettingEmesene():
            time.sleep(2)
        app.refreshSets()
        app.enableAll()
class loadtim(Thread):
    """One-shot thread: show the wait animation, then the preview."""

    def __init__(self, app):
        Thread.__init__(self)
        self.app = app

    def run(self):
        target = self.app
        target.setLoading()
        target.setPreview()
class slideshow(Thread):
    '''Background thread that periodically triggers an avatar change.'''
    def __init__ (self,app):
        Thread.__init__(self)
        self.cont=True      # loop flag; cleared by stop()
        self.app = app
        self.rot=0          # tick counter; an update fires every 6th tick
    def run(self):
        cost=0.2
        while self.cont:
            if(self.app.isLoaded() and self.rot==5):
                # Retry until the avatar update reports success.
                while not self.app.setEmesene():
                    time.sleep(2)
                # Map the speed setting onto the tick length
                # (higher speed -> shorter sleep; presumably 0..5 —
                # verify against the options dialog).
                cost=1.0-(self.app.getSpeed()/5.0)
            time.sleep( cost * 20 / 5.0 )
            self.rot=(self.rot+1)%6
    def stop(self):
        '''Ask the thread to stop after the current tick.'''
        self.cont=False
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
from flickrapi import shorturl
class album:
    """One Flickr photoset: title, id, photo count and photo URLs."""
    # Class-level defaults kept for backward compatibility; __init__
    # always assigns instance attributes.
    title=''
    id_album=0
    n_photo=0
    flickr=None
    imgadd=[]

    def __init__(self,title,id_album,n_photo,flickr):
        self.title=title
        self.id_album=id_album
        self.n_photo=n_photo
        self.flickr=flickr
        # Placeholder list with one slot per photo; slots are replaced
        # by URL strings on demand.  list() is required so that item
        # assignment also works on Python 3, where range() is lazy.
        self.imgadd=list(range(0,int(n_photo)))

    def getTitle(self):
        """Return the set's title."""
        return self.title

    def getId(self):
        """Return the set's Flickr id."""
        return self.id_album

    def getPhotoCount(self):
        """Return the number of photos in the set."""
        return int(self.n_photo)

    def refreshPhotos(self,index_p):
        """Resolve and store the URL of the photo at *index_p*."""
        countp=0
        for photo in self.flickr.walk_set(photoset_id=self.id_album,per_page=20):
            if(countp==index_p):
                srcfile="http://farm5.static.flickr.com/"+photo.get('server')+"/"+photo.get('id')+"_"+photo.get('secret')+"_s.jpg"
                self.imgadd[index_p]=srcfile
                # Found the target photo: no need to walk the rest of
                # the set (the original kept iterating to the end).
                break
            countp+=1

    def getPhotos(self):
        """Return the list of photo slots/URLs."""
        return self.imgadd
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from flickrapi import *
from album import album
import time
import os
import webbrowser
# Application credentials registered with Flickr for this plugin.
api_key = '47b7b29216283347519507046f0562a8'
api_secret = '2510f66995bad407'
def auth(frob, perms):
    """Authentication callback: tell the user which Flickr permission
    level is being requested."""
    # Parenthesised print works on both Python 2 and Python 3; the
    # original 'print' statement is a syntax error on Python 3.
    print('Please give us permission %s ' % (perms))
def loginService(user,app):
    '''Log *user* in to Flickr.

    Returns an authorised FlickrAPI instance, or None when the first
    authentication step (getting the frob/token) fails.  Polls until
    the user completes the browser authorisation, or until *app*
    reports shutdown via cleanT().
    '''
    ok=False
    flickr = FlickrAPI(api_key, api_secret,username=user,store_token=True)
    try:
        # Store the token cache under ~/.config/.flickrmsntemp instead
        # of the flickrapi default location.
        flickr.token_cache.path=os.path.join(os.path.expanduser("~"),".config",".flickrmsntemp")
        (token, frob) = flickr.get_token_part_one(perms='read')
    except:
        # NOTE(review): bare except — any failure (network, API error)
        # is treated as "login impossible"; consider narrowing.
        return None
    #if not token:
    while not ok:
        try:
            flickr.get_token_part_two((token, frob))
            ok=True
        except:
            # Presumably the user has not authorised in the browser
            # yet; poll once a second until they do, or until the
            # plugin is shutting down.
            if(app.cleanT()):
                ok=True
            time.sleep(1)
            continue
    return flickr
def destroyService(user):
    """Delete *user*'s cached auth token file from disk."""
    token_file = os.path.join(os.path.expanduser("~"), ".config",
                              ".flickrmsntemp", api_key,
                              "auth-" + user + ".token")
    os.remove(token_file)
def getAlbums(flickr):
    """Return a list of album objects, one per photoset of the user."""
    response = flickr.photosets_getList()
    photosets = response.find('photosets').findall('photoset')
    return [album(ps.find('title').text, ps.attrib['id'],
                  ps.attrib['photos'], flickr)
            for ps in photosets]
| Python |
"""
Developed by
Andrea Stagi <stagi.andrea@gmail.com>
Sanjeya Cooray <sanjeya.cooray@gmail.com>
FlickrAvatar image feeder for emesene
Copyright (C) 2010 Andrea Stagi - Sanjeya Cooray
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import gtk
import Login
import Plugin
from gobject import timeout_add, source_remove
from flickrAvatar.flickrAvatar import *
import os
#Plugin Class
class MainClass(Plugin.Plugin):
    """emesene plugin entry point: adds a Flickr button to the user
    panel and runs the avatar slideshow via App."""
    description = _('Get emesene\'s avatar images from your Flickr sets [Ver.1.0]')
    authors = {
            'Cooray Sanjeya (Sanje)' : 'sanjeya dot cooray at gmail dot com',
            'Stagi Andrea (4ndreaSt4gi)' : 'stagi dot andrea at gmail dot com'
    }
    website = 'http://code.google.com/p/flickravatar'
    displayName = _('Flickr Avatar')
    name = 'FlickrAvatar1.0'

    def __init__(self, controller, msn):
        Plugin.Plugin.__init__(self, controller, msn)
        self.controller = controller
        self.app=None
        self.current_user = self.controller.config.getCurrentUser()

    def start(self):
        '''start the plugin'''
        self.img = gtk.Image()
        # The button image path depends on how emesene was launched
        # (source tree vs. installed layout).
        if os.path.exists(os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","flickr.png")):
            self.img.set_from_file(os.path.join(os.getcwd(),"emesene","plugins_base","flickrAvatar","img","flickr.png"))
        else:
            self.img.set_from_file(os.path.join(os.getcwd(),"plugins_base","flickrAvatar","img","flickr.png"))
        self.flickrbtn = gtk.Button()
        self.flickrbtn.connect("clicked",self.show_dialog)
        self.flickrbtn.set_tooltip_text("Load Avatar from Flickr")
        self.flickrbtn.set_image(self.img)
        self.flickrbtn.set_relief(gtk.RELIEF_NONE)
        self.flickrbtn.set_alignment(1.0,0.0)
        try:
            # Make sure the token-cache directory exists; "already
            # exists" errors are ignored.
            os.mkdir(os.path.join(os.path.expanduser("~"),".config",".flickrmsntemp"))
        except:
            pass
        self.hbox = gtk.HBox()
        self.hbox.pack_start(self.flickrbtn,False,False)
        self.controller.mainWindow.userPanel.hbox2.pack_start(self.hbox,False,False)
        self.hbox.show_all()
        self.app=App(self.controller,self.current_user)
        self.app.main()
        self.enabled = True

    def stop(self):
        '''Stop the plugin and tear the UI down.'''
        self.app.stopSlide()
        self.app.cleanAll()
        self.hbox.destroy()
        # NOTE(review): assigns 'enable', not 'enabled' — looks like a
        # typo; left unchanged here.
        self.enable=False

    def check(self):
        '''Plugin availability check.'''
        return (True, 'Ok')

    def show_dialog(self,event):
        '''Click handler: show the Flickr options dialog.'''
        self.app.mbox.show()
| Python |
#!/usr/bin/python
# A dumb example of an automated fuzzer.
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
"""implements an automated fuzzer using flayer"""
import flayer.core
import flayer.input.fuzz
import os
import random
import subprocess
class Fuzzer:
    """Dumb automated fuzzer: feeds random input to a program under
    flayer and randomly flips tainted branches between runs."""

    def __init__(self, program, args=None, env=None, libtaint=''):
        # None sentinels instead of mutable default arguments, which
        # were shared across all Fuzzer instances in the original.
        if args is None:
            args = []
        if env is None:
            env = {}
        self._program = program
        self._args = args
        self._env = env
        self._flayer = flayer.core.Flayer(program, args, env, libtaint)
        self._input = flayer.input.fuzz.FuzzFile()
        # instruction pointer -> forced branch direction
        self._altered = {}
        self._cond_input = random.Random()

    def set_seed(self, seed=0):
        """Seed both the input generator and the branch chooser."""
        self._input.set_seed(seed)
        self._cond_input.seed(seed)

    def get_seed(self):
        """Return the input generator's current seed."""
        return self._input.get_seed()

    def Run(self, count=1, input_is='file', seed=None):
        """Run the target *count* times, re-randomising branch
        alterations between runs."""
        if seed is not None:
            # Was self._set_seed(seed): no such method exists.
            self.set_seed(seed)
        # Generate input
        print("Generating random input...")
        self._input.Run()
        if input_is == 'file':
            # Was 'args.append(...)', which referenced an undefined
            # global instead of the instance's argument list.
            self._args.append(self._input.get_target())
        self._flayer.set_command(self._program,
                                 self._args,
                                 self._env)
        self._flayer.set_taint('f')
        self._flayer.set_taint_file_filter(self._input.get_target())
        runs = 0
        while runs < count:
            runs += 1
            lead = "== %d ==" % runs
            altered = ','.join(["%s:%d" % (a[0], a[1]) for a in self._altered.items()])
            print("%s altered: %s" % (lead, altered))
            process = self._flayer.Run()
            process.stdin.close()
            # process.stdout.close() # Write stdout to a log...
            # os.wait() returns (pid, status); the original's
            # 'ret, ret = os.wait()' silently discarded the pid.
            _pid, ret = os.wait()
            print("%s return code %d" % (lead, ret))
            self._flayer.ProcessLastRun()
            errors = self._flayer.Errors()
            # Fuzz paths while keeping constant fuzz input.
            for e in errors.items():
                # Look for one hit wonders
                if e[1].kind == 'TaintedCondition' and e[1].count == 1:
                    action = self._cond_input.choice([True, False])
                    ip = e[1].frames[0].instruction_pointer
                    self._altered[ip] = action
                    self._flayer.add_branch_alteration(ip, action)
            # Re-randomise the direction of every previously altered
            # branch for the next run.
            for address in self._altered.keys():
                self._flayer.del_branch_alteration(address)
                action = self._cond_input.choice([True, False])
                self._altered[address] = action
                self._flayer.add_branch_alteration(address, action)
if __name__ == '__main__':
    import sys
    # Usage: fuzz.py <runs> <seed> <program> [args...]
    # argv values are strings; the original passed them through
    # unconverted, so 'while runs < count' compared int to str.
    runs = int(sys.argv[1])
    seed = int(sys.argv[2])
    program = sys.argv[3]
    args = sys.argv[4:]
    fuzzer = Fuzzer(program, args, libtaint='/home/wad/sync/bzr/flayer/libtaint/libtaint.so.1.0')
    fuzzer.set_seed(seed)
    fuzzer.Run(count=runs)
| Python |
#!/usr/bin/python -i
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
import flayer.core
import flayer.wrappers.commandline
import sys
if __name__ == '__main__':
    # Optional: program to trace, followed by its arguments.
    program, args = ('', [])
    if len(sys.argv) >= 2:
        program, args = sys.argv[1], sys.argv[2:]
    shell = flayer.wrappers.commandline.Shell(flayer.core.Flayer(program, args))
    shell.Export()
    shell.Banner()
| Python |
#!/usr/bin/python -i
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
import flayer.core
import flayer.wrappers.commandline
import sys
if __name__ == '__main__':
    # Default to an empty command line when no target is given.
    program = ''
    args = []
    if len(sys.argv) >= 2:
        program = sys.argv[1]
        args = sys.argv[2:]
    shell = flayer.wrappers.commandline.Shell(flayer.core.Flayer(program, args))
    shell.Export()
    shell.Banner()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.