code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/python
# A dumb example of an automated fuzzer.
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
"""implements an automated fuzzer using flayer"""
import flayer.core
import flayer.input.fuzz
import os
import random
import subprocess
class Fuzzer:
def __init__(self, program, args=[], env={}, libtaint=''):
self._program = program
self._args = args
self._env = env
self._flayer = flayer.core.Flayer(program, args, env, libtaint)
self._input = flayer.input.fuzz.FuzzFile()
self._altered = {}
self._cond_input = random.Random()
def set_seed(self, seed=0):
self._input.set_seed(seed)
self._cond_input.seed(seed)
def get_seed(self):
return self._input.get_seed()
def Run(self, count=1, input_is='file', seed=None):
if seed is not None:
self._set_seed(seed)
# Generate input
print "Generating random input..."
self._input.Run()
if input_is == 'file':
args.append(self._input.get_target())
self._flayer.set_command(self._program,
self._args,
self._env)
self._flayer.set_taint('f')
self._flayer.set_taint_file_filter(self._input.get_target())
runs = 0
while runs < count:
runs += 1
lead = "== %d ==" % runs
altered = ','.join(["%s:%d" % (a[0],a[1]) for a in self._altered.items()])
print "%s altered: %s" % (lead, altered)
process = self._flayer.Run()
process.stdin.close()
#process.stdout.close() # Write stdout to a log...
ret, ret = os.wait()
print "%s return code %d" % (lead, ret)
self._flayer.ProcessLastRun()
errors = self._flayer.Errors()
# Fuzz paths while keeping constant fuzz input.
# Set up for next run
#last_altered = copy.copy(self._altered)
for e in errors.items():
# Look for one hit wonders
if e[1].kind == 'TaintedCondition' and e[1].count == 1:
action = self._cond_input.choice([True, False])
ip = e[1].frames[0].instruction_pointer
self._altered[ip] = action
self._flayer.add_branch_alteration(ip, action)
for address in self._altered.keys():
self._flayer.del_branch_alteration(address)
action = self._cond_input.choice([True, False])
self._altered[address] = action
self._flayer.add_branch_alteration(address, action)
if __name__ == '__main__':
    import sys
    # Bug fix: argv values are strings.  Run() compares 'runs < count'
    # and the RNGs expect a numeric seed, so convert explicitly -- in
    # Python 2 an int < str comparison is always True, which made the
    # run loop never terminate.
    runs = int(sys.argv[1])
    seed = int(sys.argv[2])
    program = sys.argv[3]
    args = sys.argv[4:]
    fuzzer = Fuzzer(program, args,
                    libtaint='/home/wad/sync/bzr/flayer/libtaint/libtaint.so.1.0')
    fuzzer.set_seed(seed)
    fuzzer.Run(count=runs)
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""flayer - a fuzzing framework for bypassing basic structural error checking
...
"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
from distutils.spawn import find_executable
import os
import shelve
import subprocess
import sys
import tempfile
import valgrind.error_parser
import valgrind.runner
class Flayer:
    """wrapper for valgrind/memcheck undef-as-taint and alter-branch arguments"""
    VERSION = '0.0.1'

    def __init__(self, program='/bin/true', args=None, env=None):
        """Builds a valgrind Runner around the given target command.

        Bug fix: args/env were mutable default arguments, shared
        between every Flayer instance.
        """
        if args is None:
            args = []
        if env is None:
            env = {}
        self.__runner = valgrind.runner.Runner()
        self.__runner.executable = find_executable('valgrind')
        self.__args = []
        self.set_command(program, args, env)
        self.__taint = ''
        self.set_taint('nfs') # TODO: e
        self.__taint_filter = {}
        #self.set_taint_network_filter('')
        self.set_taint_file_filter('')
        self.__errors = {}
        self.__shelf = None
        # Per-instance scratch directory for valgrind log files.
        self.__tmpdir = tempfile.mkdtemp()
        self.__runner['log-file'] = self.__tmpdir + '/flayer'

    def __cleanup_tmpdir(self):
        """attempts to cleanup the tmpdir"""
        for root, dirs, files in os.walk(self.__tmpdir):
            for f in files:
                os.unlink(os.path.join(root, f))
            os.rmdir(root)

    def __del__(self):
        """properly clean up the temporary files on destruction"""
        self.__cleanup_tmpdir()

    def Errors(self):
        """returns a deep copy of the valgrind output errors"""
        return copy.deepcopy(self.__errors)

    def GetTmpDir(self):
        """returns the tmpdir"""
        return self.__tmpdir

    def ResetTmpDir(self):
        """resets the tmpdir and cleans up one if it exists"""
        self.__cleanup_tmpdir()
        self.__tmpdir = tempfile.mkdtemp()
        self.__runner['log-file'] = self.__tmpdir + '/flayer'

    # TODO: change these to properties
    def get_taint(self):
        """returns the enabled taint sources as a flag string"""
        taint = ''
        if 'taint-network' in self.__runner and \
           self.__runner['taint-network']:
            taint += 'n'
        if 'taint-file' in self.__runner and \
           self.__runner['taint-file']:
            # 'f' and 's' are synonyms in set_taint(), so report both.
            taint += 'f'
            taint += 's'
        return taint

    def set_taint(self, value):
        """enables taint sources: 'n' network, 'f'/'s' file"""
        # TODO validate
        self.__runner['taint-network'] = False
        self.__runner['taint-file'] = False
        for ch in value:
            if ch == 'n':
                self.__runner['taint-network'] = True
            elif ch == 'f' or ch == 's': # no diff now...
                self.__runner['taint-file'] = True
            else:
                raise RuntimeError("Request value not yet implemented: " + ch)

    def set_taint_network_filter(self, value):
        """specified the host or port traffic to mark"""
        raise RuntimeError("NOT YET IMPLEMENTED")

    def set_taint_file_filter(self, value):
        """specified the path prefix for file activity to mark"""
        self.__runner['file-filter'] = value

    def get_taint_file_filter(self):
        """returns a copy of the file filter, or '' when unset"""
        if 'file-filter' in self.__runner:
            return copy.copy(self.__runner['file-filter'])
        else:
            return ''

    def get_taint_network_filter(self):
        """returns a copy of the network filter, or '' when unset"""
        if 'network-filter' in self.__runner:
            return copy.copy(self.__runner['network-filter'])
        else:
            return ''

    def Run(self, verbose=False, *io):
        """runs the specified command under valgrind-flayer and gets the errors"""
        process = self.__runner.run(self.__args, verbose, *io)
        # valgrind appends the child pid to the log-file name.
        self.__errors_file = ''.join([self.__tmpdir, '/flayer.', str(process.pid)])
        return process

    def ProcessLastRun(self):
        """parses the error log left behind by the last Run()"""
        self.__errors = {}
        self._ReadErrors(self.__errors_file)

    def _ReadErrors(self, f):
        """opens the valgrind error output and parses it"""
        p = valgrind.error_parser.Parser()
        self.__errors = p.parse(open(f))

    def clear_branch_alterations(self):
        """drops all forced branch decisions"""
        self.__runner['alter-branch'] = {}

    def add_branch_alteration(self, address, action):
        """forces the branch at address to taken ('1') or not ('0')"""
        if action:
            self.__runner['alter-branch'][address] = '1'
        else:
            self.__runner['alter-branch'][address] = '0'

    def del_branch_alteration(self, address):
        """removes a forced branch decision, if present"""
        if address in self.__runner['alter-branch']:
            self.__runner['alter-branch'].pop(address)

    def get_branch_alterations(self):
        """returns a deep copy of the forced branch decisions"""
        return copy.deepcopy(self.__runner['alter-branch'])

    def set_command(self, command, args=None, env=None):
        """sets the target program command, arguments, and environment"""
        # Bug fix: args/env were mutable default arguments.
        if args is None:
            args = []
        if env is None:
            env = {}
        self.__args = copy.copy(args)
        self.__args.insert(0, command)
        if env != {}:
            self.__runner.environment.update(env)

    def get_command(self):
        """gets the target program command, arguments, and env"""
        return (self.__args[0], # command
                self.__args[1:], # arguments
                copy.copy(self.__runner.environment)) # environment

    def About(self):
        """returns a nice 'about' text"""
        return """
Flayer is a framework for automating and easing the use of
two valgrind features: --undef-as-taint and --alter-branch.
It is the proof of concept implementation of the paper.
The flayer suite (libflayer, valgrind/flayer)
provides a system which traces user input through memory
and opens doors for it.
What does this mean technically? Traditional fuzzing is
limited in its overall code coverage. It is often blocked
early in the fuzzing process by protocol banner checks and other
version and sanity checks. This suite allows for these checks to be
forcibly skipped at runtime. This breathes new life into the
good, ol' fashion Fuzz[1] technique by providing access to
program internals without specifying a complicated protocol.
However, this system can be used with almost any existing fuzzing
technique to allow for enhanced code coverage.
Flayer was conceived of and written by Will Drewry <redpig@dataspill.org>.
Tavis Ormandy <taviso@sdf.lonestar.org> created the manual fuzzing
technique that flayer automates.
[1] http://www.cs.wisc.edu/~bart/fuzz
"""

    def FullCommand(self):
        """returns the full valgrind command line as one string"""
        vg = [self.__runner.executable] + self.__runner.arguments
        command = ' '.join(vg + self.__args)
        return command

    def Banner(self):
        """display a banner when running in interactive shell mode"""
        vg = [self.__runner.executable] + self.__runner.arguments
        command = ' '.join(vg + self.__args)
        return """
Welcome to Flayer %s!
Type 'help()' for extra details or 'about()' for more
on flayer.
Current settings:
- Command: %s
- Taint settings: %s
- Temporary directory: %s
""" % (Flayer.VERSION, command, self.get_taint(), self.__tmpdir)
if __name__ == '__main__':
    # Accept an optional target command plus its arguments on argv.
    program = ''
    args = []
    if len(sys.argv) >= 2:
        program = sys.argv[1]
        args = sys.argv[2:]
    import wrappers.commandline
    shell = wrappers.commandline.Shell(Flayer(program, args))
    shell.Export()
    shell.Banner()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""contains classes for fuzzing various inputs
with the random class
import input.fuzz
f = input.fuzz.FuzzFile()
f.Run()
command(..,[f.get_target()],..)
run()
"""
__author__ = "Will Drewry"
import binascii
import os
import random
import tempfile
#### Support classes - where should this go?
class Fuzz:
    """Base class for random fuzz-data generators.

    Subclasses implement _Run() to deliver up to self._max_bytes of
    random data to self._target in self._block_size chunks.
    """

    def __init__(self, seed=0, block_size=4096):
        # Note: SystemRandom appears to ignore seeding, so use the
        # default (seedable) generator for reproducibility.
        self._rand = random.Random()
        self._seed = seed
        self._rand.seed(seed)
        self._target = None # subclass must supply
        self._block_size = block_size
        self._max_bytes = 1024*1024

    def set_target(self, target=None):
        """sets the fuzz output target"""
        self._target = target

    def get_target(self):
        """returns the current target - not a copy"""
        return self._target

    def set_seed(self, seed=0):
        """sets the fuzz seed"""
        self._seed = seed
        self._rand.seed(seed)

    def get_seed(self):
        """returns the current seed"""
        return self._seed

    def set_block_size(self, size=4096):
        """sets the fuzz block_size"""
        self._block_size = size

    def get_block_size(self):
        """returns the current block_size"""
        return self._block_size

    def set_maximum_bytes(self, bytes):
        """sets the cap on generated output size"""
        self._max_bytes = bytes

    def get_maximum_bytes(self, bytes=None):
        """returns the output size cap.

        Bug fix: this getter used to require a meaningless 'bytes'
        argument; it is now optional (and ignored) so any existing
        caller that passed one keeps working.
        """
        return self._max_bytes

    def __del__(self):
        self.CleanUp()

    def CleanUp(self):
        """hook for subclasses to release their target resources"""
        pass

    def Run(self):
        self._Run()

    def _Run(self):
        # Call-form raise works under both Python 2 and 3.
        raise RuntimeError("_Run() should be implemented in a subclass")
class FuzzWritable(Fuzz):
    # Fuzz subclass that streams random bytes to a writable file-like
    # target (anything exposing a write() method).
    def _Run(self):
        """writes up to the given limit to _target"""
        if self._target is None:
            raise RuntimeError, "No target set"
        bytes = 0
        while bytes < self._max_bytes:
            bytes += self._block_size
            bits = self._rand.getrandbits(self._block_size * 8)
            # annoying way to convert to bytes without looping.
            # haven't benchmarked - may be slower.
            # NOTE(review): the [2:-1] slice assumes Python 2 long
            # semantics, where hex() ends in an 'L' suffix that must be
            # stripped -- do not port this line verbatim to Python 3.
            hexed = hex(bits)[2:-1]
            if len(hexed) % 2 != 0:
                # unhexlify() requires an even number of hex digits.
                hexed += '0'
            payload = binascii.unhexlify(hexed)
            self._target.write(payload)
class FuzzFile(FuzzWritable):
    """FuzzWritable that manages its own temporary output file."""

    def __init__(self, seed=0, block_size=4096):
        FuzzWritable.__init__(self, seed, block_size)
        self.set_file()

    def set_file(self, filename=None):
        """opens a new target file, removing any previous one.

        Bug fixes: the default filename used to be tempfile.mktemp()
        evaluated once at definition time, so every default call reused
        the same path; and stale files were removed via the nonexistent
        os.file.remove(), which raised AttributeError.
        """
        if filename is None:
            filename = tempfile.mktemp()
        # clean up any old files
        if self._target is not None:
            self._target.close()
            os.remove(self._target.name)
        # open() instead of the Python-2-only file() builtin.
        self._target = open(filename, 'w')

    def get_file(self):
        """returns the path of the current target file"""
        return self._target.name

    def CleanUp(self):
        # Clean up tmp file
        if self._target is not None and \
           os.path.exists(self._target.name):
            os.remove(self._target.name)

    def set_target(self, target):
        self.set_file(target)

    def get_target(self):
        return self.get_file()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""contains classes for fuzzing various inputs
with the random class
import input.fuzz
f = input.fuzz.FuzzFile()
f.Run()
command(..,[f.get_target()],..)
run()
"""
__author__ = "Will Drewry"
import binascii
import os
import random
import tempfile
#### Support classes - where should this go?
class Fuzz:
    """Base class for random fuzz-data generators.

    Subclasses implement _Run() to deliver up to self._max_bytes of
    random data to self._target in self._block_size chunks.
    """

    def __init__(self, seed=0, block_size=4096):
        # Note: SystemRandom appears to ignore seeding, so use the
        # default (seedable) generator for reproducibility.
        self._rand = random.Random()
        self._seed = seed
        self._rand.seed(seed)
        self._target = None # subclass must supply
        self._block_size = block_size
        self._max_bytes = 1024*1024

    def set_target(self, target=None):
        """sets the fuzz output target"""
        self._target = target

    def get_target(self):
        """returns the current target - not a copy"""
        return self._target

    def set_seed(self, seed=0):
        """sets the fuzz seed"""
        self._seed = seed
        self._rand.seed(seed)

    def get_seed(self):
        """returns the current seed"""
        return self._seed

    def set_block_size(self, size=4096):
        """sets the fuzz block_size"""
        self._block_size = size

    def get_block_size(self):
        """returns the current block_size"""
        return self._block_size

    def set_maximum_bytes(self, bytes):
        """sets the cap on generated output size"""
        self._max_bytes = bytes

    def get_maximum_bytes(self, bytes=None):
        """returns the output size cap.

        Bug fix: this getter used to require a meaningless 'bytes'
        argument; it is now optional (and ignored) so any existing
        caller that passed one keeps working.
        """
        return self._max_bytes

    def __del__(self):
        self.CleanUp()

    def CleanUp(self):
        """hook for subclasses to release their target resources"""
        pass

    def Run(self):
        self._Run()

    def _Run(self):
        # Call-form raise works under both Python 2 and 3.
        raise RuntimeError("_Run() should be implemented in a subclass")
class FuzzWritable(Fuzz):
    # Fuzz subclass that streams random bytes to a writable file-like
    # target (anything exposing a write() method).
    def _Run(self):
        """writes up to the given limit to _target"""
        if self._target is None:
            raise RuntimeError, "No target set"
        bytes = 0
        while bytes < self._max_bytes:
            bytes += self._block_size
            bits = self._rand.getrandbits(self._block_size * 8)
            # annoying way to convert to bytes without looping.
            # haven't benchmarked - may be slower.
            # NOTE(review): the [2:-1] slice assumes Python 2 long
            # semantics, where hex() ends in an 'L' suffix that must be
            # stripped -- do not port this line verbatim to Python 3.
            hexed = hex(bits)[2:-1]
            if len(hexed) % 2 != 0:
                # unhexlify() requires an even number of hex digits.
                hexed += '0'
            payload = binascii.unhexlify(hexed)
            self._target.write(payload)
class FuzzFile(FuzzWritable):
    """FuzzWritable that manages its own temporary output file."""

    def __init__(self, seed=0, block_size=4096):
        FuzzWritable.__init__(self, seed, block_size)
        self.set_file()

    def set_file(self, filename=None):
        """opens a new target file, removing any previous one.

        Bug fixes: the default filename used to be tempfile.mktemp()
        evaluated once at definition time, so every default call reused
        the same path; and stale files were removed via the nonexistent
        os.file.remove(), which raised AttributeError.
        """
        if filename is None:
            filename = tempfile.mktemp()
        # clean up any old files
        if self._target is not None:
            self._target.close()
            os.remove(self._target.name)
        # open() instead of the Python-2-only file() builtin.
        self._target = open(filename, 'w')

    def get_file(self):
        """returns the path of the current target file"""
        return self._target.name

    def CleanUp(self):
        # Clean up tmp file
        if self._target is not None and \
           os.path.exists(self._target.name):
            os.remove(self._target.name)

    def set_target(self, target):
        self.set_file(target)

    def get_target(self):
        return self.get_file()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""valgrind XML output parser for extracting error data"""
__author__ = 'Will Drewry'
from xml.sax._exceptions import SAXParseException
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import copy
class ErrorFrame:
    """Contains frame information for one stack frame of an error."""

    def __init__(self):
        # All fields accumulate SAX character data, hence str defaults.
        self.instruction_pointer = ''
        self.obj = ''
        self.function_name = ''
        self.dir = ''
        self.file = ''
        self.line = ''

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{instruction_pointer:%s, obj:%s, function_name:%s, dir:%s, file:%s, line:%s}' % (
            self.instruction_pointer, self.obj, self.function_name, self.dir, self.file, self.line)

    def __eq__(self, a):
        """Field-by-field equality.

        Bug fix: this used to raise a bare string (illegal in modern
        Python), which broke Error.__eq__'s frame-list comparison.
        """
        return (self.instruction_pointer == a.instruction_pointer and
                self.obj == a.obj and
                self.function_name == a.function_name and
                self.dir == a.dir and
                self.file == a.file and
                self.line == a.line)

    def __ne__(self, a):
        # Keep != consistent with __eq__ (required under Python 2).
        return not self.__eq__(a)
class ErrorCount:
    """container class for XML error counts"""

    def __init__(self):
        # SAX character data is appended piecewise, so both fields
        # start out as empty strings rather than numbers.
        self.count = ''
        self.unique = ''
class Error:
    """container class for XML error data"""

    def __init__(self):
        self.unique = ''
        self.tid = ''
        self.kind = ''
        self.what = ''
        self.frames = []
        self.count = 0

    def __eq__(self, a):
        """True when every field matches the other error's fields."""
        for attr in ('unique', 'tid', 'kind', 'what', 'frames', 'count'):
            if getattr(self, attr) != getattr(a, attr):
                return False
        return True

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{unique:%s, tid:%s, kind:%s, what:%s, count:%s, frames:%s}' % (
            self.unique, self.tid, self.kind, self.what, self.count, self.frames)
class HandlerState:
    """stack of elements and errors extracted"""

    def __init__(self):
        # Parsed results, most recently started item first.
        self.errors = []
        self.errorcounts = []
        # Stack of currently open XML element names (innermost first).
        self.elements = []
class Handler(ContentHandler):
    """handler for SAX XML processing of valgrind error output"""

    def __init__(self):
        self.__state = HandlerState()
        ContentHandler.__init__(self)

    def errorcounts(self):
        """provides a copy of populated errorcounts"""
        return copy.deepcopy(self.__state.errorcounts)

    def errors(self):
        """provides a copy of populated errors"""
        return copy.deepcopy(self.__state.errors)

    def startElement(self, name, attrs):
        """extracts error elements and their children"""
        # attrs is unused in valgrind output
        if len(attrs) != 0:
            pass # TODO: exception perhaps?
        self.__state.elements.insert(0, name)
        if name == 'error': # errors should never be nested
            self.__state.errors.insert(0, Error())
        elif name == 'frame':
            self.__state.errors[0].frames.insert(0, ErrorFrame())
        elif name == 'pair': # assume this only occurs in errorcounts
            self.__state.errorcounts.insert(0, ErrorCount())

    def endElement(self, name):
        """handles proper nesting of errors"""
        self.__state.elements.pop(0)
        if name == 'error':
            # Clean up frame ordering
            self.__state.errors[0].frames.reverse()

    def characters(self, ch):
        """appends CDATA to the appropriate data structure per character"""
        depth = len(self.__state.elements)
        if depth < 2:
            return
        element = self.__state.elements[0]
        parent = self.__state.elements[1]
        # Bug fix: the original indexed elements[2] after only checking
        # depth >= 2, which could raise IndexError on malformed input.
        grandparent = None
        if depth > 2:
            grandparent = self.__state.elements[2]
        if parent == 'error':
            if element == 'unique':
                self.__state.errors[0].unique += ch
            elif element == 'tid':
                self.__state.errors[0].tid += ch
            elif element == 'kind':
                self.__state.errors[0].kind += ch
            elif element == 'what':
                self.__state.errors[0].what += ch
        elif parent == 'frame' and grandparent == 'stack':
            frame = self.__state.errors[0].frames[0]
            if element == 'ip':
                frame.instruction_pointer += ch
            elif element == 'obj':
                frame.obj += ch
            elif element == 'fn':
                frame.function_name += ch
            elif element == 'dir':
                frame.dir += ch
            elif element == 'file':
                frame.file += ch
            elif element == 'line':
                frame.line += ch
        elif parent == 'pair' and grandparent == 'errorcounts':
            if element == 'count':
                self.__state.errorcounts[0].count += ch
            elif element == 'unique':
                self.__state.errorcounts[0].unique += ch
class Parser:
"""complete encapsulation of the SAX parsing of valgrind error output"""
def __init__(self):
self.__parser = make_parser()
self.__handler = Handler()
self.__parser.setContentHandler(self.__handler)
def parse(self, s=''):
"""calls the SAX parser and returns the parsed error array"""
try:
self.__parser.parse(s)
except SAXParseException:
# Accept what we could grab
# TODO(wad@google.com): look primarily for the
# "junk after document element" exc.
print "[flayer] an error occurred during error parsing.\n"
print "[flayer] some data may be missing.\n"
pass
errors = {}
errorcount = {}
for ec in self.__handler.errorcounts():
if ec.unique != '' and ec.count != '':
errorcount[int(ec.unique, 16)] = int(str(ec.count))
for error in self.__handler.errors():
key = int(error.unique, 16)
errors[key] = copy.copy(error)
# Sometimes the error count is lost with
# valgrind's malformed xml output.
if errorcount.has_key(key):
errors[key].count = errorcount[key]
return errors
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""valgrind XML output parser for extracting error data"""
__author__ = 'Will Drewry'
from xml.sax._exceptions import SAXParseException
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import copy
class ErrorFrame:
    """Contains frame information for one stack frame of an error."""

    def __init__(self):
        # All fields accumulate SAX character data, hence str defaults.
        self.instruction_pointer = ''
        self.obj = ''
        self.function_name = ''
        self.dir = ''
        self.file = ''
        self.line = ''

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{instruction_pointer:%s, obj:%s, function_name:%s, dir:%s, file:%s, line:%s}' % (
            self.instruction_pointer, self.obj, self.function_name, self.dir, self.file, self.line)

    def __eq__(self, a):
        """Field-by-field equality.

        Bug fix: this used to raise a bare string (illegal in modern
        Python), which broke Error.__eq__'s frame-list comparison.
        """
        return (self.instruction_pointer == a.instruction_pointer and
                self.obj == a.obj and
                self.function_name == a.function_name and
                self.dir == a.dir and
                self.file == a.file and
                self.line == a.line)

    def __ne__(self, a):
        # Keep != consistent with __eq__ (required under Python 2).
        return not self.__eq__(a)
class ErrorCount:
    """container class for XML error counts"""

    def __init__(self):
        # SAX character data is appended piecewise, so both fields
        # start out as empty strings rather than numbers.
        self.count = ''
        self.unique = ''
class Error:
    """container class for XML error data"""

    def __init__(self):
        self.unique = ''
        self.tid = ''
        self.kind = ''
        self.what = ''
        self.frames = []
        self.count = 0

    def __eq__(self, a):
        """True when every field matches the other error's fields."""
        for attr in ('unique', 'tid', 'kind', 'what', 'frames', 'count'):
            if getattr(self, attr) != getattr(a, attr):
                return False
        return True

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{unique:%s, tid:%s, kind:%s, what:%s, count:%s, frames:%s}' % (
            self.unique, self.tid, self.kind, self.what, self.count, self.frames)
class HandlerState:
    """stack of elements and errors extracted"""

    def __init__(self):
        # Parsed results, most recently started item first.
        self.errors = []
        self.errorcounts = []
        # Stack of currently open XML element names (innermost first).
        self.elements = []
class Handler(ContentHandler):
    """handler for SAX XML processing of valgrind error output"""

    def __init__(self):
        self.__state = HandlerState()
        ContentHandler.__init__(self)

    def errorcounts(self):
        """provides a copy of populated errorcounts"""
        return copy.deepcopy(self.__state.errorcounts)

    def errors(self):
        """provides a copy of populated errors"""
        return copy.deepcopy(self.__state.errors)

    def startElement(self, name, attrs):
        """extracts error elements and their children"""
        # attrs is unused in valgrind output
        if len(attrs) != 0:
            pass # TODO: exception perhaps?
        self.__state.elements.insert(0, name)
        if name == 'error': # errors should never be nested
            self.__state.errors.insert(0, Error())
        elif name == 'frame':
            self.__state.errors[0].frames.insert(0, ErrorFrame())
        elif name == 'pair': # assume this only occurs in errorcounts
            self.__state.errorcounts.insert(0, ErrorCount())

    def endElement(self, name):
        """handles proper nesting of errors"""
        self.__state.elements.pop(0)
        if name == 'error':
            # Clean up frame ordering
            self.__state.errors[0].frames.reverse()

    def characters(self, ch):
        """appends CDATA to the appropriate data structure per character"""
        depth = len(self.__state.elements)
        if depth < 2:
            return
        element = self.__state.elements[0]
        parent = self.__state.elements[1]
        # Bug fix: the original indexed elements[2] after only checking
        # depth >= 2, which could raise IndexError on malformed input.
        grandparent = None
        if depth > 2:
            grandparent = self.__state.elements[2]
        if parent == 'error':
            if element == 'unique':
                self.__state.errors[0].unique += ch
            elif element == 'tid':
                self.__state.errors[0].tid += ch
            elif element == 'kind':
                self.__state.errors[0].kind += ch
            elif element == 'what':
                self.__state.errors[0].what += ch
        elif parent == 'frame' and grandparent == 'stack':
            frame = self.__state.errors[0].frames[0]
            if element == 'ip':
                frame.instruction_pointer += ch
            elif element == 'obj':
                frame.obj += ch
            elif element == 'fn':
                frame.function_name += ch
            elif element == 'dir':
                frame.dir += ch
            elif element == 'file':
                frame.file += ch
            elif element == 'line':
                frame.line += ch
        elif parent == 'pair' and grandparent == 'errorcounts':
            if element == 'count':
                self.__state.errorcounts[0].count += ch
            elif element == 'unique':
                self.__state.errorcounts[0].unique += ch
class Parser:
"""complete encapsulation of the SAX parsing of valgrind error output"""
def __init__(self):
self.__parser = make_parser()
self.__handler = Handler()
self.__parser.setContentHandler(self.__handler)
def parse(self, s=''):
"""calls the SAX parser and returns the parsed error array"""
try:
self.__parser.parse(s)
except SAXParseException:
# Accept what we could grab
# TODO(wad@google.com): look primarily for the
# "junk after document element" exc.
print "[flayer] an error occurred during error parsing.\n"
print "[flayer] some data may be missing.\n"
pass
errors = {}
errorcount = {}
for ec in self.__handler.errorcounts():
if ec.unique != '' and ec.count != '':
errorcount[int(ec.unique, 16)] = int(str(ec.count))
for error in self.__handler.errors():
key = int(error.unique, 16)
errors[key] = copy.copy(error)
# Sometimes the error count is lost with
# valgrind's malformed xml output.
if errorcount.has_key(key):
errors[key].count = errorcount[key]
return errors
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""interface for running the valgrind command"""
__author__ = "Will Drewry"
import copy
import os
import subprocess
class InvalidExecutable(RuntimeError): pass  # raised by Runner.run() when the valgrind path is not a valid executable
class Runner(dict):
DEFAULT_ARGUMENTS = {
'tool':'flayer',
'xml':True,
'db-attach':True,
'db-command':'"xterm -e \"gdb -nw %f %p\""',
'alter-branch':{},
'taint-file':True,
'taint-network':True,
'file-filter':'',
'log-file':'',
}
def __init__(self):
self.update(Runner.DEFAULT_ARGUMENTS)
self.bufsize = 4096
self.executable = '/usr/bin/valgrind'
self.environment = dict(os.environ)
def _HandleArgumentValue(self, value):
"""takes a name and value and returns the final string"""
if type(value) is str or type(value) is unicode:
# Escape quotes only and set the value.
# *** This is not for security!! ***
escaped_value = copy.copy(value)
# TODO: make these constants
for pair in [["'", '"'], ['"', '\\"']]:
escaped_value = escaped_value.replace(pair[0], pair[1])
return escaped_value
elif type(value) is bool:
if value == True:
return 'yes'
else:
return 'no'
elif type(value) is list:
return ','.join([self._HandleArgumentValue(v) for v in value])
elif type(value) is dict:
merged = []
for item in value.items():
merged.append(':'.join([item[0], self._HandleArgumentValue(item[1])]))
return self._HandleArgumentValue(merged)
else:
return str(value)
def _GetArguments(self):
"""returns the arguments as a usable array"""
arguments = []
for name in self.keys():
value = self._HandleArgumentValue(self[name])
arguments.append('='.join(['--'+name, value]))
return arguments
def __GetArguments(self):
"""indirect reference for 'arguments' property'"""
return self._GetArguments()
def _SetArguments(self, value):
"""will auto-set arguments"""
raise NotYetImplemented, "this will be implemented later"
def __SetArguments(self, value):
"""indirect reference for 'arguments' property'"""
return self._SetArguments(value)
arguments = property(__GetArguments, __SetArguments,
doc="""Get or set the current arguments""")
def run(self, additional_arguments=[],verbose=False,*io):
"""executes valgrind with the options and returns the popen object"""
# Test for correctness
if type(self.executable) != str:
raise InvalidExecutable, 'Executable must a string'
if not os.path.exists(self.executable):
raise InvalidExecutable, 'Executable not found. Full path required.'
arguments = [self.executable] + self._GetArguments() + additional_arguments
if verbose:
print "Running %s\n" % ' '.join(arguments)
stdin=subprocess.PIPE
stdout=subprocess.PIPE
stderr=subprocess.PIPE
if len(io) > 0:
stdin = io[0]
if len(io) > 1:
stdout = io[1]
if len(io) > 2:
stderr = io[2]
process = subprocess.Popen(arguments,
self.bufsize,
env=self.environment,
stdin=stdin,
stdout=stdout,
stderr=stderr,
close_fds=True)
return process
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""interface for running the valgrind command"""
__author__ = "Will Drewry"
import copy
import os
import subprocess
class InvalidExecutable(RuntimeError): pass
class Runner(dict):
DEFAULT_ARGUMENTS = {
'tool':'flayer',
'xml':True,
'db-attach':True,
'db-command':'"xterm -e \"gdb -nw %f %p\""',
'alter-branch':{},
'taint-file':True,
'taint-network':True,
'file-filter':'',
'log-file':'',
}
def __init__(self):
self.update(Runner.DEFAULT_ARGUMENTS)
self.bufsize = 4096
self.executable = '/usr/bin/valgrind'
self.environment = dict(os.environ)
def _HandleArgumentValue(self, value):
"""takes a name and value and returns the final string"""
if type(value) is str or type(value) is unicode:
# Escape quotes only and set the value.
# *** This is not for security!! ***
escaped_value = copy.copy(value)
# TODO: make these constants
for pair in [["'", '"'], ['"', '\\"']]:
escaped_value = escaped_value.replace(pair[0], pair[1])
return escaped_value
elif type(value) is bool:
if value == True:
return 'yes'
else:
return 'no'
elif type(value) is list:
return ','.join([self._HandleArgumentValue(v) for v in value])
elif type(value) is dict:
merged = []
for item in value.items():
merged.append(':'.join([item[0], self._HandleArgumentValue(item[1])]))
return self._HandleArgumentValue(merged)
else:
return str(value)
def _GetArguments(self):
"""returns the arguments as a usable array"""
arguments = []
for name in self.keys():
value = self._HandleArgumentValue(self[name])
arguments.append('='.join(['--'+name, value]))
return arguments
def __GetArguments(self):
"""indirect reference for 'arguments' property'"""
return self._GetArguments()
def _SetArguments(self, value):
"""will auto-set arguments"""
raise NotYetImplemented, "this will be implemented later"
def __SetArguments(self, value):
"""indirect reference for 'arguments' property'"""
return self._SetArguments(value)
arguments = property(__GetArguments, __SetArguments,
doc="""Get or set the current arguments""")
def run(self, additional_arguments=[],verbose=False,*io):
"""executes valgrind with the options and returns the popen object"""
# Test for correctness
if type(self.executable) != str:
raise InvalidExecutable, 'Executable must a string'
if not os.path.exists(self.executable):
raise InvalidExecutable, 'Executable not found. Full path required.'
arguments = [self.executable] + self._GetArguments() + additional_arguments
if verbose:
print "Running %s\n" % ' '.join(arguments)
stdin=subprocess.PIPE
stdout=subprocess.PIPE
stderr=subprocess.PIPE
if len(io) > 0:
stdin = io[0]
if len(io) > 1:
stdout = io[1]
if len(io) > 2:
stderr = io[2]
process = subprocess.Popen(arguments,
self.bufsize,
env=self.environment,
stdin=stdin,
stdout=stdout,
stderr=stderr,
close_fds=True)
return process
| Python |
# Package version string (matches Flayer.VERSION — TODO confirm they are
# meant to stay in sync).
VERSION = '0.0.1'
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""flayer - a fuzzing framework for bypassing basic structural error checking
...
"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
from distutils.spawn import find_executable
import os
import shelve
import subprocess
import sys
import tempfile
import valgrind.error_parser
import valgrind.runner
class Flayer:
  """wrapper for valgrind/memcheck undef-as-taint and alter-branch arguments"""
  VERSION = '0.0.1'
  def __init__(self, program='/bin/true', args=[], env={}):
    """sets up a valgrind Runner, taint defaults, and a temp log dir"""
    self.__runner = valgrind.runner.Runner()
    self.__runner.executable = find_executable('valgrind')
    self.__args = []
    self.set_command(program, args, env)
    self.__taint = ''
    self.set_taint('nfs') # TODO: e
    self.__taint_filter = {}
    #self.set_taint_network_filter('')
    self.set_taint_file_filter('')
    self.__errors = {}
    self.__shelf = None
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  def __cleanup_tmpdir(self):
    """attempts to cleanup the tmpdir"""
    # Walk bottom-up so nested directories are already empty when their
    # parent's rmdir runs (top-down rmdir would fail on non-empty dirs).
    for root, dirs, files in os.walk(self.__tmpdir, topdown=False):
      for f in files:
        os.unlink(os.path.join(root, f))
      os.rmdir(root)
  def __del__(self):
    """properly clean up the temporary files on destruction"""
    self.__cleanup_tmpdir()
  def Errors(self):
    """returns a deep copy of the parsed valgrind output errors"""
    return copy.deepcopy(self.__errors)
  def GetTmpDir(self):
    """returns the tmpdir"""
    return self.__tmpdir
  def ResetTmpDir(self):
    """resets the tmpdir and cleans up one if it exists"""
    self.__cleanup_tmpdir()
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  # TODO: change these to properties
  def get_taint(self):
    """returns the current taint flags as a string (inverse of set_taint)"""
    taint = ''
    if self.__runner.has_key('taint-network') and \
       self.__runner['taint-network']:
      taint += 'n'
    if self.__runner.has_key('taint-file') and \
       self.__runner['taint-file']:
      # 'f' and 's' are both backed by taint-file (no distinction yet),
      # so report 's' only when file tainting is actually enabled rather
      # than unconditionally as before.
      taint += 'fs'
    return taint
  def set_taint(self, value):
    """enables tainting: 'n' network, 'f' file, 's' stdin (== 'f' for now)"""
    # TODO validate
    self.__runner['taint-network'] = False
    self.__runner['taint-file'] = False
    for ch in value:
      if ch == 'n':
        self.__runner['taint-network'] = True
      elif ch == 'f' or ch == 's': # no diff now...
        self.__runner['taint-file'] = True
      else:
        raise RuntimeError("Request value not yet implemented: " + ch)
  def set_taint_network_filter(self, value):
    """specified the host or port traffic to mark"""
    raise RuntimeError("NOT YET IMPLEMENTED")
  def set_taint_file_filter(self, value):
    """specified the path prefix for file activity to mark"""
    self.__runner['file-filter'] = value
  def get_taint_file_filter(self):
    """returns a copy of the current file filter ('' when unset)"""
    if self.__runner.has_key('file-filter'):
      return copy.copy(self.__runner['file-filter'])
    else:
      return ''
  def get_taint_network_filter(self):
    """returns a copy of the current network filter ('' when unset)"""
    if self.__runner.has_key('network-filter'):
      return copy.copy(self.__runner['network-filter'])
    else:
      return ''
  def Run(self, verbose=False, *io):
    """runs the specified command under valgrind-flayer and gets the errors"""
    process = self.__runner.run(self.__args, verbose, *io)
    # valgrind appends the child pid to the configured log-file path.
    self.__errors_file = ''.join([self.__tmpdir, '/flayer.', str(process.pid)])
    return process
  def ProcessLastRun(self):
    """parses the log written by the most recent Run() into Errors()"""
    self.__errors = {}
    self._ReadErrors(self.__errors_file)
  def _ReadErrors(self, f):
    """opens the valgrind error output and parses it"""
    p = valgrind.error_parser.Parser()
    self.__errors = p.parse(open(f))
  def clear_branch_alterations(self):
    """drops all configured --alter-branch entries"""
    self.__runner['alter-branch'] = {}
  def add_branch_alteration(self, address, action):
    """forces the branch at address to be taken (action True) or skipped"""
    if action:
      self.__runner['alter-branch'][address] = '1'
    else:
      self.__runner['alter-branch'][address] = '0'
  def del_branch_alteration(self, address):
    """removes the alteration for address, if present"""
    if self.__runner['alter-branch'].has_key(address):
      self.__runner['alter-branch'].pop(address)
  def get_branch_alterations(self):
    """returns a deep copy of the configured alterations"""
    return copy.deepcopy(self.__runner['alter-branch'])
  def set_command(self, command, args=[], env={}):
    """sets the target program command, arguments, and environment"""
    self.__args = copy.copy(args)
    self.__args.insert(0, command)
    # env entries are merged into, not substituted for, the environment.
    if env != {}:
      self.__runner.environment.update(env)
  def get_command(self):
    """gets the target program command, arguments, and env"""
    return (self.__args[0],                         # command
            self.__args[1:],                        # arguments
            copy.copy(self.__runner.environment))   # environment
  def About(self):
    """returns a nice 'about' text"""
    return """
    Flayer is a framework for automating and easing the use of
    two valgrind features: --undef-as-taint and --alter-branch.
    It is the proof of concept implementation of the paper.
    The flayer suite (libflayer, valgrind/flayer)
    provides a system which traces user input through memory
    and opens doors for it.
    What does this mean technically? Traditional fuzzing is
    limited in its overall code coverage. It is often blocked
    early in the fuzzing process by protocol banner checks and other
    version and sanity checks. This suite allows for these checks to be
    forcibly skipped at runtime. This breathes new life into the
    good, ol' fashion Fuzz[1] technique by providing access to
    program internals without specifying a complicated protocol.
    However, this system can be used with almost any existing fuzzing
    technique to allow for enhanced code coverage.
    Flayer was conceived of and written by Will Drewry <redpig@dataspill.org>.
    Tavis Ormandy <taviso@sdf.lonestar.org> created the manual fuzzing
    technique that flayer automates.
    [1] http://www.cs.wisc.edu/~bart/fuzz
    """
  def FullCommand(self):
    """returns the complete valgrind + target command line as one string"""
    vg = [self.__runner.executable] + self.__runner.arguments
    command = ' '.join(vg + self.__args)
    return command
  def Banner(self):
    """display a banner when running in interactive shell mode"""
    # Reuse FullCommand() instead of duplicating its assembly here.
    command = self.FullCommand()
    return """
    Welcome to Flayer %s!
    Type 'help()' for extra details or 'about()' for more
    on flayer.
    Current settings:
    - Command: %s
    - Taint settings: %s
    - Temporary directory: %s
    """ % (Flayer.VERSION, command, self.get_taint(), self.__tmpdir)
if __name__ == '__main__':
  # Optional command line: first element is the target program, the
  # remainder its arguments; both default to empty.
  program = ''
  args = []
  if len(sys.argv) >= 2:
    program = sys.argv[1]
    args = sys.argv[2:]
  import wrappers.commandline
  shell = wrappers.commandline.Shell(Flayer(program, args))
  shell.Export()
  shell.Banner()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""command line interface for flayer"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
import os
import shelve
import signal
import subprocess
import sys
# A kludge to add "subtraction" to list.
class SList(list):
  """A list that supports "subtraction".

  l - other returns a plain list of l's elements that do not appear in
  other, preserving the order and duplicates of what remains.
  """
  def __sub__(self, other):
    # Membership (==-based) mirrors the original index()/ValueError probe.
    return [element for element in self if element not in other]
class Shell:
# Should I just subclass Flayer?
"""makes the Flayer library suitable for interactive python shell use"""
def __init__(self, flayer=None):
if flayer == None:
self.__flayer = Flayer()
else:
self.__flayer = flayer
# Altered extra data
self._altered = {}
# Track the last files
self._lastio = {}
self._lastio['stdout'] = ''
self._lastio['stderr'] = ''
# Track past runs
self._past_errors = []
def Banner(self):
"""displays a welcome banner for the shell"""
print "%s\n" % self.__flayer.Banner()
def Export(self):
"""exports all functions starting with CLI_ and lower()s"""
for exportable in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
name = exportable[3:].lower()
sys.modules['__main__'].__dict__[name] = getattr(self, exportable)
# All command line methods
def _E_Summary(self):
"""outputs a summary of errors from valgrind
This prints a formatted list of errors from
valgrind by kind. In particular, it includes
debugging information from the last frame of
the error stack trace.
Arguments:
- none
"""
header = " id frame information"
format = "%-5s %-15s %s %s"
# Sort into kinds
kinds = {}
for e in self.__flayer.Errors().values():
if kinds.has_key(e.kind):
kinds[e.kind].append(e)
else:
kinds[e.kind] = [e]
for kind in kinds.items():
print "==> " + kind[0]
print header
for e in kind[1]:
file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
e.frames[0].line
if file == ':':
file = ''
print format % (e.unique, e.frames[0].instruction_pointer,
e.frames[0].function_name, file)
def _E_Details(self, error_id):
"""outputs detailed error information by id
This prints all of the data collected about a particular
error.
Arguments:
- error_id: error id integer
"""
errors = self.__flayer.Errors()
if not errors.has_key(error_id):
print "Error id '%s' not found." % error_id
return
error = errors[error_id]
print "Error %s:" % error_id
print "- Thread Id: %s" % error.tid
print "- Kind: %s" % error.kind
print "- What: %s" % error.what
print "- Count: %d" % error.count
print "- Frames:"
for id in range(0, len(error.frames)):
frame = error.frames[id]
efile = os.path.join(frame.dir, frame.file)
print " Frame %d:" % id
print " - Instruction pointer: %s" % frame.instruction_pointer
print " - Object: %s" % frame.obj
print " - Function: %s" % frame.function_name
print " - File: %s" % efile
print " - Line number: %s" % frame.line
def _E_Snippet(self, error_id, range=10):
"""outputs code snippet from the top level stack frame if available
This command will output the first range lines before the conditional
and the following range lines.
Arguments:
- error_id: error id integer
- range: number of lines of code to show [10]
"""
if range < 1:
print "Range must be positive"
return
# TODO(wad): autoconvert error_id
#if type(error_id) is int:
errors = self.__flayer.Errors()
if not errors.has_key(error_id):
print "Error id '%s' not found." % error_id
return
error = errors[error_id]
if len(error.frames) == 0:
print "Error id '%s' has no debugging information." % error_id
return
frame = error.frames[0]
efile = os.path.join(frame.dir, frame.file)
# TODO(wad): bust this out to a helper
try:
f = file(os.path.join(frame.dir, frame.file))
# TODO(wad): catch explicit errors
except:
print ("Cannot open referenced file: %s" %
os.path.join(frame.dir, frame.file))
return
line = 1
try:
while line < (int(frame.line) - range):
f.readline() # eat it.
line += 1
while line < (int(frame.line) + range):
# New lines included.
if line == int(frame.line):
sys.stdout.write('|%s' % f.readline())
else:
sys.stdout.write(' %s' % f.readline())
line += 1
# TODO(wad): except explicitly around each readline
except:
print "exception"
return
def _E_Taint(self, value=None):
"""gets or sets arguments for tainting
This command will set or retrieve the value
of the current valgrind/flayer tainting arguments.
Each setting is a character and valid characters are
as follows: n, f, and s. Later 'e' will be added.
'f' indicates that file I/O buffers will be tainted.
'n' indicates that network I/O buffers will be tainted.
's' indicates that fd 0 I/O buffers will be tainted.
'e' will indicate environment variable tainting.
A value of None will result in a copy of the current
taint values being returned.
Arguments:
- value: a string containing the arguments above (def: None)
"""
if value == None:
return self.__flayer.get_taint()
else:
return self.__flayer.set_taint(value)
def _E_Filter(self, file=None, network=None):
"""gets or sets filtering for taint arguments
This command will set or retrieve the value
of the --*-filter arguments used by
valgrind/flayer. Specifically, file path prefixes are used
to indicate which input buffers to taint while
network host/port pairs can be specified.
When all the arguments are None, the current settings
will be returned.
Arguments:
- file: the path prefix of file to taint (def: None)
- network: the "host:port" pair to taint (def: None)
"""
if file == None and network == None:
return {'file':self.__flayer.get_taint_file_filter(),
'network':self.__flayer.get_taint_network_filter() }
if file != None:
self.__flayer.set_taint_file_filter(file)
if network != None:
self.__flayer.set_taint_network_filter(file)
def _E_Command(self, command=None, args=[], env={}):
"""gets/sets the target command
This function sets or gets the value of the target
command and its arguments for use in valgrind.
The command can be an explicit path or in the PATH
environment variable. The arguments should be a
list. The environment should be a dict and will be added
to the current environment variable - NOT override it.
Currently, these will _NOT_ be run under a shell.
Arguments:
- command: string containing the target command (def: None)
- args: list of arguments for the command (def: [])
- env: dict of environment variables (def: {})
"""
if command is None:
return self.__flayer.get_command()
return self.__flayer.set_command(command, args, env)
def _E_Run(self, verbose=False):
"""calls valgrind with the configured target, args, and environment
This command executes valgrind with the current
configuration of target executable, arguments, and
environment variables needed. It will also automatically
process the output log from valgrind.
Arguments:
- verbose: prints additional information (def: False)
"""
# Setup stdout and stderr files for this process.
try:
os.unlink(self._lastio['stdout'])
os.unlink(self._lastio['stderr'])
except:
pass # Should be empty or valid. Kludge!
# For now, this will clobber any past runs
self._lastio['stdout'] = self.__flayer.GetTmpDir() + "/out"
self._lastio['stderr'] = self.__flayer.GetTmpDir() + "/err"
stdin = subprocess.PIPE
stdout = open(self._lastio['stdout'], 'w')
stderr = open(self._lastio['stderr'], 'w')
process = self.__flayer.Run()
# Setup a signal handler to make SIGINT kill the process
def handler(signum, frame):
os.kill(process.pid)
orig_handler = signal.signal(signal.SIGINT, handler)
process.stdin.close()
print "Process ID: %d\n" % process.pid
# XXX: dump out lastio on these calls.
#print "You may check on its progress with the following commands: \n"
#print "running(), exit_code(), stdout(), stderr()\n"
print "Press Ctrl+C to send a SIGTERM to the running process.\n"
try:
ret, ret = os.wait() # pid is first - don't care.
except:
pass
# Remove the SIGINT handler
signal.signal(signal.SIGINT, orig_handler)
stdout.close()
stderr.close()
if verbose:
print "Return code: %d\n" % ret
self.__flayer.ProcessLastRun()
# XXX: does this need to be a deepcopy?
self._past_errors.append(self.__flayer.Errors().values())
def _E_ErrorDiff(self, run_a, run_b, kind='TaintedCondition'):
"""returns the difference between to ErrorSets
This command will return the difference between the error sets
generated by the specific runs. See PastErrors() for more.
Arguments:
- run_a: integer index of the run's errors
- run_b: integer index of the run's errors
"""
a = SList(self._past_errors[run_a])
b = SList(self._past_errors[run_b])
return b - a
def _E_PastErrors(self):
"""returns the list of past errors
!!TODO!! make this print a pretty list
Arguments:
- None
"""
return self._past_errors
def _E_ClearErrors(self):
"""clears the list of past errors
!!TODO!! make this print a pretty list
Arguments:
- None
"""
self._past_errors = []
def _E_Alter(self, error_id=None, action=None, address=None):
"""gets/sets runtime conditional behavior in the target
This command gets or sets the branch altering
functionality of valgrind. It allows for conditional
blocks that make use of tainted data to be forced
to be followed or skipped. It will output a pretty
summary of alterations and returns a dict of instruction
pointer to action.
If an error is listed and the action is not specified,
the alteration will be removed if it exists.
Arguments:
- error_id: string of the unique error id (def: None)
- action: bool specifying whether to follow the branch (def: None)
- address: unsigned long specifying the address to modify (def: None)
"""
# TODO(wad): store alter info by IP
if error_id is None and address is None:
print "address action frame information"
alts = self.__flayer.get_branch_alterations()
format = "%-7s %-6s %s %s"
for ip, e in self._altered.items():
if e is None:
print format % (ip, alts[ip], 'unknown', '')
else:
file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
e.frames[0].line
if file == ':':
file = ''
print format % (ip, alts[ip],
e.frames[0].function_name, file)
return self.__flayer.get_branch_alterations()
instruction_pointer = address
# Validate error_id, kind, and get address
error = None
if error_id is not None:
errors = self.__flayer.Errors()
if errors.has_key(error_id):
error = errors[error_id]
if error is None:
print "No matching error id found."
return
if error.kind != 'TaintedCondition':
print 'Error must be of kind TaintedCondition'
return
instruction_pointer = error.frames[0].instruction_pointer
if action is None and self._altered.has_key(instruction_pointer):
# TODO: add another method for deletion
self.__flayer.del_branch_alteration(instruction_pointer)
return self._altered.pop(instruction_pointer)
else:
self.__flayer.add_branch_alteration(instruction_pointer, action)
self._altered[instruction_pointer] = error
return {instruction_pointer:action}
def _E_Load(self, path):
"""loads an existing flayer session from file
This loads an existing session including all
relevant configured data.
Arguments:
- path: string with the path to the savefile
"""
shelf = shelve.open(path)
# TODO: FIX TEMP DIR PROBLEM ON RELOAD
self.__flayer = shelf['flayer']
self._altered = shelf['altered']
shelf.close()
self.__flayer.ResetTmpDir()
def _E_Save(self, path=""):
"""saves the current session to file
This saves all relevant configuration data to
continue the current session from the point
at which it is called.
Arguments:
- path: string with the path to the savefile
"""
shelf = shelve.open(path)
shelf['flayer'] = self.__flayer
shelf['altered'] = self._altered
# TODO add readline history support
shelf.sync()
shelf.close()
def _E_About(self):
"""returns more information about Flayer!
Run it and see.
Arguments:
- none
"""
print "%s\n" % self.__flayer.About()
def _E_Help(self, topic='overview'):
"""provides overall and detail help for each shell command
This provides the overview and detailed help
you are reading now. In addition, it falls through
to the builtin help if nothing matches.
Arguments:
- topic -- (default: 'overview')
"""
if topic == 'overview':
print "Available commands:"
format = " %-15s -- %s"
for command in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
doc = self.__class__.__dict__[command].__doc__
name = command[3:].lower()
if doc is None:
print format % (name, "No documentation. Bad Developer!")
else:
print format % (name, doc.split("\n")[0])
return
for method in self.__class__.__dict__.items():
name = method[0][3:].lower()
if name == topic:
doc = self.__class__.__dict__[method[0]].__doc__.split("\n")
details = "\n".join([l.lstrip() for l in doc])
print "%s -- %s" % (name, details)
return
print "Topic '%s' is unknown to Flayer.\n" % topic
print "Attempting to use the builtin help function.\n"
sys.modules['__builtin__'].help(topic)
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""command line interface for flayer"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
import os
import shelve
import signal
import subprocess
import sys
# A kludge to add "subtraction" to list.
class SList(list):
  """list subclass adding a '-' operator.

  Subtracting an iterable returns a plain list containing only the
  elements of self that are absent from the right-hand side; relative
  order and duplicates of the survivors are kept.
  """
  def __sub__(self, other):
    # Equality-based membership is equivalent to the original
    # index()-and-catch-ValueError approach for lists.
    kept = []
    for candidate in self:
      if candidate not in other:
        kept.append(candidate)
    return kept
class Shell:
# Should I just subclass Flayer?
"""makes the Flayer library suitable for interactive python shell use"""
def __init__(self, flayer=None):
if flayer == None:
self.__flayer = Flayer()
else:
self.__flayer = flayer
# Altered extra data
self._altered = {}
# Track the last files
self._lastio = {}
self._lastio['stdout'] = ''
self._lastio['stderr'] = ''
# Track past runs
self._past_errors = []
def Banner(self):
"""displays a welcome banner for the shell"""
print "%s\n" % self.__flayer.Banner()
def Export(self):
"""exports all functions starting with CLI_ and lower()s"""
for exportable in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
name = exportable[3:].lower()
sys.modules['__main__'].__dict__[name] = getattr(self, exportable)
# All command line methods
def _E_Summary(self):
"""outputs a summary of errors from valgrind
This prints a formatted list of errors from
valgrind by kind. In particular, it includes
debugging information from the last frame of
the error stack trace.
Arguments:
- none
"""
header = " id frame information"
format = "%-5s %-15s %s %s"
# Sort into kinds
kinds = {}
for e in self.__flayer.Errors().values():
if kinds.has_key(e.kind):
kinds[e.kind].append(e)
else:
kinds[e.kind] = [e]
for kind in kinds.items():
print "==> " + kind[0]
print header
for e in kind[1]:
file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
e.frames[0].line
if file == ':':
file = ''
print format % (e.unique, e.frames[0].instruction_pointer,
e.frames[0].function_name, file)
def _E_Details(self, error_id):
"""outputs detailed error information by id
This prints all of the data collected about a particular
error.
Arguments:
- error_id: error id integer
"""
errors = self.__flayer.Errors()
if not errors.has_key(error_id):
print "Error id '%s' not found." % error_id
return
error = errors[error_id]
print "Error %s:" % error_id
print "- Thread Id: %s" % error.tid
print "- Kind: %s" % error.kind
print "- What: %s" % error.what
print "- Count: %d" % error.count
print "- Frames:"
for id in range(0, len(error.frames)):
frame = error.frames[id]
efile = os.path.join(frame.dir, frame.file)
print " Frame %d:" % id
print " - Instruction pointer: %s" % frame.instruction_pointer
print " - Object: %s" % frame.obj
print " - Function: %s" % frame.function_name
print " - File: %s" % efile
print " - Line number: %s" % frame.line
def _E_Snippet(self, error_id, range=10):
"""outputs code snippet from the top level stack frame if available
This command will output the first range lines before the conditional
and the following range lines.
Arguments:
- error_id: error id integer
- range: number of lines of code to show [10]
"""
if range < 1:
print "Range must be positive"
return
# TODO(wad): autoconvert error_id
#if type(error_id) is int:
errors = self.__flayer.Errors()
if not errors.has_key(error_id):
print "Error id '%s' not found." % error_id
return
error = errors[error_id]
if len(error.frames) == 0:
print "Error id '%s' has no debugging information." % error_id
return
frame = error.frames[0]
efile = os.path.join(frame.dir, frame.file)
# TODO(wad): bust this out to a helper
try:
f = file(os.path.join(frame.dir, frame.file))
# TODO(wad): catch explicit errors
except:
print ("Cannot open referenced file: %s" %
os.path.join(frame.dir, frame.file))
return
line = 1
try:
while line < (int(frame.line) - range):
f.readline() # eat it.
line += 1
while line < (int(frame.line) + range):
# New lines included.
if line == int(frame.line):
sys.stdout.write('|%s' % f.readline())
else:
sys.stdout.write(' %s' % f.readline())
line += 1
# TODO(wad): except explicitly around each readline
except:
print "exception"
return
def _E_Taint(self, value=None):
"""gets or sets arguments for tainting
This command will set or retrieve the value
of the current valgrind/flayer tainting arguments.
Each setting is a character and valid characters are
as follows: n, f, and s. Later 'e' will be added.
'f' indicates that file I/O buffers will be tainted.
'n' indicates that network I/O buffers will be tainted.
's' indicates that fd 0 I/O buffers will be tainted.
'e' will indicate environment variable tainting.
A value of None will result in a copy of the current
taint values being returned.
Arguments:
- value: a string containing the arguments above (def: None)
"""
if value == None:
return self.__flayer.get_taint()
else:
return self.__flayer.set_taint(value)
def _E_Filter(self, file=None, network=None):
"""gets or sets filtering for taint arguments
This command will set or retrieve the value
of the --*-filter arguments used by
valgrind/flayer. Specifically, file path prefixes are used
to indicate which input buffers to taint while
network host/port pairs can be specified.
When all the arguments are None, the current settings
will be returned.
Arguments:
- file: the path prefix of file to taint (def: None)
- network: the "host:port" pair to taint (def: None)
"""
if file == None and network == None:
return {'file':self.__flayer.get_taint_file_filter(),
'network':self.__flayer.get_taint_network_filter() }
if file != None:
self.__flayer.set_taint_file_filter(file)
if network != None:
self.__flayer.set_taint_network_filter(file)
def _E_Command(self, command=None, args=[], env={}):
"""gets/sets the target command
This function sets or gets the value of the target
command and its arguments for use in valgrind.
The command can be an explicit path or in the PATH
environment variable. The arguments should be a
list. The environment should be a dict and will be added
to the current environment variable - NOT override it.
Currently, these will _NOT_ be run under a shell.
Arguments:
- command: string containing the target command (def: None)
- args: list of arguments for the command (def: [])
- env: dict of environment variables (def: {})
"""
if command is None:
return self.__flayer.get_command()
return self.__flayer.set_command(command, args, env)
def _E_Run(self, verbose=False):
"""calls valgrind with the configured target, args, and environment
This command executes valgrind with the current
configuration of target executable, arguments, and
environment variables needed. It will also automatically
process the output log from valgrind.
Arguments:
- verbose: prints additional information (def: False)
"""
# Setup stdout and stderr files for this process.
try:
os.unlink(self._lastio['stdout'])
os.unlink(self._lastio['stderr'])
except:
pass # Should be empty or valid. Kludge!
# For now, this will clobber any past runs
self._lastio['stdout'] = self.__flayer.GetTmpDir() + "/out"
self._lastio['stderr'] = self.__flayer.GetTmpDir() + "/err"
stdin = subprocess.PIPE
stdout = open(self._lastio['stdout'], 'w')
stderr = open(self._lastio['stderr'], 'w')
process = self.__flayer.Run()
# Setup a signal handler to make SIGINT kill the process
def handler(signum, frame):
os.kill(process.pid)
orig_handler = signal.signal(signal.SIGINT, handler)
process.stdin.close()
print "Process ID: %d\n" % process.pid
# XXX: dump out lastio on these calls.
#print "You may check on its progress with the following commands: \n"
#print "running(), exit_code(), stdout(), stderr()\n"
print "Press Ctrl+C to send a SIGTERM to the running process.\n"
try:
ret, ret = os.wait() # pid is first - don't care.
except:
pass
# Remove the SIGINT handler
signal.signal(signal.SIGINT, orig_handler)
stdout.close()
stderr.close()
if verbose:
print "Return code: %d\n" % ret
self.__flayer.ProcessLastRun()
# XXX: does this need to be a deepcopy?
self._past_errors.append(self.__flayer.Errors().values())
def _E_ErrorDiff(self, run_a, run_b, kind='TaintedCondition'):
"""returns the difference between to ErrorSets
This command will return the difference between the error sets
generated by the specific runs. See PastErrors() for more.
Arguments:
- run_a: integer index of the run's errors
- run_b: integer index of the run's errors
"""
a = SList(self._past_errors[run_a])
b = SList(self._past_errors[run_b])
return b - a
  def _E_PastErrors(self):
    """returns the list of past errors
    !!TODO!! make this print a pretty list
    Arguments:
    - None
    """
    # Returns the live list (not a copy); callers index it by run number,
    # e.g. from ErrorDiff().
    return self._past_errors
  def _E_ClearErrors(self):
    """clears the list of past errors
    Arguments:
    - None
    """
    # Drop every recorded per-run error list; run indices restart at 0.
    self._past_errors = []
  def _E_Alter(self, error_id=None, action=None, address=None):
    """gets/sets runtime conditional behavior in the target
    This command gets or sets the branch altering
    functionality of valgrind. It allows for conditional
    blocks that make use of tainted data to be forced
    to be followed or skipped. It will output a pretty
    summary of alterations and returns a dict of instruction
    pointer to action.
    If an error is listed and the action is not specified,
    the alteration will be removed if it exists.
    Arguments:
    - error_id: string of the unique error id (def: None)
    - action: bool specifying whether to follow the branch (def: None)
    - address: unsigned long specifying the address to modify (def: None)
    """
    # TODO(wad): store alter info by IP
    if error_id is None and address is None:
      # No target specified: print a summary table of current alterations
      # and return the alteration dict.
      print "address action frame information"
      alts = self.__flayer.get_branch_alterations()
      format = "%-7s %-6s %s %s"
      for ip, e in self._altered.items():
        if e is None:
          # Alteration was added by raw address; no error context known.
          print format % (ip, alts[ip], 'unknown', '')
        else:
          # Build "dir/file:line"; collapses to '' when both are empty.
          file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
                 e.frames[0].line
          if file == ':':
            file = ''
          print format % (ip, alts[ip],
                          e.frames[0].function_name, file)
      return self.__flayer.get_branch_alterations()
    instruction_pointer = address
    # Validate error_id, kind, and get address
    error = None
    if error_id is not None:
      errors = self.__flayer.Errors()
      if errors.has_key(error_id):
        error = errors[error_id]
      if error is None:
        print "No matching error id found."
        return
      if error.kind != 'TaintedCondition':
        print 'Error must be of kind TaintedCondition'
        return
      # Alter the branch at the innermost frame of the error.
      instruction_pointer = error.frames[0].instruction_pointer
    if action is None and self._altered.has_key(instruction_pointer):
      # No action given for an existing alteration: remove it and return
      # the error (or None) that had been associated with it.
      # TODO: add another method for deletion
      self.__flayer.del_branch_alteration(instruction_pointer)
      return self._altered.pop(instruction_pointer)
    else:
      # Add (or replace) the alteration; remember the originating error
      # (None when added by raw address) for the summary table above.
      self.__flayer.add_branch_alteration(instruction_pointer, action)
      self._altered[instruction_pointer] = error
      return {instruction_pointer:action}
  def _E_Load(self, path):
    """loads an existing flayer session from file
    This loads an existing session including all
    relevant configured data.
    Arguments:
    - path: string with the path to the savefile
    """
    shelf = shelve.open(path)
    # TODO: FIX TEMP DIR PROBLEM ON RELOAD
    # Restore the pickled Flayer instance and the alteration table
    # written by Save().
    self.__flayer = shelf['flayer']
    self._altered = shelf['altered']
    shelf.close()
    # The saved session's tmpdir no longer exists; make a fresh one.
    self.__flayer.ResetTmpDir()
  def _E_Save(self, path=""):
    """saves the current session to file
    This saves all relevant configuration data to
    continue the current session from the point
    at which it is called.
    Arguments:
    - path: string with the path to the savefile
    """
    # NOTE(review): the default path "" is unlikely to be a valid shelve
    # target; callers appear expected to always supply a path -- confirm.
    shelf = shelve.open(path)
    shelf['flayer'] = self.__flayer
    shelf['altered'] = self._altered
    # TODO add readline history support
    shelf.sync()
    shelf.close()
def _E_About(self):
"""returns more information about Flayer!
Run it and see.
Arguments:
- none
"""
print "%s\n" % self.__flayer.About()
  def _E_Help(self, topic='overview'):
    """provides overall and detail help for each shell command
    This provides the overview and detailed help
    you are reading now. In addition, it falls through
    to the builtin help if nothing matches.
    Arguments:
    - topic -- (default: 'overview')
    """
    if topic == 'overview':
      # Summarize every exported _E_* command using the first docstring line.
      print "Available commands:"
      format = " %-15s -- %s"
      for command in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
        doc = self.__class__.__dict__[command].__doc__
        name = command[3:].lower()
        if doc is None:
          print format % (name, "No documentation. Bad Developer!")
        else:
          print format % (name, doc.split("\n")[0])
      return
    # Detailed help: find the attribute whose name (minus its 3-char
    # prefix) matches the topic and dump its full docstring.
    # NOTE(review): this scans ALL class attributes, not just _E_* ones,
    # so non-command names can match too -- confirm that is intended.
    for method in self.__class__.__dict__.items():
      name = method[0][3:].lower()
      if name == topic:
        doc = self.__class__.__dict__[method[0]].__doc__.split("\n")
        details = "\n".join([l.lstrip() for l in doc])
        print "%s -- %s" % (name, details)
        return
    # Nothing matched: defer to Python's builtin help().
    print "Topic '%s' is unknown to Flayer.\n" % topic
    print "Attempting to use the builtin help function.\n"
    sys.modules['__builtin__'].help(topic)
| Python |
#!/usr/bin/python
# A dumb example of an automated fuzzer.
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
"""implements an automated fuzzer using flayer"""
import flayer.core
import flayer.input.fuzz
import os
import random
import subprocess
class Fuzzer:
  """Automated fuzzer: generates random file input, runs the target under
  flayer, and randomly re-rolls tainted branch alterations between runs.
  """
  def __init__(self, program, args=None, env=None, libtaint=''):
    """Sets up the flayer wrapper and the random input generator.
    Arguments:
    - program: path to the target executable
    - args: list of target arguments (def: [])
    - env: dict of extra environment variables (def: {})
    - libtaint: path to the libtaint shared object (def: '')
    """
    # Avoid shared mutable defaults ([] / {}) across instances.
    if args is None:
      args = []
    if env is None:
      env = {}
    self._program = program
    self._args = args
    self._env = env
    self._flayer = flayer.core.Flayer(program, args, env, libtaint)
    self._input = flayer.input.fuzz.FuzzFile()
    self._altered = {}  # instruction pointer -> forced branch action (bool)
    self._cond_input = random.Random()  # decides which way branches are forced
  def set_seed(self, seed=0):
    """Seeds both the input fuzzer and the branch-choice PRNG."""
    self._input.set_seed(seed)
    self._cond_input.seed(seed)
  def get_seed(self):
    """Returns the seed in use by the input fuzzer."""
    return self._input.get_seed()
  def Run(self, count=1, input_is='file', seed=None):
    """Runs the target `count` times, re-randomizing branch alterations.
    Arguments:
    - count: integer number of runs (def: 1)
    - input_is: 'file' appends the fuzz file path to the target args
    - seed: optional seed applied before the first run (def: None)
    """
    if seed is not None:
      self.set_seed(seed)  # was self._set_seed(): no such method existed
    # Generate input
    print("Generating random input...")
    self._input.Run()
    if input_is == 'file':
      # Was a bare `args.append(...)`: NameError at runtime.
      self._args.append(self._input.get_target())
    self._flayer.set_command(self._program,
                             self._args,
                             self._env)
    self._flayer.set_taint('f')
    self._flayer.set_taint_file_filter(self._input.get_target())
    runs = 0
    while runs < count:
      runs += 1
      lead = "== %d ==" % runs
      altered = ','.join(["%s:%d" % (a[0], a[1]) for a in self._altered.items()])
      print("%s altered: %s" % (lead, altered))
      process = self._flayer.Run()
      process.stdin.close()
      #process.stdout.close() # Write stdout to a log...
      # os.wait() returns (pid, status); only the status word is reported.
      _, ret = os.wait()
      print("%s return code %d" % (lead, ret))
      self._flayer.ProcessLastRun()
      errors = self._flayer.Errors()
      # Fuzz paths while keeping constant fuzz input:
      # force each newly seen one-hit tainted conditional down a random path.
      for unique, err in errors.items():
        if err.kind == 'TaintedCondition' and err.count == 1:
          action = self._cond_input.choice([True, False])
          ip = err.frames[0].instruction_pointer
          self._altered[ip] = action
          self._flayer.add_branch_alteration(ip, action)
      # Re-roll the forced action for every previously altered branch.
      for address in list(self._altered):
        self._flayer.del_branch_alteration(address)
        action = self._cond_input.choice([True, False])
        self._altered[address] = action
        self._flayer.add_branch_alteration(address, action)
if __name__ == '__main__':
  import sys
  # Give a usage message instead of an IndexError on missing arguments.
  if len(sys.argv) < 4:
    sys.exit("usage: %s <runs> <seed> <program> [args...]" % sys.argv[0])
  # argv values arrive as strings; Run() compares `runs < count`
  # numerically, so convert here (a string count never terminated cleanly).
  runs = int(sys.argv[1])
  seed = int(sys.argv[2])
  program = sys.argv[3]
  args = sys.argv[4:]
  fuzzer = Fuzzer(program, args, libtaint='/home/wad/sync/bzr/flayer/libtaint/libtaint.so.1.0')
  fuzzer.set_seed(seed)
  fuzzer.Run(count=runs)
| Python |
#!/usr/bin/python -i
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
import flayer.core
import flayer.wrappers.commandline
import sys
if __name__ == '__main__':
  # Default to an empty command line; use argv when a target is given.
  program = ''
  args = []
  if len(sys.argv) > 1:
    program = sys.argv[1]
    args = sys.argv[2:]
  # Build the core wrapper first, then hand it to the interactive shell.
  core = flayer.core.Flayer(program, args)
  cli = flayer.wrappers.commandline.Shell(core)
  cli.Export()
  cli.Banner()
| Python |
#!/usr/bin/python -i
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
import flayer.core
import flayer.wrappers.commandline
import sys
if __name__ == '__main__':
  # Default to an empty command line; use argv when a target is given.
  program = ''
  args = []
  if len(sys.argv) > 1:
    program = sys.argv[1]
    args = sys.argv[2:]
  # Build the core wrapper first, then hand it to the interactive shell.
  core = flayer.core.Flayer(program, args)
  cli = flayer.wrappers.commandline.Shell(core)
  cli.Export()
  cli.Banner()
| Python |
#!/usr/bin/python
# A dumb example of an automated fuzzer.
#
# Copyright 2006 Will Drewry <redpig@dataspill.org>
# Copyright 2007 Google Inc.
# See docs/COPYING for License details (GPLv2)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = "Will Drewry"
"""implements an automated fuzzer using flayer"""
import flayer.core
import flayer.input.fuzz
import os
import random
import subprocess
class Fuzzer:
  """Automated fuzzer: generates random file input, runs the target under
  flayer, and randomly re-rolls tainted branch alterations between runs.
  """
  def __init__(self, program, args=None, env=None, libtaint=''):
    """Sets up the flayer wrapper and the random input generator.
    Arguments:
    - program: path to the target executable
    - args: list of target arguments (def: [])
    - env: dict of extra environment variables (def: {})
    - libtaint: path to the libtaint shared object (def: '')
    """
    # Avoid shared mutable defaults ([] / {}) across instances.
    if args is None:
      args = []
    if env is None:
      env = {}
    self._program = program
    self._args = args
    self._env = env
    self._flayer = flayer.core.Flayer(program, args, env, libtaint)
    self._input = flayer.input.fuzz.FuzzFile()
    self._altered = {}  # instruction pointer -> forced branch action (bool)
    self._cond_input = random.Random()  # decides which way branches are forced
  def set_seed(self, seed=0):
    """Seeds both the input fuzzer and the branch-choice PRNG."""
    self._input.set_seed(seed)
    self._cond_input.seed(seed)
  def get_seed(self):
    """Returns the seed in use by the input fuzzer."""
    return self._input.get_seed()
  def Run(self, count=1, input_is='file', seed=None):
    """Runs the target `count` times, re-randomizing branch alterations.
    Arguments:
    - count: integer number of runs (def: 1)
    - input_is: 'file' appends the fuzz file path to the target args
    - seed: optional seed applied before the first run (def: None)
    """
    if seed is not None:
      self.set_seed(seed)  # was self._set_seed(): no such method existed
    # Generate input
    print("Generating random input...")
    self._input.Run()
    if input_is == 'file':
      # Was a bare `args.append(...)`: NameError at runtime.
      self._args.append(self._input.get_target())
    self._flayer.set_command(self._program,
                             self._args,
                             self._env)
    self._flayer.set_taint('f')
    self._flayer.set_taint_file_filter(self._input.get_target())
    runs = 0
    while runs < count:
      runs += 1
      lead = "== %d ==" % runs
      altered = ','.join(["%s:%d" % (a[0], a[1]) for a in self._altered.items()])
      print("%s altered: %s" % (lead, altered))
      process = self._flayer.Run()
      process.stdin.close()
      #process.stdout.close() # Write stdout to a log...
      # os.wait() returns (pid, status); only the status word is reported.
      _, ret = os.wait()
      print("%s return code %d" % (lead, ret))
      self._flayer.ProcessLastRun()
      errors = self._flayer.Errors()
      # Fuzz paths while keeping constant fuzz input:
      # force each newly seen one-hit tainted conditional down a random path.
      for unique, err in errors.items():
        if err.kind == 'TaintedCondition' and err.count == 1:
          action = self._cond_input.choice([True, False])
          ip = err.frames[0].instruction_pointer
          self._altered[ip] = action
          self._flayer.add_branch_alteration(ip, action)
      # Re-roll the forced action for every previously altered branch.
      for address in list(self._altered):
        self._flayer.del_branch_alteration(address)
        action = self._cond_input.choice([True, False])
        self._altered[address] = action
        self._flayer.add_branch_alteration(address, action)
if __name__ == '__main__':
  import sys
  # Give a usage message instead of an IndexError on missing arguments.
  if len(sys.argv) < 4:
    sys.exit("usage: %s <runs> <seed> <program> [args...]" % sys.argv[0])
  # argv values arrive as strings; Run() compares `runs < count`
  # numerically, so convert here (a string count never terminated cleanly).
  runs = int(sys.argv[1])
  seed = int(sys.argv[2])
  program = sys.argv[3]
  args = sys.argv[4:]
  fuzzer = Fuzzer(program, args, libtaint='/home/wad/sync/bzr/flayer/libtaint/libtaint.so.1.0')
  fuzzer.set_seed(seed)
  fuzzer.Run(count=runs)
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""flayer - a fuzzing framework for bypassing basic structural error checking
...
"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
from distutils.spawn import find_executable
import os
import shelve
import subprocess
import sys
import tempfile
import valgrind.error_parser
import valgrind.runner
class Flayer:
  """wrapper for valgrind/memcheck undef-as-taint and alter-branch arguments"""
  VERSION = '0.0.1'
  def __init__(self, program='/bin/true', args=[], env={}):
    """Configures a valgrind runner for the given target command.
    Arguments:
    - program: path of the executable to trace (def: /bin/true)
    - args: list of target arguments (def: [])
    - env: dict of extra environment variables (def: {})
    """
    # NOTE(review): mutable defaults (args=[], env={}) are shared across
    # calls; set_command copies args, so this appears harmless here.
    self.__runner = valgrind.runner.Runner()
    self.__runner.executable = find_executable('valgrind')
    self.__args = []
    self.set_command(program, args, env)
    self.__taint = ''
    self.set_taint('nfs') # TODO: e
    self.__taint_filter = {}
    #self.set_taint_network_filter('')
    self.set_taint_file_filter('')
    self.__errors = {}
    self.__shelf = None
    # Per-instance scratch directory holding the valgrind log files.
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  def __cleanup_tmpdir(self):
    """attempts to cleanup the tmpdir"""
    # Unlink every file, then remove each directory as it is visited.
    for root, dirs, files in os.walk(self.__tmpdir):
      for f in files:
        os.unlink(os.path.join(root, f)) # todo use join
      os.rmdir(root)
  def __del__(self):
    """properly clean up the temporary files on destruction"""
    self.__cleanup_tmpdir()
  def Errors(self):
    """returns the valgrind output errors"""
    # Deep copy so callers cannot mutate the parsed error cache.
    return copy.deepcopy(self.__errors)
  def GetTmpDir(self):
    """returns the tmpdir"""
    return self.__tmpdir
  def ResetTmpDir(self):
    """resets the tmpdir and cleans up one if it exists"""
    self.__cleanup_tmpdir()
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  # TODO: change these to properties
  def get_taint(self):
    # Rebuilds the 'n'/'f' taint flag string from runner options.
    # NOTE(review): 's' is always appended regardless of configuration.
    taint = ''
    if self.__runner.has_key('taint-network') and \
       self.__runner['taint-network']:
      taint += 'n'
    if self.__runner.has_key('taint-file') and \
       self.__runner['taint-file']:
      taint += 'f'
    taint += 's'
    return taint
  def set_taint(self, value):
    # Accepts a string of flag characters: 'n' network, 'f'/'s' file.
    # TODO validate
    self.__runner['taint-network'] = False
    self.__runner['taint-file'] = False
    for ch in value:
      if ch == 'n':
        self.__runner['taint-network'] = True
      elif ch == 'f' or ch == 's': # no diff now...
        self.__runner['taint-file'] = True
      else:
        raise RuntimeError, "Request value not yet implemented: " + ch
  def set_taint_network_filter(self, value):
    """specified the host or port traffic to mark"""
    raise RuntimeError, "NOT YET IMPLEMENTED"
  def set_taint_file_filter(self, value):
    """specified the path prefix for file activity to mark"""
    self.__runner['file-filter'] = value
  def get_taint_file_filter(self):
    # Copy so callers cannot mutate runner state.
    if self.__runner.has_key('file-filter'):
      return copy.copy(self.__runner['file-filter'])
    else:
      return ''
  def get_taint_network_filter(self):
    # Copy so callers cannot mutate runner state.
    if self.__runner.has_key('network-filter'):
      return copy.copy(self.__runner['network-filter'])
    else:
      return ''
  def Run(self, verbose=False, *io):
    """runs the specified command under valgrind-flayer and gets the errors"""
    process = self.__runner.run(self.__args, verbose, *io)
    # valgrind appends the child pid to the configured log-file name.
    self.__errors_file = ''.join([self.__tmpdir, '/flayer.', str(process.pid)])
    return process
  def ProcessLastRun(self):
    # Re-parse the log written by the most recent Run().
    self.__errors = {}
    self._ReadErrors(self.__errors_file)
  def _ReadErrors(self, f):
    """opens the valgrind error output and parses it"""
    p = valgrind.error_parser.Parser()
    self.__errors = p.parse(open(f))
  def clear_branch_alterations(self):
    # Drop all forced-branch settings.
    self.__runner['alter-branch'] = {}
  def add_branch_alteration(self, address, action):
    # action True forces the branch taken ('1'); False forces not taken ('0').
    if action:
      self.__runner['alter-branch'][address] = '1'
    else:
      self.__runner['alter-branch'][address] = '0'
  def del_branch_alteration(self, address):
    if self.__runner['alter-branch'].has_key(address):
      self.__runner['alter-branch'].pop(address)
  def get_branch_alterations(self):
    # Deep copy so callers cannot mutate runner state.
    return copy.deepcopy(self.__runner['alter-branch'])
  def set_command(self, command, args=[], env={}):
    """sets the target program command, arguments, and environment"""
    self.__args = copy.copy(args)
    self.__args.insert(0, command)
    # NOTE(review): env entries are merged into, never removed from,
    # the runner environment across calls -- confirm that is intended.
    if env != {}:
      self.__runner.environment.update(env)
  def get_command(self):
    """gets the target program command, arguments, and env"""
    return (self.__args[0], # command
            self.__args[1:], # arguments
            copy.copy(self.__runner.environment)) # environment
  def About(self):
    """returns a nice 'about' text"""
    return """
    Flayer is a framework for automating and easing the use of
    two valgrind features: --undef-as-taint and --alter-branch.
    It is the proof of concept implementation of the paper.
    The flayer suite (libflayer, valgrind/flayer)
    provides a system which traces user input through memory
    and opens doors for it.
    What does this mean technically? Traditional fuzzing is
    limited in its overall code coverage. It is often blocked
    early in the fuzzing process by protocol banner checks and other
    version and sanity checks. This suite allows for these checks to be
    forcibly skipped at runtime. This breathes new life into the
    good, ol' fashion Fuzz[1] technique by providing access to
    program internals without specifying a complicated protocol.
    However, this system can be used with almost any existing fuzzing
    technique to allow for enhanced code coverage.
    Flayer was conceived of and written by Will Drewry <redpig@dataspill.org>.
    Tavis Ormandy <taviso@sdf.lonestar.org> created the manual fuzzing
    technique that flayer automates.
    [1] http://www.cs.wisc.edu/~bart/fuzz
    """
  def FullCommand(self):
    # Full valgrind invocation rendered as a single shell-style string.
    vg = [self.__runner.executable] + self.__runner.arguments
    command = ' '.join(vg + self.__args)
    return command
  def Banner(self):
    """display a banner when running in interactive shell mode"""
    vg = [self.__runner.executable] + self.__runner.arguments
    command = ' '.join(vg + self.__args)
    return """
    Welcome to Flayer %s!
    Type 'help()' for extra details or 'about()' for more
    on flayer.
    Current settings:
    - Command: %s
    - Taint settings: %s
    - Temporary directory: %s
    """ % (Flayer.VERSION, command, self.get_taint(), self.__tmpdir)
if __name__ == '__main__':
  # Default to an empty command line; use argv when a target is given.
  program = ''
  args = []
  if len(sys.argv) > 1:
    program = sys.argv[1]
    args = sys.argv[2:]
  # Import here so library users of this module never pay for the shell.
  import wrappers.commandline
  shell = wrappers.commandline.Shell(Flayer(program, args))
  shell.Export()
  shell.Banner()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""contains classes for fuzzing various inputs
with the random class
import input.fuzz
f = input.fuzz.FuzzFile()
f.Run()
command(..,[f.get_target()],..)
run()
"""
__author__ = "Will Drewry"
import binascii
import os
import random
import tempfile
#### Support classes - where should this go?
class Fuzz:
  """Base class for seeded pseudo-random fuzz generators.
  Subclasses implement _Run() to write random data to self._target.
  """
  def __init__(self, seed=0, block_size=4096):
    """Arguments:
    - seed: PRNG seed (def: 0)
    - block_size: bytes generated per chunk (def: 4096)
    """
    # Note: SystemRandom appears to ignore seeding, so use Random().
    self._rand = random.Random()
    self._seed = seed
    self._rand.seed(seed)
    self._target = None # subclass must supply
    self._block_size = block_size
    self._max_bytes = 1024*1024 # default output cap: 1 MiB
  def set_target(self, target=None):
    """sets the fuzz output target"""
    self._target = target
  def get_target(self):
    """returns the current target - not a copy"""
    return self._target
  def set_seed(self, seed=0):
    """sets the fuzz seed and reseeds the PRNG"""
    self._seed = seed
    self._rand.seed(seed)
  def get_seed(self):
    """returns the current seed"""
    return self._seed
  def set_block_size(self, size=4096):
    """sets the fuzz block_size"""
    self._block_size = size
  def get_block_size(self):
    """returns the current block_size"""
    return self._block_size
  def set_maximum_bytes(self, bytes):
    """sets the upper bound on generated output size"""
    self._max_bytes = bytes
  def get_maximum_bytes(self, bytes=None):
    """returns the output size limit.
    `bytes` is unused; it is kept (now optional) for backward
    compatibility -- the original required a meaningless argument.
    """
    return self._max_bytes
  def __del__(self):
    self.CleanUp()
  def CleanUp(self):
    """hook for subclasses to release resources; default is a no-op"""
    pass
  def Run(self):
    """generates fuzz output by delegating to the subclass _Run()"""
    self._Run()
  def _Run(self):
    # Call form (not `raise E, msg`) is valid on Python 2 and 3.
    raise RuntimeError("_Run() should be implemented in a subclass")
class FuzzWritable(Fuzz):
  """Fuzz generator that streams random bytes to a writable _target."""
  def _Run(self):
    """writes up to the configured byte limit to _target"""
    if self._target is None:
      raise RuntimeError("No target set")
    written = 0  # renamed from `bytes`, which shadowed the builtin
    while written < self._max_bytes:
      written += self._block_size
      bits = self._rand.getrandbits(self._block_size * 8)
      # Render the random bits as hex and convert to raw bytes without
      # looping. '%x' avoids the trailing 'L' that hex() appends to
      # Python 2 longs (the old hex(bits)[2:-1] dropped a real digit
      # whenever no 'L' was present).
      hexed = '%x' % bits
      if len(hexed) % 2 != 0:
        # Pad at the FRONT to a whole number of bytes; appending shifted
        # every nibble in the payload.
        hexed = '0' + hexed
      payload = binascii.unhexlify(hexed)
      self._target.write(payload)
class FuzzFile(FuzzWritable):
  """Fuzz generator that writes random bytes into a (temporary) file."""
  def __init__(self, seed=0, block_size=4096):
    FuzzWritable.__init__(self, seed, block_size)
    self.set_file()
  def set_file(self, filename=None):
    """opens `filename` (or a fresh temp path) as the output target.
    The previous default `filename=tempfile.mktemp()` was evaluated only
    once at class-definition time, so every instance shared one path.
    """
    if filename is None:
      filename = tempfile.mktemp()
    # clean up any old file first
    if self._target is not None:
      self._target.close()
      if os.path.exists(self._target.name):
        os.remove(self._target.name) # was os.file.remove: AttributeError
    # Binary mode: _Run() writes raw byte payloads. open() replaces the
    # Python-2-only file() builtin.
    self._target = open(filename, 'wb')
  def get_file(self):
    """returns the path of the current output file"""
    return self._target.name
  def CleanUp(self):
    # Close and remove the tmp file if it still exists.
    if self._target is not None and \
       os.path.exists(self._target.name):
      self._target.close()
      os.remove(self._target.name)
  def set_target(self, target):
    """points the fuzzer at a new output file path"""
    self.set_file(target)
  def get_target(self):
    """returns the output file path (not the file object)"""
    return self.get_file()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""contains classes for fuzzing various inputs
with the random class
import input.fuzz
f = input.fuzz.FuzzFile()
f.Run()
command(..,[f.get_target()],..)
run()
"""
__author__ = "Will Drewry"
import binascii
import os
import random
import tempfile
#### Support classes - where should this go?
class Fuzz:
  """Base class for seeded pseudo-random fuzz generators.
  Subclasses implement _Run() to write random data to self._target.
  """
  def __init__(self, seed=0, block_size=4096):
    """Arguments:
    - seed: PRNG seed (def: 0)
    - block_size: bytes generated per chunk (def: 4096)
    """
    # Note: SystemRandom appears to ignore seeding, so use Random().
    self._rand = random.Random()
    self._seed = seed
    self._rand.seed(seed)
    self._target = None # subclass must supply
    self._block_size = block_size
    self._max_bytes = 1024*1024 # default output cap: 1 MiB
  def set_target(self, target=None):
    """sets the fuzz output target"""
    self._target = target
  def get_target(self):
    """returns the current target - not a copy"""
    return self._target
  def set_seed(self, seed=0):
    """sets the fuzz seed and reseeds the PRNG"""
    self._seed = seed
    self._rand.seed(seed)
  def get_seed(self):
    """returns the current seed"""
    return self._seed
  def set_block_size(self, size=4096):
    """sets the fuzz block_size"""
    self._block_size = size
  def get_block_size(self):
    """returns the current block_size"""
    return self._block_size
  def set_maximum_bytes(self, bytes):
    """sets the upper bound on generated output size"""
    self._max_bytes = bytes
  def get_maximum_bytes(self, bytes=None):
    """returns the output size limit.
    `bytes` is unused; it is kept (now optional) for backward
    compatibility -- the original required a meaningless argument.
    """
    return self._max_bytes
  def __del__(self):
    self.CleanUp()
  def CleanUp(self):
    """hook for subclasses to release resources; default is a no-op"""
    pass
  def Run(self):
    """generates fuzz output by delegating to the subclass _Run()"""
    self._Run()
  def _Run(self):
    # Call form (not `raise E, msg`) is valid on Python 2 and 3.
    raise RuntimeError("_Run() should be implemented in a subclass")
class FuzzWritable(Fuzz):
  """Fuzz generator that streams random bytes to a writable _target."""
  def _Run(self):
    """writes up to the configured byte limit to _target"""
    if self._target is None:
      raise RuntimeError("No target set")
    written = 0  # renamed from `bytes`, which shadowed the builtin
    while written < self._max_bytes:
      written += self._block_size
      bits = self._rand.getrandbits(self._block_size * 8)
      # Render the random bits as hex and convert to raw bytes without
      # looping. '%x' avoids the trailing 'L' that hex() appends to
      # Python 2 longs (the old hex(bits)[2:-1] dropped a real digit
      # whenever no 'L' was present).
      hexed = '%x' % bits
      if len(hexed) % 2 != 0:
        # Pad at the FRONT to a whole number of bytes; appending shifted
        # every nibble in the payload.
        hexed = '0' + hexed
      payload = binascii.unhexlify(hexed)
      self._target.write(payload)
class FuzzFile(FuzzWritable):
  """Fuzz generator that writes random bytes into a (temporary) file."""
  def __init__(self, seed=0, block_size=4096):
    FuzzWritable.__init__(self, seed, block_size)
    self.set_file()
  def set_file(self, filename=None):
    """opens `filename` (or a fresh temp path) as the output target.
    The previous default `filename=tempfile.mktemp()` was evaluated only
    once at class-definition time, so every instance shared one path.
    """
    if filename is None:
      filename = tempfile.mktemp()
    # clean up any old file first
    if self._target is not None:
      self._target.close()
      if os.path.exists(self._target.name):
        os.remove(self._target.name) # was os.file.remove: AttributeError
    # Binary mode: _Run() writes raw byte payloads. open() replaces the
    # Python-2-only file() builtin.
    self._target = open(filename, 'wb')
  def get_file(self):
    """returns the path of the current output file"""
    return self._target.name
  def CleanUp(self):
    # Close and remove the tmp file if it still exists.
    if self._target is not None and \
       os.path.exists(self._target.name):
      self._target.close()
      os.remove(self._target.name)
  def set_target(self, target):
    """points the fuzzer at a new output file path"""
    self.set_file(target)
  def get_target(self):
    """returns the output file path (not the file object)"""
    return self.get_file()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""valgrind XML output parser for extracting error data"""
__author__ = 'Will Drewry'
from xml.sax._exceptions import SAXParseException
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import copy
class ErrorFrame:
  """Contains the information for one stack frame of a valgrind error."""
  def __init__(self):
    # All fields accumulate CDATA as strings during SAX parsing.
    self.instruction_pointer = ''
    self.obj = ''
    self.function_name = ''
    self.dir = ''
    self.file = ''
    self.line = ''
  def __repr__(self):
    return self.__str__()
  def __str__(self):
    return '{instruction_pointer:%s, obj:%s, function_name:%s, dir:%s, file:%s, line:%s}' % (
        self.instruction_pointer, self.obj, self.function_name, self.dir, self.file, self.line)
  def __eq__(self, a):
    # Field-by-field comparison. The original raised a string exception
    # (a TODO), which broke Error.__eq__'s frame-list comparison and is
    # invalid syntax semantics on Python 3.
    return (self.instruction_pointer == a.instruction_pointer and
            self.obj == a.obj and
            self.function_name == a.function_name and
            self.dir == a.dir and
            self.file == a.file and
            self.line == a.line)
  def __ne__(self, a):
    # Needed explicitly on Python 2, where != does not fall back to __eq__.
    return not self.__eq__(a)
class ErrorCount:
  """Holds one <pair> entry from valgrind's <errorcounts> section."""
  def __init__(self):
    # Both fields accumulate CDATA as strings during parsing.
    self.unique = self.count = ''
class Error:
  """container class for XML error data"""
  def __init__(self):
    # String fields accumulate CDATA during parsing; count is filled in
    # later from the <errorcounts> section.
    self.unique = ''
    self.tid = ''
    self.kind = ''
    self.what = ''
    self.frames = []
    self.count = 0
  def __eq__(self, a):
    # Every identifying field and the frame stack must match.
    return (a.unique == self.unique and
            a.tid == self.tid and
            a.kind == self.kind and
            a.what == self.what and
            a.frames == self.frames and
            a.count == self.count)
  def __ne__(self, a):
    # Needed explicitly on Python 2, where != does not fall back to __eq__.
    return not self.__eq__(a)
  def __repr__(self):
    return self.__str__()
  def __str__(self):
    return '{unique:%s, tid:%s, kind:%s, what:%s, count:%s, frames:%s}' % (
        self.unique, self.tid, self.kind, self.what, self.count, self.frames)
class HandlerState:
  """Parser scratch state: the open-element stack plus collected results."""
  def __init__(self):
    # Stacks -- the newest element/error/count is kept at index 0.
    self.elements, self.errors, self.errorcounts = [], [], []
class Handler(ContentHandler):
  """handler for SAX XML processing of valgrind error output"""
  def __init__(self):
    self.__state = HandlerState()
    ContentHandler.__init__(self)
  def errorcounts(self):
    """provides a copy of populated errorcounts"""
    return copy.deepcopy(self.__state.errorcounts)
  def errors(self):
    """provides a copy of populated errors"""
    return copy.deepcopy(self.__state.errors)
  def startElement(self, name, attrs):
    """extracts error elements and their children"""
    # attrs is unused in valgrind output
    if len(attrs) != 0:
      pass # TODO: exception perhaps?
    # Push the element; index 0 is always the innermost open element.
    self.__state.elements.insert(0, name)
    if name == 'error': # errors should never be nested
      self.__state.errors.insert(0, Error())
    elif name == 'frame':
      # Frames collect newest-first; endElement reverses them.
      self.__state.errors[0].frames.insert(0, ErrorFrame())
    elif name == 'pair': # assume this only occurs in errorcounts
      self.__state.errorcounts.insert(0, ErrorCount())
  def endElement(self, name):
    """handles proper nesting of errors"""
    self.__state.elements.pop(0)
    if name == 'error':
      # Clean up frame ordering
      self.__state.errors[0].frames.reverse()
  def characters(self, ch):
    """appends CDATA to the appropriate data structure per character"""
    # Only element text nested at least two levels deep matters.
    if len(self.__state.elements) < 2:
      return
    element = self.__state.elements[0]
    if self.__state.elements[1] == 'error':
      # Direct children of <error>: accumulate text into the newest error.
      if element == 'unique':
        self.__state.errors[0].unique += ch
      elif element == 'tid':
        self.__state.errors[0].tid += ch
      elif element == 'kind':
        self.__state.errors[0].kind += ch
      elif element == 'what':
        self.__state.errors[0].what += ch
    # NOTE(review): the branches below read elements[2] although only a
    # depth of 2 was checked -- assumes valgrind always nests frame/pair
    # inside stack/errorcounts respectively; confirm against real output.
    elif self.__state.elements[1] == 'frame' and \
         self.__state.elements[2] == 'stack':
      # Children of <frame> inside <stack>: fill the newest frame.
      if element == 'ip':
        self.__state.errors[0].frames[0].instruction_pointer += ch
      elif element == 'obj':
        self.__state.errors[0].frames[0].obj += ch
      elif element == 'fn':
        self.__state.errors[0].frames[0].function_name += ch
      elif element == 'dir':
        self.__state.errors[0].frames[0].dir += ch
      elif element == 'file':
        self.__state.errors[0].frames[0].file += ch
      elif element == 'line':
        self.__state.errors[0].frames[0].line += ch
    elif self.__state.elements[1] == 'pair' and \
         self.__state.elements[2] == 'errorcounts':
      # Children of <pair> inside <errorcounts>: fill the newest count.
      if element == 'count':
        self.__state.errorcounts[0].count += ch
      elif element == 'unique':
        self.__state.errorcounts[0].unique += ch
class Parser:
  """complete encapsulation of the SAX parsing of valgrind error output"""
  def __init__(self):
    # Wire a fresh SAX parser to our valgrind-output content handler.
    self.__handler = Handler()
    self.__parser = make_parser()
    self.__parser.setContentHandler(self.__handler)
  def parse(self, s=''):
    """calls the SAX parser and returns the parsed error array"""
    try:
      self.__parser.parse(s)
    except SAXParseException:
      # Valgrind's XML output is often truncated or malformed; accept
      # whatever was parsed before the failure.
      # TODO(wad@google.com): look primarily for the
      # "junk after document element" exc.
      print("[flayer] an error occurred during error parsing.\n")
      print("[flayer] some data may be missing.\n")
    # Index the <errorcounts> pairs by their numeric unique id.
    counts = {}
    for pair in self.__handler.errorcounts():
      if pair.unique != '' and pair.count != '':
        counts[int(pair.unique, 16)] = int(str(pair.count))
    # Build the result map of unique id -> Error, attaching counts.
    parsed = {}
    for err in self.__handler.errors():
      key = int(err.unique, 16)
      parsed[key] = copy.copy(err)
      # Sometimes the error count is lost with valgrind's malformed
      # xml output; leave the default in that case.
      if key in counts:
        parsed[key].count = counts[key]
    return parsed
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""valgrind XML output parser for extracting error data"""
__author__ = 'Will Drewry'
from xml.sax._exceptions import SAXParseException
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import copy
class ErrorFrame:
  """Contains frame information for one stack frame of a valgrind error.

  All fields are accumulated as strings by the SAX handler and remain ''
  when the corresponding XML element was absent.
  """
  def __init__(self):
    self.instruction_pointer = ''  # hex address string from <ip>
    self.obj = ''                  # object file, from <obj>
    self.function_name = ''        # from <fn>
    self.dir = ''                  # source directory, from <dir>
    self.file = ''                 # source file name, from <file>
    self.line = ''                 # source line number (string), from <line>
  def __repr__(self):
    return self.__str__()
  def __str__(self):
    return '{instruction_pointer:%s, obj:%s, function_name:%s, dir:%s, file:%s, line:%s}' % (
        self.instruction_pointer, self.obj, self.function_name, self.dir, self.file, self.line)
  def __eq__(self, a):
    # Field-wise comparison.  The previous implementation raised a bare
    # string (a TypeError on any modern interpreter), which made
    # Error.__eq__ blow up whenever two errors with equal-length frame
    # lists were compared.
    return (self.instruction_pointer == a.instruction_pointer and
            self.obj == a.obj and
            self.function_name == a.function_name and
            self.dir == a.dir and
            self.file == a.file and
            self.line == a.line)
  def __ne__(self, a):
    # Python 2 does not derive != from ==; keep list comparisons sane.
    return not self.__eq__(a)
class ErrorCount:
  """Pairs a valgrind error's unique id with its occurrence count.

  Both values are accumulated as raw CDATA strings by the SAX handler,
  so they begin life as empty strings.
  """
  def __init__(self):
    # Handler.characters() appends text into these as it streams by.
    self.unique, self.count = '', ''
class Error:
  """Container for one parsed valgrind error record.

  Text fields accumulate CDATA from the SAX handler; frames holds
  ErrorFrame objects, and count is filled in from the <errorcounts>
  section when present.
  """
  # Attributes participating in equality, in comparison order.
  _FIELDS = ('unique', 'tid', 'kind', 'what', 'frames', 'count')
  def __init__(self):
    self.unique = ''
    self.tid = ''
    self.kind = ''
    self.what = ''
    self.frames = []
    self.count = 0
  def __eq__(self, a):
    # Any single mismatched attribute makes the records unequal.
    for field in Error._FIELDS:
      if getattr(self, field) != getattr(a, field):
        return False
    return True
  def __repr__(self):
    return self.__str__()
  def __str__(self):
    return '{unique:%s, tid:%s, kind:%s, what:%s, count:%s, frames:%s}' % (
        self.unique, self.tid, self.kind, self.what, self.count, self.frames)
class HandlerState:
  """Mutable parse state: the open-element stack plus collected records.

  elements acts as a stack of currently-open XML element names (newest
  at index 0); errors and errorcounts collect records, newest first.
  """
  def __init__(self):
    self.elements = list()
    self.errors = list()
    self.errorcounts = list()
class Handler(ContentHandler):
  """handler for SAX XML processing of valgrind error output

  Maintains a stack of open element names in a HandlerState and fills
  Error / ErrorFrame / ErrorCount records as elements open and CDATA
  arrives.  Results are retrieved via errors() and errorcounts().
  """
  def __init__(self):
    self.__state = HandlerState()
    ContentHandler.__init__(self)
  def errorcounts(self):
    """provides a copy of populated errorcounts"""
    return copy.deepcopy(self.__state.errorcounts)
  def errors(self):
    """provides a copy of populated errors"""
    return copy.deepcopy(self.__state.errors)
  def startElement(self, name, attrs):
    """extracts error elements and their children"""
    # attrs is unused in valgrind output
    if len(attrs) != 0:
      pass # TODO: exception perhaps?
    self.__state.elements.insert(0, name)
    if name == 'error': # errors should never be nested
      self.__state.errors.insert(0, Error())
    elif name == 'frame':
      # A stray <frame> outside any <error> (possible in the malformed
      # output Parser tolerates) used to raise IndexError; skip it.
      if self.__state.errors:
        self.__state.errors[0].frames.insert(0, ErrorFrame())
    elif name == 'pair': # assume this only occurs in errorcounts
      self.__state.errorcounts.insert(0, ErrorCount())
  def endElement(self, name):
    """handles proper nesting of errors"""
    self.__state.elements.pop(0)
    if name == 'error' and self.__state.errors:
      # Frames were prepended as they opened; restore document order.
      self.__state.errors[0].frames.reverse()
  def characters(self, ch):
    """appends CDATA to the appropriate data structure per character"""
    depth = len(self.__state.elements)
    if depth < 2:
      return
    element = self.__state.elements[0]
    parent = self.__state.elements[1]
    if parent == 'error':
      error = self.__state.errors[0]
      if element == 'unique':
        error.unique += ch
      elif element == 'tid':
        error.tid += ch
      elif element == 'kind':
        error.kind += ch
      elif element == 'what':
        error.what += ch
    elif depth > 2 and parent == 'frame' and \
         self.__state.elements[2] == 'stack':
      # The depth > 2 guard is new: elements[2] was previously read with
      # only a depth < 2 check, so a two-deep parent raised IndexError.
      if not self.__state.errors or not self.__state.errors[0].frames:
        return  # frame was skipped as malformed in startElement
      frame = self.__state.errors[0].frames[0]
      if element == 'ip':
        frame.instruction_pointer += ch
      elif element == 'obj':
        frame.obj += ch
      elif element == 'fn':
        frame.function_name += ch
      elif element == 'dir':
        frame.dir += ch
      elif element == 'file':
        frame.file += ch
      elif element == 'line':
        frame.line += ch
    elif depth > 2 and parent == 'pair' and \
         self.__state.elements[2] == 'errorcounts':
      if element == 'count':
        self.__state.errorcounts[0].count += ch
      elif element == 'unique':
        self.__state.errorcounts[0].unique += ch
class Parser:
  """complete encapsulation of the SAX parsing of valgrind error output"""
  def __init__(self):
    # Wire a fresh SAX parser to the valgrind-specific content handler.
    self.__parser = make_parser()
    self.__handler = Handler()
    self.__parser.setContentHandler(self.__handler)
  def parse(self, s=''):
    """calls the SAX parser and returns the parsed error array

    Args:
      s: anything xml.sax's parse() accepts (filename, URL, or a
         file-like object); the default '' is not itself parseable.
    Returns:
      dict mapping the integer error id (the hex 'unique' field) to an
      Error instance, with occurrence counts merged in when available.
    """
    try:
      self.__parser.parse(s)
    except SAXParseException:
      # Accept what we could grab
      # TODO(wad@google.com): look primarily for the
      # "junk after document element" exc.
      print "[flayer] an error occurred during error parsing.\n"
      print "[flayer] some data may be missing.\n"
      pass
    errors = {}
    errorcount = {}
    # Build a unique-id -> count map from the parsed <pair> records.
    for ec in self.__handler.errorcounts():
      if ec.unique != '' and ec.count != '':
        errorcount[int(ec.unique, 16)] = int(str(ec.count))
    for error in self.__handler.errors():
      key = int(error.unique, 16)
      errors[key] = copy.copy(error)
      # Sometimes the error count is lost with
      # valgrind's malformed xml output.
      if errorcount.has_key(key):
        errors[key].count = errorcount[key]
    return errors
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""interface for running the valgrind command"""
__author__ = "Will Drewry"
import copy
import os
import subprocess
class InvalidExecutable(RuntimeError):
  """Raised by Runner.run() when the valgrind binary is unusable."""
class Runner(dict):
  """Builds and executes a valgrind command line.

  The instance itself is a dict of valgrind option name -> value.
  Values may be strings, bools, lists, or dicts and are serialized by
  _HandleArgumentValue into --name=value command-line arguments.
  """
  DEFAULT_ARGUMENTS = {
      'tool':'flayer',
      'xml':True,
      'db-attach':True,
      'db-command':'"xterm -e \"gdb -nw %f %p\""',
      'alter-branch':{},
      'taint-file':True,
      'taint-network':True,
      'file-filter':'',
      'log-file':'',
      }
  def __init__(self):
    # Deep copy so the mutable defaults (notably the 'alter-branch'
    # dict, which callers mutate in place) are not shared -- and
    # silently corrupted -- across Runner instances.  dict.update alone
    # only copies the top level.
    self.update(copy.deepcopy(Runner.DEFAULT_ARGUMENTS))
    self.bufsize = 4096                    # buffer size handed to Popen
    self.executable = '/usr/bin/valgrind'  # callers may override
    self.environment = dict(os.environ)
  def _HandleArgumentValue(self, value):
    """takes a name and value and returns the final string

    Strings are quote-escaped; bools map to yes/no; lists join their
    converted items with ','; dicts become 'key:value' pairs; anything
    else is str()'d.
    """
    # type('') / type(u'') covers both str and unicode on Python 2 and
    # collapses to str on Python 3.
    if isinstance(value, (type(''), type(u''))):
      # Escape quotes only and set the value.
      # *** This is not for security!! ***
      escaped_value = copy.copy(value)
      # TODO: make these constants
      for pair in [["'", '"'], ['"', '\\"']]:
        escaped_value = escaped_value.replace(pair[0], pair[1])
      return escaped_value
    elif isinstance(value, bool):
      if value:
        return 'yes'
      return 'no'
    elif isinstance(value, list):
      return ','.join([self._HandleArgumentValue(v) for v in value])
    elif isinstance(value, dict):
      merged = []
      for key, val in value.items():
        merged.append(':'.join([key, self._HandleArgumentValue(val)]))
      return self._HandleArgumentValue(merged)
    else:
      return str(value)
  def _GetArguments(self):
    """returns the arguments as a usable array (['--name=value', ...])"""
    arguments = []
    for name in self.keys():
      value = self._HandleArgumentValue(self[name])
      arguments.append('='.join(['--'+name, value]))
    return arguments
  def __GetArguments(self):
    """indirect reference for 'arguments' property'"""
    return self._GetArguments()
  def _SetArguments(self, value):
    """will auto-set arguments"""
    # Was 'raise NotYetImplemented, ...' -- an undefined name, so it
    # actually raised NameError.  Raise the intended builtin instead.
    raise NotImplementedError("this will be implemented later")
  def __SetArguments(self, value):
    """indirect reference for 'arguments' property'"""
    return self._SetArguments(value)
  arguments = property(__GetArguments, __SetArguments,
                       doc="""Get or set the current arguments""")
  def run(self, additional_arguments=None, verbose=False, *io):
    """executes valgrind with the options and returns the popen object

    Args:
      additional_arguments: extra argv entries (typically the target
        program and its arguments) appended after the valgrind options.
      verbose: if True, print the full command line before executing.
      *io: up to three overrides for stdin, stdout, stderr; any not
        given default to subprocess.PIPE.
    Returns:
      the subprocess.Popen object for the running valgrind.
    Raises:
      InvalidExecutable: if self.executable is not a string or does not
        exist on disk.
    """
    if additional_arguments is None:
      additional_arguments = []
    # Test for correctness
    if not isinstance(self.executable, str):
      raise InvalidExecutable('Executable must be a string')
    if not os.path.exists(self.executable):
      raise InvalidExecutable('Executable not found. Full path required.')
    arguments = [self.executable] + self._GetArguments() + additional_arguments
    if verbose:
      print("Running %s\n" % ' '.join(arguments))
    stdin = subprocess.PIPE
    stdout = subprocess.PIPE
    stderr = subprocess.PIPE
    if len(io) > 0:
      stdin = io[0]
    if len(io) > 1:
      stdout = io[1]
    if len(io) > 2:
      stderr = io[2]
    process = subprocess.Popen(arguments,
                               self.bufsize,
                               env=self.environment,
                               stdin=stdin,
                               stdout=stdout,
                               stderr=stderr,
                               close_fds=True)
    return process
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""interface for running the valgrind command"""
__author__ = "Will Drewry"
import copy
import os
import subprocess
# Raised by Runner.run() when the configured valgrind executable is not
# a string or does not exist on disk.
class InvalidExecutable(RuntimeError): pass
class Runner(dict):
  """Builds and executes a valgrind command line.

  The instance itself is a dict of valgrind option name -> value;
  values are serialized into --name=value arguments by
  _HandleArgumentValue.
  """
  # NOTE(review): the nested 'alter-branch' dict is shared by every
  # Runner instance -- __init__'s dict.update only copies the top level,
  # so in-place mutations of one instance's alter-branch leak into all.
  DEFAULT_ARGUMENTS = {
      'tool':'flayer',
      'xml':True,
      'db-attach':True,
      'db-command':'"xterm -e \"gdb -nw %f %p\""',
      'alter-branch':{},
      'taint-file':True,
      'taint-network':True,
      'file-filter':'',
      'log-file':'',
      }
  def __init__(self):
    self.update(Runner.DEFAULT_ARGUMENTS)
    self.bufsize = 4096                    # buffer size handed to Popen
    self.executable = '/usr/bin/valgrind'  # callers may override
    self.environment = dict(os.environ)
  def _HandleArgumentValue(self, value):
    """takes a name and value and returns the final string

    Strings are quote-escaped; bools map to yes/no; lists join their
    converted items with ','; dicts become 'key:value' pairs; anything
    else is str()'d.  (Python 2 only: references the unicode builtin.)
    """
    if type(value) is str or type(value) is unicode:
      # Escape quotes only and set the value.
      # *** This is not for security!! ***
      escaped_value = copy.copy(value)
      # TODO: make these constants
      for pair in [["'", '"'], ['"', '\\"']]:
        escaped_value = escaped_value.replace(pair[0], pair[1])
      return escaped_value
    elif type(value) is bool:
      if value == True:
        return 'yes'
      else:
        return 'no'
    elif type(value) is list:
      return ','.join([self._HandleArgumentValue(v) for v in value])
    elif type(value) is dict:
      merged = []
      for item in value.items():
        merged.append(':'.join([item[0], self._HandleArgumentValue(item[1])]))
      return self._HandleArgumentValue(merged)
    else:
      return str(value)
  def _GetArguments(self):
    """returns the arguments as a usable array (['--name=value', ...])"""
    arguments = []
    for name in self.keys():
      value = self._HandleArgumentValue(self[name])
      arguments.append('='.join(['--'+name, value]))
    return arguments
  def __GetArguments(self):
    """indirect reference for 'arguments' property'"""
    return self._GetArguments()
  def _SetArguments(self, value):
    """will auto-set arguments"""
    # NOTE(review): NotYetImplemented is undefined, so this actually
    # raises NameError rather than the intended exception.
    raise NotYetImplemented, "this will be implemented later"
  def __SetArguments(self, value):
    """indirect reference for 'arguments' property'"""
    return self._SetArguments(value)
  arguments = property(__GetArguments, __SetArguments,
                       doc="""Get or set the current arguments""")
  def run(self, additional_arguments=[],verbose=False,*io):
    """executes valgrind with the options and returns the popen object

    io may supply up to three overrides: stdin, stdout, stderr; any not
    given default to subprocess.PIPE.  Raises InvalidExecutable when
    self.executable is not a str or does not exist on disk.
    """
    # Test for correctness
    if type(self.executable) != str:
      raise InvalidExecutable, 'Executable must a string'
    if not os.path.exists(self.executable):
      raise InvalidExecutable, 'Executable not found. Full path required.'
    arguments = [self.executable] + self._GetArguments() + additional_arguments
    if verbose:
      print "Running %s\n" % ' '.join(arguments)
    stdin=subprocess.PIPE
    stdout=subprocess.PIPE
    stderr=subprocess.PIPE
    if len(io) > 0:
      stdin = io[0]
    if len(io) > 1:
      stdout = io[1]
    if len(io) > 2:
      stderr = io[2]
    process = subprocess.Popen(arguments,
                               self.bufsize,
                               env=self.environment,
                               stdin=stdin,
                               stdout=stdout,
                               stderr=stderr,
                               close_fds=True)
    return process
| Python |
# Package version string; Flayer.VERSION in the core module carries the
# same value.
VERSION = '0.0.1'
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""flayer - a fuzzing framework for bypassing basic structural error checking
...
"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
from distutils.spawn import find_executable
import os
import shelve
import subprocess
import sys
import tempfile
import valgrind.error_parser
import valgrind.runner
class Flayer:
  """wrapper for valgrind/memcheck undef-as-taint and alter-branch arguments"""
  VERSION = '0.0.1'
  def __init__(self, program='/bin/true', args=[], env={}):
    """configures a valgrind Runner for program and a fresh temp dir

    NOTE(review): some callers appear to pass a fourth 'libtaint'
    argument (see the fuzzer module) -- confirm this signature matches.
    """
    self.__runner = valgrind.runner.Runner()
    self.__runner.executable = find_executable('valgrind')
    self.__args = []
    self.set_command(program, args, env)
    self.__taint = ''
    self.set_taint('nfs') # TODO: e
    self.__taint_filter = {}
    #self.set_taint_network_filter('')
    self.set_taint_file_filter('')
    self.__errors = {}
    self.__shelf = None
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  def __cleanup_tmpdir(self):
    """attempts to cleanup the tmpdir"""
    # Deletes files, then the directory.  Assumes a flat tmpdir: os.walk
    # is top-down, so rmdir would fail if nested directories existed.
    for root, dirs, files in os.walk(self.__tmpdir):
      for f in files:
        os.unlink(os.path.join(root, f)) # todo use join
      os.rmdir(root)
  def __del__(self):
    """properly clean up the temporary files on destruction"""
    self.__cleanup_tmpdir()
  def Errors(self):
    """returns the valgrind output errors"""
    return copy.deepcopy(self.__errors)
  def GetTmpDir(self):
    """returns the tmpdir"""
    return self.__tmpdir
  def ResetTmpDir(self):
    """resets the tmpdir and cleans up one if it exists"""
    self.__cleanup_tmpdir()
    self.__tmpdir = tempfile.mkdtemp()
    self.__runner['log-file'] = self.__tmpdir + '/flayer'
  # TODO: change these to properties
  def get_taint(self):
    """returns the active taint flags as a character string

    NOTE(review): 's' is appended unconditionally, mirroring set_taint
    below, which treats 'f' and 's' identically "for now".
    """
    taint = ''
    if self.__runner.has_key('taint-network') and \
       self.__runner['taint-network']:
      taint += 'n'
    if self.__runner.has_key('taint-file') and \
       self.__runner['taint-file']:
      taint += 'f'
    taint += 's'
    return taint
  def set_taint(self, value):
    """sets taint sources: 'n' network, 'f' file, 's' stdin (same as 'f')"""
    # TODO validate
    self.__runner['taint-network'] = False
    self.__runner['taint-file'] = False
    for ch in value:
      if ch == 'n':
        self.__runner['taint-network'] = True
      elif ch == 'f' or ch == 's': # no diff now...
        self.__runner['taint-file'] = True
      else:
        raise RuntimeError, "Request value not yet implemented: " + ch
  def set_taint_network_filter(self, value):
    """specified the host or port traffic to mark"""
    raise RuntimeError, "NOT YET IMPLEMENTED"
  def set_taint_file_filter(self, value):
    """specified the path prefix for file activity to mark"""
    self.__runner['file-filter'] = value
  def get_taint_file_filter(self):
    """returns a copy of the file filter prefix ('' when unset)"""
    if self.__runner.has_key('file-filter'):
      return copy.copy(self.__runner['file-filter'])
    else:
      return ''
  def get_taint_network_filter(self):
    """returns a copy of the network filter ('' when unset)

    NOTE(review): reads a 'network-filter' key that nothing in this
    class ever sets (the setter raises), so this always returns ''.
    """
    if self.__runner.has_key('network-filter'):
      return copy.copy(self.__runner['network-filter'])
    else:
      return ''
  def Run(self, verbose=False, *io):
    """runs the specified command under valgrind-flayer and gets the errors"""
    process = self.__runner.run(self.__args, verbose, *io)
    # Remember the pid-suffixed log path for ProcessLastRun().
    self.__errors_file = ''.join([self.__tmpdir, '/flayer.', str(process.pid)])
    return process
  def ProcessLastRun(self):
    """parses the error log written by the most recent Run()"""
    self.__errors = {}
    self._ReadErrors(self.__errors_file)
  def _ReadErrors(self, f):
    """opens the valgrind error output and parses it"""
    p = valgrind.error_parser.Parser()
    self.__errors = p.parse(open(f))
  def clear_branch_alterations(self):
    """drops all configured forced-branch settings"""
    self.__runner['alter-branch'] = {}
  def add_branch_alteration(self, address, action):
    """forces the branch at address: truthy action follows it, else skips"""
    if action:
      self.__runner['alter-branch'][address] = '1'
    else:
      self.__runner['alter-branch'][address] = '0'
  def del_branch_alteration(self, address):
    """removes the forced branch at address, if configured"""
    if self.__runner['alter-branch'].has_key(address):
      self.__runner['alter-branch'].pop(address)
  def get_branch_alterations(self):
    """returns a deep copy of the address -> '0'/'1' alteration map"""
    return copy.deepcopy(self.__runner['alter-branch'])
  def set_command(self, command, args=[], env={}):
    """sets the target program command, arguments, and environment"""
    self.__args = copy.copy(args)
    self.__args.insert(0, command)
    if env != {}:
      self.__runner.environment.update(env)
  def get_command(self):
    """gets the target program command, arguments, and env"""
    return (self.__args[0], # command
            self.__args[1:], # arguments
            copy.copy(self.__runner.environment)) # environment
  def About(self):
    """returns a nice 'about' text"""
    return """
Flayer is a framework for automating and easing the use of
two valgrind features: --undef-as-taint and --alter-branch.
It is the proof of concept implementation of the paper.
The flayer suite (libflayer, valgrind/flayer)
provides a system which traces user input through memory
and opens doors for it.
What does this mean technically? Traditional fuzzing is
limited in its overall code coverage. It is often blocked
early in the fuzzing process by protocol banner checks and other
version and sanity checks. This suite allows for these checks to be
forcibly skipped at runtime. This breathes new life into the
good, ol' fashion Fuzz[1] technique by providing access to
program internals without specifying a complicated protocol.
However, this system can be used with almost any existing fuzzing
technique to allow for enhanced code coverage.
Flayer was conceived of and written by Will Drewry <redpig@dataspill.org>.
Tavis Ormandy <taviso@sdf.lonestar.org> created the manual fuzzing
technique that flayer automates.
[1] http://www.cs.wisc.edu/~bart/fuzz
"""
  def FullCommand(self):
    """returns the complete valgrind command line as one string"""
    vg = [self.__runner.executable] + self.__runner.arguments
    command = ' '.join(vg + self.__args)
    return command
  def Banner(self):
    """display a banner when running in interactive shell mode"""
    vg = [self.__runner.executable] + self.__runner.arguments
    command = ' '.join(vg + self.__args)
    return """
Welcome to Flayer %s!
Type 'help()' for extra details or 'about()' for more
on flayer.
Current settings:
- Command: %s
- Taint settings: %s
- Temporary directory: %s
""" % (Flayer.VERSION, command, self.get_taint(), self.__tmpdir)
if __name__ == '__main__':
  # Script entry point: argv[1] is the target program, the remaining
  # argv entries are its arguments; then drop into the interactive
  # shell wrapper, exporting its commands into __main__.
  program, args = ('', [])
  if len(sys.argv) >= 2:
    program = sys.argv[1]
    args = sys.argv[2:]
  import wrappers.commandline
  cli = wrappers.commandline.Shell(Flayer(program, args))
  cli.Export()
  cli.Banner()
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""command line interface for flayer"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
import os
import shelve
import signal
import subprocess
import sys
# A kludge to add "subtraction" to list.
class SList(list):
  """list with a set-difference style __sub__ (module-level kludge).

  Subtraction keeps every element of self that does not compare equal
  to any element of the right-hand operand; duplicates survive.
  """
  def __sub__(self, a):
    # Membership via 'in' uses the same __eq__ semantics the original
    # index()/ValueError dance relied on, and returns a plain list.
    return [value for value in self if value not in a]
class Shell:
  # Should I just subclass Flayer?
  """makes the Flayer library suitable for interactive python shell use

  Every method named _E_* is exported into the interactive
  interpreter's __main__ namespace (prefix stripped, lower-cased) by
  Export(), becoming the shell's command set.
  """
  def __init__(self, flayer=None):
    # NOTE(review): the fallback branch uses a bare 'Flayer' name that
    # is not imported in this module; it would raise NameError if
    # exercised -- presumably flayer.core.Flayer was intended.
    if flayer == None:
      self.__flayer = Flayer()
    else:
      self.__flayer = flayer
    # Altered extra data
    self._altered = {}
    # Track the last files
    self._lastio = {}
    self._lastio['stdout'] = ''
    self._lastio['stderr'] = ''
    # Track past runs
    self._past_errors = []
  def Banner(self):
    """displays a welcome banner for the shell"""
    print "%s\n" % self.__flayer.Banner()
  def Export(self):
    """exports all methods starting with _E_ into __main__, lower()ed"""
    for exportable in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
      name = exportable[3:].lower()
      sys.modules['__main__'].__dict__[name] = getattr(self, exportable)
  # All command line methods
  def _E_Summary(self):
    """outputs a summary of errors from valgrind
    This prints a formatted list of errors from
    valgrind by kind. In particular, it includes
    debugging information from the last frame of
    the error stack trace.
    Arguments:
    - none
    """
    header = " id frame information"
    format = "%-5s %-15s %s %s"
    # Sort into kinds
    kinds = {}
    for e in self.__flayer.Errors().values():
      if kinds.has_key(e.kind):
        kinds[e.kind].append(e)
      else:
        kinds[e.kind] = [e]
    for kind in kinds.items():
      print "==> " + kind[0]
      print header
      for e in kind[1]:
        # Join dir/file/line of the first recorded frame; a bare ':'
        # means no debug info was present, so show nothing.
        file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
               e.frames[0].line
        if file == ':':
          file = ''
        print format % (e.unique, e.frames[0].instruction_pointer,
                        e.frames[0].function_name, file)
  def _E_Details(self, error_id):
    """outputs detailed error information by id
    This prints all of the data collected about a particular
    error.
    Arguments:
    - error_id: error id integer
    """
    errors = self.__flayer.Errors()
    if not errors.has_key(error_id):
      print "Error id '%s' not found." % error_id
      return
    error = errors[error_id]
    print "Error %s:" % error_id
    print "- Thread Id: %s" % error.tid
    print "- Kind: %s" % error.kind
    print "- What: %s" % error.what
    print "- Count: %d" % error.count
    print "- Frames:"
    for id in range(0, len(error.frames)):
      frame = error.frames[id]
      efile = os.path.join(frame.dir, frame.file)
      print " Frame %d:" % id
      print " - Instruction pointer: %s" % frame.instruction_pointer
      print " - Object: %s" % frame.obj
      print " - Function: %s" % frame.function_name
      print " - File: %s" % efile
      print " - Line number: %s" % frame.line
  def _E_Snippet(self, error_id, range=10):
    """outputs code snippet from the top level stack frame if available
    This command will output the first range lines before the conditional
    and the following range lines.
    Arguments:
    - error_id: error id integer
    - range: number of lines of code to show [10]
    """
    if range < 1:
      print "Range must be positive"
      return
    # TODO(wad): autoconvert error_id
    #if type(error_id) is int:
    errors = self.__flayer.Errors()
    if not errors.has_key(error_id):
      print "Error id '%s' not found." % error_id
      return
    error = errors[error_id]
    if len(error.frames) == 0:
      print "Error id '%s' has no debugging information." % error_id
      return
    frame = error.frames[0]
    efile = os.path.join(frame.dir, frame.file)
    # TODO(wad): bust this out to a helper
    try:
      f = file(os.path.join(frame.dir, frame.file))
      # TODO(wad): catch explicit errors
    except:
      print ("Cannot open referenced file: %s" %
             os.path.join(frame.dir, frame.file))
      return
    line = 1
    try:
      # Skip ahead to 'range' lines before the error line, then echo
      # the window, marking the error line itself with a leading '|'.
      while line < (int(frame.line) - range):
        f.readline() # eat it.
        line += 1
      while line < (int(frame.line) + range):
        # New lines included.
        if line == int(frame.line):
          sys.stdout.write('|%s' % f.readline())
        else:
          sys.stdout.write(' %s' % f.readline())
        line += 1
      # TODO(wad): except explicitly around each readline
    except:
      print "exception"
    return
  def _E_Taint(self, value=None):
    """gets or sets arguments for tainting
    This command will set or retrieve the value
    of the current valgrind/flayer tainting arguments.
    Each setting is a character and valid characters are
    as follows: n, f, and s. Later 'e' will be added.
    'f' indicates that file I/O buffers will be tainted.
    'n' indicates that network I/O buffers will be tainted.
    's' indicates that fd 0 I/O buffers will be tainted.
    'e' will indicate environment variable tainting.
    A value of None will result in a copy of the current
    taint values being returned.
    Arguments:
    - value: a string containing the arguments above (def: None)
    """
    if value == None:
      return self.__flayer.get_taint()
    else:
      return self.__flayer.set_taint(value)
  def _E_Filter(self, file=None, network=None):
    """gets or sets filtering for taint arguments
    This command will set or retrieve the value
    of the --*-filter arguments used by
    valgrind/flayer. Specifically, file path prefixes are used
    to indicate which input buffers to taint while
    network host/port pairs can be specified.
    When all the arguments are None, the current settings
    will be returned.
    Arguments:
    - file: the path prefix of file to taint (def: None)
    - network: the "host:port" pair to taint (def: None)
    """
    if file == None and network == None:
      return {'file':self.__flayer.get_taint_file_filter(),
              'network':self.__flayer.get_taint_network_filter() }
    if file != None:
      self.__flayer.set_taint_file_filter(file)
    if network != None:
      # NOTE(review): passes 'file' where 'network' is clearly intended.
      self.__flayer.set_taint_network_filter(file)
  def _E_Command(self, command=None, args=[], env={}):
    """gets/sets the target command
    This function sets or gets the value of the target
    command and its arguments for use in valgrind.
    The command can be an explicit path or in the PATH
    environment variable. The arguments should be a
    list. The environment should be a dict and will be added
    to the current environment variable - NOT override it.
    Currently, these will _NOT_ be run under a shell.
    Arguments:
    - command: string containing the target command (def: None)
    - args: list of arguments for the command (def: [])
    - env: dict of environment variables (def: {})
    """
    if command is None:
      return self.__flayer.get_command()
    return self.__flayer.set_command(command, args, env)
  def _E_Run(self, verbose=False):
    """calls valgrind with the configured target, args, and environment
    This command executes valgrind with the current
    configuration of target executable, arguments, and
    environment variables needed. It will also automatically
    process the output log from valgrind.
    Arguments:
    - verbose: prints additional information (def: False)
    """
    # Setup stdout and stderr files for this process.
    try:
      os.unlink(self._lastio['stdout'])
      os.unlink(self._lastio['stderr'])
    except:
      pass # Should be empty or valid. Kludge!
    # For now, this will clobber any past runs
    self._lastio['stdout'] = self.__flayer.GetTmpDir() + "/out"
    self._lastio['stderr'] = self.__flayer.GetTmpDir() + "/err"
    stdin = subprocess.PIPE
    stdout = open(self._lastio['stdout'], 'w')
    stderr = open(self._lastio['stderr'], 'w')
    # NOTE(review): stdin/stdout/stderr above are never passed to Run(),
    # so the child's streams stay on the Runner's default pipes and the
    # /out and /err files remain empty.
    process = self.__flayer.Run()
    # Setup a signal handler to make SIGINT kill the process
    def handler(signum, frame):
      # NOTE(review): os.kill requires a signal argument; as written
      # this raises TypeError instead of delivering SIGTERM.
      os.kill(process.pid)
    orig_handler = signal.signal(signal.SIGINT, handler)
    process.stdin.close()
    print "Process ID: %d\n" % process.pid
    # XXX: dump out lastio on these calls.
    #print "You may check on its progress with the following commands: \n"
    #print "running(), exit_code(), stdout(), stderr()\n"
    print "Press Ctrl+C to send a SIGTERM to the running process.\n"
    try:
      # NOTE(review): 'ret' ends up holding the raw wait status (not an
      # exit code), and stays unbound if os.wait raises -- making the
      # verbose print below a NameError in that case.
      ret, ret = os.wait() # pid is first - don't care.
    except:
      pass
    # Remove the SIGINT handler
    signal.signal(signal.SIGINT, orig_handler)
    stdout.close()
    stderr.close()
    if verbose:
      print "Return code: %d\n" % ret
    self.__flayer.ProcessLastRun()
    # XXX: does this need to be a deepcopy?
    self._past_errors.append(self.__flayer.Errors().values())
  def _E_ErrorDiff(self, run_a, run_b, kind='TaintedCondition'):
    """returns the difference between to ErrorSets
    This command will return the difference between the error sets
    generated by the specific runs. See PastErrors() for more.
    Arguments:
    - run_a: integer index of the run's errors
    - run_b: integer index of the run's errors
    """
    # NOTE(review): the 'kind' parameter is accepted but never used.
    a = SList(self._past_errors[run_a])
    b = SList(self._past_errors[run_b])
    return b - a
  def _E_PastErrors(self):
    """returns the list of past errors
    !!TODO!! make this print a pretty list
    Arguments:
    - None
    """
    return self._past_errors
  def _E_ClearErrors(self):
    """clears the list of past errors
    !!TODO!! make this print a pretty list
    Arguments:
    - None
    """
    self._past_errors = []
  def _E_Alter(self, error_id=None, action=None, address=None):
    """gets/sets runtime conditional behavior in the target
    This command gets or sets the branch altering
    functionality of valgrind. It allows for conditional
    blocks that make use of tainted data to be forced
    to be followed or skipped. It will output a pretty
    summary of alterations and returns a dict of instruction
    pointer to action.
    If an error is listed and the action is not specified,
    the alteration will be removed if it exists.
    Arguments:
    - error_id: string of the unique error id (def: None)
    - action: bool specifying whether to follow the branch (def: None)
    - address: unsigned long specifying the address to modify (def: None)
    """
    # TODO(wad): store alter info by IP
    if error_id is None and address is None:
      # No selector given: print the current alteration table.
      print "address action frame information"
      alts = self.__flayer.get_branch_alterations()
      format = "%-7s %-6s %s %s"
      for ip, e in self._altered.items():
        if e is None:
          print format % (ip, alts[ip], 'unknown', '')
        else:
          file = os.path.join(e.frames[0].dir, e.frames[0].file) + ':' + \
                 e.frames[0].line
          if file == ':':
            file = ''
          print format % (ip, alts[ip],
                          e.frames[0].function_name, file)
      return self.__flayer.get_branch_alterations()
    instruction_pointer = address
    # Validate error_id, kind, and get address
    error = None
    if error_id is not None:
      errors = self.__flayer.Errors()
      if errors.has_key(error_id):
        error = errors[error_id]
      if error is None:
        print "No matching error id found."
        return
      if error.kind != 'TaintedCondition':
        print 'Error must be of kind TaintedCondition'
        return
      instruction_pointer = error.frames[0].instruction_pointer
    if action is None and self._altered.has_key(instruction_pointer):
      # TODO: add another method for deletion
      self.__flayer.del_branch_alteration(instruction_pointer)
      return self._altered.pop(instruction_pointer)
    else:
      self.__flayer.add_branch_alteration(instruction_pointer, action)
      self._altered[instruction_pointer] = error
      return {instruction_pointer:action}
  def _E_Load(self, path):
    """loads an existing flayer session from file
    This loads an existing session including all
    relevant configured data.
    Arguments:
    - path: string with the path to the savefile
    """
    shelf = shelve.open(path)
    # TODO: FIX TEMP DIR PROBLEM ON RELOAD
    self.__flayer = shelf['flayer']
    self._altered = shelf['altered']
    shelf.close()
    self.__flayer.ResetTmpDir()
  def _E_Save(self, path=""):
    """saves the current session to file
    This saves all relevant configuration data to
    continue the current session from the point
    at which it is called.
    Arguments:
    - path: string with the path to the savefile
    """
    shelf = shelve.open(path)
    shelf['flayer'] = self.__flayer
    shelf['altered'] = self._altered
    # TODO add readline history support
    shelf.sync()
    shelf.close()
  def _E_About(self):
    """returns more information about Flayer!
    Run it and see.
    Arguments:
    - none
    """
    print "%s\n" % self.__flayer.About()
  def _E_Help(self, topic='overview'):
    """provides overall and detail help for each shell command
    This provides the overview and detailed help
    you are reading now. In addition, it falls through
    to the builtin help if nothing matches.
    Arguments:
    - topic -- (default: 'overview')
    """
    if topic == 'overview':
      # Overview: one line (the docstring summary) per exported command.
      print "Available commands:"
      format = " %-15s -- %s"
      for command in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
        doc = self.__class__.__dict__[command].__doc__
        name = command[3:].lower()
        if doc is None:
          print format % (name, "No documentation. Bad Developer!")
        else:
          print format % (name, doc.split("\n")[0])
      return
    for method in self.__class__.__dict__.items():
      name = method[0][3:].lower()
      if name == topic:
        doc = self.__class__.__dict__[method[0]].__doc__.split("\n")
        details = "\n".join([l.lstrip() for l in doc])
        print "%s -- %s" % (name, details)
        return
    print "Topic '%s' is unknown to Flayer.\n" % topic
    print "Attempting to use the builtin help function.\n"
    sys.modules['__builtin__'].help(topic)
| Python |
#!/usr/bin/python
#
# Copyright 2006-2007 Will Drewry <redpig@dataspill.org>
# Some portions copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
"""command line interface for flayer"""
__author__ = 'Will Drewry'
__revision__ = '$Id: $'
import copy
import os
import shelve
import signal
import subprocess
import sys
# A kludge to add "subtraction" to list.
class SList(list):
  """list subclass whose - operator returns the elements of the left side
  that do not occur in the right side (order and duplicates preserved)."""

  def __sub__(self, other):
    # Plain membership test replaces the original index()/ValueError dance;
    # the result is an ordinary list, exactly as before.
    return [item for item in self if item not in other]
class Shell:
  # Should I just subclass Flayer?
  """makes the Flayer library suitable for interactive python shell use"""
  def __init__(self, flayer=None):
    """wraps an existing Flayer instance, or constructs a fresh one
    Arguments:
    - flayer: an existing Flayer object to wrap (def: None)
    """
    if flayer is None:
      # NOTE(review): Flayer is not imported in this module; it is assumed
      # to already be in scope in the interactive session -- confirm.
      self.__flayer = Flayer()
    else:
      self.__flayer = flayer
    # Branch alterations made so far, keyed by instruction pointer.
    self._altered = {}
    # Paths of the stdout/stderr capture files from the last Run().
    self._lastio = {}
    self._lastio['stdout'] = ''
    self._lastio['stderr'] = ''
    # One entry (list of errors) appended per completed Run().
    self._past_errors = []
  def Banner(self):
    """displays a welcome banner for the shell"""
    print("%s\n" % self.__flayer.Banner())
  def Export(self):
    """exports all functions starting with _E_ and lower()s"""
    # (Docstring fixed: the filter below has always used '_E_', not 'CLI_'.)
    for exportable in filter(lambda x: x[:3] == '_E_', self.__class__.__dict__):
      name = exportable[3:].lower()
      sys.modules['__main__'].__dict__[name] = getattr(self, exportable)
  # All command line methods
  def _frame_location(self, frame):
    """returns 'dir/file:line' for a stack frame, or '' when unknown"""
    # frame.line is a string (it is concatenated, never converted).
    location = os.path.join(frame.dir, frame.file) + ':' + frame.line
    if location == ':':
      return ''
    return location
  def _E_Summary(self):
    """outputs a summary of errors from valgrind
    This prints a formatted list of errors from
    valgrind by kind. In particular, it includes
    debugging information from the last frame of
    the error stack trace.
    Arguments:
    - none
    """
    header = " id frame information"
    format = "%-5s %-15s %s %s"
    # Sort into kinds
    kinds = {}
    for e in self.__flayer.Errors().values():
      kinds.setdefault(e.kind, []).append(e)
    for kind, errors in kinds.items():
      print("==> " + kind)
      print(header)
      for e in errors:
        # frames[0] is the top (innermost) frame of the stack trace.
        print(format % (e.unique, e.frames[0].instruction_pointer,
                        e.frames[0].function_name,
                        self._frame_location(e.frames[0])))
  def _E_Details(self, error_id):
    """outputs detailed error information by id
    This prints all of the data collected about a particular
    error.
    Arguments:
    - error_id: error id integer
    """
    errors = self.__flayer.Errors()
    if error_id not in errors:
      print("Error id '%s' not found." % error_id)
      return
    error = errors[error_id]
    print("Error %s:" % error_id)
    print("- Thread Id: %s" % error.tid)
    print("- Kind: %s" % error.kind)
    print("- What: %s" % error.what)
    print("- Count: %d" % error.count)
    print("- Frames:")
    for index, frame in enumerate(error.frames):
      print(" Frame %d:" % index)
      print(" - Instruction pointer: %s" % frame.instruction_pointer)
      print(" - Object: %s" % frame.obj)
      print(" - Function: %s" % frame.function_name)
      print(" - File: %s" % os.path.join(frame.dir, frame.file))
      print(" - Line number: %s" % frame.line)
  def _E_Snippet(self, error_id, range=10):
    """outputs code snippet from the top level stack frame if available
    This command will output the first range lines before the conditional
    and the following range lines.
    Arguments:
    - error_id: error id integer
    - range: number of lines of code to show [10]
    """
    if range < 1:
      print("Range must be positive")
      return
    # TODO(wad): autoconvert error_id
    errors = self.__flayer.Errors()
    if error_id not in errors:
      print("Error id '%s' not found." % error_id)
      return
    error = errors[error_id]
    if len(error.frames) == 0:
      print("Error id '%s' has no debugging information." % error_id)
      return
    frame = error.frames[0]
    efile = os.path.join(frame.dir, frame.file)
    try:
      # open() replaces the py2-only file() builtin; only I/O errors are
      # swallowed now instead of everything.
      f = open(efile)
    except (IOError, OSError):
      print("Cannot open referenced file: %s" % efile)
      return
    line = 1
    try:
      # int() stays inside the try so a non-numeric frame.line is still
      # handled like any other snippet failure.
      target = int(frame.line)
      while line < (target - range):
        f.readline()  # eat it.
        line += 1
      while line < (target + range):
        # New lines included.  The offending line is marked with '|'.
        if line == target:
          sys.stdout.write('|%s' % f.readline())
        else:
          sys.stdout.write(' %s' % f.readline())
        line += 1
    # TODO(wad): except explicitly around each readline
    except Exception:
      print("exception")
      return
  def _E_Taint(self, value=None):
    """gets or sets arguments for tainting
    This command will set or retrieve the value
    of the current valgrind/flayer tainting arguments.
    Each setting is a character and valid characters are
    as follows: n, f, and s. Later 'e' will be added.
    'f' indicates that file I/O buffers will be tainted.
    'n' indicates that network I/O buffers will be tainted.
    's' indicates that fd 0 I/O buffers will be tainted.
    'e' will indicate environment variable tainting.
    A value of None will result in a copy of the current
    taint values being returned.
    Arguments:
    - value: a string containing the arguments above (def: None)
    """
    if value is None:
      return self.__flayer.get_taint()
    else:
      return self.__flayer.set_taint(value)
  def _E_Filter(self, file=None, network=None):
    """gets or sets filtering for taint arguments
    This command will set or retrieve the value
    of the --*-filter arguments used by
    valgrind/flayer. Specifically, file path prefixes are used
    to indicate which input buffers to taint while
    network host/port pairs can be specified.
    When all the arguments are None, the current settings
    will be returned.
    Arguments:
    - file: the path prefix of file to taint (def: None)
    - network: the "host:port" pair to taint (def: None)
    """
    if file is None and network is None:
      return {'file': self.__flayer.get_taint_file_filter(),
              'network': self.__flayer.get_taint_network_filter()}
    if file is not None:
      self.__flayer.set_taint_file_filter(file)
    if network is not None:
      # BUG FIX: this previously passed 'file', so the network filter could
      # never actually be set.
      self.__flayer.set_taint_network_filter(network)
  def _E_Command(self, command=None, args=None, env=None):
    """gets/sets the target command
    This function sets or gets the value of the target
    command and its arguments for use in valgrind.
    The command can be an explicit path or in the PATH
    environment variable. The arguments should be a
    list. The environment should be a dict and will be added
    to the current environment variable - NOT override it.
    Currently, these will _NOT_ be run under a shell.
    Arguments:
    - command: string containing the target command (def: None)
    - args: list of arguments for the command (def: [])
    - env: dict of environment variables (def: {})
    """
    if command is None:
      return self.__flayer.get_command()
    # None sentinels replace the shared mutable [] / {} defaults while
    # keeping the documented behavior.
    if args is None:
      args = []
    if env is None:
      env = {}
    return self.__flayer.set_command(command, args, env)
  def _E_Run(self, verbose=False):
    """calls valgrind with the configured target, args, and environment
    This command executes valgrind with the current
    configuration of target executable, arguments, and
    environment variables needed. It will also automatically
    process the output log from valgrind.
    Arguments:
    - verbose: prints additional information (def: False)
    """
    # Setup stdout and stderr files for this process.
    try:
      os.unlink(self._lastio['stdout'])
      os.unlink(self._lastio['stderr'])
    except OSError:
      pass  # Should be empty or valid. Kludge!
    # For now, this will clobber any past runs
    self._lastio['stdout'] = self.__flayer.GetTmpDir() + "/out"
    self._lastio['stderr'] = self.__flayer.GetTmpDir() + "/err"
    # NOTE(review): these capture files are opened but never handed to
    # Run(); presumably Flayer redirects into them internally -- confirm.
    stdout = open(self._lastio['stdout'], 'w')
    stderr = open(self._lastio['stderr'], 'w')
    process = self.__flayer.Run()
    # Setup a signal handler to make SIGINT kill the process
    def handler(signum, frame):
      # BUG FIX: os.kill() requires a signal argument; the old call
      # os.kill(process.pid) raised TypeError when Ctrl+C was pressed.
      # SIGTERM matches the message printed below.
      os.kill(process.pid, signal.SIGTERM)
    orig_handler = signal.signal(signal.SIGINT, handler)
    process.stdin.close()
    print("Process ID: %d\n" % process.pid)
    # XXX: dump out lastio on these calls.
    print("Press Ctrl+C to send a SIGTERM to the running process.\n")
    ret = None
    try:
      _pid, ret = os.wait()  # pid is first - don't care.
    except OSError:
      pass
    # Remove the SIGINT handler
    signal.signal(signal.SIGINT, orig_handler)
    stdout.close()
    stderr.close()
    # Guarded: ret stayed None if os.wait() failed (old code crashed with
    # an unbound local here).
    if verbose and ret is not None:
      print("Return code: %d\n" % ret)
    self.__flayer.ProcessLastRun()
    # XXX: does this need to be a deepcopy?
    self._past_errors.append(self.__flayer.Errors().values())
  def _E_ErrorDiff(self, run_a, run_b, kind='TaintedCondition'):
    """returns the difference between two ErrorSets
    This command will return the difference between the error sets
    generated by the specific runs. See PastErrors() for more.
    Arguments:
    - run_a: integer index of the run's errors
    - run_b: integer index of the run's errors
    """
    # NOTE(review): 'kind' is accepted but currently unused.
    a = SList(self._past_errors[run_a])
    b = SList(self._past_errors[run_b])
    return b - a
  def _E_PastErrors(self):
    """returns the list of past errors
    !!TODO!! make this print a pretty list
    Arguments:
    - None
    """
    return self._past_errors
  def _E_ClearErrors(self):
    """clears the list of past errors
    !!TODO!! make this print a pretty list
    Arguments:
    - None
    """
    self._past_errors = []
  def _E_Alter(self, error_id=None, action=None, address=None):
    """gets/sets runtime conditional behavior in the target
    This command gets or sets the branch altering
    functionality of valgrind. It allows for conditional
    blocks that make use of tainted data to be forced
    to be followed or skipped. It will output a pretty
    summary of alterations and returns a dict of instruction
    pointer to action.
    If an error is listed and the action is not specified,
    the alteration will be removed if it exists.
    Arguments:
    - error_id: string of the unique error id (def: None)
    - action: bool specifying whether to follow the branch (def: None)
    - address: unsigned long specifying the address to modify (def: None)
    """
    # TODO(wad): store alter info by IP
    if error_id is None and address is None:
      # No target given: just display the current alterations.
      print("address action frame information")
      alts = self.__flayer.get_branch_alterations()
      format = "%-7s %-6s %s %s"
      for ip, e in self._altered.items():
        if e is None:
          print(format % (ip, alts[ip], 'unknown', ''))
        else:
          print(format % (ip, alts[ip], e.frames[0].function_name,
                          self._frame_location(e.frames[0])))
      return self.__flayer.get_branch_alterations()
    instruction_pointer = address
    # Validate error_id, kind, and get address
    error = None
    if error_id is not None:
      errors = self.__flayer.Errors()
      if error_id in errors:
        error = errors[error_id]
      if error is None:
        print("No matching error id found.")
        return
      if error.kind != 'TaintedCondition':
        print('Error must be of kind TaintedCondition')
        return
      instruction_pointer = error.frames[0].instruction_pointer
    if action is None and instruction_pointer in self._altered:
      # TODO: add another method for deletion
      self.__flayer.del_branch_alteration(instruction_pointer)
      return self._altered.pop(instruction_pointer)
    else:
      self.__flayer.add_branch_alteration(instruction_pointer, action)
      self._altered[instruction_pointer] = error
      return {instruction_pointer: action}
  def _E_Load(self, path):
    """loads an existing flayer session from file
    This loads an existing session including all
    relevant configured data.
    Arguments:
    - path: string with the path to the savefile
    """
    shelf = shelve.open(path)
    # TODO: FIX TEMP DIR PROBLEM ON RELOAD
    self.__flayer = shelf['flayer']
    self._altered = shelf['altered']
    shelf.close()
    # Presumably recreates the scratch dir lost across sessions -- see TODO.
    self.__flayer.ResetTmpDir()
  def _E_Save(self, path=""):
    """saves the current session to file
    This saves all relevant configuration data to
    continue the current session from the point
    at which it is called.
    Arguments:
    - path: string with the path to the savefile
    """
    shelf = shelve.open(path)
    shelf['flayer'] = self.__flayer
    shelf['altered'] = self._altered
    # TODO add readline history support
    shelf.sync()
    shelf.close()
  def _E_About(self):
    """returns more information about Flayer!
    Run it and see.
    Arguments:
    - none
    """
    print("%s\n" % self.__flayer.About())
  def _E_Help(self, topic='overview'):
    """provides overall and detail help for each shell command
    This provides the overview and detailed help
    you are reading now. In addition, it falls through
    to the builtin help if nothing matches.
    Arguments:
    - topic -- (default: 'overview')
    """
    # Only _E_-prefixed methods are shell commands; restricting the detail
    # lookup to them avoids matching unrelated class attributes and
    # crashing on entries whose __doc__ is None.
    commands = [c for c in self.__class__.__dict__ if c[:3] == '_E_']
    if topic == 'overview':
      print("Available commands:")
      format = " %-15s -- %s"
      for command in commands:
        doc = self.__class__.__dict__[command].__doc__
        name = command[3:].lower()
        if doc is None:
          print(format % (name, "No documentation. Bad Developer!"))
        else:
          print(format % (name, doc.split("\n")[0]))
      return
    for command in commands:
      name = command[3:].lower()
      if name == topic:
        doc = self.__class__.__dict__[command].__doc__
        if doc is None:
          print("%s -- No documentation. Bad Developer!" % name)
          return
        details = "\n".join([line.lstrip() for line in doc.split("\n")])
        print("%s -- %s" % (name, details))
        return
    print("Topic '%s' is unknown to Flayer.\n" % topic)
    print("Attempting to use the builtin help function.\n")
    sys.modules['__builtin__'].help(topic)
| Python |
from google.appengine.api import datastore_errors
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.db import Model
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from pyamf import amf3
from pyamf.remoting.gateway.google import WebAppGateway
import logging
import os
import pyamf
def echo(data):
    """returns the given string prefixed with a short PyAMF greeting"""
    # Same result as 'PyAMF says: ' + data, assembled via join.
    return ''.join(['PyAMF says: ', data])
def get_upload_url():
    """returns an upload url
    Creates a fresh blobstore upload URL whose POST target is /upload.
    """
    return blobstore.create_upload_url('/upload')
def main():
    """configures the PyAMF remoting gateway and serves it over WSGI"""
    debug = True
    logging.getLogger().setLevel(logging.DEBUG)
    # Remoting targets exposed to AMF clients, keyed by service name.
    services = {
        'services.echo' : echo,
        'services.getUploadURL': get_upload_url,
    }
    gateway = WebAppGateway(services, logger = logging, debug = debug)
    # The AMF gateway is the only mapped route.
    paths = [
        ('/gateway', gateway),
    ]
    application = webapp.WSGIApplication(paths, debug = debug)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
#
import os
import urllib
import logging
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
class FlexHandler(webapp.RequestHandler):
    """serves the Flex client HTML page"""
    def get(self):
        """renders flex.html with no template context"""
        self.response.out.write(template.render('flex.html', None))
class MainHandler(webapp.RequestHandler):
    """renders a minimal HTML form that uploads a file to the blobstore"""
    def get(self):
        # Each form submission needs a freshly created blobstore upload URL.
        upload_url = blobstore.create_upload_url('/upload')
        self.response.out.write('<html><body>')
        self.response.out.write('<form action="%s" method="POST" enctype="multipart/form-data">' % upload_url)
        self.response.out.write("""Upload File: <input type="file" name="file"><br> <input type="submit"
            name="submit" value="Submit"> </form></body></html>""")
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """handles the blobstore upload POST and redirects to the key page"""
    def post(self):
        upload_files = self.get_uploads('file') # 'file' is file upload field in the form
        # NOTE(review): assumes at least one file was uploaded; an empty
        # submission would raise IndexError here.
        blob_info = upload_files[0]
        #self.redirect('/serve/%s' % blob_info.key())
        self.redirect('/blob_info_key/%s' % blob_info.key())
class BlobInfoKeyHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """writes the URL-unquoted blob key back to the client as plain text"""
    def get(self, resource):
        self.response.out.write(str(urllib.unquote(resource)))
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """streams a stored blob identified by its (URL-unquoted) key"""
    def get(self, resource):
        resource = str(urllib.unquote(resource))
        blob_info = blobstore.BlobInfo.get(resource)
        self.send_blob(blob_info)
def main():
    """maps URL routes to handlers and runs the WSGI application"""
    logging.getLogger().setLevel(logging.DEBUG)
    application = webapp.WSGIApplication(
        [('/', MainHandler),
         ('/upload', UploadHandler),
         ('/serve/([^/]+)?', ServeHandler),
         ('/flex', FlexHandler),
         ('/blob_info_key/([^/]+)?', BlobInfoKeyHandler),
        ], debug=True)
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
#
import os
import urllib
import logging
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
class FlexHandler(webapp.RequestHandler):
    """serves the Flex client HTML page"""
    def get(self):
        """renders flex.html with no template context"""
        self.response.out.write(template.render('flex.html', None))
class MainHandler(webapp.RequestHandler):
    """renders a minimal HTML form that uploads a file to the blobstore"""
    def get(self):
        # Each form submission needs a freshly created blobstore upload URL.
        upload_url = blobstore.create_upload_url('/upload')
        self.response.out.write('<html><body>')
        self.response.out.write('<form action="%s" method="POST" enctype="multipart/form-data">' % upload_url)
        self.response.out.write("""Upload File: <input type="file" name="file"><br> <input type="submit"
            name="submit" value="Submit"> </form></body></html>""")
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """handles the blobstore upload POST and redirects to the key page"""
    def post(self):
        upload_files = self.get_uploads('file') # 'file' is file upload field in the form
        # NOTE(review): assumes at least one file was uploaded; an empty
        # submission would raise IndexError here.
        blob_info = upload_files[0]
        #self.redirect('/serve/%s' % blob_info.key())
        self.redirect('/blob_info_key/%s' % blob_info.key())
class BlobInfoKeyHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """writes the URL-unquoted blob key back to the client as plain text"""
    def get(self, resource):
        self.response.out.write(str(urllib.unquote(resource)))
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """streams a stored blob identified by its (URL-unquoted) key"""
    def get(self, resource):
        resource = str(urllib.unquote(resource))
        blob_info = blobstore.BlobInfo.get(resource)
        self.send_blob(blob_info)
def main():
    """maps URL routes to handlers and runs the WSGI application"""
    logging.getLogger().setLevel(logging.DEBUG)
    application = webapp.WSGIApplication(
        [('/', MainHandler),
         ('/upload', UploadHandler),
         ('/serve/([^/]+)?', ServeHandler),
         ('/flex', FlexHandler),
         ('/blob_info_key/([^/]+)?', BlobInfoKeyHandler),
        ], debug=True)
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Local Shared Object implementation.
Local Shared Object (LSO), sometimes known as Adobe Flash cookies, is a
cookie-like data entity used by the Adobe Flash Player and Gnash. The players
allow web content to read and write LSO data to the computer's local drive on
a per-domain basis.
@see: U{Local Shared Object on WikiPedia (external)
<http://en.wikipedia.org/wiki/Local_Shared_Object>}
@see: U{Local Shared Object envelope (external)
<http://osflash.org/documentation/amf/envelopes/sharedobject>}
@since: 0.1.0
"""
import pyamf
from pyamf import util
#: Magic Number - 2 bytes
HEADER_VERSION = '\x00\xbf'
#: Marker - 10 bytes
HEADER_SIGNATURE = 'TCSO\x00\x04\x00\x00\x00\x00'
#: Padding - 4 bytes
PADDING_BYTE = '\x00'
def decode(stream, strict=True):
    """
    Decodes a SOL stream. C{strict} mode ensures that the sol stream is as spec
    compatible as possible.
    @param stream: The SOL data to decode.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} or any
        value its constructor accepts.
    @param strict: Ensure that the SOL stream is as spec compatible as possible.
    @type strict: C{bool}
    @return: A C{tuple} containing the C{root_name} and a C{dict} of name,
        value pairs.
    @rtype: C{tuple}
    @raise DecodeError: Unknown SOL version in header.
    @raise DecodeError: Inconsistent stream header length.
    @raise DecodeError: Invalid signature.
    @raise DecodeError: Invalid padding read.
    @raise DecodeError: Missing padding byte.
    """
    if not isinstance(stream, util.BufferedByteStream):
        stream = util.BufferedByteStream(stream)
    # read the version
    version = stream.read(2)
    if version != HEADER_VERSION:
        raise pyamf.DecodeError('Unknown SOL version in header')
    # read the length
    length = stream.read_ulong()
    if strict and stream.remaining() != length:
        raise pyamf.DecodeError('Inconsistent stream header length')
    # read the signature
    signature = stream.read(10)
    if signature != HEADER_SIGNATURE:
        raise pyamf.DecodeError('Invalid signature')
    # read the root name (ushort byte-length prefix, then UTF-8)
    length = stream.read_ushort()
    root_name = stream.read_utf8_string(length)
    # read padding
    if stream.read(3) != PADDING_BYTE * 3:
        raise pyamf.DecodeError('Invalid padding read')
    # the next byte selects the AMF encoding used for the body
    decoder = pyamf.get_decoder(stream.read_uchar())
    decoder.stream = stream
    # read name/value pairs until end of stream; each pair is followed by
    # a single padding byte
    values = {}
    while 1:
        if stream.at_eof():
            break
        name = decoder.readString()
        value = decoder.readElement()
        # read the padding
        if stream.read(1) != PADDING_BYTE:
            raise pyamf.DecodeError('Missing padding byte')
        values[name] = value
    return (root_name, values)
def encode(name, values, strict=True, encoding=pyamf.AMF0):
    """
    Produces a SharedObject encoded stream based on the name and values.
    @param name: The root name of the SharedObject.
    @type name: C{basestring}
    @param values: A C{dict} of name value pairs to be encoded in the stream.
    @type values: C{dict}
    @param strict: Ensure that the SOL stream is as spec compatible as possible.
    @type strict: C{bool}
    @param encoding: AMF encoding type used for the body (default: AMF0).
    @type encoding: C{int}
    @return: A SharedObject encoded stream.
    @rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    """
    encoder = pyamf.get_encoder(encoding)
    encoder.stream = stream = util.BufferedByteStream()
    # write the header
    stream.write(HEADER_VERSION)
    if strict is True:
        # reserve space for the body length; it is backfilled below once
        # the total size is known
        length_pos = stream.tell()
        stream.write_ulong(0)
    # write the signature
    stream.write(HEADER_SIGNATURE)
    # write the root name
    if not isinstance(name, unicode):
        name = unicode(name)
    stream.write_ushort(len(name))
    stream.write_utf8_string(name)
    # write the padding
    stream.write(PADDING_BYTE * 3)
    stream.write_uchar(encoding)
    for n, v in values.iteritems():
        encoder.writeString(n, writeType=False)
        encoder.writeElement(v)
        # write the padding
        stream.write(PADDING_BYTE)
    if strict:
        # backfill the length field with everything that follows it
        stream.seek(length_pos)
        stream.write_ulong(stream.remaining() - 4)
    # rewind so callers can read the stream from the start
    stream.seek(0)
    return stream
def load(name_or_file):
    """
    Loads a sol file and returns a L{SOL} object.
    @param name_or_file: Name of file, or file-object.
    @type name_or_file: C{str} or C{StringIO}
    @return: The decoded shared object, with its values as dict entries.
    @rtype: L{SOL}
    @raise ValueError: Readable stream expected.
    """
    f = name_or_file
    opened = False
    if isinstance(name_or_file, basestring):
        # a path was given: open it ourselves and close it afterwards
        f = open(name_or_file, 'rb')
        opened = True
    elif not hasattr(f, 'read'):
        raise ValueError('Readable stream expected')
    name, values = decode(f.read())
    s = SOL(name)
    for n, v in values.iteritems():
        s[n] = v
    if opened is True:
        f.close()
    return s
def save(sol, name_or_file, encoding=pyamf.AMF0):
    """
    Writes a L{SOL} object to C{name_or_file}.
    @param sol: The shared object to serialize; its C{name} attribute is used
        as the root name and its dict contents as the values.
    @type sol: L{SOL}
    @param name_or_file: Name of file, or file-object.
    @type name_or_file: C{str} or C{StringIO}
    @param encoding: AMF encoding type.
    @type encoding: C{int}
    @raise ValueError: Writable stream expected.
    """
    f = name_or_file
    opened = False
    if isinstance(name_or_file, basestring):
        # a path was given: open (truncating) and close it afterwards
        f = open(name_or_file, 'wb+')
        opened = True
    elif not hasattr(f, 'write'):
        raise ValueError('Writable stream expected')
    f.write(encode(sol.name, sol, encoding=encoding).getvalue())
    if opened:
        f.close()
class SOL(dict):
    """
    A C{dict} carrying a root C{name}, allowing easy manipulation of the
    internals of a C{sol} (Local Shared Object) file.
    """
    def __init__(self, name):
        self.name = name

    def save(self, name_or_file, encoding=pyamf.AMF0):
        # Delegate to the module-level save().
        save(self, name_or_file, encoding)

    def __repr__(self):
        return '<%s %s %s at 0x%x>' % (type(self).__name__, self.name,
            dict.__repr__(self), id(self))

#: Alias for L{SOL}.
LSO = SOL
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF3 implementation.
C{AMF3} is the default serialization for
U{ActionScript<http://en.wikipedia.org/wiki/ActionScript>} 3.0 and provides
various advantages over L{AMF0<pyamf.amf0>}, which is used for ActionScript 1.0
and 2.0. It adds support for sending C{int} and C{uint} objects as integers and
supports data types that are available only in ActionScript 3.0, such as
L{ByteArray} and L{ArrayCollection}.
@see: U{Official AMF3 Specification in English (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/amf3_spec_05_05_08.pdf>}
@see: U{Official AMF3 Specification in Japanese (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/JP_amf3_spec_121207.pdf>}
@see: U{AMF3 documentation on OSFlash (external)
<http://osflash.org/documentation/amf3>}
@since: 0.1
"""
import types
import datetime
import zlib
import pyamf
from pyamf import util
from pyamf.flex import ObjectProxy, ArrayCollection
#: If True encode/decode lists/tuples to L{ArrayCollections<ArrayCollection>}
#: and dicts to L{ObjectProxy}
use_proxies_default = False
# Compatibility shim: on very old Pythons without the builtin set type,
# fall back to the sets module.
try:
    set()
except NameError:
    from sets import Set as set
#: The undefined type is represented by the undefined type marker. No further
#: information is encoded for this value.
TYPE_UNDEFINED = '\x00'
#: The null type is represented by the null type marker. No further
#: information is encoded for this value.
TYPE_NULL = '\x01'
#: The false type is represented by the false type marker and is used to
#: encode a Boolean value of C{false}. No further information is encoded for
#: this value.
TYPE_BOOL_FALSE = '\x02'
#: The true type is represented by the true type marker and is used to encode
#: a Boolean value of C{true}. No further information is encoded for this
#: value.
TYPE_BOOL_TRUE = '\x03'
#: In AMF 3 integers are serialized using a variable length signed 29-bit
#: integer.
#: @see: U{Parsing Integers on OSFlash (external)
#: <http://osflash.org/documentation/amf3/parsing_integers>}
TYPE_INTEGER = '\x04'
#: This type is used to encode an ActionScript Number or an ActionScript
#: C{int} of value greater than or equal to 2^28 or an ActionScript uint of
#: value greater than or equal to 2^29. The encoded value is is always an 8
#: byte IEEE-754 double precision floating point value in network byte order
#: (sign bit in low memory). The AMF 3 number type is encoded in the same
#: manner as the AMF 0 L{Number<pyamf.amf0.TYPE_NUMBER>} type.
TYPE_NUMBER = '\x05'
#: ActionScript String values are represented using a single string type in
#: AMF 3 - the concept of string and long string types from AMF 0 is not used.
#: Strings can be sent as a reference to a previously occurring String by
#: using an index to the implicit string reference table. Strings are encoding
#: using UTF-8 - however the header may either describe a string literal or a
#: string reference.
TYPE_STRING = '\x06'
#: ActionScript 3.0 introduced a new XML type however the legacy C{XMLDocument}
#: type from ActionScript 1.0 and 2.0.is retained in the language as
#: C{flash.xml.XMLDocument}. Similar to AMF 0, the structure of an
#: C{XMLDocument} needs to be flattened into a string representation for
#: serialization. As with other strings in AMF, the content is encoded in
#: UTF-8. XMLDocuments can be sent as a reference to a previously occurring
#: C{XMLDocument} instance by using an index to the implicit object reference
#: table.
#: @see: U{OSFlash documentation (external)
#: <http://osflash.org/documentation/amf3#x07_-_xml_legacy_flash.xml.xmldocument_class>}
TYPE_XML = '\x07'
#: In AMF 3 an ActionScript Date is serialized simply as the number of
#: milliseconds elapsed since the epoch of midnight, 1st Jan 1970 in the
#: UTC time zone. Local time zone information is not sent.
TYPE_DATE = '\x08'
#: ActionScript Arrays are described based on the nature of their indices,
#: i.e. their type and how they are positioned in the Array.
TYPE_ARRAY = '\x09'
#: A single AMF 3 type handles ActionScript Objects and custom user classes.
TYPE_OBJECT = '\x0A'
#: ActionScript 3.0 introduces a new top-level XML class that supports
#: U{E4X<http://en.wikipedia.org/wiki/E4X>} syntax.
#: For serialization purposes the XML type needs to be flattened into a
#: string representation. As with other strings in AMF, the content is
#: encoded using UTF-8.
TYPE_XMLSTRING = '\x0B'
#: ActionScript 3.0 introduces the L{ByteArray} type to hold an Array
#: of bytes. AMF 3 serializes this type using a variable length encoding
#: 29-bit integer for the byte-length prefix followed by the raw bytes
#: of the L{ByteArray}.
#: @see: U{Parsing ByteArrays on OSFlash (external)
#: <http://osflash.org/documentation/amf3/parsing_byte_arrays>}
TYPE_BYTEARRAY = '\x0C'
#: Reference bit.
REFERENCE_BIT = 0x01
#: The maximum that can be represented by an signed 29 bit integer.
MAX_29B_INT = 0x0FFFFFFF
#: The minimum that can be represented by an signed 29 bit integer.
MIN_29B_INT = -0x10000000
#: NOTE(review): appears to memoize variable-length-encoded integers by
#: value; it is populated elsewhere in this module -- confirm.
ENCODED_INT_CACHE = {}
class ObjectEncoding:
    """
    AMF object encodings.
    Constants describing how an AMF3 object's members are laid out in the
    stream (see the per-constant notes below).
    """
    #: Property list encoding.
    #: The remaining integer-data represents the number of class members that
    #: exist. The property names are read as string-data. The values are then
    #: read as AMF3-data.
    STATIC = 0x00
    #: Externalizable object.
    #: What follows is the value of the "inner" object, including type code.
    #: This value appears for objects that implement IExternalizable, such as
    #: L{ArrayCollection} and L{ObjectProxy}.
    EXTERNAL = 0x01
    #: Name-value encoding.
    #: The property names and values are encoded as string-data followed by
    #: AMF3-data until there is an empty string property name. If there is a
    #: class-def reference there are no property names and the number of values
    #: is equal to the number of properties in the class-def.
    DYNAMIC = 0x02
    #: Proxy object.
    PROXY = 0x03
class DataOutput(object):
    """
    I am a C{StringIO} type object containing byte data from the AMF stream.
    ActionScript 3.0 introduced the C{flash.utils.ByteArray} class to support
    the manipulation of raw data in the form of an Array of bytes.
    I provide a set of methods for writing binary data with ActionScript 3.0.
    This class is the I/O counterpart to the L{DataInput} class, which reads
    binary data.
    @see: U{IDataOutput on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/flash/utils/IDataOutput.html>}
    """
    def __init__(self, encoder):
        """
        @param encoder: Encoder containing the stream.
        @type encoder: L{amf3.Encoder<pyamf.amf3.Encoder>}
        """
        self.encoder = encoder
        # All write methods below delegate to this underlying byte stream.
        self.stream = encoder.stream
    def writeBoolean(self, value):
        """
        Writes a Boolean value.
        @type value: C{bool}
        @param value: A C{Boolean} value determining which byte is written.
        If the parameter is C{True}, C{1} is written; if C{False}, C{0} is
        written.
        @raise ValueError: Non-boolean value found.
        """
        if isinstance(value, bool):
            if value is True:
                self.stream.write_uchar(1)
            else:
                self.stream.write_uchar(0)
        else:
            raise ValueError("Non-boolean value found")
    def writeByte(self, value):
        """
        Writes a byte.
        @type value: C{int}
        """
        self.stream.write_char(value)
    def writeUnsignedByte(self, value):
        """
        Writes an unsigned byte.
        @type value: C{int}
        @since: 0.5
        """
        return self.stream.write_uchar(value)
    def writeDouble(self, value):
        """
        Writes an IEEE 754 double-precision (64-bit) floating
        point number.
        @type value: C{number}
        """
        self.stream.write_double(value)
    def writeFloat(self, value):
        """
        Writes an IEEE 754 single-precision (32-bit) floating
        point number.
        @type value: C{float}
        """
        self.stream.write_float(value)
    def writeInt(self, value):
        """
        Writes a 32-bit signed integer.
        @type value: C{int}
        """
        self.stream.write_long(value)
    def writeMultiByte(self, value, charset):
        """
        Writes a multibyte string to the datastream using the
        specified character set.
        @type value: C{str}
        @param value: The string value to be written.
        @type charset: C{str}
        @param charset: The string denoting the character set to use. Possible
        character set strings include C{shift-jis}, C{cn-gb},
        C{iso-8859-1} and others.
        @see: U{Supported character sets on Livedocs (external)
        <http://livedocs.adobe.com/flex/201/langref/charset-codes.html>}
        """
        # Coerce to unicode first so the text is (re-)encoded in `charset`.
        self.stream.write(unicode(value).encode(charset))
    def writeObject(self, value, use_references=True, use_proxies=None):
        """
        Writes an object to data stream in AMF serialized format.
        @param value: The object to be serialized.
        @type use_references: C{bool}
        @param use_references: Passed through to the encoder; presumably
        controls use of the object reference table -- confirm against
        C{Encoder.writeElement}.
        @param use_proxies: Passed through to the encoder; C{None} defers to
        the encoder's default proxy behavior.
        """
        self.encoder.writeElement(value, use_references, use_proxies)
    def writeShort(self, value):
        """
        Writes a 16-bit integer.
        @type value: C{int}
        @param value: A byte value as an integer.
        """
        self.stream.write_short(value)
    def writeUnsignedShort(self, value):
        """
        Writes a 16-bit unsigned integer.
        @type value: C{int}
        @param value: A byte value as an integer.
        @since: 0.5
        """
        self.stream.write_ushort(value)
    def writeUnsignedInt(self, value):
        """
        Writes a 32-bit unsigned integer.
        @type value: C{int}
        @param value: A byte value as an unsigned integer.
        """
        self.stream.write_ulong(value)
    def writeUTF(self, value):
        """
        Writes a UTF-8 string to the data stream.
        The length of the UTF-8 string in bytes is written first,
        as a 16-bit integer, followed by the bytes representing the
        characters of the string.
        @type value: C{str}
        @param value: The string value to be written.
        """
        if not isinstance(value, unicode):
            value = unicode(value, 'utf8')
        # Encode into a scratch buffer first so the BYTE length (not the
        # character count) is what gets written as the prefix.
        buf = util.BufferedByteStream()
        buf.write_utf8_string(value)
        bytes = buf.getvalue()
        self.stream.write_ushort(len(bytes))
        self.stream.write(bytes)
    def writeUTFBytes(self, value):
        """
        Writes a UTF-8 string. Similar to L{writeUTF}, but does
        not prefix the string with a 16-bit length word.
        @type value: C{str}
        @param value: The string value to be written.
        """
        val = None
        if isinstance(value, unicode):
            val = value
        else:
            val = unicode(value, 'utf8')
        self.stream.write_utf8_string(val)
class DataInput(object):
    """
    Provides the ActionScript 3.0 binary-reading API. This class is the
    input counterpart of L{DataOutput}, which handles the writing side.

    @see: U{IDataInput on Livedocs (external)
        <http://livedocs.adobe.com/flex/201/langref/flash/utils/IDataInput.html>}
    """

    def __init__(self, decoder):
        """
        @param decoder: AMF3 decoder whose stream will be read from.
        @type decoder: L{amf3.Decoder<pyamf.amf3.Decoder>}
        """
        assert isinstance(decoder, Decoder)

        self.decoder = decoder
        self.stream = decoder.stream

    def readBoolean(self):
        """
        Read one byte and interpret it as a C{Boolean}.

        @rtype: C{bool}
        @return: C{True} if the byte is C{0x01}, C{False} if it is C{0x00}.
        @raise ValueError: The byte read was neither C{0x00} nor C{0x01}.
        """
        flag = self.stream.read(1)

        if flag == '\x01':
            return True

        if flag == '\x00':
            return False

        raise ValueError("Error reading boolean")

    def readByte(self):
        """
        Read a signed byte.

        @rtype: C{int}
        @return: A value in the range -128 to 127.
        """
        return self.stream.read_char()

    def readDouble(self):
        """
        Read an IEEE 754 double-precision floating point number from the
        data stream.

        @rtype: C{number}
        """
        return self.stream.read_double()

    def readFloat(self):
        """
        Read an IEEE 754 single-precision floating point number from the
        data stream.

        @rtype: C{number}
        """
        return self.stream.read_float()

    def readInt(self):
        """
        Read a signed 32-bit integer from the data stream.

        @rtype: C{int}
        @return: A value in the range -2147483648 to 2147483647.
        """
        return self.stream.read_long()

    def readMultiByte(self, length, charset):
        """
        Read C{length} bytes and decode them using the given character set.

        @type length: C{int}
        @param length: The number of bytes to consume from the stream.
        @type charset: C{str}
        @param charset: Name of the character set used for decoding.
        @rtype: C{str}
        @return: The decoded (unicode) string.
        """
        # TODO nick: how to work out the code point byte size (on the fly)?
        raw = self.stream.read(length)

        return unicode(raw, charset)

    def readObject(self):
        """
        Deserialize and return the next object from the data stream.
        """
        return self.decoder.readElement()

    def readShort(self):
        """
        Read a signed 16-bit integer from the data stream.

        @rtype: C{uint}
        @return: A value in the range -32768 to 32767.
        """
        return self.stream.read_short()

    def readUnsignedByte(self):
        """
        Read an unsigned byte from the data stream.

        @rtype: C{uint}
        @return: A value in the range 0 to 255.
        """
        return self.stream.read_uchar()

    def readUnsignedInt(self):
        """
        Read an unsigned 32-bit integer from the data stream.

        @rtype: C{uint}
        @return: A value in the range 0 to 4294967295.
        """
        return self.stream.read_ulong()

    def readUnsignedShort(self):
        """
        Read an unsigned 16-bit integer from the data stream.

        @rtype: C{uint}
        @return: A value in the range 0 to 65535.
        """
        return self.stream.read_ushort()

    def readUTF(self):
        """
        Read a UTF-8 string that is prefixed with an unsigned short giving
        its byte length.

        @rtype: C{str}
        """
        byte_len = self.stream.read_ushort()

        return self.stream.read_utf8_string(byte_len)

    def readUTFBytes(self, length):
        """
        Read C{length} UTF-8 encoded bytes from the data stream and return
        the decoded string.

        @type length: C{int}
        @param length: The number of bytes to consume from the stream.
        @rtype: C{str}
        """
        return self.readMultiByte(length, 'utf-8')
class ByteArray(util.BufferedByteStream, DataInput, DataOutput):
    """
    I am a C{StringIO} type object containing byte data from the AMF stream.
    ActionScript 3.0 introduced the C{flash.utils.ByteArray} class to support
    the manipulation of raw data in the form of an Array of bytes.
    Supports C{zlib} compression.

    Possible uses of the C{ByteArray} class:
     - Creating a custom protocol to connect to a client.
     - Writing your own AMF/Remoting packet.
     - Optimizing the size of your data by using custom data types.

    @see: U{ByteArray on Livedocs (external)
        <http://livedocs.adobe.com/flex/201/langref/flash/utils/ByteArray.html>}
    """

    class __amf__:
        # Force AMF3 encoding for instances of this class.
        amf3 = True

    def __init__(self, *args, **kwargs):
        # Each ByteArray owns its own AMF3 context unless one is supplied
        # via the 'context' keyword (popped so the stream ctor never sees it).
        self.context = kwargs.pop('context', Context())

        util.BufferedByteStream.__init__(self, *args, **kwargs)
        DataInput.__init__(self, Decoder(self, self.context))
        DataOutput.__init__(self, Encoder(self, self.context))

        # Whether str() output should be zlib-compressed (see compress()).
        self.compressed = False

    def __cmp__(self, other):
        # Compare by raw byte content; two ByteArrays are equal when their
        # buffers are equal (Python 2 comparison protocol).
        if isinstance(other, ByteArray):
            return cmp(self.getvalue(), other.getvalue())

        return cmp(self.getvalue(), other)

    def __str__(self):
        buf = self.getvalue()

        if self.compressed:
            buf = zlib.compress(buf)
            # FIXME nick: hacked
            # NOTE(review): forces the second byte of the zlib header to
            # 0xda -- presumably to mimic the Flash Player's output;
            # confirm before changing.
            buf = buf[0] + '\xda' + buf[2:]

        return buf

    def compress(self):
        """
        Forces compression of the underlying stream.
        """
        self.compressed = True
class ClassDefinition(object):
    """
    Captures the encoding details for a class alias as used by the AMF3
    codec: the number of static attributes and the object encoding type
    (static, dynamic or external).
    """

    def __init__(self, alias):
        self.alias = alias
        self.reference = None

        alias.compile()

        self.attr_len = len(alias.static_attrs) if alias.static_attrs else 0

        if alias.external:
            self.encoding = ObjectEncoding.EXTERNAL
        elif (not alias.dynamic and
                alias.static_attrs == alias.encodable_properties):
            self.encoding = ObjectEncoding.STATIC
        else:
            self.encoding = ObjectEncoding.DYNAMIC

    def __repr__(self):
        return '<%s.ClassDefinition reference=%r encoding=%r alias=%r at 0x%x>' % (
            self.__class__.__module__, self.reference, self.encoding,
            self.alias, id(self))
class Context(pyamf.BaseContext):
    """
    I hold the AMF3 context for en/decoding streams.

    @ivar strings: A collection of string references.
    @type strings: L{util.IndexedCollection}
    @ivar classes: Maps a class object to its L{ClassDefinition}.
    @type classes: C{dict}
    @ivar legacy_xml: A collection of legacy encoded XML documents.
    @type legacy_xml: L{util.IndexedCollection}
    """

    def __init__(self, exceptions=True):
        # String reference table (hash based for fast lookups).
        self.strings = util.IndexedCollection(use_hash=True, exceptions=False)
        # klass -> ClassDefinition, and reference index -> ClassDefinition.
        self.classes = {}
        self.class_ref = {}
        self.legacy_xml = util.IndexedCollection(exceptions=False)
        self.object_aliases = util.IndexedMap(exceptions=False)  # Maps one object to another
        # Next class reference index to hand out (see addClass).
        self.class_idx = 0

        pyamf.BaseContext.__init__(self, exceptions=exceptions)

    def clear(self):
        """
        Clears the context, resetting every reference table.
        """
        pyamf.BaseContext.clear(self)

        self.strings.clear()
        self.classes = {}
        self.class_ref = {}
        self.legacy_xml.clear()
        self.object_aliases.clear()
        self.class_idx = 0

    def setObjectAlias(self, obj, alias):
        """
        Maps an object to an aliased object.

        @since: 0.4
        """
        self.object_aliases.map(obj, alias)

    def getObjectAlias(self, obj):
        """
        Get an alias of an object.

        @since: 0.4
        @raise pyamf.ReferenceError: Unknown object alias.
        @raise pyamf.ReferenceError: Unknown mapped alias.
        """
        ref = self.object_aliases.getReferenceTo(obj)

        if ref is None:
            if self.exceptions is False:
                return None

            raise pyamf.ReferenceError('Unknown object alias for %r' % (obj,))

        mapped = self.object_aliases.getMappedByReference(ref)

        if mapped is None:
            if self.exceptions is False:
                return None

            raise pyamf.ReferenceError('Unknown mapped alias for %r' % (obj,))

        return mapped

    def getString(self, ref):
        """
        Gets a string based on a reference C{ref}.

        @param ref: The reference index.
        @type ref: C{str}
        @raise pyamf.ReferenceError: The referenced string could not be found.
        @rtype: C{str}
        @return: The referenced string.
        """
        i = self.strings.getByReference(ref)

        if i is None and self.exceptions:
            raise pyamf.ReferenceError("String reference %r not found" % (ref,))

        return i

    def getStringReference(self, s):
        """
        Return string reference.

        @type s: C{str}
        @param s: The referenced string.
        @raise pyamf.ReferenceError: The string reference could not be found.
        @return: The reference index to the string.
        @rtype: C{int}
        """
        i = self.strings.getReferenceTo(s)

        if i is None and self.exceptions:
            raise pyamf.ReferenceError("Reference for string %r not found" % (s,))

        return i

    def addString(self, s):
        """
        Creates a reference to C{s}. If the reference already exists, that
        reference is returned.

        @type s: C{str}
        @param s: The string to be referenced.
        @rtype: C{int}
        @return: The reference index.
        @raise TypeError: The parameter C{s} is not of C{basestring} type.
        @raise pyamf.ReferenceError: Trying to store a reference to an
            empty string.
        """
        if not isinstance(s, basestring):
            raise TypeError

        if len(s) == 0:
            if not self.exceptions:
                return None

            # do not store empty string references
            raise pyamf.ReferenceError("Cannot store a reference to an empty string")

        return self.strings.append(s)

    def getClassByReference(self, ref):
        """
        Return the L{ClassDefinition} for a reference index.

        @raise pyamf.ReferenceError: The class reference could not be found.
        @return: Class reference.
        """
        try:
            return self.class_ref[ref]
        except KeyError:
            if not self.exceptions:
                return None

            raise pyamf.ReferenceError("Class reference %r not found" % (
                ref,))

    def getClass(self, klass):
        """
        Return the L{ClassDefinition} registered for a class object.

        @raise pyamf.ReferenceError: The class reference could not be found.
        @return: Class reference.
        """
        try:
            return self.classes[klass]
        except KeyError:
            if not self.exceptions:
                return None

            raise pyamf.ReferenceError("Class alias for %r not found" % (
                klass,))

    def addClass(self, alias, klass):
        """
        Creates a reference to C{class_def}.

        @param alias: C{ClassDefinition} instance.
        """
        ref = self.class_idx

        self.class_ref[ref] = alias
        cd = self.classes[klass] = alias

        # The definition remembers its own reference index so the encoder
        # can emit a reference the next time this class is written.
        cd.reference = ref

        self.class_idx += 1

        return ref

    def getLegacyXML(self, ref):
        """
        Return the legacy XML reference. This is the C{flash.xml.XMLDocument}
        class in ActionScript 3.0 and the top-level C{XML} class in
        ActionScript 1.0 and 2.0.

        @type ref: C{int}
        @param ref: The reference index.
        @raise pyamf.ReferenceError: The legacy XML reference could not
            be found.
        @return: Instance of L{ET<util.ET>}
        """
        i = self.legacy_xml.getByReference(ref)

        if i is None:
            if not self.exceptions:
                return None

            raise pyamf.ReferenceError("Legacy XML reference %r not found" % (ref,))

        return i

    def getLegacyXMLReference(self, doc):
        """
        Return legacy XML reference.

        @type doc: L{ET<util.ET>}
        @param doc: The XML document to reference.
        @raise pyamf.ReferenceError: The reference could not be found.
        @return: The reference to C{doc}.
        @rtype: C{int}
        """
        i = self.legacy_xml.getReferenceTo(doc)

        if i is None:
            if not self.exceptions:
                return None

            raise pyamf.ReferenceError("Reference for document %r not found" % (doc,))

        return i

    def addLegacyXML(self, doc):
        """
        Creates a reference to C{doc}.

        If C{doc} is already referenced that index will be returned.
        Otherwise a new index will be created.

        @type doc: L{ET<util.ET>}
        @param doc: The XML document to reference.
        @rtype: C{int}
        @return: The reference to C{doc}.
        """
        return self.legacy_xml.append(doc)

    def __copy__(self):
        # A copy shares no reference tables, only the exceptions flag.
        return self.__class__(exceptions=self.exceptions)
class Decoder(pyamf.BaseDecoder):
    """
    Decodes an AMF3 data stream.
    """
    # Context type used to track object/string/class references.
    context_class = Context

    # Maps AMF3 type markers to the reader method that handles them.
    type_map = {
        TYPE_UNDEFINED: 'readUndefined',
        TYPE_NULL: 'readNull',
        TYPE_BOOL_FALSE: 'readBoolFalse',
        TYPE_BOOL_TRUE: 'readBoolTrue',
        TYPE_INTEGER: 'readSignedInteger',
        TYPE_NUMBER: 'readNumber',
        TYPE_STRING: 'readString',
        TYPE_XML: 'readXML',
        TYPE_DATE: 'readDate',
        TYPE_ARRAY: 'readArray',
        TYPE_OBJECT: 'readObject',
        TYPE_XMLSTRING: 'readXMLString',
        TYPE_BYTEARRAY: 'readByteArray',
    }

    def __init__(self, *args, **kwargs):
        # Whether ArrayCollection/ObjectProxy wrappers are unwrapped to
        # their underlying list/object on read.
        self.use_proxies = kwargs.pop('use_proxies', use_proxies_default)

        pyamf.BaseDecoder.__init__(self, *args, **kwargs)

    def readUndefined(self):
        """
        Read undefined.
        """
        return pyamf.Undefined

    def readNull(self):
        """
        Read null.

        @return: C{None}
        @rtype: C{None}
        """
        return None

    def readBoolFalse(self):
        """
        Returns C{False}.

        @return: C{False}
        @rtype: C{bool}
        """
        return False

    def readBoolTrue(self):
        """
        Returns C{True}.

        @return: C{True}
        @rtype: C{bool}
        """
        return True

    def readNumber(self):
        """
        Read number (IEEE 754 double).
        """
        return self.stream.read_double()

    def readUnsignedInteger(self):
        """
        Reads and returns an unsigned integer from the stream.
        """
        return self.readInteger(False)

    def readSignedInteger(self):
        """
        Reads and returns a signed integer from the stream.
        """
        return self.readInteger(True)

    def readInteger(self, signed=False):
        """
        Reads and returns an integer from the stream.

        @type signed: C{bool}
        @see: U{Parsing integers on OSFlash
            <http://osflash.org/amf3/parsing_integers>} for the AMF3
            integer data format.
        """
        return decode_int(self.stream, signed)

    def readString(self, use_references=True):
        """
        Reads and returns a string from the stream.

        @type use_references: C{bool}
        """
        def readLength():
            # Low bit of the header is the inline flag; the remaining
            # bits are either a length or a reference index.
            x = self.readUnsignedInteger()

            return (x >> 1, x & REFERENCE_BIT == 0)

        length, is_reference = readLength()

        if use_references and is_reference:
            return self.context.getString(length)

        if length == 0:
            return u''

        result = self.stream.read_utf8_string(length)

        # Empty strings are never stored as references (see addString).
        if len(result) != 0 and use_references:
            self.context.addString(result)

        return result

    def readDate(self):
        """
        Read date from the stream.

        The timezone is ignored as the date is always in UTC.
        """
        ref = self.readUnsignedInteger()

        if ref & REFERENCE_BIT == 0:
            return self.context.getObject(ref >> 1)

        ms = self.stream.read_double()
        result = util.get_datetime(ms / 1000.0)

        if self.timezone_offset is not None:
            result += self.timezone_offset

        self.context.addObject(result)

        return result

    def readArray(self):
        """
        Reads an array from the stream.

        @warning: There is a very specific problem with AMF3 where the
        first three bytes of an encoded empty C{dict} will mirror that of
        an encoded C{{'': 1, '2': 2}}
        @see: U{Docuverse blog (external)
            <http://www.docuverse.com/blog/donpark/2007/05/14/flash-9-amf3-bug>}
        """
        size = self.readUnsignedInteger()

        if size & REFERENCE_BIT == 0:
            return self.context.getObject(size >> 1)

        size >>= 1

        key = self.readString().encode('utf8')

        if key == '':
            # integer indexes only -> python list
            result = []
            self.context.addObject(result)

            for i in xrange(size):
                result.append(self.readElement())

            return result

        # Mixed string/integer keys -> pyamf.MixedArray. String-keyed
        # entries come first, terminated by an empty key.
        result = pyamf.MixedArray()
        self.context.addObject(result)

        while key != "":
            result[key] = self.readElement()
            key = self.readString().encode('utf8')

        for i in xrange(size):
            el = self.readElement()
            result[i] = el

        return result

    def _getClassDefinition(self, ref):
        """
        Reads class definition from the stream.
        """
        is_ref = ref & REFERENCE_BIT == 0
        ref >>= 1

        if is_ref:
            class_def = self.context.getClassByReference(ref)

            return class_def, class_def.alias

        name = self.readString()
        alias = None

        if name == '':
            # Anonymous object -> generic ASObject.
            name = pyamf.ASObject

        try:
            alias = pyamf.get_class_alias(name)
        except pyamf.UnknownClassAlias:
            if self.strict:
                raise

            # Unknown aliases decode to TypedObject so the original alias
            # survives a round trip.
            alias = pyamf.TypedObjectClassAlias(pyamf.TypedObject, name)

        class_def = ClassDefinition(alias)

        # Remaining header bits: 2-bit encoding type, then the static
        # attribute count.
        class_def.encoding = ref & 0x03
        class_def.attr_len = ref >> 2
        class_def.static_properties = []

        if class_def.attr_len > 0:
            for i in xrange(class_def.attr_len):
                key = self.readString().encode('utf8')

                class_def.static_properties.append(key)

        self.context.addClass(class_def, alias.klass)

        return class_def, alias

    def readObject(self, use_proxies=None):
        """
        Reads an object from the stream.

        @raise pyamf.EncodeError: Decoding an object in amf3 tagged as
            amf0 only is not allowed.
        @raise pyamf.DecodeError: Unknown object encoding.
        """
        if use_proxies is None:
            use_proxies = self.use_proxies

        def readStatic(class_def, obj):
            # Values for the statically declared properties, in order.
            for attr in class_def.static_properties:
                obj[attr] = self.readElement()

        def readDynamic(class_def, obj):
            # name/value pairs terminated by an empty name.
            attr = self.readString().encode('utf8')

            while attr != '':
                obj[attr] = self.readElement()
                attr = self.readString().encode('utf8')

        ref = self.readUnsignedInteger()

        if ref & REFERENCE_BIT == 0:
            obj = self.context.getObject(ref >> 1)

            if use_proxies is True:
                obj = self.readProxyObject(obj)

            return obj

        ref >>= 1

        class_def, alias = self._getClassDefinition(ref)

        obj = alias.createInstance(codec=self)
        obj_attrs = dict()

        # Register before reading members so cyclic references resolve.
        self.context.addObject(obj)

        if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
            obj.__readamf__(DataInput(self))
        elif class_def.encoding == ObjectEncoding.DYNAMIC:
            readStatic(class_def, obj_attrs)
            readDynamic(class_def, obj_attrs)
        elif class_def.encoding == ObjectEncoding.STATIC:
            readStatic(class_def, obj_attrs)
        else:
            raise pyamf.DecodeError("Unknown object encoding")

        alias.applyAttributes(obj, obj_attrs, codec=self)

        if use_proxies is True:
            obj = self.readProxyObject(obj)

        return obj

    def readProxyObject(self, proxy):
        """
        Return the source object of a proxied object.

        @since: 0.4
        """
        if isinstance(proxy, ArrayCollection):
            return list(proxy)
        elif isinstance(proxy, ObjectProxy):
            return proxy._amf_object

        return proxy

    def _readXML(self, legacy=False):
        """
        Reads an XML object from the stream.

        @type legacy: C{bool}
        @param legacy: The read XML is in 'legacy' format.
        """
        ref = self.readUnsignedInteger()

        if ref & REFERENCE_BIT == 0:
            return self.context.getObject(ref >> 1)

        xmlstring = self.stream.read(ref >> 1)

        x = util.ET.fromstring(xmlstring)
        self.context.addObject(x)

        if legacy is True:
            self.context.addLegacyXML(x)

        return x

    def readXMLString(self):
        """
        Reads a string from the data stream and converts it into
        an XML Tree.

        @return: The XML Document.
        @rtype: L{ET<util.ET>}
        """
        return self._readXML()

    def readXML(self):
        """
        Read a legacy XML Document from the stream.

        @return: The XML Document.
        @rtype: L{ET<util.ET>}
        """
        return self._readXML(True)

    def readByteArray(self):
        """
        Reads a string of data from the stream.

        Detects if the L{ByteArray} was compressed using C{zlib}.

        @see: L{ByteArray}
        @note: This is not supported in ActionScript 1.0 and 2.0.
        """
        ref = self.readUnsignedInteger()

        if ref & REFERENCE_BIT == 0:
            return self.context.getObject(ref >> 1)

        buffer = self.stream.read(ref >> 1)

        # Compression is detected, not flagged: attempt decompression and
        # fall back to raw bytes if zlib rejects the data.
        try:
            buffer = zlib.decompress(buffer)
            compressed = True
        except zlib.error:
            compressed = False

        obj = ByteArray(buffer, context=self.context)
        obj.compressed = compressed

        self.context.addObject(obj)

        return obj
class Encoder(pyamf.BaseEncoder):
    """
    Encodes an AMF3 data stream.
    """
    # Context type used to track object/string/class references.
    context_class = Context

    # Ordered (types, writer-method-name) pairs used to dispatch on the
    # Python type of the value being encoded.
    type_map = [
        ((types.BuiltinFunctionType, types.BuiltinMethodType,
            types.FunctionType, types.GeneratorType, types.ModuleType,
            types.LambdaType, types.MethodType), "writeFunc"),
        ((bool,), "writeBoolean"),
        ((types.NoneType,), "writeNull"),
        ((int, long), "writeInteger"),
        ((float,), "writeNumber"),
        (types.StringTypes, "writeString"),
        ((ByteArray,), "writeByteArray"),
        ((datetime.date, datetime.datetime, datetime.time), "writeDate"),
        ((util.is_ET_element,), "writeXML"),
        ((pyamf.UndefinedType,), "writeUndefined"),
        ((types.ClassType, types.TypeType), "writeClass"),
        ((types.InstanceType, types.ObjectType,), "writeInstance"),
    ]

    def __init__(self, *args, **kwargs):
        # Whether lists/dicts are wrapped in ArrayCollection/ObjectProxy.
        self.use_proxies = kwargs.pop('use_proxies', use_proxies_default)
        # Whether repeated strings are written as references.
        self.string_references = kwargs.pop('string_references', True)

        pyamf.BaseEncoder.__init__(self, *args, **kwargs)

    def writeElement(self, data, use_references=True, use_proxies=None):
        """
        Writes the data.

        @param data: The data to be encoded to the AMF3 data stream.
        @type data: C{mixed}
        @param use_references: Default is C{True}.
        @type use_references: C{bool}
        @raise EncodeError: Cannot find encoder func for C{data}.
        """
        func = self._writeElementFunc(data)

        if func is None:
            raise pyamf.EncodeError("Unknown type %r" % (data,))

        func(data, use_references=use_references, use_proxies=use_proxies)

    def writeClass(self, *args, **kwargs):
        """
        Classes cannot be serialised.
        """
        raise pyamf.EncodeError("Class objects cannot be serialised")

    def writeUndefined(self, *args, **kwargs):
        """
        Writes an C{pyamf.Undefined} value to the stream.
        """
        self.stream.write(TYPE_UNDEFINED)

    def writeNull(self, *args, **kwargs):
        """
        Writes a C{null} value to the stream.
        """
        self.stream.write(TYPE_NULL)

    def writeBoolean(self, n, **kwargs):
        """
        Writes a Boolean to the stream.
        """
        t = TYPE_BOOL_TRUE

        if not n:
            t = TYPE_BOOL_FALSE

        self.stream.write(t)

    def _writeInteger(self, n):
        """
        Writes a raw (untyped) AMF3 variable-length integer, caching the
        encoded form for reuse.

        @param n: The integer data to be encoded to the AMF3 data stream.
        @type n: integer data
        @see: U{Parsing Integers on OSFlash
            <http://osflash.org/documentation/amf3/parsing_integers>}
            for more info.
        """
        try:
            self.stream.write(ENCODED_INT_CACHE[n])
        except KeyError:
            ENCODED_INT_CACHE[n] = encode_int(n)
            self.stream.write(ENCODED_INT_CACHE[n])

    def writeInteger(self, n, **kwargs):
        """
        Writes an integer to the stream.

        @type n: integer data
        @param n: The integer data to be encoded to the AMF3 data stream.
        @type use_references: C{bool}
        @kwarg use_references: Default is C{True}.
        """
        # Values outside the signed 29-bit range must be written as doubles.
        if n < MIN_29B_INT or n > MAX_29B_INT:
            self.writeNumber(float(n))

            return

        self.stream.write(TYPE_INTEGER)
        self.stream.write(encode_int(n))

    def writeNumber(self, n, **kwargs):
        """
        Writes a float to the stream.

        @type n: C{float}
        """
        self.stream.write(TYPE_NUMBER)
        self.stream.write_double(n)

    def _writeString(self, n, **kwargs):
        """
        Writes a raw string (no type marker) to the stream.

        @type n: C{str} or C{unicode}
        @param n: The string data to be encoded to the AMF3 data stream.
        """
        if n == '':
            # Empty string: inline flag with zero length, never referenced.
            self.stream.write_uchar(REFERENCE_BIT)

            return

        t = type(n)

        if t is str:
            bytes = n
        elif t is unicode:
            bytes = n.encode('utf8')
        else:
            bytes = unicode(n).encode('utf8')
            n = bytes

        if self.string_references:
            ref = self.context.getStringReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addString(n)

        self._writeInteger((len(bytes) << 1) | REFERENCE_BIT)
        self.stream.write(bytes)

    def writeString(self, n, writeType=True, **kwargs):
        """
        Writes a string to the stream. If C{n} is not a unicode string, an
        attempt will be made to convert it.

        @type n: C{basestring}
        @param n: The string data to be encoded to the AMF3 data stream.
        """
        if writeType:
            self.stream.write(TYPE_STRING)

        self._writeString(n, **kwargs)

    def writeDate(self, n, use_references=True, **kwargs):
        """
        Writes a C{datetime} instance to the stream.

        @type n: L{datetime}
        @param n: The C{Date} data to be encoded to the AMF3 data stream.
        @type use_references: C{bool}
        @param use_references: Default is C{True}.
        @raise pyamf.EncodeError: A C{datetime.time} instance was supplied.
        """
        if isinstance(n, datetime.time):
            raise pyamf.EncodeError('A datetime.time instance was found but '
                'AMF3 has no way to encode time objects. Please use '
                'datetime.datetime instead (got:%r)' % (n,))

        self.stream.write(TYPE_DATE)

        if use_references is True:
            ref = self.context.getObjectReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(n)

        self.stream.write_uchar(REFERENCE_BIT)

        if self.timezone_offset is not None:
            n -= self.timezone_offset

        ms = util.get_timestamp(n)
        self.stream.write_double(ms * 1000.0)

    def writeList(self, n, use_references=True, use_proxies=None):
        """
        Writes a C{tuple}, C{set} or C{list} to the stream.

        @type n: One of C{__builtin__.tuple}, C{__builtin__.set}
            or C{__builtin__.list}
        @param n: The C{list} data to be encoded to the AMF3 data stream.
        @type use_references: C{bool}
        @param use_references: Default is C{True}.
        """
        # Encode lists as ArrayCollections
        if use_proxies is None:
            use_proxies = self.use_proxies

        if use_proxies:
            ref_obj = self.context.getObjectAlias(n)

            if ref_obj is None:
                proxy = ArrayCollection(n)
                self.context.setObjectAlias(n, proxy)
                ref_obj = proxy

            self.writeObject(ref_obj, use_references, use_proxies=False)

            return

        self.stream.write(TYPE_ARRAY)

        if use_references:
            ref = self.context.getObjectReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(n)

        self._writeInteger((len(n) << 1) | REFERENCE_BIT)
        # 0x01: empty string -> no string-keyed (associative) portion.
        self.stream.write_uchar(0x01)

        [self.writeElement(x) for x in n]

    def writeDict(self, n, use_references=True, use_proxies=None):
        """
        Writes a C{dict} to the stream.

        @type n: C{__builtin__.dict}
        @param n: The C{dict} data to be encoded to the AMF3 data stream.
        @type use_references: C{bool}
        @param use_references: Default is C{True}.
        @raise ValueError: Non C{int}/C{str} key value found in the C{dict}
        @raise EncodeError: C{dict} contains empty string keys.
        """
        # Design bug in AMF3 that cannot read/write empty key strings
        # http://www.docuverse.com/blog/donpark/2007/05/14/flash-9-amf3-bug
        # for more info
        if '' in n:
            raise pyamf.EncodeError("dicts cannot contain empty string keys")

        if use_proxies is None:
            use_proxies = self.use_proxies

        if use_proxies is True:
            ref_obj = self.context.getObjectAlias(n)

            if ref_obj is None:
                proxy = ObjectProxy(pyamf.ASObject(n))
                self.context.setObjectAlias(n, proxy)
                ref_obj = proxy

            self.writeObject(ref_obj, use_references, use_proxies=False)

            return

        self.stream.write(TYPE_ARRAY)

        if use_references:
            ref = self.context.getObjectReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(n)

        # The AMF3 spec demands that all str based indicies be listed first
        keys = n.keys()
        int_keys = []
        str_keys = []

        for x in keys:
            if isinstance(x, (int, long)):
                int_keys.append(x)
            elif isinstance(x, (str, unicode)):
                str_keys.append(x)
            else:
                raise ValueError("Non int/str key value found in dict")

        # Make sure the integer keys are within range
        l = len(int_keys)

        for x in int_keys:
            # NOTE(review): `l < x <= 0` can never be true since
            # l = len(int_keys) >= 0; out-of-range keys are therefore
            # never demoted to string keys here. Presumably this was
            # meant to be `x < 0 or x >= l` -- verify before changing.
            if l < x <= 0:
                # treat as a string key
                str_keys.append(x)
                del int_keys[int_keys.index(x)]

        int_keys.sort()

        # If integer keys don't start at 0, they will be treated as strings
        # NOTE(review): this loop deletes from int_keys while iterating
        # it, so it may skip elements -- verify before relying on it.
        if len(int_keys) > 0 and int_keys[0] != 0:
            for x in int_keys:
                str_keys.append(str(x))
                del int_keys[int_keys.index(x)]

        self._writeInteger(len(int_keys) << 1 | REFERENCE_BIT)

        for x in str_keys:
            self._writeString(x)
            self.writeElement(n[x])

        # 0x01: empty string terminates the associative portion.
        self.stream.write_uchar(0x01)

        for k in int_keys:
            self.writeElement(n[k])

    def writeInstance(self, obj, **kwargs):
        """
        Dispatch a class instance to the appropriate writer, caching the
        chosen writer per class.

        @param obj: The class instance to be encoded.
        """
        kls = obj.__class__

        if kls is pyamf.MixedArray:
            f = self._write_elem_func_cache[kls] = self.writeDict
        elif kls in (list, set, tuple):
            f = self._write_elem_func_cache[kls] = self.writeList
        else:
            f = self._write_elem_func_cache[kls] = self.writeObject

        f(obj, **kwargs)

    def writeObject(self, obj, use_references=True, use_proxies=None):
        """
        Writes an object to the stream.

        @param obj: The object data to be encoded to the AMF3 data stream.
        @type obj: object data
        @param use_references: Default is C{True}.
        @type use_references: C{bool}
        @raise EncodeError: Encoding an object in amf3 tagged as amf0 only.
        """
        if use_proxies is None:
            use_proxies = self.use_proxies

        if use_proxies is True and obj.__class__ is dict:
            ref_obj = self.context.getObjectAlias(obj)

            if ref_obj is None:
                proxy = ObjectProxy(obj)
                self.context.setObjectAlias(obj, proxy)
                ref_obj = proxy

            self.writeObject(ref_obj, use_references, use_proxies=False)

            return

        self.stream.write(TYPE_OBJECT)

        if use_references:
            ref = self.context.getObjectReference(obj)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(obj)

        # object is not referenced, serialise it
        kls = obj.__class__
        definition = self.context.getClass(kls)
        alias = None
        class_ref = False  # if the class definition is a reference

        if definition:
            class_ref = True
            alias = definition.alias

            if alias.anonymous and definition.reference is not None:
                class_ref = True
        else:
            try:
                alias = pyamf.get_class_alias(kls)
            except pyamf.UnknownClassAlias:
                # Build an alias on the fly for unregistered classes.
                alias_klass = util.get_class_alias(kls)
                meta = util.get_class_meta(kls)

                alias = alias_klass(kls, defer=True, **meta)

            definition = ClassDefinition(alias)

            self.context.addClass(definition, alias.klass)

        if class_ref:
            self.stream.write(definition.reference)
        else:
            ref = 0

            if definition.encoding != ObjectEncoding.EXTERNAL:
                ref += definition.attr_len << 4

            final_reference = encode_int(ref | definition.encoding << 2 |
                REFERENCE_BIT << 1 | REFERENCE_BIT)

            self.stream.write(final_reference)

            # work out what the final reference for the class will be.
            # this is okay because the next time an object of the same
            # class is encoded, class_ref will be True and never get here
            # again.
            definition.reference = encode_int(
                definition.reference << 2 | REFERENCE_BIT)

            if alias.anonymous:
                self.stream.write_uchar(0x01)
            else:
                self._writeString(alias.alias)

        if alias.external:
            obj.__writeamf__(DataOutput(self))

            return

        sa, da = alias.getEncodableAttributes(obj, codec=self)

        if sa:
            if not class_ref:
                [self._writeString(attr) for attr in alias.static_attrs]

            [self.writeElement(sa[attr]) for attr in alias.static_attrs]

            if definition.encoding == ObjectEncoding.STATIC:
                return

        if definition.encoding == ObjectEncoding.DYNAMIC:
            if da:
                for attr, value in da.iteritems():
                    self._writeString(attr)
                    self.writeElement(value)

            self.stream.write_uchar(0x01)

    def writeByteArray(self, n, use_references=True, **kwargs):
        """
        Writes a L{ByteArray} to the data stream.

        @param n: The L{ByteArray} data to be encoded to the AMF3 data
            stream.
        @type n: L{ByteArray}
        @param use_references: Default is C{True}.
        @type use_references: C{bool}
        """
        self.stream.write(TYPE_BYTEARRAY)

        if use_references:
            ref = self.context.getObjectReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(n)

        # str() applies zlib compression when n.compressed is set.
        buf = str(n)
        l = len(buf)
        self._writeInteger(l << 1 | REFERENCE_BIT)
        self.stream.write(buf)

    def writeXML(self, n, use_references=True, use_proxies=None):
        """
        Writes a XML string to the data stream.

        @type n: L{ET<util.ET>}
        @param n: The XML Document to be encoded to the AMF3 data stream.
        @type use_references: C{bool}
        @param use_references: Default is C{True}.
        """
        i = self.context.getLegacyXMLReference(n)

        # NOTE(review): `is_legacy` is True when the document is NOT in
        # the legacy XML table, and then TYPE_XMLSTRING is written -- the
        # variable name appears inverted relative to its meaning; the
        # emitted markers look intentional, but confirm.
        if i is None:
            is_legacy = True
        else:
            is_legacy = False

        if is_legacy is True:
            self.stream.write(TYPE_XMLSTRING)
        else:
            self.stream.write(TYPE_XML)

        if use_references:
            ref = self.context.getObjectReference(n)

            if ref is not None:
                self._writeInteger(ref << 1)

                return

            self.context.addObject(n)

        self._writeString(util.ET.tostring(n, 'utf-8'))
def decode(stream, context=None, strict=False):
    """
    A helper generator that decodes and yields elements from an AMF3
    datastream until the stream is exhausted.

    @type stream: L{BufferedByteStream<util.BufferedByteStream>}
    @param stream: AMF3 data.
    @type context: L{Context}
    @param context: Context.
    """
    decoder = Decoder(stream, context, strict)

    while True:
        try:
            yield decoder.readElement()
        except pyamf.EOStream:
            return
def encode(*args, **kwargs):
    """
    A helper function that encodes each positional argument into AMF3
    format and returns the buffer containing the result.

    @type args: List of args to encode.
    @keyword context: Any initial context to use.
    @type context: L{Context}
    @return: C{StringIO} type object containing the encoded AMF3 data.
    @rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    """
    stream = util.BufferedByteStream()
    encoder = Encoder(stream, kwargs.get('context', None))

    for element in args:
        encoder.writeElement(element)

    return stream
def encode_int(n):
    """
    Encodes an int as a variable length signed 29-bit integer as defined by
    the spec.

    @param n: The integer to be encoded
    @return: The encoded string
    @rtype: C{str}
    @raise OverflowError: Out of range.
    """
    if n < MIN_29B_INT or n > MAX_29B_INT:
        raise OverflowError("Out of range")

    if n < 0:
        # Negative values are encoded as their 29-bit two's complement.
        n += 0x20000000

    bytes = ''
    real_value = None

    if n > 0x1fffff:
        # Four-byte form: the final byte holds 8 data bits instead of 7,
        # so shift down by one while emitting the leading 7-bit groups.
        real_value = n
        n >>= 1
        bytes += chr(0x80 | ((n >> 21) & 0xff))

    if n > 0x3fff:
        bytes += chr(0x80 | ((n >> 14) & 0xff))

    if n > 0x7f:
        bytes += chr(0x80 | ((n >> 7) & 0xff))

    if real_value is not None:
        # Restore the unshifted value for the final byte.
        n = real_value

    if n > 0x1fffff:
        # Final byte of the four-byte form carries a full 8 bits.
        bytes += chr(n & 0xff)
    else:
        bytes += chr(n & 0x7f)

    return bytes
def decode_int(stream, signed=False):
    """
    Decode a variable-length 29-bit integer from C{stream}.

    Up to three leading bytes contribute 7 bits each (high bit set marks
    continuation); a fourth byte, when present, contributes all 8 bits.

    @param stream: Byte source providing C{read_uchar}.
    @type signed: C{bool}
    @param signed: Interpret the 29-bit value as signed when C{True}.
    @rtype: C{int}
    """
    result = 0
    bytes_seen = 0
    byte = stream.read_uchar()

    while byte & 0x80 and bytes_seen < 3:
        result = (result << 7) | (byte & 0x7f)
        byte = stream.read_uchar()
        bytes_seen += 1

    if bytes_seen < 3:
        result = (result << 7) | byte
    else:
        # Fourth byte carries a full 8 bits.
        result = (result << 8) | byte

        if result & 0x10000000:
            if signed:
                result -= 0x20000000
            else:
                result = (result << 1) + 1

    return result
# Use the C implementations of encode_int/decode_int when the optional
# cpyamf extension is available; fall back silently to the pure Python
# versions above.
try:
    from cpyamf.amf3 import encode_int, decode_int
except ImportError:
    pass

pyamf.register_class(ByteArray)

# Pre-populate the encoded-integer cache for small, common values.
for x in range(0, 20):
    ENCODED_INT_CACHE[x] = encode_int(x)

del x
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE for details.
"""
SQLAlchemy adapter module.
@see: U{SQLAlchemy homepage (external)<http://www.sqlalchemy.org>}
@since: 0.4
"""
from sqlalchemy.orm import collections
import pyamf
from pyamf.adapters import util
# Encode SQLAlchemy's instrumented collections as their plain Python
# equivalents (list/dict/set) - the instrumentation is transport-irrelevant.
pyamf.add_type(collections.InstrumentedList, util.to_list)
pyamf.add_type(collections.InstrumentedDict, util.to_dict)
pyamf.add_type(collections.InstrumentedSet, util.to_set)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Google App Engine adapter module.
Sets up basic type mapping and class mappings for using the Datastore API
in Google App Engine.
@see: U{Datastore API on Google App Engine (external)
<http://code.google.com/appengine/docs/datastore>}
@since: 0.3.1
"""
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import datetime
import pyamf
from pyamf.util import imports
from pyamf.adapters import util
class ModelStub(object):
    """
    Temporary stand-in for a L{db.Model} or L{db.Expando} while the typed
    object is read from the AMF stream. After the attributes have been read,
    the instance is converted into the correct type.
    @ivar klass: The referenced class either L{db.Model} or L{db.Expando}.
        This is used so we can proxy some of the method calls during decoding.
    @type klass: L{db.Model} or L{db.Expando}
    @see: L{DataStoreClassAlias.applyAttributes}
    """
    def __init__(self, klass):
        self.klass = klass

    def properties(self):
        # Proxy straight through to the referenced model class.
        return self.klass.properties()

    def dynamic_properties(self):
        # A stub never carries dynamic (Expando) properties of its own.
        return []
class GAEReferenceCollection(dict):
    """
    Maps a datastore model class to a dict of C{key -> loaded instance}.
    Used to avoid redundant datastore round-trips while en/decoding.
    @since: 0.4.1
    """
    def _getClass(self, klass):
        # Only datastore model classes may be used as top-level keys.
        if not issubclass(klass, (db.Model, db.Expando)):
            raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))

        return self.setdefault(klass, {})

    def getClassKey(self, klass, key):
        """
        Return an instance based on klass/key.
        If an instance cannot be found then L{KeyError} is raised.
        @param klass: The class of the instance.
        @param key: The key of the instance.
        @return: The instance linked to the C{klass}/C{key}.
        @rtype: Instance of L{klass}.
        """
        if not isinstance(key, basestring):
            raise TypeError('basestring type expected for test, got %s' % (repr(key),))

        return self._getClass(klass)[key]

    def addClassKey(self, klass, key, obj):
        """
        Adds an object to the collection, based on klass and key.
        @param klass: The class of the object.
        @param key: The datastore key of the object.
        @param obj: The loaded instance from the datastore.
        """
        if not isinstance(key, basestring):
            raise TypeError('basestring type expected for test, got %s' % (repr(key),))

        self._getClass(klass)[key] = obj
class DataStoreClassAlias(pyamf.ClassAlias):
    """
    This class contains all the business logic to interact with Google's
    Datastore API's. Any L{db.Model} or L{db.Expando} classes will use this
    class alias for encoding/decoding.
    We also add a number of indexes to the encoder context to aggressively
    decrease the number of Datastore API's that we need to complete.
    """

    # The name of the attribute used to represent the key
    KEY_ATTR = '_key'

    def _compile_base_class(self, klass):
        # db.Model/polymodel.PolyModel are the adapter-managed roots -
        # don't let the base implementation compile them.
        if klass in (db.Model, polymodel.PolyModel):
            return

        pyamf.ClassAlias._compile_base_class(self, klass)

    def getCustomProperties(self):
        """
        Collects the model's static properties, its reference properties and
        the read-only reverse-reference attributes.
        """
        props = [self.KEY_ATTR]
        self.reference_properties = {}
        self.properties = {}
        reverse_props = []

        for name, prop in self.klass.properties().iteritems():
            self.properties[name] = prop
            props.append(name)

            if isinstance(prop, db.ReferenceProperty):
                self.reference_properties[name] = prop

        if issubclass(self.klass, polymodel.PolyModel):
            # '_class' is PolyModel book-keeping, not user data.
            del self.properties['_class']
            props.remove('_class')

        # check if the property is a defined as a collection_name. These types
        # of properties are read-only and the datastore freaks out if you
        # attempt to meddle with it. We delete the attribute entirely ..
        for name, value in self.klass.__dict__.iteritems():
            if isinstance(value, db._ReverseReferenceProperty):
                reverse_props.append(name)

        self.static_attrs.update(props)
        self.encodable_properties.update(self.properties.keys())
        self.decodable_properties.update(self.properties.keys())
        self.readonly_attrs.update(reverse_props)

        # Normalise empty dicts to None so later checks are cheap.
        if not self.reference_properties:
            self.reference_properties = None

        if not self.properties:
            self.properties = None

    def getEncodableAttributes(self, obj, codec=None):
        """
        Returns the static/dynamic attribute dicts for C{obj}, resolving
        reference properties through the context's C{gae_objects} cache to
        avoid extra datastore fetches.
        """
        sa, da = pyamf.ClassAlias.getEncodableAttributes(self, obj, codec=codec)

        sa[self.KEY_ATTR] = str(obj.key()) if obj.is_saved() else None
        gae_objects = getGAEObjects(codec.context) if codec else None

        if self.reference_properties and gae_objects:
            for name, prop in self.reference_properties.iteritems():
                klass = prop.reference_class
                key = prop.get_value_for_datastore(obj)

                if not key:
                    continue

                key = str(key)

                try:
                    sa[name] = gae_objects.getClassKey(klass, key)
                except KeyError:
                    ref_obj = getattr(obj, name)
                    gae_objects.addClassKey(klass, key, ref_obj)
                    sa[name] = ref_obj

        if da:
            # Strip private dynamic attributes.
            for k, v in da.copy().iteritems():
                if k.startswith('_'):
                    del da[k]

        if not da:
            da = {}

        for attr in obj.dynamic_properties():
            da[attr] = getattr(obj, attr)

        if not da:
            da = None

        return sa, da

    def createInstance(self, codec=None):
        # Decode into a stub first; the real class is applied later in
        # getDecodableAttributes.
        return ModelStub(self.klass)

    def getDecodableAttributes(self, obj, attrs, codec=None):
        """
        Applies the decoded C{attrs} to C{obj}, loading the existing entity
        from the datastore when a key was supplied.
        """
        try:
            key = attrs[self.KEY_ATTR]
        except KeyError:
            key = attrs[self.KEY_ATTR] = None

        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, codec=codec)

        del attrs[self.KEY_ATTR]
        new_obj = None

        # attempt to load the object from the datastore if KEY_ATTR exists.
        if key and codec:
            new_obj = loadInstanceFromDatastore(self.klass, key, codec)

        # clean up the stub
        if isinstance(obj, ModelStub) and hasattr(obj, 'klass'):
            del obj.klass

        if new_obj:
            obj.__dict__ = new_obj.__dict__.copy()

        obj.__class__ = self.klass
        apply_init = True

        if self.properties:
            for k in [k for k in attrs.keys() if k in self.properties.keys()]:
                prop = self.properties[k]
                v = attrs[k]

                if isinstance(prop, db.FloatProperty) and isinstance(v, (int, long)):
                    attrs[k] = float(v)
                elif isinstance(prop, db.ListProperty) and v is None:
                    attrs[k] = []
                elif isinstance(v, datetime.datetime):
                    # Date/Time Property fields expect specific types of data
                    # whereas PyAMF only decodes into datetime.datetime objects.
                    if isinstance(prop, db.DateProperty):
                        attrs[k] = v.date()
                    elif isinstance(prop, db.TimeProperty):
                        attrs[k] = v.time()

                # BUGFIX: getCustomProperties sets reference_properties to
                # None when the model has no reference properties - the
                # original `k in self.reference_properties` would then raise
                # TypeError. Guard against None explicitly.
                if (new_obj is None and isinstance(v, ModelStub) and
                        prop.required and self.reference_properties and
                        k in self.reference_properties):
                    apply_init = False
                    del attrs[k]

        # If the object does not exist in the datastore, we must fire the
        # class constructor. This sets internal attributes that pyamf has
        # no business messing with ..
        if new_obj is None and apply_init is True:
            obj.__init__(**attrs)

        return attrs
def getGAEObjects(context):
    """
    Returns a reference to the C{gae_objects} on the context. If it doesn't
    exist then it is created.
    @param context: The context to load the C{gae_objects} index from.
    @type context: Instance of L{pyamf.BaseContext}
    @return: The C{gae_objects} index reference.
    @rtype: Instance of L{GAEReferenceCollection}
    @since: 0.4.1
    """
    try:
        return context.gae_objects
    except AttributeError:
        # Created lazily on first use.
        context.gae_objects = GAEReferenceCollection()

    return context.gae_objects
def loadInstanceFromDatastore(klass, key, codec=None):
    """
    Attempt to load an instance from the datastore, based on C{klass}
    and C{key}. We create an index on the codec's context (if it exists)
    so we can check that first before accessing the datastore.
    @param klass: The class that will be loaded from the datastore.
    @type klass: Sub-class of L{db.Model} or L{db.Expando}
    @param key: The key which is used to uniquely identify the instance in the
        datastore.
    @type key: C{str}
    @param codec: The codec to reference the C{gae_objects} index. If
        supplied, the codec must have a context attribute.
    @type codec: Instance of L{pyamf.BaseEncoder} or L{pyamf.BaseDecoder}
    @return: The loaded instance from the datastore.
    @rtype: Instance of C{klass}.
    @since: 0.4.1
    """
    if not issubclass(klass, (db.Model, db.Expando)):
        raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))

    if not isinstance(key, basestring):
        # BUGFIX: the original passed the format args as a second TypeError
        # argument (`'... %s', (repr(key),)`) so the message was never
        # formatted - use the % operator as in the check above.
        raise TypeError('string expected for key, got %s' % (repr(key),))

    key = str(key)

    if codec is None:
        # No context available - go straight to the datastore.
        return klass.get(key)

    gae_objects = getGAEObjects(codec.context)

    try:
        return gae_objects.getClassKey(klass, key)
    except KeyError:
        pass

    obj = klass.get(key)
    gae_objects.addClassKey(klass, key, obj)

    return obj
def writeGAEObject(self, object, *args, **kwargs):
    """
    The GAE Datastore creates new instances of objects for each get request.
    This is a problem for PyAMF as it uses the id(obj) of the object to do
    reference checking.
    We could just ignore the problem, but the objects are conceptually the
    same so the effort should be made to attempt to resolve references for a
    given object graph.
    We create a new map on the encoder context object which contains a dict of
    C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
    use the datastore key to do the reference checking.
    @since: 0.4.1
    """
    # Anything that is not a saved datastore entity goes down the normal
    # encoding path untouched.
    if not (isinstance(object, db.Model) and object.is_saved()):
        self.writeNonGAEObject(object, *args, **kwargs)

        return

    klass = object.__class__
    key_str = str(object.key())

    gae_objects = getGAEObjects(self.context)

    try:
        target = gae_objects.getClassKey(klass, key_str)
    except KeyError:
        # First time we see this entity - remember it for later lookups.
        gae_objects.addClassKey(klass, key_str, object)
        target = object

    self.writeNonGAEObject(target, *args, **kwargs)
def install_gae_reference_model_hook(mod):
    """
    Import hook for L{pyamf.amf0}/L{pyamf.amf3}: wraps the module's
    C{Encoder.writeObject} with L{writeGAEObject}.
    @param mod: The module imported.
    @since: 0.4.1
    """
    encoder = mod.Encoder

    # Keep a single backup of the original implementation; installing the
    # hook twice must not overwrite the backup with the hook itself.
    if not hasattr(encoder, 'writeNonGAEObject'):
        encoder.writeNonGAEObject = encoder.writeObject

    encoder.writeObject = writeGAEObject
# initialise the module here: hook into pyamf
# Datastore queries degrade to plain lists when encoded.
pyamf.add_type(db.Query, util.to_list)
pyamf.register_alias_type(DataStoreClassAlias, db.Model, db.Expando)
# hook the L{writeGAEObject} method to the Encoder class on import
imports.when_imported('pyamf.amf0', install_gae_reference_model_hook)
imports.when_imported('pyamf.amf3', install_gae_reference_model_hook)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
`array` adapter module.
Will convert all array.array instances to a python list before encoding. All
type information is lost (but degrades nicely).
@since: 0.5
"""
import array
import pyamf
from pyamf.adapters import util
if hasattr(array, 'array'):
    # array.ArrayType is an alias of array.array; encode instances as lists.
    pyamf.add_type(array.ArrayType, util.to_list)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Adapter for the stdlib C{sets} module.
@since: 0.4
"""
import sets
import pyamf
from pyamf.adapters import util
# The stdlib `sets` module is deprecated; guard each attribute in case a
# future version drops it.
if hasattr(sets, 'ImmutableSet'):
    pyamf.add_type(sets.ImmutableSet, util.to_tuple)
if hasattr(sets, 'Set'):
    pyamf.add_type(sets.Set, util.to_tuple)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.db.models.fields} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4
"""
from django.db.models import fields
import pyamf
def convert_NOT_PROVIDED(x, encoder):
    """
    Encodes Django's C{fields.NOT_PROVIDED} sentinel as AMF C{undefined}.
    @rtype: L{Undefined<pyamf.Undefined>}
    """
    return pyamf.Undefined
# The first argument here is an identity predicate, not a type: it matches
# the NOT_PROVIDED sentinel object itself.
pyamf.add_type(lambda x: x is fields.NOT_PROVIDED, convert_NOT_PROVIDED)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Django query adapter module.
Sets up basic type mapping and class mappings for a
Django models.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.1b
"""
from django.db.models import query
import pyamf
from pyamf.adapters import util
# Encode Django QuerySets as plain lists (evaluates the queryset).
pyamf.add_type(query.QuerySet, util.to_list)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
collections adapter module.
@since: 0.5
"""
import collections
import pyamf
from pyamf.adapters import util
# Guard with hasattr for older Pythons where these types may be absent.
if hasattr(collections, 'deque'):
    pyamf.add_type(collections.deque, util.to_list)
if hasattr(collections, 'defaultdict'):
    pyamf.add_type(collections.defaultdict, util.to_dict)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Adapter for the C{decimal} module.
@since: 0.4
"""
import decimal
import pyamf
def convert_Decimal(x, encoder):
    """
    Called when an instance of L{decimal.Decimal} is about to be encoded to
    an AMF stream.
    @param x: The L{decimal.Decimal} instance to encode.
    @param encoder: The L{pyamf.BaseEncoder} instance about to perform the
        operation.
    @return: C{x} converted to a C{float}, when the encoder is not in
        'strict' mode. Otherwise an L{pyamf.EncodeError} with a friendly
        message is raised.
    """
    # isinstance(None, ...) is already False, so a missing encoder also
    # falls through to the error below.
    if isinstance(encoder, pyamf.BaseEncoder) and encoder.strict is False:
        return float(x)

    raise pyamf.EncodeError('Unable to encode decimal.Decimal instances as '
        'there is no way to guarantee exact conversion. Use strict=False to '
        'convert to a float.')
# Decimal exists on all supported Pythons; the guard is purely defensive.
if hasattr(decimal, 'Decimal'):
    pyamf.add_type(decimal.Decimal, convert_Decimal)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Useful helpers for adapters.
@since: 0.4
"""
import __builtin__
if not hasattr(__builtin__, 'set'):
from sets import Set as set
def to_list(obj, encoder):
    """
    Converts an arbitrary iterable C{obj} to a list.
    The C{encoder} argument is unused; it is part of the adapter signature.
    @rtype: L{list}
    """
    converted = list(obj)

    return converted
def to_dict(obj, encoder):
    """
    Converts an arbitrary mapping/pair-iterable C{obj} to a dict.
    The C{encoder} argument is unused; it is part of the adapter signature.
    @rtype: L{dict}
    """
    converted = dict(obj)

    return converted
def to_set(obj, encoder):
    """
    Converts an arbitrary iterable C{obj} to a set.
    The C{encoder} argument is unused; it is part of the adapter signature.
    @rtype: L{set}
    """
    members = set(obj)

    return members
def to_tuple(x, encoder):
    """
    Converts an arbitrary iterable C{x} to a tuple.
    The C{encoder} argument is unused; it is part of the adapter signature.
    @rtype: L{tuple}
    """
    converted = tuple(x)

    return converted
def to_string(x, encoder):
    """
    Converts an arbitrary object C{x} to a string.
    The C{encoder} argument is unused; it is part of the adapter signature.
    @rtype: C{str}
    @since: 0.5
    """
    # BUGFIX (docs): the original docstring declared @rtype: L{tuple},
    # which contradicted the implementation.
    return str(x)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.utils.translation} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4.2
"""
from django.utils.translation import ugettext_lazy
import pyamf
def convert_lazy(l, encoder=None):
    """
    Resolves a Django lazy translation proxy into a concrete string,
    depending on which string type the proxy delegates to.
    """
    proxy_class = l.__class__

    if proxy_class._delegate_unicode:
        return unicode(l)

    if proxy_class._delegate_str:
        return str(l)

    raise ValueError('Don\'t know how to convert lazy value %s' % (repr(l),))
pyamf.add_type(type(ugettext_lazy('foo')), convert_lazy)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
The adapter package provides additional functionality for other Python
packages. This includes registering classes, setting up type maps etc.
@since: 0.1.0
"""
import os.path
import glob
from pyamf.util import imports
class PackageImporter(object):
    """
    Lazy module loader: calling the instance imports the named adapter
    module from the C{pyamf.adapters} package.
    """
    def __init__(self, name):
        self.name = name

    def __call__(self, mod):
        target = '%s.%s' % ('pyamf.adapters', self.name)
        __import__(target)
adapters_registered = False
def register_adapters():
    """
    Discovers all adapter modules shipped in the C{pyamf.adapters} package
    and registers a lazy import hook for each one. Safe to call multiple
    times - the work is done only once.
    """
    global adapters_registered

    if adapters_registered is True:
        return

    try:
        import pkg_resources

        packageDir = pkg_resources.resource_filename('pyamf', 'adapters')
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. pkg_resources may be missing or broken in many
        # ways, so Exception is kept deliberately broad.
        packageDir = os.path.dirname(__file__)

    for f in glob.glob(os.path.join(packageDir, '*.py')):
        mod = os.path.basename(f).split(os.path.extsep, 1)[0]

        # Adapter modules are named '_dotted_package_name'; skip anything
        # else (including this package's __init__).
        if mod == '__init__' or not mod.startswith('_'):
            continue

        try:
            register_adapter(mod[1:].replace('_', '.'), PackageImporter(mod))
        except ImportError:
            # The target root package isn't importable - nothing to adapt.
            pass

    adapters_registered = True
def register_adapter(mod, func):
    """
    Schedules C{func} to run when the module named C{mod} is imported (or
    immediately, if it has already been imported).

    The same module may be registered multiple times; the callables fire in
    registration order. The root module must be importable, otherwise an
    C{ImportError} is thrown.
    @param mod: Fully qualified module name as used in an import statement,
        e.g. C{'foo.bar.baz'}.
    @type mod: C{str}
    @param func: One-argument callable invoked with the newly imported
        C{module} object.
    @type func: callable
    @raise TypeError: C{func} must be callable
    """
    if not callable(func):
        raise TypeError('func must be callable')

    imports.when_imported(str(mod), func)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
C{django.db.models} adapter module.
@see: U{Django Project<http://www.djangoproject.com>}
@since: 0.4.1
"""
from django.db.models.base import Model
from django.db.models import fields
from django.db.models.fields import related, files
import datetime
import pyamf
from pyamf.util import imports
class DjangoReferenceCollection(dict):
    """
    Maps a model class to a dict of C{primary key -> loaded instance}, so
    already-seen objects can be reused instead of re-fetched from the db.
    @since: 0.5
    """
    def _getClass(self, klass):
        # Create the per-class bucket lazily.
        return self.setdefault(klass, {})

    def getClassKey(self, klass, key):
        """
        Return an instance based on klass/key.
        If an instance cannot be found then L{KeyError} is raised.
        @param klass: The class of the instance.
        @param key: The primary_key of the instance.
        @return: The instance linked to the C{klass}/C{key}.
        @rtype: Instance of L{klass}.
        """
        return self._getClass(klass)[key]

    def addClassKey(self, klass, key, obj):
        """
        Adds an object to the collection, based on klass and key.
        @param klass: The class of the object.
        @param key: The datastore key of the object.
        @param obj: The loaded instance from the datastore.
        """
        self._getClass(klass)[key] = obj
class DjangoClassAlias(pyamf.ClassAlias):
    """
    Class alias for Django model instances: maps model fields to AMF
    attributes and converts field values between Django and AMF types.
    """
    def getCustomProperties(self):
        # Partition the model's local fields into plain fields
        # (self.fields) and foreign keys (self.relations), remembering the
        # underlying db column names in self.columns.
        self.fields = {}
        self.relations = {}
        self.columns = []
        self.meta = self.klass._meta
        for x in self.meta.local_fields:
            if isinstance(x, files.FileField):
                # File fields cannot be written back from AMF data.
                self.readonly_attrs.update([x.name])
            if not isinstance(x, related.ForeignKey):
                self.fields[x.name] = x
            else:
                self.relations[x.name] = x
            self.columns.append(x.attname)
        # Many-to-many descriptors live on the class, not in local_fields.
        for k, v in self.klass.__dict__.iteritems():
            if isinstance(v, related.ReverseManyRelatedObjectsDescriptor):
                self.fields[k] = v.field
        # Parent links (multi-table inheritance) are managed by Django and
        # must not be encoded/decoded directly.
        parent_fields = []
        for field in self.meta.parents.values():
            parent_fields.append(field.attname)
            del self.relations[field.name]
        self.exclude_attrs.update(parent_fields)
        props = self.fields.keys()
        self.static_attrs.update(props)
        self.encodable_properties.update(props)
        self.decodable_properties.update(props)
    def _compile_base_class(self, klass):
        # The Model base class itself is handled by this alias - skip it.
        if klass is Model:
            return
        pyamf.ClassAlias._compile_base_class(self, klass)
    def _encodeValue(self, field, value):
        # Convert a Django field value into something AMF can encode.
        if value is fields.NOT_PROVIDED:
            return pyamf.Undefined
        if value is None:
            return value
        # deal with dates ..
        if isinstance(field, fields.DateTimeField):
            return value
        elif isinstance(field, fields.DateField):
            # AMF has no date-only type; send midnight of that day.
            return datetime.datetime(value.year, value.month, value.day, 0, 0, 0)
        elif isinstance(field, fields.TimeField):
            # Time-only values are anchored to the epoch date.
            return datetime.datetime(1970, 1, 1,
                value.hour, value.minute, value.second, value.microsecond)
        elif isinstance(value, files.FieldFile):
            # Only the file's name travels over the wire.
            return value.name
        return value
    def _decodeValue(self, field, value):
        # Convert an AMF-decoded value back into what the Django field
        # expects.
        if value is pyamf.Undefined:
            return fields.NOT_PROVIDED
        if isinstance(field, fields.AutoField) and value == 0:
            # 0 means 'unsaved' on the client side; None lets Django assign
            # a key on save.
            return None
        elif isinstance(field, fields.DateTimeField):
            # deal with dates
            return value
        elif isinstance(field, fields.DateField):
            if not value:
                return None
            return datetime.date(value.year, value.month, value.day)
        elif isinstance(field, fields.TimeField):
            if not value:
                return None
            return datetime.time(value.hour, value.minute, value.second, value.microsecond)
        return value
    def getEncodableAttributes(self, obj, **kwargs):
        """
        Returns the static/dynamic attribute sets used to encode C{obj}.
        """
        sa, da = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs)
        for name, prop in self.fields.iteritems():
            if name not in sa:
                continue
            if isinstance(prop, related.ManyToManyField):
                # Evaluate the m2m manager into a plain list.
                sa[name] = [x for x in getattr(obj, name).all()]
            else:
                sa[name] = self._encodeValue(prop, getattr(obj, name))
        if not da:
            da = {}
        keys = da.keys()
        for key in keys:
            if key.startswith('_'):
                del da[key]
            elif key in self.columns:
                del da[key]
        for name, relation in self.relations.iteritems():
            if '_%s_cache' % name in obj.__dict__:
                # Relation already loaded - safe to encode without a query.
                da[name] = getattr(obj, name)
            else:
                # Avoid hitting the database for unloaded relations.
                da[name] = pyamf.Undefined
        if not da:
            da = None
        return sa, da
    def getDecodableAttributes(self, obj, attrs, **kwargs):
        """
        Converts decoded AMF attributes into Django-compatible values and
        applies the primary key to C{obj} before anything else.
        """
        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs)
        for n in self.decodable_properties:
            f = self.fields[n]
            attrs[f.attname] = self._decodeValue(f, attrs[n])
        # primary key of django object must always be set first for
        # relationships with other model objects to work properly
        # and dict.iteritems() does not guarantee order
        #
        # django also forces the use only one attribute as primary key, so
        # our obj._meta.pk.attname check is sufficient)
        try:
            setattr(obj, obj._meta.pk.attname, attrs[obj._meta.pk.attname])
            del attrs[obj._meta.pk.attname]
        except KeyError:
            pass
        return attrs
def getDjangoObjects(context):
    """
    Returns a reference to the C{django_objects} on the context. If it doesn't
    exist then it is created.
    @param context: The context to load the C{django_objects} index from.
    @type context: Instance of L{pyamf.BaseContext}
    @return: The C{django_objects} index reference.
    @rtype: Instance of L{DjangoReferenceCollection}
    @since: 0.5
    """
    try:
        return context.django_objects
    except AttributeError:
        # Created lazily on first use.
        context.django_objects = DjangoReferenceCollection()

    return context.django_objects
def writeDjangoObject(self, obj, *args, **kwargs):
    """
    The Django ORM creates new instances of objects for each db request.
    This is a problem for PyAMF as it uses the id(obj) of the object to do
    reference checking.
    We could just ignore the problem, but the objects are conceptually the
    same so the effort should be made to attempt to resolve references for a
    given object graph.
    We create a new map on the encoder context object which contains a dict of
    C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
    use the primary key to do the reference checking.
    @since: 0.5
    """
    # Anything that isn't a Django model goes down the normal encoding path.
    if not isinstance(obj, Model):
        self.writeNonDjangoObject(obj, *args, **kwargs)

        return

    klass = obj.__class__
    pk = obj.pk

    django_objects = getDjangoObjects(self.context)

    try:
        target = django_objects.getClassKey(klass, pk)
    except KeyError:
        # First time we see this instance - remember it for later lookups.
        django_objects.addClassKey(klass, pk, obj)
        target = obj

    self.writeNonDjangoObject(target, *args, **kwargs)
def install_django_reference_model_hook(mod):
    """
    Import hook for L{pyamf.amf0}/L{pyamf.amf3}: wraps the module's
    C{Encoder.writeObject} with L{writeDjangoObject}.
    @param mod: The module imported.
    @since: 0.4.1
    """
    encoder = mod.Encoder

    # Keep a single backup of the original implementation; installing the
    # hook twice must not overwrite the backup with the hook itself.
    if not hasattr(encoder, 'writeNonDjangoObject'):
        encoder.writeNonDjangoObject = encoder.writeObject

    encoder.writeObject = writeDjangoObject
# initialise the module here: hook into pyamf
pyamf.register_alias_type(DjangoClassAlias, Model)
# hook the L{writeDjangoObject} method to the Encoder class on import
imports.when_imported('pyamf.amf0', install_django_reference_model_hook)
imports.when_imported('pyamf.amf3', install_django_reference_model_hook)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE for details.
"""
SQLAlchemy adapter module.
@see: U{SQLAlchemy homepage (external)<http://www.sqlalchemy.org>}
@since: 0.4
"""
from sqlalchemy import orm, __version__
try:
from sqlalchemy.orm import class_mapper
except ImportError:
from sqlalchemy.orm.util import class_mapper
import pyamf
UnmappedInstanceError = None
# Probe class_mapper with an unmapped type (dict) to discover which
# exception class this SQLAlchemy version raises for unmapped classes -
# the class moved between SQLAlchemy releases.
try:
    class_mapper(dict)
except Exception, e:
    UnmappedInstanceError = e.__class__
class SaMappedClassAlias(pyamf.ClassAlias):
    """
    Class alias for SQLAlchemy-mapped classes. Adds book-keeping attributes
    (C{sa_key}, C{sa_lazy}) to the encoded payload and strips SQLAlchemy's
    internal instrumentation attributes.
    """
    # Dynamic attribute carrying the instance's primary key value(s).
    KEY_ATTR = 'sa_key'
    # Dynamic attribute listing mapped properties that were not loaded.
    LAZY_ATTR = 'sa_lazy'
    # SQLAlchemy instrumentation attributes that must never be encoded.
    EXCLUDED_ATTRS = [
        '_entity_name', '_instance_key', '_sa_adapter', '_sa_appender',
        '_sa_class_manager', '_sa_initiator', '_sa_instance_state',
        '_sa_instrumented', '_sa_iterator', '_sa_remover', '_sa_session_id',
        '_state'
    ]
    STATE_ATTR = '_sa_instance_state'
    # SQLAlchemy 0.4.x used a different attribute name for instance state.
    if __version__.startswith('0.4'):
        STATE_ATTR = '_state'
    def getCustomProperties(self):
        # Every mapped property is treated as a static, encodable and
        # decodable attribute.
        self.mapper = class_mapper(self.klass)
        self.exclude_attrs.update(self.EXCLUDED_ATTRS)
        self.properties = []
        for prop in self.mapper.iterate_properties:
            self.properties.append(prop.key)
        self.encodable_properties.update(self.properties)
        self.decodable_properties.update(self.properties)
        self.static_attrs.update(self.properties)
    def getEncodableAttributes(self, obj, **kwargs):
        """
        Returns a C{tuple} containing a dict of static and dynamic attributes
        for C{obj}.
        """
        sa, da = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs)
        if not da:
            da = {}
        lazy_attrs = []
        # primary_key_from_instance actually changes obj.__dict__ if
        # primary key properties do not already exist in obj.__dict__
        da[self.KEY_ATTR] = self.mapper.primary_key_from_instance(obj)
        # A mapped property that is absent from __dict__ has not been
        # loaded yet - record it so the decoder can skip it.
        for attr in self.properties:
            if attr not in obj.__dict__:
                lazy_attrs.append(attr)
        da[self.LAZY_ATTR] = lazy_attrs
        return sa, da
    def getDecodableAttributes(self, obj, attrs, **kwargs):
        """
        Strips the C{sa_key}/C{sa_lazy} book-keeping attributes and removes
        lazy-loaded values so SQLAlchemy does not consider them modified.
        """
        attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs)
        # Delete lazy-loaded attrs.
        #
        # Doing it this way ensures that lazy-loaded attributes are not
        # attached to the object, even if there is a default value specified
        # in the __init__ method.
        #
        # This is the correct behavior, because SQLAlchemy ignores __init__.
        # So, an object retreived from a DB with SQLAlchemy will not have a
        # lazy-loaded value, even if __init__ specifies a default value.
        if self.LAZY_ATTR in attrs:
            obj_state = None
            if hasattr(orm.attributes, 'instance_state'):
                obj_state = orm.attributes.instance_state(obj)
            for lazy_attr in attrs[self.LAZY_ATTR]:
                if lazy_attr in obj.__dict__:
                    # Delete directly from the dict, so
                    # SA callbacks are not triggered.
                    del obj.__dict__[lazy_attr]
                    # Delete from committed_state so SA thinks this attribute was
                    # never modified.
                    #
                    # If the attribute was set in the __init__ method,
                    # SA will think it is modified and will try to update
                    # it in the database.
                    if obj_state is not None:
                        if lazy_attr in obj_state.committed_state:
                            del obj_state.committed_state[lazy_attr]
                        if lazy_attr in obj_state.dict:
                            del obj_state.dict[lazy_attr]
                    if lazy_attr in attrs:
                        del attrs[lazy_attr]
            del attrs[self.LAZY_ATTR]
        if self.KEY_ATTR in attrs:
            del attrs[self.KEY_ATTR]
        return attrs
def is_class_sa_mapped(klass):
    """
    Whether C{klass} (or the type of the supplied instance) is mapped by
    SQLAlchemy.
    @rtype: C{bool}
    """
    # Accept instances as well as classes.
    target = klass if isinstance(klass, type) else type(klass)

    try:
        class_mapper(target)
    except UnmappedInstanceError:
        return False

    return True
pyamf.register_alias_type(SaMappedClassAlias, is_class_sa_mapped)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF0 implementation.
C{AMF0} supports the basic data types used for the NetConnection, NetStream,
LocalConnection, SharedObjects and other classes in the Adobe Flash Player.
@see: U{Official AMF0 Specification in English (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/amf0_spec_121207.pdf>}
@see: U{Official AMF0 Specification in Japanese (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/JP_amf0_spec_121207.pdf>}
@see: U{AMF documentation on OSFlash (external)
<http://osflash.org/documentation/amf>}
@since: 0.1
"""
import datetime
import types
import copy
import pyamf
from pyamf import util
#: Represented as 9 bytes: 1 byte for C{0×00} and 8 bytes a double
#: representing the value of the number.
TYPE_NUMBER = '\x00'
#: Represented as 2 bytes: 1 byte for C{0×01} and a second, C{0×00}
#: for C{False}, C{0×01} for C{True}.
TYPE_BOOL = '\x01'
#: Represented as 3 bytes + len(String): 1 byte C{0×02}, then a UTF8 string,
#: including the top two bytes representing string length as a C{int}.
TYPE_STRING = '\x02'
#: Represented as 1 byte, C{0×03}, then pairs of UTF8 string, the key, and
#: an AMF element, ended by three bytes, C{0×00} C{0×00} C{0×09}.
TYPE_OBJECT = '\x03'
#: MovieClip does not seem to be supported by Remoting.
#: It may be used by other AMF clients such as SharedObjects.
TYPE_MOVIECLIP = '\x04'
#: 1 single byte, C{0×05} indicates null.
TYPE_NULL = '\x05'
#: 1 single byte, C{0×06} indicates null.
TYPE_UNDEFINED = '\x06'
#: When an ActionScript object refers to itself, such C{this.self = this},
#: or when objects are repeated within the same scope (for example, as the
#: two parameters of the same function called), a code of C{0×07} and an
#: C{int}, the reference number, are written.
TYPE_REFERENCE = '\x07'
#: A MixedArray is indicated by code C{0×08}, then a Long representing the
#: highest numeric index in the array, or 0 if there are none or they are
#: all negative. After that follow the elements in key : value pairs.
TYPE_MIXEDARRAY = '\x08'
#: @see: L{TYPE_OBJECT}
TYPE_OBJECTTERM = '\x09'
#: An array is indicated by C{0x0A}, then a Long for array length, then the
#: array elements themselves. Arrays are always sparse; values for
#: inexistant keys are set to null (C{0×06}) to maintain sparsity.
TYPE_ARRAY = '\x0A'
#: Date is represented as C{00x0B}, then a double, then an C{int}. The double
#: represents the number of milliseconds since 01/01/1970. The C{int} represents
#: the timezone offset in minutes between GMT. Note for the latter than values
#: greater than 720 (12 hours) are represented as M{2^16} - the value. Thus GMT+1
#: is 60 while GMT-5 is 65236.
TYPE_DATE = '\x0B'
#: LongString is reserved for strings larger then M{2^16} characters long. It
#: is represented as C{00x0C} then a LongUTF.
TYPE_LONGSTRING = '\x0C'
#: Trying to send values which don’t make sense, such as prototypes, functions,
#: built-in objects, etc. will be indicated by a single C{00x0D} byte.
TYPE_UNSUPPORTED = '\x0D'
#: Remoting Server -> Client only.
#: @see: L{RecordSet}
#: @see: U{RecordSet structure on OSFlash (external)
#: <http://osflash.org/documentation/amf/recordset>}
TYPE_RECORDSET = '\x0E'
#: The XML element is indicated by C{00x0F} and followed by a LongUTF containing
#: the string representation of the XML object. The receiving gateway may which
#: to wrap this string inside a language-specific standard XML object, or simply
#: pass as a string.
TYPE_XML = '\x0F'
#: A typed object is indicated by C{0×10}, then a UTF string indicating class
#: name, and then the same structure as a normal C{0×03} Object. The receiving
#: gateway may use a mapping scheme, or send back as a vanilla object or
#: associative array.
TYPE_TYPEDOBJECT = '\x10'
#: An AMF message sent from an AVM+ client such as the Flash Player 9 may break
#: out into L{AMF3<pyamf.amf3>} mode. In this case the next byte will be the
#: AMF3 type code and the data will be in AMF3 format until the decoded object
#: reaches it’s logical conclusion (for example, an object has no more keys).
TYPE_AMF3 = '\x11'
class Context(pyamf.BaseContext):
    """
    I hold the AMF0 context for en/decoding streams.
    AMF0 object references start at index 1.
    @ivar amf3_objs: A list of objects that have been decoded in
        L{AMF3<pyamf.amf3>}.
    @type amf3_objs: C{list}
    """

    def __init__(self, **kwargs):
        self.amf3_objs = []

        pyamf.BaseContext.__init__(self, **kwargs)

    def clear(self):
        """
        Clears the context.
        """
        pyamf.BaseContext.clear(self)

        self.amf3_objs = []

        # The lazily-created AMF3 sub-context must be cleared too, if it
        # exists.
        if hasattr(self, 'amf3_context'):
            self.amf3_context.clear()

    def hasAMF3ObjectReference(self, obj):
        """
        Whether C{obj} has already been recorded as an AMF3 object.
        @rtype: C{bool}
        """
        # BUGFIX: removed unreachable dead code that followed this return
        # in the original (a getReferenceTo/ReferenceError branch that
        # could never execute, and would have failed on a plain list).
        return obj in self.amf3_objs

    def addAMF3Object(self, obj):
        """
        Adds an AMF3 reference to C{obj}.
        @type obj: C{mixed}
        @param obj: The object to add to the context.
        @return: C{None} - C{list.append} has no useful return value (the
            original docstring incorrectly claimed an C{int} reference).
        """
        return self.amf3_objs.append(obj)

    def __copy__(self):
        # Carry over the exceptions flag; shallow-copy the AMF3 object list.
        cpy = self.__class__(exceptions=self.exceptions)
        cpy.amf3_objs = copy.copy(self.amf3_objs)

        return cpy
class Decoder(pyamf.BaseDecoder):
    """
    Decodes an AMF0 stream.
    """

    context_class = Context

    # XXX nick: Do we need to support TYPE_MOVIECLIP here?
    # maps AMF0 type markers to the bound method that decodes that type
    type_map = {
        TYPE_NUMBER: 'readNumber',
        TYPE_BOOL: 'readBoolean',
        TYPE_STRING: 'readString',
        TYPE_OBJECT: 'readObject',
        TYPE_NULL: 'readNull',
        TYPE_UNDEFINED: 'readUndefined',
        TYPE_REFERENCE: 'readReference',
        TYPE_MIXEDARRAY: 'readMixedArray',
        TYPE_ARRAY: 'readList',
        TYPE_DATE: 'readDate',
        TYPE_LONGSTRING: 'readLongString',
        # TODO: do we need a special value here?
        TYPE_UNSUPPORTED: 'readNull',
        TYPE_XML: 'readXML',
        TYPE_TYPEDOBJECT: 'readTypedObject',
        TYPE_AMF3: 'readAMF3'
    }

    def readNumber(self):
        """
        Reads a ActionScript C{Number} value.

        In ActionScript 1 and 2 the C{NumberASTypes} type represents all
        numbers, both floats and integers.

        @rtype: C{int} or C{float}
        """
        return _check_for_int(self.stream.read_double())

    def readBoolean(self):
        """
        Reads a ActionScript C{Boolean} value.

        @rtype: C{bool}
        @return: Boolean.
        """
        return bool(self.stream.read_uchar())

    def readNull(self):
        """
        Reads a ActionScript C{null} value.

        @return: C{None}
        @rtype: C{None}
        """
        return None

    def readUndefined(self):
        """
        Reads an ActionScript C{undefined} value.

        @return: L{Undefined<pyamf.Undefined>}
        """
        return pyamf.Undefined

    def readMixedArray(self):
        """
        Read mixed array.

        @rtype: C{dict}
        @return: C{dict} read from the stream
        """
        # The declared length must still be consumed from the stream, but
        # decoding is terminated by the object-end marker, not this count.
        # (Previously stored in a local named `len`, shadowing the builtin.)
        self.stream.read_ulong()

        obj = pyamf.MixedArray()
        self.context.addObject(obj)

        self._readObject(obj)

        # normalise keys that parse as integers into real C{int} keys
        # (the original also built/sorted an `ikeys` list that was never
        # used afterwards - removed as dead code)
        for key in obj.keys():
            try:
                ikey = int(key)
                obj[ikey] = obj[key]
                del obj[key]
            except ValueError:
                # XXX: do we want to ignore this?
                pass

        return obj

    def readList(self):
        """
        Read a C{list} from the data stream.

        @rtype: C{list}
        @return: C{list}
        """
        obj = []
        self.context.addObject(obj)

        # renamed from `len`, which shadowed the builtin
        length = self.stream.read_ulong()

        for i in xrange(length):
            obj.append(self.readElement())

        return obj

    def readTypedObject(self):
        """
        Reads an ActionScript object from the stream and attempts to
        'cast' it.

        @see: L{load_class<pyamf.load_class>}
        """
        classname = self.readString()
        alias = None

        try:
            alias = pyamf.get_class_alias(classname)
            ret = alias.createInstance(codec=self)
        except pyamf.UnknownClassAlias:
            if self.strict:
                raise

            # in non-strict mode fall back to a dynamic typed wrapper
            ret = pyamf.TypedObject(classname)

        self.context.addObject(ret)
        self._readObject(ret, alias)

        return ret

    def readAMF3(self):
        """
        Read AMF3 elements from the data stream.

        @rtype: C{mixed}
        @return: The AMF3 element read from the stream
        """
        # lazily create (and cache on the context) an AMF3 sub-context and
        # decoder sharing this decoder's stream
        if not hasattr(self.context, 'amf3_context'):
            self.context.amf3_context = pyamf.get_context(
                pyamf.AMF3, exceptions=False)

        if not hasattr(self.context, 'amf3_decoder'):
            self.context.amf3_decoder = pyamf.get_decoder(
                pyamf.AMF3, self.stream, self.context.amf3_context)

        decoder = self.context.amf3_decoder
        element = decoder.readElement()
        self.context.addAMF3Object(element)

        return element

    def readString(self):
        """
        Reads a string from the data stream.

        @rtype: C{str}
        @return: string
        """
        # renamed from `len`, which shadowed the builtin
        length = self.stream.read_ushort()

        return self.stream.read_utf8_string(length)

    def _readObject(self, obj, alias=None):
        # reads (key, element) pairs until the object-end marker is seen,
        # then applies them to obj via the alias (if any)
        obj_attrs = dict()

        key = self.readString().encode('utf8')

        while self.stream.peek() != TYPE_OBJECTTERM:
            obj_attrs[key] = self.readElement()
            key = self.readString().encode('utf8')

        # discard the end marker (TYPE_OBJECTTERM)
        self.stream.read(1)

        if alias:
            alias.applyAttributes(obj, obj_attrs, codec=self)
        else:
            util.set_attrs(obj, obj_attrs)

    def readObject(self):
        """
        Reads an object from the data stream.

        @rtype: L{ASObject<pyamf.ASObject>}
        """
        obj = pyamf.ASObject()
        self.context.addObject(obj)

        self._readObject(obj)

        return obj

    def readReference(self):
        """
        Reads a reference from the data stream.

        @raise pyamf.ReferenceError: Unknown reference.
        """
        idx = self.stream.read_ushort()
        o = self.context.getObject(idx)

        if o is None:
            raise pyamf.ReferenceError('Unknown reference %d' % (idx,))

        return o

    def readDate(self):
        """
        Reads a UTC date from the data stream. Client and servers are
        responsible for applying their own timezones.

        Date: C{0x0B T7 T6} .. C{T0 Z1 Z2 T7} to C{T0} form a 64 bit
        Big Endian number that specifies the number of nanoseconds
        that have passed since 1/1/1970 0:00 to the specified time.
        This format is UTC 1970. C{Z1} and C{Z0} for a 16 bit Big
        Endian number indicating the indicated time's timezone in
        minutes.
        """
        ms = self.stream.read_double() / 1000.0
        # the timezone field must be consumed from the stream even though
        # its value is ignored below
        tz = self.stream.read_short()

        # Timezones are ignored
        d = util.get_datetime(ms)

        if self.timezone_offset:
            d = d + self.timezone_offset

        self.context.addObject(d)

        return d

    def readLongString(self):
        """
        Read UTF8 string.
        """
        # renamed from `len`, which shadowed the builtin
        length = self.stream.read_ulong()

        return self.stream.read_utf8_string(length)

    def readXML(self):
        """
        Read XML.
        """
        data = self.readLongString()
        xml = util.ET.fromstring(data)
        self.context.addObject(xml)

        return xml
class Encoder(pyamf.BaseEncoder):
    """
    Encodes an AMF0 stream.

    @ivar use_amf3: A flag to determine whether this encoder knows about AMF3.
    @type use_amf3: C{bool}
    """

    context_class = Context

    # (type-tuple or predicate, handler-method-name) pairs, checked in order;
    # the catch-all object entry must therefore come last
    type_map = [
        ((types.BuiltinFunctionType, types.BuiltinMethodType,
            types.FunctionType, types.GeneratorType, types.ModuleType,
            types.LambdaType, types.MethodType), "writeFunc"),
        ((types.NoneType,), "writeNull"),
        ((bool,), "writeBoolean"),
        ((int,long,float), "writeNumber"),
        ((types.StringTypes,), "writeString"),
        ((pyamf.ASObject,), "writeObject"),
        ((pyamf.MixedArray,), "writeMixedArray"),
        ((types.ListType, types.TupleType,), "writeArray"),
        ((datetime.date, datetime.datetime, datetime.time), "writeDate"),
        ((util.is_ET_element,), "writeXML"),
        ((lambda x: x is pyamf.Undefined,), "writeUndefined"),
        ((types.ClassType, types.TypeType), "writeClass"),
        ((types.InstanceType,types.ObjectType,), "writeObject"),
    ]

    def __init__(self, *args, **kwargs):
        # pop our extra kwarg before delegating so BaseEncoder never sees it
        self.use_amf3 = kwargs.pop('use_amf3', False)

        pyamf.BaseEncoder.__init__(self, *args, **kwargs)

    def writeType(self, t):
        """
        Writes the one-byte type marker to the stream.

        @type t: C{str}
        @param t: ActionScript type marker.
        """
        self.stream.write(t)

    def writeUndefined(self, data):
        """
        Writes the L{undefined<TYPE_UNDEFINED>} data type to the stream.

        @param data: Ignored; only the type marker is written.
        """
        self.writeType(TYPE_UNDEFINED)

    def writeClass(self, *args, **kwargs):
        """
        Classes cannot be serialised.

        @raise pyamf.EncodeError: Always.
        """
        raise pyamf.EncodeError("Class objects cannot be serialised")

    def writeFunc(self, *args, **kwargs):
        """
        Functions cannot be serialised.

        @raise pyamf.EncodeError: Always.
        """
        raise pyamf.EncodeError("Callables cannot be serialised")

    def writeUnsupported(self, data):
        """
        Writes L{unsupported<TYPE_UNSUPPORTED>} data type to the
        stream.

        @param data: Ignored; only the type marker is written.
        """
        self.writeType(TYPE_UNSUPPORTED)

    def _writeElementFunc(self, data):
        """
        Gets a function based on the type of data.

        @see: L{pyamf.BaseEncoder._writeElementFunc}
        """
        # There is a very specific use case that we must check for.
        # In the context there is an array of amf3_objs that contain
        # references to objects that are to be encoded in amf3.
        if self.use_amf3 and self.context.hasAMF3ObjectReference(data):
            return self.writeAMF3

        return pyamf.BaseEncoder._writeElementFunc(self, data)

    def writeElement(self, data):
        """
        Writes the data.

        @type data: C{mixed}
        @param data: The data to be encoded to the AMF0 data stream.
        @raise EncodeError: Cannot find encoder func.
        """
        func = self._writeElementFunc(data)

        if func is None:
            raise pyamf.EncodeError("Cannot find encoder func for %r" % (data,))

        func(data)

    def writeNull(self, n):
        """
        Write null type to data stream.

        @type n: C{None}
        @param n: Is ignored.
        """
        self.writeType(TYPE_NULL)

    def writeArray(self, a):
        """
        Write array to the stream.

        @type a: C{list} or C{tuple}
        @param a: The array data to be encoded to the AMF0 data stream.
        """
        alias = self.context.getClassAlias(a.__class__)

        if alias.external:
            # a is a subclassed list with a registered alias - push to the
            # correct method
            self.writeObject(a)

            return

        # emit a back-reference if this exact array was already written
        if self.writeReference(a) is not None:
            return

        self.context.addObject(a)

        self.writeType(TYPE_ARRAY)
        self.stream.write_ulong(len(a))

        for data in a:
            self.writeElement(data)

    def writeNumber(self, n):
        """
        Write number to the data stream.

        @type n: C{int}, C{long} or C{float}
        @param n: The number data to be encoded to the AMF0 data stream.
        """
        self.writeType(TYPE_NUMBER)
        self.stream.write_double(float(n))

    def writeBoolean(self, b):
        """
        Write boolean to the data stream.

        @type b: C{bool}
        @param b: The boolean data to be encoded to the AMF0 data stream.
        """
        self.writeType(TYPE_BOOL)

        if b:
            self.stream.write_uchar(1)
        else:
            self.stream.write_uchar(0)

    def writeString(self, s, writeType=True):
        """
        Write string to the data stream.

        @type s: C{basestring} (other types are coerced via C{unicode})
        @param s: The string data to be encoded to the AMF0 data stream.
        @type writeType: C{bool}
        @param writeType: Write data type.
        """
        t = type(s)

        if t is str:
            # assumed to already be utf8-encoded bytes - written as-is
            pass
        elif isinstance(s, unicode):
            s = s.encode('utf8')
        elif not isinstance(s, basestring):
            s = unicode(s).encode('utf8')

        l = len(s)

        # strings longer than 0xffff bytes need the long-string marker,
        # which carries a 32-bit length prefix instead of a 16-bit one
        if writeType:
            if l > 0xffff:
                self.writeType(TYPE_LONGSTRING)
            else:
                self.writeType(TYPE_STRING)

        if l > 0xffff:
            self.stream.write_ulong(l)
        else:
            self.stream.write_ushort(l)

        self.stream.write(s)

    def writeReference(self, o):
        """
        Write reference to the data stream.

        @param o: The object to look up in the context's reference table.
        @return: The reference index, or C{None} when C{o} has not been
            written before (in which case nothing is written).
        """
        idx = self.context.getObjectReference(o)

        if idx is None:
            return None

        self.writeType(TYPE_REFERENCE)
        self.stream.write_ushort(idx)

        return idx

    def _writeDict(self, o):
        """
        Write C{dict} to the data stream.

        @type o: C{iterable}
        @param o: The C{dict} data to be encoded to the AMF0 data
            stream.
        """
        for key, val in o.iteritems():
            self.writeString(key, False)
            self.writeElement(val)

    def writeMixedArray(self, o):
        """
        Write mixed array to the data stream.

        @type o: L{MixedArray<pyamf.MixedArray>}
        @param o: The mixed array data to be encoded to the AMF0
            data stream.
        """
        if self.writeReference(o) is not None:
            return

        self.context.addObject(o)
        self.writeType(TYPE_MIXEDARRAY)

        # TODO: optimise this
        # work out the highest integer index
        try:
            # list comprehensions to save the day
            max_index = max([y[0] for y in o.items()
                if isinstance(y[0], (int, long))])

            if max_index < 0:
                max_index = 0
        except ValueError:
            # no integer keys at all
            max_index = 0

        self.stream.write_ulong(max_index)

        self._writeDict(o)
        self._writeEndObject()

    def _writeEndObject(self):
        # an empty string key (0x0000) followed by the object-end marker
        self.stream.write('\x00\x00')
        self.writeType(TYPE_OBJECTTERM)

    def writeObject(self, o):
        """
        Write object to the stream.

        @param o: The object data to be encoded to the AMF0 data stream.
        """
        if self.use_amf3:
            self.writeAMF3(o)

            return

        if self.writeReference(o) is not None:
            return

        self.context.addObject(o)

        alias = self.context.getClassAlias(o.__class__)
        alias.compile()

        if alias.amf3:
            # the alias explicitly requests AMF3 encoding for this class
            self.writeAMF3(o)

            return

        if alias.anonymous:
            self.writeType(TYPE_OBJECT)
        else:
            self.writeType(TYPE_TYPEDOBJECT)
            self.writeString(alias.alias, False)

        sa, da = alias.getEncodableAttributes(o, codec=self)

        # static attributes are written in the order declared by the alias
        if sa:
            for key in alias.static_attrs:
                self.writeString(key, False)
                self.writeElement(sa[key])

        if da:
            for key, value in da.iteritems():
                self.writeString(key, False)
                self.writeElement(value)

        self._writeEndObject()

    def writeDate(self, d):
        """
        Writes a date to the data stream.

        @type d: Instance of C{datetime.datetime}
        @param d: The date to be encoded to the AMF0 data stream.
        @raise pyamf.EncodeError: A C{datetime.time} instance was passed.
        """
        if isinstance(d, datetime.time):
            raise pyamf.EncodeError('A datetime.time instance was found but '
                'AMF0 has no way to encode time objects. Please use '
                'datetime.datetime instead (got:%r)' % (d,))

        # According to the Red5 implementation of AMF0, dates references are
        # created, but not used.

        if self.timezone_offset is not None:
            d -= self.timezone_offset

        secs = util.get_timestamp(d)
        tz = 0

        self.writeType(TYPE_DATE)
        # milliseconds since the epoch, followed by a zeroed timezone field
        self.stream.write_double(secs * 1000.0)
        self.stream.write_short(tz)

    def writeXML(self, e):
        """
        Write XML to the data stream.

        @param e: The XML element to be encoded to the AMF0 data stream.
        """
        if self.use_amf3 is True:
            self.writeAMF3(e)

            return

        self.writeType(TYPE_XML)

        data = util.ET.tostring(e, 'utf-8')
        self.stream.write_ulong(len(data))
        self.stream.write(data)

    def writeAMF3(self, data):
        """
        Writes an element to the datastream in L{AMF3<pyamf.amf3>} format.

        @type data: C{mixed}
        @param data: The data to be encoded to the AMF0 data stream.
        """
        # lazily create (and cache on the context) an AMF3 sub-context and
        # encoder sharing this encoder's stream
        if not hasattr(self.context, 'amf3_context'):
            self.context.amf3_context = pyamf.get_context(pyamf.AMF3, exceptions=False)

        if not hasattr(self.context, 'amf3_encoder'):
            self.context.amf3_encoder = pyamf.get_encoder(
                pyamf.AMF3, self.stream, self.context.amf3_context)

        self.context.addAMF3Object(data)

        encoder = self.context.amf3_encoder

        self.writeType(TYPE_AMF3)
        encoder.writeElement(data)
def decode(*args, **kwargs):
    """
    A generator that yields each AMF0 element decoded from the datastream,
    stopping cleanly when the stream is exhausted.
    """
    amf0_decoder = Decoder(*args, **kwargs)

    while True:
        try:
            element = amf0_decoder.readElement()
        except pyamf.EOStream:
            # end of stream terminates the generator
            return

        yield element
def encode(*args, **kwargs):
    """
    A helper function to encode elements into the AMF0 format.

    Each positional argument is written, in order, to a fresh L{Encoder};
    keyword arguments are forwarded verbatim to the L{Encoder} constructor
    (e.g. C{context}).

    @rtype: C{StringIO}
    @return: The stream holding the encoded data.
    """
    amf0_encoder = Encoder(**kwargs)

    for value in args:
        amf0_encoder.writeElement(value)

    return amf0_encoder.stream
class RecordSet(object):
    """
    I represent the C{RecordSet} class used in Adobe Flash Remoting to hold
    (amongst other things) SQL records.

    @ivar columns: The columns to send.
    @type columns: List of strings.
    @ivar items: The C{RecordSet} data.
    @type items: List of lists, the order of the data corresponds to the order
        of the columns.
    @ivar service: Service linked to the C{RecordSet}.
    @ivar id: The id of the C{RecordSet}.
    @type id: C{str}

    @see: U{RecordSet on OSFlash (external)
        <http://osflash.org/documentation/amf/recordset>}
    """

    class __amf__:
        alias = 'RecordSet'
        static = ('serverInfo',)
        dynamic = False

    def __init__(self, columns=None, items=None, service=None, id=None):
        # Fix: the defaults were previously mutable ([] shared across every
        # instance); None sentinels give each RecordSet its own lists while
        # remaining backward-compatible for callers.
        if columns is None:
            columns = []

        if items is None:
            items = []

        self.columns = columns
        self.items = items
        self.service = service
        self.id = id

    def _get_server_info(self):
        # builds the serverInfo structure Flash Remoting clients expect
        ret = pyamf.ASObject(totalCount=len(self.items), cursor=1, version=1,
            initialData=self.items, columnNames=self.columns)

        if self.service is not None:
            ret.update({'serviceName': str(self.service['name'])})

        if self.id is not None:
            ret.update({'id': str(self.id)})

        return ret

    def _set_server_info(self, val):
        self.columns = val['columnNames']
        self.items = val['initialData']

        try:
            # TODO nick: find relevant service and link in here.
            self.service = dict(name=val['serviceName'])
        except KeyError:
            self.service = None

        try:
            self.id = val['id']
        except KeyError:
            self.id = None

    # serverInfo is the single static attribute serialised over AMF
    serverInfo = property(_get_server_info, _set_server_info)

    def __repr__(self):
        ret = '<%s.%s object' % (self.__module__, self.__class__.__name__)

        if self.id is not None:
            ret += ' id=%s' % self.id

        if self.service is not None:
            ret += ' service=%s' % self.service

        ret += ' at 0x%x>' % id(self)

        return ret
# expose RecordSet to the AMF class-mapping machinery under the
# 'RecordSet' alias declared in its __amf__ metadata
pyamf.register_class(RecordSet)
def _check_for_int(x):
"""
This is a compatibility function that takes a C{float} and converts it to an
C{int} if the values are equal.
"""
try:
y = int(x)
except (OverflowError, ValueError):
pass
else:
# There is no way in AMF0 to distinguish between integers and floats
if x == x and y == x:
return y
return x
# check for some Python 2.3 problems with floats
try:
    float('nan')
except ValueError:
    # this interpreter cannot even parse 'nan'; nothing to work around
    pass
else:
    if float('nan') == 0:
        # broken platform: NaN compares equal to 0, so int(nan) would
        # "succeed" and _check_for_int would wrongly return 0 for NaN;
        # wrap it so NaN-like values are returned untouched
        def check_nan(func):
            def f2(x):
                # cheap textual NaN detection ('nan', 'NaN', '-nan', ...)
                if str(x).lower().find('nan') >= 0:
                    return x

                return f2.func(x)

            f2.func = func

            return f2

        _check_for_int = check_nan(_check_for_int)
# -- module boundary (stray "| Python |" concatenation artifact) --
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF3 RemoteObject support.
@see: U{RemoteObject on LiveDocs
<http://livedocs.adobe.com/flex/3/langref/mx/rpc/remoting/RemoteObject.html>}
@since: 0.1.0
"""
import calendar
import time
import uuid
import sys
import pyamf
from pyamf import remoting
from pyamf.flex import messaging
class BaseServerError(pyamf.BaseError):
    """
    Base server error.

    Root of the server-side error hierarchy declared in this module.
    """
class ServerCallFailed(BaseServerError):
    """
    A catchall error.
    """
    # fault code reported back to the client via generate_error
    _amf_code = 'Server.Call.Failed'
def generate_random_id():
    """
    Returns a freshly generated random UUID (version 4), as a string.
    """
    new_id = uuid.uuid4()

    return str(new_id)
def generate_acknowledgement(request=None):
    """
    Builds an acknowledge message with fresh message/client ids and the
    current UTC timestamp, optionally correlated to C{request}.
    """
    response = messaging.AcknowledgeMessage()

    response.messageId = generate_random_id()
    response.clientId = generate_random_id()
    # seconds since the epoch, UTC
    response.timestamp = calendar.timegm(time.gmtime())

    if request:
        response.correlationId = request.messageId

    return response
def generate_error(request, cls, e, tb, include_traceback=False):
    """
    Builds an L{ErrorMessage<pyamf.flex.messaging.ErrorMessage>} based on the
    last traceback and the request that was sent.
    """
    import traceback

    # prefer the exception class's declared AMF fault code, falling back to
    # its plain class name
    code = getattr(cls, '_amf_code', cls.__name__)

    detail = ''
    rootCause = None

    if include_traceback:
        rootCause = e
        # NOTE: this strips the literal two-character sequence backslash+n
        # from each formatted line (kept as-is to preserve behaviour)
        detail = [line.replace("\\n", '')
            for line in traceback.format_exception(cls, e, tb)]

    return messaging.ErrorMessage(messageId=generate_random_id(),
        clientId=generate_random_id(), timestamp=calendar.timegm(time.gmtime()),
        correlationId=request.messageId, faultCode=code, faultString=str(e),
        faultDetail=str(detail), extendedData=detail, rootCause=rootCause)
class RequestProcessor(object):
    """
    Processes AMF3 RemoteObject requests (Command/Async/Remoting messages)
    on behalf of a remoting gateway.
    """

    def __init__(self, gateway):
        # the gateway provides service lookup, preprocessing, invocation
        # and the debug flag used when building error responses
        self.gateway = gateway

    def buildErrorResponse(self, request, error=None):
        """
        Builds an error response.

        @param request: The AMF request
        @type request: L{Request<pyamf.remoting.Request>}
        @param error: Optional C{(cls, e, tb)} tuple; when omitted the
            exception currently being handled is used.
        @return: The AMF response
        @rtype: L{Response<pyamf.remoting.Response>}
        """
        if error is not None:
            cls, e, tb = error
        else:
            cls, e, tb = sys.exc_info()

        return generate_error(request, cls, e, tb, self.gateway.debug)

    def _getBody(self, amf_request, ro_request, **kwargs):
        """
        Dispatches C{ro_request} to the handler matching its message type.

        @raise ServerCallFailed: Unknown request.
        """
        # NOTE(review): isinstance order matters if these message classes
        # are related by inheritance - CommandMessage is tested first
        if isinstance(ro_request, messaging.CommandMessage):
            return self._processCommandMessage(amf_request, ro_request, **kwargs)
        elif isinstance(ro_request, messaging.RemotingMessage):
            return self._processRemotingMessage(amf_request, ro_request, **kwargs)
        elif isinstance(ro_request, messaging.AsyncMessage):
            return self._processAsyncMessage(amf_request, ro_request, **kwargs)
        else:
            raise ServerCallFailed("Unknown request: %s" % ro_request)

    def _processCommandMessage(self, amf_request, ro_request, **kwargs):
        """
        Handles ping/login/disconnect command operations.

        @raise ServerCallFailed: Unknown Command operation.
        @raise ServerCallFailed: Authorization is not supported in RemoteObject.
        """
        ro_response = generate_acknowledgement(ro_request)

        if ro_request.operation == messaging.CommandMessage.PING_OPERATION:
            # a ping is acknowledged with a True body
            ro_response.body = True

            return remoting.Response(ro_response)
        elif ro_request.operation == messaging.CommandMessage.LOGIN_OPERATION:
            raise ServerCallFailed("Authorization is not supported in RemoteObject")
        elif ro_request.operation == messaging.CommandMessage.DISCONNECT_OPERATION:
            # disconnect needs no body, just the acknowledgement
            return remoting.Response(ro_response)
        else:
            raise ServerCallFailed("Unknown Command operation %s" % ro_request.operation)

    def _processAsyncMessage(self, amf_request, ro_request, **kwargs):
        """
        Acknowledges an async message with a C{True} body.
        """
        ro_response = generate_acknowledgement(ro_request)
        ro_response.body = True

        return remoting.Response(ro_response)

    def _processRemotingMessage(self, amf_request, ro_request, **kwargs):
        """
        Resolves the requested service, runs the gateway preprocessor and
        invokes the service, returning its result as an acknowledgement.
        """
        ro_response = generate_acknowledgement(ro_request)

        service_name = ro_request.operation

        if hasattr(ro_request, 'destination') and ro_request.destination:
            # qualify the operation name with its destination, if present
            service_name = '%s.%s' % (ro_request.destination, service_name)

        service_request = self.gateway.getServiceRequest(amf_request,
            service_name)

        # fire the preprocessor (if there is one)
        self.gateway.preprocessRequest(service_request, *ro_request.body,
            **kwargs)

        ro_response.body = self.gateway.callServiceRequest(service_request,
            *ro_request.body, **kwargs)

        return remoting.Response(ro_response)

    def __call__(self, amf_request, **kwargs):
        """
        Processes an AMF3 Remote Object request.

        @param amf_request: The request to be processed.
        @type amf_request: L{Request<pyamf.remoting.Request>}
        @return: The response to the request.
        @rtype: L{Response<pyamf.remoting.Response>}
        """
        ro_request = amf_request.body[0]

        try:
            return self._getBody(amf_request, ro_request, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            # interpreter-exit exceptions are never converted to AMF errors
            raise
        except:
            # any other failure becomes an AMF error response
            return remoting.Response(self.buildErrorResponse(ro_request),
                status=remoting.STATUS_ERROR)
# -- module boundary (stray "| Python |" concatenation artifact) --
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Gateway for Google App Engine.
This gateway allows you to expose functions in Google App Engine web
applications to AMF clients and servers.
@see: U{Google App Engine homepage (external)
<http://code.google.com/appengine>}
@since: 0.3.1
"""
import sys
import os.path
# Remove this module's own directory from sys.path before importing the
# 'google' package.
# NOTE(review): presumably this avoids a local package shadowing the real
# google.appengine namespace - confirm against the package layout.
try:
    sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
except ValueError:
    # path entry was not present; nothing to do
    pass

# __import__ by dotted name ensures the submodule chain is initialised
google = __import__('google.appengine.ext.webapp')
webapp = google.appengine.ext.webapp
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['WebAppGateway']
class WebAppGateway(webapp.RequestHandler, gateway.BaseGateway):
    """
    Google App Engine Remoting Gateway.
    """

    # NOTE(review): presumably required so webapp can treat an already
    # constructed gateway instance as a handler - confirm
    __name__ = None

    def __init__(self, *args, **kwargs):
        gateway.BaseGateway.__init__(self, *args, **kwargs)

    def _writeErrorResponse(self, status, body, fe):
        """
        Logs the formatted exception and writes a plain-text error response,
        appending the traceback when C{self.debug} is set.

        Extracted from C{post()}, which previously repeated this sequence
        three times verbatim.

        @type status: C{int}
        @param status: HTTP status code.
        @type body: C{str}
        @param body: Human readable error message.
        @type fe: C{str}
        @param fe: Formatted exception from L{gateway.format_exception}.
        """
        if self.logger:
            self.logger.exception(fe)

        if self.debug:
            body += "\n\nTraceback:\n\n%s" % fe

        self.error(status)
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers['Server'] = gateway.SERVER_NAME
        self.response.out.write(body)

    def getResponse(self, request):
        """
        Processes the AMF request, returning an AMF response.

        @param request: The AMF Request.
        @type request: L{Envelope<pyamf.remoting.Envelope>}
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF Response.
        """
        response = remoting.Envelope(request.amfVersion, request.clientType)

        for name, message in request:
            # expose the AMF message on the HTTP request so services can
            # inspect it
            self.request.amf_request = message

            processor = self.getProcessor(message)
            response[name] = processor(message, http_request=self.request)

        return response

    def get(self):
        """
        Only POST is supported; reply to GET with 405 Method Not Allowed.
        """
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers['Server'] = gateway.SERVER_NAME

        self.error(405)
        self.response.out.write("405 Method Not Allowed\n\n"
            "To access this PyAMF gateway you must use POST requests "
            "(%s received)" % self.request.method)

    def post(self):
        """
        Decodes the AMF request body, processes each message and writes the
        encoded AMF response. Decode/processing/encode failures produce
        plain-text 400/500 responses via L{_writeErrorResponse}.
        """
        body = self.request.body_file.read()
        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(body, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self._writeErrorResponse(400, "400 Bad Request\n\nThe request "
                "body was unable to be successfully decoded.",
                gateway.format_exception())

            return

        if self.logger:
            self.logger.info("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(request)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self._writeErrorResponse(500, "500 Internal Server Error\n\nThe "
                "request was unable to be successfully processed.",
                gateway.format_exception())

            return

        if self.logger:
            self.logger.info("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except (KeyboardInterrupt, SystemExit):
            # consistency fix: previously a bare except here swallowed
            # interpreter-exit exceptions that the other handlers re-raise
            raise
        except:
            self._writeErrorResponse(500, "500 Internal Server Error\n\nThe "
                "request was unable to be encoded.",
                gateway.format_exception())

            return

        payload = stream.getvalue()

        self.response.headers['Content-Type'] = remoting.CONTENT_TYPE
        self.response.headers['Content-Length'] = str(len(payload))
        self.response.headers['Server'] = gateway.SERVER_NAME
        self.response.out.write(payload)

    def __call__(self, *args, **kwargs):
        # returning self lets an instantiated gateway be used where webapp
        # expects a handler factory
        return self
# -- module boundary (stray "| Python |" concatenation artifact) --
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Twisted server implementation.
This gateway allows you to expose functions in Twisted to AMF clients and
servers.
@see: U{Twisted homepage (external)<http://twistedmatrix.com>}
@since: 0.1.0
"""
import sys
import os.path
# Prune sys.path entries that could shadow the real 'twisted' package.
# NOTE(review): presumed intent - the empty string (cwd) and this module's
# own directory are removed before importing twisted; confirm.
try:
    sys.path.remove('')
except ValueError:
    pass

try:
    sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
except ValueError:
    pass

# import the package and force-load the submodules this module relies on
twisted = __import__('twisted')
__import__('twisted.internet.defer')
__import__('twisted.internet.threads')
__import__('twisted.web.resource')
__import__('twisted.web.server')

# convenient local aliases for the imported submodules
defer = twisted.internet.defer
threads = twisted.internet.threads
resource = twisted.web.resource
server = twisted.web.server
from pyamf import remoting
from pyamf.remoting import gateway, amf0, amf3
__all__ = ['TwistedGateway']
class AMF0RequestProcessor(amf0.RequestProcessor):
    """
    A Twisted friendly implementation of
    L{amf0.RequestProcessor<pyamf.remoting.amf0.RequestProcessor>}
    """

    def __call__(self, request, *args, **kwargs):
        """
        Calls the underlying service method.

        The pipeline is: authenticate -> preprocess -> invoke service, each
        step running via C{maybeDeferred} with a shared errback.

        @return: A C{Deferred} that will contain the AMF L{Response}.
        @rtype: C{twisted.internet.defer.Deferred}
        """
        try:
            service_request = self.gateway.getServiceRequest(
                request, request.target)
        except gateway.UnknownServiceError:
            # unknown service: reply immediately with an error response
            return defer.succeed(self.buildErrorResponse(request))

        response = remoting.Response(None)
        deferred_response = defer.Deferred()

        def eb(failure):
            # shared errback: log, then convert the failure into an AMF
            # error response (the deferred is fired via callback, not
            # errback, so the caller always receives a Response)
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.info(failure.getTraceback())

            deferred_response.callback(self.buildErrorResponse(
                request, (failure.type, failure.value, failure.tb)))

        def response_cb(result):
            # final step: attach the service result and fire the response
            if self.gateway.logger:
                self.gateway.logger.debug("AMF Response: %s" % (result,))

            response.body = result
            deferred_response.callback(response)

        def preprocess_cb(result):
            # preprocessing succeeded; invoke the service body
            d = defer.maybeDeferred(self._getBody, request, response,
                service_request, **kwargs)

            d.addCallback(response_cb).addErrback(eb)

        def auth_cb(result):
            # any result other than exactly True is an authentication failure
            if result is not True:
                response.status = remoting.STATUS_ERROR
                response.body = remoting.ErrorFault(code='AuthenticationError',
                    description='Authentication failed')

                deferred_response.callback(response)

                return

            d = defer.maybeDeferred(self.gateway.preprocessRequest,
                service_request, *args, **kwargs)

            d.addCallback(preprocess_cb).addErrback(eb)

        # we have a valid service, now attempt authentication
        d = defer.maybeDeferred(self.authenticateRequest, request,
            service_request, **kwargs)

        d.addCallback(auth_cb).addErrback(eb)

        return deferred_response
class AMF3RequestProcessor(amf3.RequestProcessor):
    """
    A Twisted friendly implementation of
    L{amf3.RequestProcessor<pyamf.remoting.amf3.RequestProcessor>}
    """

    def _processRemotingMessage(self, amf_request, ro_request, **kwargs):
        """
        Asynchronous remoting-message handler: returns a C{Deferred} firing
        with the AMF L{remoting.Response}.
        """
        ro_response = amf3.generate_acknowledgement(ro_request)
        # NOTE(review): amf_response appears to be unused below (response_cb
        # builds its own Response) - candidate for removal
        amf_response = remoting.Response(ro_response, status=remoting.STATUS_OK)

        try:
            service_name = ro_request.operation

            if hasattr(ro_request, 'destination') and ro_request.destination:
                # qualify the operation with its destination, if present
                service_name = '%s.%s' % (ro_request.destination, service_name)

            service_request = self.gateway.getServiceRequest(amf_request,
                service_name)
        except gateway.UnknownServiceError:
            return defer.succeed(remoting.Response(
                self.buildErrorResponse(ro_request),
                status=remoting.STATUS_ERROR))

        deferred_response = defer.Deferred()

        def eb(failure):
            # convert any failure into an AMF error response (the deferred
            # is fired via callback, not errback)
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.info(failure.getTraceback())

            ro_response = self.buildErrorResponse(ro_request, (failure.type,
                failure.value, failure.tb))
            deferred_response.callback(remoting.Response(ro_response,
                status=remoting.STATUS_ERROR))

        def response_cb(result):
            # service finished; wrap its result in the acknowledgement
            ro_response.body = result
            res = remoting.Response(ro_response)

            if self.gateway.logger:
                self.gateway.logger.debug("AMF Response: %r" % (res,))

            deferred_response.callback(res)

        def process_cb(result):
            # preprocessing succeeded; perform the actual service call
            d = defer.maybeDeferred(self.gateway.callServiceRequest,
                service_request, *ro_request.body, **kwargs)

            d.addCallback(response_cb).addErrback(eb)

        d = defer.maybeDeferred(self.gateway.preprocessRequest, service_request,
            *ro_request.body, **kwargs)

        d.addCallback(process_cb).addErrback(eb)

        return deferred_response

    def __call__(self, amf_request, **kwargs):
        """
        Calls the underlying service method.

        @return: A C{deferred} that will contain the AMF L{Response}.
        @rtype: C{Deferred<twisted.internet.defer.Deferred>}
        """
        deferred_response = defer.Deferred()
        ro_request = amf_request.body[0]

        def cb(amf_response):
            deferred_response.callback(amf_response)

        def eb(failure):
            # translate failures from _getBody into an AMF error response
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.gateway.logger:
                self.gateway.logger.error(errMesg)
                self.gateway.logger.info(failure.getTraceback())

            deferred_response.callback(self.buildErrorResponse(ro_request,
                (failure.type, failure.value, failure.tb)))

        d = defer.maybeDeferred(self._getBody, amf_request, ro_request, **kwargs)
        d.addCallback(cb).addErrback(eb)

        return deferred_response
class TwistedGateway(gateway.BaseGateway, resource.Resource):
    """
    Twisted Remoting gateway for C{twisted.web}.

    @ivar expose_request: Forces the underlying HTTP request to be the first
        argument to any service call.
    @type expose_request: C{bool}
    """

    # twisted.web consults this attribute to reject other HTTP verbs
    allowedMethods = ('POST',)

    def __init__(self, *args, **kwargs):
        # expose_request defaults to True for this gateway
        if 'expose_request' not in kwargs:
            kwargs['expose_request'] = True

        gateway.BaseGateway.__init__(self, *args, **kwargs)
        resource.Resource.__init__(self)
    def _finaliseRequest(self, request, status, content, mimetype='text/plain'):
        """
        Finalises the request: writes the status code, headers and body,
        then closes the HTTP connection.

        @param request: The HTTP Request.
        @type request: C{http.Request}
        @param status: The HTTP status code.
        @type status: C{int}
        @param content: The content of the response.
        @type content: C{str}
        @param mimetype: The MIME type of the request.
        @type mimetype: C{str}
        """
        request.setResponseCode(status)

        request.setHeader("Content-Type", mimetype)
        request.setHeader("Content-Length", str(len(content)))
        request.setHeader("Server", gateway.SERVER_NAME)

        request.write(content)
        request.finish()
    def render_POST(self, request):
        """
        Read remoting request from the client.

        Decoding runs in a worker thread and the HTTP response is completed
        asynchronously, hence C{NOT_DONE_YET} is returned.

        @type request: The HTTP Request.
        @param request: C{twisted.web.http.Request}
        """
        def handleDecodeError(failure):
            """
            Return HTTP 400 Bad Request.
            """
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.logger:
                self.logger.error(errMesg)
                self.logger.info(failure.getTraceback())

            body = "400 Bad Request\n\nThe request body was unable to " \
                "be successfully decoded."

            if self.debug:
                body += "\n\nTraceback:\n\n%s" % failure.getTraceback()

            self._finaliseRequest(request, 400, body)

        request.content.seek(0, 0)
        timezone_offset = self._get_timezone_offset()

        # decode in a thread so the reactor is not blocked by parsing
        d = threads.deferToThread(remoting.decode, request.content.read(),
            strict=self.strict, logger=self.logger,
            timezone_offset=timezone_offset)

        def cb(amf_request):
            if self.logger:
                self.logger.debug("AMF Request: %r" % amf_request)

            x = self.getResponse(request, amf_request)

            # when processing completes, encode and send the response
            x.addCallback(self.sendResponse, request)

        # Process the request
        d.addCallback(cb).addErrback(handleDecodeError)

        return server.NOT_DONE_YET
    def sendResponse(self, amf_response, request):
        """
        Encodes C{amf_response} in a worker thread and finalises the HTTP
        request with either the AMF payload (200) or a 500 error.
        """
        def cb(result):
            self._finaliseRequest(request, 200, result.getvalue(),
                remoting.CONTENT_TYPE)

        def eb(failure):
            """
            Return 500 Internal Server Error.
            """
            errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

            if self.logger:
                self.logger.error(errMesg)
                self.logger.info(failure.getTraceback())

            body = "500 Internal Server Error\n\nThere was an error encoding " \
                "the response."

            if self.debug:
                body += "\n\nTraceback:\n\n%s" % failure.getTraceback()

            self._finaliseRequest(request, 500, body)

        timezone_offset = self._get_timezone_offset()

        # encode in a thread so the reactor is not blocked by serialisation
        d = threads.deferToThread(remoting.encode, amf_response,
            strict=self.strict, logger=self.logger,
            timezone_offset=timezone_offset)

        d.addCallback(cb).addErrback(eb)
def getProcessor(self, request):
    """
    Selects the request processor appropriate for this AMF message.

    An AMF3 message always arrives with a target of C{'null'}; any other
    target is handled by the AMF0 processor.

    @param request: The AMF message.
    @type request: L{Request<pyamf.remoting.Request>}
    """
    if request.target != 'null':
        return AMF0RequestProcessor(self)

    return AMF3RequestProcessor(self)
def getResponse(self, http_request, amf_request):
    """
    Processes the AMF request, returning an AMF L{Response}.

    Each message in the envelope is dispatched to its processor (possibly
    asynchronously); the combined results are gathered with a
    C{DeferredList} before the response envelope is returned.

    @param http_request: The underlying HTTP Request
    @type http_request: C{twisted.web.http.Request}
    @param amf_request: The AMF Request.
    @type amf_request: L{Envelope<pyamf.remoting.Envelope>}
    """
    # The response envelope mirrors the client's AMF version/client type.
    response = remoting.Envelope(amf_request.amfVersion,
        amf_request.clientType)

    dl = []

    def cb(body, name):
        # Store each processor result under its originating message name.
        response[name] = body

    for name, message in amf_request:
        processor = self.getProcessor(message)
        # Expose the current AMF message on the HTTP request object.
        http_request.amf_request = message

        # maybeDeferred lets processors be synchronous or deferred.
        d = defer.maybeDeferred(
            processor, message, http_request=http_request)

        dl.append(d.addCallback(cb, name))

    def cb2(result):
        # All messages processed - hand back the filled envelope.
        return response

    def eb(failure):
        """
        Return 500 Internal Server Error.
        """
        errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())

        if self.logger:
            self.logger.error(errMesg)
            self.logger.info(failure.getTraceback())

        body = "500 Internal Server Error\n\nThe request was unable to " \
            "be successfully processed."

        if self.debug:
            # Only leak the traceback to the client in debug mode.
            body += "\n\nTraceback:\n\n%s" % failure.getTraceback()

        self._finaliseRequest(http_request, 500, body)

    d = defer.DeferredList(dl)

    return d.addCallback(cb2).addErrback(eb)
def authenticateRequest(self, service_request, username, password, **kwargs):
    """
    Processes an authentication request. If no authenticator is supplied,
    then authentication succeeds.

    @param service_request: The service request being authenticated.
    @param username: Credential from the AMF C{Credentials} header, or C{None}.
    @param password: Credential from the AMF C{Credentials} header, or C{None}.
    @return: C{Deferred}.
    @rtype: C{twisted.internet.defer.Deferred}
    """
    authenticator = self.getAuthenticator(service_request)

    if self.logger:
        self.logger.debug('Authenticator expands to: %r' % authenticator)

    if authenticator is None:
        # No authenticator anywhere in the chain - allow the request.
        return defer.succeed(True)

    args = (username, password)

    if hasattr(authenticator, '_pyamf_expose_request'):
        # Authenticator was decorated with expose_request: prepend the
        # underlying HTTP request to its arguments.
        http_request = kwargs.get('http_request', None)
        args = (http_request,) + args

    # The authenticator itself may be synchronous or return a Deferred.
    return defer.maybeDeferred(authenticator, *args)
def preprocessRequest(self, service_request, *args, **kwargs):
    """
    Preprocesses a request.

    @return: A C{Deferred} firing with the preprocessor's result, or
        C{None} when no preprocessor is configured.
    """
    processor = self.getPreprocessor(service_request)

    if self.logger:
        self.logger.debug('Preprocessor expands to: %r' % processor)

    if processor is None:
        # Nothing to do - implicitly returns None.
        return

    # The service request is always the first preprocessor argument.
    args = (service_request,) + args

    if hasattr(processor, '_pyamf_expose_request'):
        # Preprocessor was decorated with expose_request: prepend the
        # underlying HTTP request.
        http_request = kwargs.get('http_request', None)
        args = (http_request,) + args

    return defer.maybeDeferred(processor, *args)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Gateway for the Django framework.
This gateway allows you to expose functions in Django to AMF clients and
servers.
@see: U{Django homepage (external)<http://djangoproject.com>}
@since: 0.1.0
"""
django = __import__('django.http')
http = django.http
conf = __import__('django.conf')
conf = conf.conf
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['DjangoGateway']
class DjangoGateway(gateway.BaseGateway):
    """
    An instance of this class is suitable as a Django view.

    An example usage would be through C{urlconf}::

        from django.conf.urls.defaults import *

        urlpatterns = patterns('',
            (r'^gateway/', 'yourproject.yourapp.gateway.gw_instance'),
        )

    where C{yourproject.yourapp.gateway.gw_instance} refers to an instance of
    this class.

    @ivar expose_request: The standard Django view always has the request
        object as the first parameter. To disable this functionality, set this
        to C{False}.
    @type expose_request: C{bool}
    """

    def __init__(self, *args, **kwargs):
        # Django views receive the request object, so expose it by default.
        kwargs['expose_request'] = kwargs.get('expose_request', True)

        # Pull gateway defaults from the Django settings module when the
        # caller does not override them explicitly.
        try:
            tz = conf.settings.AMF_TIME_OFFSET
        except AttributeError:
            tz = None

        try:
            debug = conf.settings.DEBUG
        except AttributeError:
            debug = False

        kwargs['timezone_offset'] = kwargs.get('timezone_offset', tz)
        kwargs['debug'] = kwargs.get('debug', debug)

        gateway.BaseGateway.__init__(self, *args, **kwargs)

    def getResponse(self, http_request, request):
        """
        Processes the AMF request, returning an AMF response.

        @param http_request: The underlying HTTP Request.
        @type http_request: C{HTTPRequest<django.core.http.HTTPRequest>}
        @param request: The AMF Request.
        @type request: L{Envelope<pyamf.remoting.Envelope>}
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF Response.
        """
        # Response envelope mirrors the client's AMF version/client type.
        response = remoting.Envelope(request.amfVersion, request.clientType)

        for name, message in request:
            # Expose the current AMF message on the HTTP request object.
            http_request.amf_request = message
            processor = self.getProcessor(message)
            response[name] = processor(message, http_request=http_request)

        return response

    def __call__(self, http_request):
        """
        Processes and dispatches the request.

        Decode, process and encode each get their own try block so the
        client receives a specific plain-text error for each failure mode.

        @param http_request: The C{HTTPRequest} object.
        @type http_request: C{HTTPRequest}
        @return: The response to the request.
        @rtype: C{HTTPResponse}
        """
        if http_request.method != 'POST':
            # AMF remoting is POST-only.
            return http.HttpResponseNotAllowed(['POST'])

        stream = None
        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(http_request.raw_post_data,
                strict=self.strict, logger=self.logger,
                timezone_offset=timezone_offset)
        except (pyamf.DecodeError, IOError):
            fe = gateway.format_exception()

            if self.logger:
                self.logger.exception(fe)

            response = "400 Bad Request\n\nThe request body was unable to " \
                "be successfully decoded."

            if self.debug:
                # Only leak the traceback to the client in debug mode.
                response += "\n\nTraceback:\n\n%s" % fe

            return http.HttpResponseBadRequest(mimetype='text/plain', content=response)
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter shutdown signals.
            raise
        except:
            fe = gateway.format_exception()

            if self.logger:
                self.logger.exception(fe)

            response = ('500 Internal Server Error\n\n'
                'An unexpected error occurred.')

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % fe

            return http.HttpResponseServerError(mimetype='text/plain',
                content=response)

        if self.logger:
            self.logger.info("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(http_request, request)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            fe = gateway.format_exception()

            if self.logger:
                self.logger.exception(fe)

            response = "500 Internal Server Error\n\nThe request was " \
                "unable to be successfully processed."

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % fe

            return http.HttpResponseServerError(mimetype='text/plain',
                content=response)

        if self.logger:
            self.logger.info("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except:
            fe = gateway.format_exception()

            if self.logger:
                self.logger.exception(fe)

            response = ("500 Internal Server Error\n\nThe request was "
                "unable to be encoded.")

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % fe

            return http.HttpResponseServerError(mimetype='text/plain', content=response)

        buf = stream.getvalue()

        # Hand the encoded AMF payload back with the correct content type.
        http_response = http.HttpResponse(mimetype=remoting.CONTENT_TYPE)
        http_response['Server'] = gateway.SERVER_NAME
        http_response['Content-Length'] = str(len(buf))
        http_response.write(buf)

        return http_response
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting server implementations.
@since: 0.1.0
"""
import sys
import types
import datetime
import pyamf
from pyamf import remoting, util
# Determine the Python implementation label for the Server header.
# platform.python_implementation is not available on older Pythons, so
# fall back to a generic name when the import fails.
try:
    from platform import python_implementation

    impl = python_implementation()
except ImportError:
    impl = 'Python'

#: Value of the HTTP C{Server} header, e.g. C{'PyAMF/0.5 CPython/2.5.2'}.
SERVER_NAME = 'PyAMF/%s %s/%s' % (
    '.'.join(map(lambda x: str(x), pyamf.__version__)), impl,
    '.'.join(map(lambda x: str(x), sys.version_info[0:3]))
)
class BaseServiceError(pyamf.BaseError):
    """
    Base class for all service-related errors raised by the gateway.
    """
class UnknownServiceError(BaseServiceError):
    """
    Client made a request for an unknown service.
    """
    # AMF status code reported back to the client for this error.
    _amf_code = 'Service.ResourceNotFound'
class UnknownServiceMethodError(BaseServiceError):
    """
    Client made a request for an unknown method.
    """
    # AMF status code reported back to the client for this error.
    _amf_code = 'Service.MethodNotFound'
class InvalidServiceMethodError(BaseServiceError):
    """
    Client made a request for an invalid methodname.
    """
    # AMF status code reported back to the client for this error.
    _amf_code = 'Service.MethodInvalid'
class ServiceWrapper(object):
    """
    Wraps a supplied service with extra functionality.

    @ivar service: The original service.
    @type service: C{callable}
    @ivar description: A description of the service.
    @type description: C{str}
    """

    def __init__(self, service, description=None, authenticator=None,
        expose_request=None, preprocessor=None):
        self.service = service
        self.description = description
        self.authenticator = authenticator
        self.expose_request = expose_request
        self.preprocessor = preprocessor

    def __cmp__(self, other):
        # Two wrappers compare by full state; a wrapper compares equal to a
        # bare service object when it wraps that object.
        if isinstance(other, ServiceWrapper):
            return cmp(self.__dict__, other.__dict__)

        return cmp(self.service, other)

    def _get_service_func(self, method, params):
        """
        Resolves the callable to invoke for C{method}.

        If the wrapped service is a class, a fresh instance is created per
        call. When C{method} is C{None} the service object itself is the
        callable.

        @raise InvalidServiceMethodError: Calls to private methods are not
            allowed.
        @raise UnknownServiceMethodError: Unknown method.
        @raise InvalidServiceMethodError: Service method must be callable.
        """
        service = None

        if isinstance(self.service, (type, types.ClassType)):
            # Old- and new-style classes are instantiated on every request.
            service = self.service()
        else:
            service = self.service

        if method is not None:
            method = str(method)

            if method.startswith('_'):
                # Leading underscore marks a private method - never exposed.
                raise InvalidServiceMethodError(
                    "Calls to private methods are not allowed")

            try:
                func = getattr(service, method)
            except AttributeError:
                raise UnknownServiceMethodError(
                    "Unknown method %s" % str(method))

            if not callable(func):
                raise InvalidServiceMethodError(
                    "Service method %s must be callable" % str(method))

            return func

        if not callable(service):
            raise UnknownServiceMethodError(
                "Unknown method %s" % str(self.service))

        return service

    def __call__(self, method, params):
        """
        Executes the service.

        If the service is a class, it will be instantiated.

        @param method: The method to call on the service.
        @type method: C{None} or C{mixed}
        @param params: The params to pass to the service.
        @type params: C{list} or C{tuple}
        @return: The result of the execution.
        @rtype: C{mixed}
        """
        func = self._get_service_func(method, params)

        return func(*params)

    def getMethods(self):
        """
        Gets a C{dict} of valid method callables for the underlying service
        object.
        """
        callables = {}

        for name in dir(self.service):
            method = getattr(self.service, name)

            # Skip private names and non-callable attributes.
            if name.startswith('_') or not callable(method):
                continue

            callables[name] = method

        return callables

    def getAuthenticator(self, service_request=None):
        # Resolution order: method-level decorator, then service-level
        # decorator (method is None), then the wrapper's own authenticator.
        if service_request == None:
            return self.authenticator

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_authenticator'):
                return self.service._pyamf_authenticator

        if service_request.method not in methods:
            return self.authenticator

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_authenticator'):
            return method._pyamf_authenticator

        return self.authenticator

    def mustExposeRequest(self, service_request=None):
        # Same resolution order as getAuthenticator, for the expose_request
        # flag. NOTE(review): this method returns self.expose_request inside
        # the `method is None` branch, while getAuthenticator/getPreprocessor
        # fall through instead - presumably intentional, but worth confirming.
        if service_request == None:
            return self.expose_request

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_expose_request'):
                return self.service._pyamf_expose_request

            return self.expose_request

        if service_request.method not in methods:
            return self.expose_request

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_expose_request'):
            return method._pyamf_expose_request

        return self.expose_request

    def getPreprocessor(self, service_request=None):
        # Resolution order: method-level decorator, then service-level
        # decorator, then the wrapper's own preprocessor.
        if service_request == None:
            return self.preprocessor

        methods = self.getMethods()

        if service_request.method is None:
            if hasattr(self.service, '_pyamf_preprocessor'):
                return self.service._pyamf_preprocessor

        if service_request.method not in methods:
            return self.preprocessor

        method = methods[service_request.method]

        if hasattr(method, '_pyamf_preprocessor'):
            return method._pyamf_preprocessor

        return self.preprocessor
class ServiceRequest(object):
    """
    A single remoting service invocation, binding an AMF envelope to a
    wrapped service and an optional method name.

    @ivar request: The request to service.
    @type request: L{Envelope<pyamf.remoting.Envelope>}
    @ivar service: Facilitates the request.
    @type service: L{ServiceWrapper}
    @ivar method: The method to call on the service. A value of C{None}
        means that the service will be called directly.
    @type method: C{None} or C{str}
    """

    def __init__(self, amf_request, service, method):
        self.request = amf_request
        self.service = service
        self.method = method

    def __call__(self, *params):
        """
        Invokes the bound method on the underlying service with C{params}.
        """
        return self.service(self.method, params)
class ServiceCollection(dict):
    """
    I hold a collection of services, mapping names to objects.

    Membership tests accept either a service name (a string, matched
    against the keys) or a service object (matched against the values).
    """
    def __contains__(self, value):
        if isinstance(value, basestring):
            # Strings are treated as service names.
            return value in self.keys()

        # Anything else is compared against the wrapped service objects.
        return value in self.values()
class BaseGateway(object):
    """
    Generic Remoting gateway.

    @ivar services: A map of service names to callables.
    @type services: L{ServiceCollection}
    @ivar authenticator: A callable that will check the credentials of
        the request before allowing access to the service. Will return a
        C{bool} value.
    @type authenticator: C{Callable} or C{None}
    @ivar preprocessor: Called before the actual service method is invoked.
        Useful for setting up sessions etc.
    @type preprocessor: C{Callable} or C{None}
    @ivar logger: A logging instance.
    @ivar strict: Defines whether the gateway should use strict en/decoding.
    @type strict: C{bool}
    @ivar timezone_offset: A L{datetime.timedelta} between UTC and the
        timezone to be encoded. Most dates should be handled as UTC to avoid
        confusion but for older legacy systems this is not an option. Supplying
        an int as this will be interpretted in seconds.
    @ivar debug: Provides debugging information when an error occurs. Use only
        in non production settings.
    @type debug: C{bool}
    """
    # Factory used by getServiceRequest; subclasses may override.
    _request_class = ServiceRequest

    def __init__(self, services={}, **kwargs):
        # NOTE: the mutable default is safe here - `services` is only read.
        if not hasattr(services, 'iteritems'):
            raise TypeError("dict type required for services")

        self.services = ServiceCollection()
        self.authenticator = kwargs.pop('authenticator', None)
        self.preprocessor = kwargs.pop('preprocessor', None)
        self.expose_request = kwargs.pop('expose_request', False)
        self.strict = kwargs.pop('strict', False)
        self.logger = kwargs.pop('logger', None)
        self.timezone_offset = kwargs.pop('timezone_offset', None)
        self.debug = kwargs.pop('debug', False)

        # Anything left in kwargs was not a recognised option.
        if kwargs:
            raise TypeError('Unknown kwargs: %r' % (kwargs,))

        for name, service in services.iteritems():
            self.addService(service, name)

    def addService(self, service, name=None, description=None,
        authenticator=None, expose_request=None, preprocessor=None):
        """
        Adds a service to the gateway.

        @param service: The service to add to the gateway.
        @type service: C{callable}, class instance, or a module
        @param name: The name of the service.
        @type name: C{str}
        @raise pyamf.remoting.RemotingError: Service already exists.
        @raise TypeError: C{service} cannot be a scalar value.
        @raise TypeError: C{service} must be C{callable} or a module.
        """
        if isinstance(service, (int, long, float, basestring)):
            raise TypeError("Service cannot be a scalar value")

        allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
            types.MethodType, types.InstanceType, types.ObjectType)

        if not callable(service) and not isinstance(service, allowed_types):
            raise TypeError("Service must be a callable, module, or an object")

        if name is None:
            # Derive a default name from the service object itself.
            # TODO: include the module in the name
            if isinstance(service, (type, types.ClassType)):
                name = service.__name__
            elif isinstance(service, types.FunctionType):
                name = service.func_name
            elif isinstance(service, types.ModuleType):
                name = service.__name__
            else:
                name = str(service)

        if name in self.services:
            raise remoting.RemotingError("Service %s already exists" % name)

        self.services[name] = ServiceWrapper(service, description,
            authenticator, expose_request, preprocessor)

    def _get_timezone_offset(self):
        # Normalises the configured offset to a timedelta (ints are seconds).
        if self.timezone_offset is None:
            return None

        if isinstance(self.timezone_offset, datetime.timedelta):
            return self.timezone_offset

        return datetime.timedelta(seconds=self.timezone_offset)

    def removeService(self, service):
        """
        Removes a service from the gateway. C{service} may be a service
        name, a L{ServiceWrapper} or the original service object.

        @param service: The service to remove from the gateway.
        @type service: C{callable} or a class instance
        @raise NameError: Service not found.
        """
        if service not in self.services:
            raise NameError("Service %s not found" % str(service))

        # Each branch returns immediately after the delete, so mutating the
        # dict while iterating is safe here.
        for name, wrapper in self.services.iteritems():
            if isinstance(service, basestring) and service == name:
                del self.services[name]

                return
            elif isinstance(service, ServiceWrapper) and wrapper == service:
                del self.services[name]

                return
            elif isinstance(service, (type, types.ClassType,
                types.FunctionType)) and wrapper.service == service:
                del self.services[name]

                return

        # shouldn't ever get here
        raise RuntimeError("Something went wrong ...")

    def getServiceRequest(self, request, target):
        """
        Returns a service based on the message.

        First tries C{target} as a plain service name; failing that,
        splits it on the last dot into service name plus method name.

        @raise UnknownServiceError: Unknown service.
        @param request: The AMF request.
        @type request: L{Request<pyamf.remoting.Request>}
        @rtype: L{ServiceRequest}
        """
        try:
            return self._request_class(
                request.envelope, self.services[target], None)
        except KeyError:
            pass

        try:
            sp = target.split('.')
            name, meth = '.'.join(sp[:-1]), sp[-1]

            return self._request_class(
                request.envelope, self.services[name], meth)
        except (ValueError, KeyError):
            pass

        raise UnknownServiceError("Unknown service %s" % target)

    def getProcessor(self, request):
        """
        Returns request processor. An AMF3 message always has a target of
        C{'null'}; anything else is AMF0.

        @param request: The AMF message.
        @type request: L{Request<remoting.Request>}
        """
        if request.target == 'null':
            # Imported lazily to avoid a circular import at module load.
            from pyamf.remoting import amf3

            return amf3.RequestProcessor(self)
        else:
            from pyamf.remoting import amf0

            return amf0.RequestProcessor(self)

    def getResponse(self, amf_request):
        """
        Returns the response to the request.

        Any implementing gateway must define this function.

        @param amf_request: The AMF request.
        @type amf_request: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF response.
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        """
        raise NotImplementedError

    def mustExposeRequest(self, service_request):
        """
        Decides whether the underlying http request should be exposed as the
        first argument to the method call. This is granular, looking at the
        service method first, then at the service level and finally checking
        the gateway.

        @rtype: C{bool}
        """
        expose_request = service_request.service.mustExposeRequest(service_request)

        if expose_request is None:
            # Neither method nor service decided - fall back to the gateway.
            if self.expose_request is None:
                return False

            return self.expose_request

        return expose_request

    def getAuthenticator(self, service_request):
        """
        Gets an authenticator callable based on the service_request. This is
        granular, looking at the service method first, then at the service
        level and finally to see if there is a global authenticator function
        for the gateway. Returns C{None} if one could not be found.
        """
        auth = service_request.service.getAuthenticator(service_request)

        if auth is None:
            return self.authenticator

        return auth

    def authenticateRequest(self, service_request, username, password, **kwargs):
        """
        Processes an authentication request. If no authenticator is supplied,
        then authentication succeeds.

        @return: Returns a C{bool} based on the result of authorization. A
            value of C{False} will stop processing the request and return an
            error to the client.
        @rtype: C{bool}
        """
        authenticator = self.getAuthenticator(service_request)

        if authenticator is None:
            return True

        args = (username, password)

        if hasattr(authenticator, '_pyamf_expose_request'):
            # Decorated with expose_request: prepend the HTTP request.
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        # Coerce the authenticator's result to a strict boolean.
        return authenticator(*args) == True

    def getPreprocessor(self, service_request):
        """
        Gets a preprocessor callable based on the service_request. This is
        granular, looking at the service method first, then at the service
        level and finally to see if there is a global preprocessor function
        for the gateway. Returns C{None} if one could not be found.
        """
        preproc = service_request.service.getPreprocessor(service_request)

        if preproc is None:
            return self.preprocessor

        return preproc

    def preprocessRequest(self, service_request, *args, **kwargs):
        """
        Preprocesses a request. Returns the preprocessor's result, or
        C{None} when no preprocessor is configured.
        """
        processor = self.getPreprocessor(service_request)

        if processor is None:
            return

        # The service request is always the first preprocessor argument.
        args = (service_request,) + args

        if hasattr(processor, '_pyamf_expose_request'):
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        return processor(*args)

    def callServiceRequest(self, service_request, *args, **kwargs):
        """
        Executes the service_request call, prepending the HTTP request when
        the expose_request policy requires it.
        """
        if self.mustExposeRequest(service_request):
            http_request = kwargs.get('http_request', None)
            args = (http_request,) + args

        return service_request(*args)
def authenticate(func, c, expose_request=False):
    """
    A decorator that facilitates authentication per method. Setting
    C{expose_request} to C{True} will set the underlying request object (if
    there is one), usually HTTP and set it to the first argument of the
    authenticating callable. If there is no request object, the default is
    C{None}.

    @param func: The service method to guard.
    @param c: The authenticator callable.
    @raise TypeError: C{func} and authenticator must be callable.
    @return: C{func}, unchanged apart from the attached authenticator.
    """
    if not callable(func):
        raise TypeError('func must be callable')

    if not callable(c):
        raise TypeError('Authenticator must be callable')

    attr = func

    if isinstance(func, types.UnboundMethodType):
        # Attributes cannot be set on unbound methods - tag the underlying
        # function object instead.
        attr = func.im_func

    if expose_request is True:
        # Looked up via globals() because the local parameter shadows the
        # module-level expose_request decorator.
        c = globals()['expose_request'](c)

    setattr(attr, '_pyamf_authenticator', c)

    return func
def expose_request(func):
    """
    A decorator that adds an expose_request flag to the underlying callable.

    Gateways check this flag to decide whether to prepend the HTTP request
    object to the call's arguments.

    @raise TypeError: C{func} must be callable.
    @return: C{func}, unchanged apart from the flag.
    """
    if not callable(func):
        raise TypeError("func must be callable")

    if isinstance(func, types.UnboundMethodType):
        # Attributes cannot be set on unbound methods - tag the underlying
        # function object instead.
        setattr(func.im_func, '_pyamf_expose_request', True)
    else:
        setattr(func, '_pyamf_expose_request', True)

    return func
def preprocess(func, c, expose_request=False):
    """
    A decorator that facilitates preprocessing per method. Setting
    C{expose_request} to C{True} will set the underlying request object (if
    there is one), usually HTTP and set it to the first argument of the
    preprocessing callable. If there is no request object, the default is
    C{None}.

    @param func: The service method to decorate.
    @param c: The preprocessor callable.
    @raise TypeError: C{func} and preprocessor must be callable.
    @return: C{func}, unchanged apart from the attached preprocessor.
    """
    if not callable(func):
        raise TypeError('func must be callable')

    if not callable(c):
        raise TypeError('Preprocessor must be callable')

    attr = func

    if isinstance(func, types.UnboundMethodType):
        # Attributes cannot be set on unbound methods - tag the underlying
        # function object instead.
        attr = func.im_func

    if expose_request is True:
        # Looked up via globals() because the local parameter shadows the
        # module-level expose_request decorator.
        c = globals()['expose_request'](c)

    setattr(attr, '_pyamf_preprocessor', c)

    return func
def format_exception():
    """
    Returns the currently handled exception's traceback as a string.

    Must be called from within an C{except} block; with no active
    exception the result is the placeholder C{traceback} emits.

    @rtype: C{str}
    """
    import traceback

    # format_exc() produces exactly what print_exc() would write, without
    # needing an intermediate StringIO buffer.
    return traceback.format_exc()
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
WSGI server implementation.
The Python Web Server Gateway Interface (WSGI) is a simple and universal
interface between web servers and web applications or frameworks.
The WSGI interface has two sides: the "server" or "gateway" side, and the
"application" or "framework" side. The server side invokes a callable
object (usually a function or a method) that is provided by the application
side. Additionally WSGI provides middlewares; a WSGI middleware implements
both sides of the API, so that it can be inserted "between" a WSGI server
and a WSGI application -- the middleware will act as an application from
the server's point of view, and as a server from the application's point
of view.
@see: U{WSGI homepage (external)<http://wsgi.org>}
@see: U{PEP-333 (external)<http://www.python.org/peps/pep-0333.html>}
@since: 0.1.0
"""
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['WSGIGateway']
class WSGIGateway(gateway.BaseGateway):
    """
    WSGI Remoting Gateway.

    The instance itself is the WSGI application callable.
    """

    def getResponse(self, request, environ):
        """
        Processes the AMF request, returning an AMF response.

        @param request: The AMF Request.
        @type request: L{Envelope<pyamf.remoting.Envelope>}
        @param environ: The WSGI environment dict for this request.
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        @return: The AMF Response.
        """
        # Response envelope mirrors the client's AMF version/client type.
        response = remoting.Envelope(request.amfVersion, request.clientType)

        for name, message in request:
            processor = self.getProcessor(message)
            # Expose the current AMF message through the WSGI environ.
            environ['pyamf.request'] = message
            response[name] = processor(message, http_request=environ)

        return response

    def badRequestMethod(self, environ, start_response):
        """
        Return HTTP 400 Bad Request for non-POST requests.
        """
        response = "400 Bad Request\n\nTo access this PyAMF gateway you " \
            "must use POST requests (%s received)" % environ['REQUEST_METHOD']

        start_response('400 Bad Request', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response))),
            ('Server', gateway.SERVER_NAME),
        ])

        return [response]

    def __call__(self, environ, start_response):
        """
        WSGI entry point. Decode, process and encode each get their own
        try block so the client receives a specific plain-text error for
        each failure mode.

        @rtype: C{StringIO}
        @return: File-like object.
        """
        if environ['REQUEST_METHOD'] != 'POST':
            # AMF remoting is POST-only.
            return self.badRequestMethod(environ, start_response)

        body = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
        stream = None

        timezone_offset = self._get_timezone_offset()

        # Decode the request
        try:
            request = remoting.decode(body, strict=self.strict,
                logger=self.logger, timezone_offset=timezone_offset)
        except (pyamf.DecodeError, IOError):
            if self.logger:
                self.logger.exception(gateway.format_exception())

            response = "400 Bad Request\n\nThe request body was unable to " \
                "be successfully decoded."

            if self.debug:
                # Only leak the traceback to the client in debug mode.
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            start_response('400 Bad Request', [
                ('Content-Type', 'text/plain'),
                ('Content-Length', str(len(response))),
                ('Server', gateway.SERVER_NAME),
            ])

            return [response]
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter shutdown signals.
            raise
        except:
            if self.logger:
                self.logger.exception(gateway.format_exception())

            response = "500 Internal Server Error\n\nAn unexpected error " \
                "occurred whilst decoding."

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            start_response('500 Internal Server Error', [
                ('Content-Type', 'text/plain'),
                ('Content-Length', str(len(response))),
                ('Server', gateway.SERVER_NAME),
            ])

            return [response]

        if self.logger:
            self.logger.info("AMF Request: %r" % request)

        # Process the request
        try:
            response = self.getResponse(request, environ)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            if self.logger:
                self.logger.exception(gateway.format_exception())

            response = "500 Internal Server Error\n\nThe request was " \
                "unable to be successfully processed."

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            start_response('500 Internal Server Error', [
                ('Content-Type', 'text/plain'),
                ('Content-Length', str(len(response))),
                ('Server', gateway.SERVER_NAME),
            ])

            return [response]

        if self.logger:
            self.logger.info("AMF Response: %r" % response)

        # Encode the response
        try:
            stream = remoting.encode(response, strict=self.strict,
                timezone_offset=timezone_offset)
        except:
            if self.logger:
                self.logger.exception(gateway.format_exception())

            response = "500 Internal Server Error\n\nThe request was " \
                "unable to be encoded."

            if self.debug:
                response += "\n\nTraceback:\n\n%s" % gateway.format_exception()

            start_response('500 Internal Server Error', [
                ('Content-Type', 'text/plain'),
                ('Content-Length', str(len(response))),
                ('Server', gateway.SERVER_NAME),
            ])

            return [response]

        response = stream.getvalue()

        start_response('200 OK', [
            ('Content-Type', remoting.CONTENT_TYPE),
            ('Content-Length', str(len(response))),
            ('Server', gateway.SERVER_NAME),
        ])

        return [response]
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF0 Remoting support.
@since: 0.1.0
"""
import traceback
import sys
from pyamf import remoting
from pyamf.remoting import gateway
class RequestProcessor(object):
    """
    Processes a single AMF0 request message on behalf of a gateway,
    handling authentication, preprocessing and error-to-fault conversion.
    """

    def __init__(self, gateway):
        # The owning gateway, used for service lookup and policy decisions.
        self.gateway = gateway

    def authenticateRequest(self, request, service_request, *args, **kwargs):
        """
        Authenticates the request against the service.

        Credentials, when present, come from the AMF C{Credentials} header.

        @param request: The AMF request
        @type request: L{Request<pyamf.remoting.Request>}
        """
        username = password = None

        if 'Credentials' in request.headers:
            cred = request.headers['Credentials']

            username = cred['userid']
            password = cred['password']

        return self.gateway.authenticateRequest(service_request, username,
            password, *args, **kwargs)

    def buildErrorResponse(self, request, error=None):
        """
        Builds an error response.

        @param request: The AMF request
        @type request: L{Request<pyamf.remoting.Request>}
        @param error: An optional C{(cls, exc, tb)} triple; defaults to the
            currently handled exception.
        @return: The AMF response
        @rtype: L{Response<pyamf.remoting.Response>}
        """
        if error is not None:
            cls, e, tb = error
        else:
            cls, e, tb = sys.exc_info()

        # The traceback is only included in the fault when debug is on.
        return remoting.Response(build_fault(cls, e, tb, self.gateway.debug),
            status=remoting.STATUS_ERROR)

    def _getBody(self, request, response, service_request, **kwargs):
        # A DescribeService header short-circuits to the service description
        # instead of invoking the service.
        if 'DescribeService' in request.headers:
            return service_request.service.description

        return self.gateway.callServiceRequest(service_request, *request.body,
            **kwargs)

    def __call__(self, request, *args, **kwargs):
        """
        Processes an AMF0 request.

        Any failure in lookup, authentication, preprocessing or execution
        is converted into an error response rather than propagated (except
        SystemExit/KeyboardInterrupt, which are always re-raised).

        @param request: The request to be processed.
        @type request: L{Request<pyamf.remoting.Request>}
        @return: The response to the request.
        @rtype: L{Response<pyamf.remoting.Response>}
        """
        response = remoting.Response(None)

        try:
            service_request = self.gateway.getServiceRequest(request,
                request.target)
        except gateway.UnknownServiceError:
            return self.buildErrorResponse(request)

        # we have a valid service, now attempt authentication
        try:
            authd = self.authenticateRequest(request, service_request, *args,
                **kwargs)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            return self.buildErrorResponse(request)

        if not authd:
            # authentication failed
            response.status = remoting.STATUS_ERROR
            response.body = remoting.ErrorFault(code='AuthenticationError',
                description='Authentication failed')

            return response

        # authentication succeeded, now fire the preprocessor (if there is one)
        try:
            self.gateway.preprocessRequest(service_request, *args, **kwargs)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            return self.buildErrorResponse(request)

        try:
            # NOTE(review): _getBody accepts no positional *args - if args is
            # ever non-empty this raises TypeError (caught below and turned
            # into a fault). Presumably args is always empty here - confirm.
            response.body = self._getBody(request, response, service_request,
                *args, **kwargs)

            return response
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            return self.buildErrorResponse(request)
def build_fault(cls, e, tb, include_traceback=False):
    """
    Builds a L{ErrorFault<pyamf.remoting.ErrorFault>} object based on the last
    exception raised.

    If include_traceback is C{False} then the traceback will not be added to
    the L{remoting.ErrorFault}.

    @param cls: The exception class.
    @param e: The exception instance.
    @param tb: The traceback object.
    @param include_traceback: Attach the formatted traceback to the fault's
        C{details}. Enable only for debugging - it leaks server internals.
    """
    if hasattr(cls, '_amf_code'):
        # Service errors carry an explicit AMF status code.
        code = cls._amf_code
    else:
        code = cls.__name__

    details = None

    if include_traceback:
        # format_exception returns a list of lines; join them rather than
        # str()-ing the list, which would produce a Python list repr with
        # escaped newlines and mangle the traceback.
        details = ''.join(traceback.format_exception(cls, e, tb))

    return remoting.ErrorFault(code=code, description=str(e), details=details)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting client implementation.
@since: 0.1.0
"""
import httplib
import urlparse
import pyamf
from pyamf import remoting
#: Default AMF client type used for new connections.
#: @see: L{ClientTypes<pyamf.ClientTypes>}
DEFAULT_CLIENT_TYPE = pyamf.ClientTypes.Flash6

#: Default user agent is C{PyAMF/x.x.x} (built from the package version).
DEFAULT_USER_AGENT = 'PyAMF/%s' % '.'.join(map(lambda x: str(x),
    pyamf.__version__))

#: HTTP status code indicating a successful remoting round trip.
HTTP_OK = 200
def convert_args(args):
    """
    Normalises a service-call argument tuple to a list.

    A call made with no arguments arrives as C{((),)}; that sentinel maps
    to an empty argument list.
    """
    if args == (tuple(),):
        return []

    return list(args)
class ServiceMethodProxy(object):
    """
    Stands in for a single remote service method; calling it hands the
    invocation off to the owning service proxy.

    @ivar service: The parent service.
    @type service: L{ServiceProxy}
    @ivar name: The name of the method.
    @type name: C{str} or C{None}

    @see: L{ServiceProxy.__getattr__}
    """

    def __init__(self, service, name):
        self.service = service
        self.name = name

    def __call__(self, *args):
        """
        Informs the proxied service that this method has been invoked.
        """
        return self.service._call(self, *args)

    def __str__(self):
        """
        The dotted service name, with the method part appended when one is
        set.
        """
        if self.name is None:
            return str(self.service)

        return '%s.%s' % (str(self.service), self.name)
class ServiceProxy(object):
    """
    Serves as a service object proxy for RPC calls. Generates
    L{ServiceMethodProxy} objects for method calls.

    @see: L{RequestWrapper} for more info.

    @ivar _gw: The parent gateway
    @type _gw: L{RemotingService}
    @ivar _name: The name of the service
    @type _name: C{str}
    @ivar _auto_execute: If set to C{True}, when a service method is called,
        the AMF request is immediately sent to the remote gateway and a
        response is returned. If set to C{False}, a L{RequestWrapper} is
        returned, waiting for the underlying gateway to fire the
        L{execute<RemotingService.execute>} method.
    """

    def __init__(self, gw, name, auto_execute=True):
        self._gw = gw
        self._name = name
        self._auto_execute = auto_execute

    def __getattr__(self, name):
        # Any unknown attribute is treated as a remote method name.
        return ServiceMethodProxy(self, name)

    def _call(self, method_proxy, *args):
        """
        Executed when a L{ServiceMethodProxy} is called. Queues the request
        on the underlying gateway and, when C{_auto_execute} is set, fires
        it immediately and returns the response body.
        """
        request = self._gw.addRequest(method_proxy, *args)

        if not self._auto_execute:
            return request

        response = self._gw.execute_single(request)

        # XXX nick: What to do about Fault objects here?
        return response.body

    def __call__(self, *args):
        """
        Allows the service to be 'called' without a method name.
        """
        return self._call(ServiceMethodProxy(self, None), *args)

    def __str__(self):
        """
        The name of the proxied service.
        """
        return self._name
class RequestWrapper(object):
    """
    A container object that wraps a service method request.

    @ivar gw: The underlying gateway.
    @type gw: L{RemotingService}
    @ivar id: The id of the request.
    @type id: C{str}
    @ivar service: The service proxy.
    @type service: L{ServiceProxy}
    @ivar args: The args used to invoke the call.
    @type args: C{list}
    """

    def __init__(self, gw, id_, service, *args):
        self.gw = gw
        self.id = id_
        self.service = service
        self.args = args

    def __str__(self):
        return str(self.id)

    def setResponse(self, response):
        """
        Called by the gateway once a response for this request has been
        received. An error fault is converted straight into an exception.
        """
        # XXX nick: What to do about Fault objects here?
        self.response = response
        self.result = response.body

        if isinstance(self.result, remoting.ErrorFault):
            self.result.raiseException()

    def _get_result(self):
        """
        The result of the remote call; raises C{AttributeError} until a
        response has actually been set.
        """
        if hasattr(self, '_result'):
            return self._result

        raise AttributeError("'RequestWrapper' object has no attribute 'result'")

    def _set_result(self, result):
        self._result = result

    result = property(_get_result, _set_result)
class RemotingService(object):
    """
    Acts as a client for AMF calls.
    @ivar url: The url of the remote gateway. Accepts C{http} or C{https}
        as valid schemes.
    @type url: C{str}
    @ivar requests: The list of pending requests to process.
    @type requests: C{list}
    @ivar request_number: A unique identifier for tracking the number of
        requests.
    @ivar amf_version: The AMF version to use.
        See L{ENCODING_TYPES<pyamf.ENCODING_TYPES>}.
    @type amf_version: C{int}
    @ivar referer: The referer, or HTTP referer, identifies the address of the
        client. Ignored by default.
    @type referer: C{str}
    @ivar client_type: The client type. See L{ClientTypes<pyamf.ClientTypes>}.
    @type client_type: C{int}
    @ivar user_agent: Contains information about the user agent (client)
        originating the request. See L{DEFAULT_USER_AGENT}.
    @type user_agent: C{str}
    @ivar connection: The underlying connection to the remoting server.
    @type connection: C{httplib.HTTPConnection} or C{httplib.HTTPSConnection}
    @ivar headers: A list of persistent headers to send with each request.
    @type headers: L{HeaderCollection<pyamf.remoting.HeaderCollection>}
    @ivar http_headers: A dict of HTTP headers to apply to the underlying
        HTTP connection.
    @type http_headers: L{dict}
    @ivar strict: Whether to use strict AMF en/decoding or not.
    @type strict: C{bool}
    """

    def __init__(self, url, amf_version=pyamf.AMF0, client_type=DEFAULT_CLIENT_TYPE,
                 referer=None, user_agent=DEFAULT_USER_AGENT, strict=False,
                 logger=None):
        # logger may be None; every logging call below is guarded on it.
        self.logger = logger
        # The original url is kept so gateway redirect headers
        # (Append/ReplaceGatewayUrl) can be applied relative to it.
        self.original_url = url
        self.requests = []
        self.request_number = 1
        self.user_agent = user_agent
        self.referer = referer
        self.amf_version = amf_version
        self.client_type = client_type
        self.headers = remoting.HeaderCollection()
        self.http_headers = {}
        self.strict = strict

        self._setUrl(url)

    def _setUrl(self, url):
        """
        Parses the gateway url and (re)creates the HTTP(S) connection.
        @param url: Gateway URL.
        @type url: C{str}
        @raise ValueError: Unknown scheme.
        """
        self.url = urlparse.urlparse(url)
        # Path onwards only (no scheme/netloc) - used as the POST target.
        self._root_url = urlparse.urlunparse(['', ''] + list(self.url[2:]))

        port = None
        hostname = None

        # Newer Pythons expose port/hostname attributes on the parse result;
        # otherwise split the netloc component by hand.
        if hasattr(self.url, 'port'):
            if self.url.port is not None:
                port = self.url.port
        else:
            if ':' not in self.url[1]:
                hostname = self.url[1]
                port = None
            else:
                sp = self.url[1].split(':')

                hostname, port = sp[0], sp[1]
                port = int(port)

        if hostname is None:
            if hasattr(self.url, 'hostname'):
                hostname = self.url.hostname

        # Fall back to the scheme's well-known port when none was given.
        if self.url[0] == 'http':
            if port is None:
                port = httplib.HTTP_PORT

            self.connection = httplib.HTTPConnection(hostname, port)
        elif self.url[0] == 'https':
            if port is None:
                port = httplib.HTTPS_PORT

            self.connection = httplib.HTTPSConnection(hostname, port)
        else:
            raise ValueError('Unknown scheme')

        location = '%s://%s:%s%s' % (self.url[0], hostname, port, self.url[2])

        if self.logger:
            self.logger.info('Connecting to %s' % location)
            self.logger.debug('Referer: %s' % self.referer)
            self.logger.debug('User-Agent: %s' % self.user_agent)

    def addHeader(self, name, value, must_understand=False):
        """
        Sets a persistent header to send with each request.
        @param name: Header name.
        @type name: C{str}
        @param must_understand: Default is C{False}.
        @type must_understand: C{bool}
        """
        self.headers[name] = value
        self.headers.set_required(name, must_understand)

    def addHTTPHeader(self, name, value):
        """
        Adds a header to the underlying HTTP connection.
        """
        self.http_headers[name] = value

    def removeHTTPHeader(self, name):
        """
        Deletes an HTTP header.
        @raise KeyError: The header was never set.
        """
        del self.http_headers[name]

    def getService(self, name, auto_execute=True):
        """
        Returns a L{ServiceProxy} for the supplied name. Sets up an object that
        can have method calls made to it that build the AMF requests.
        @param auto_execute: Default is C{True}.
        @type auto_execute: C{bool}
        @raise TypeError: C{string} type required for C{name}.
        @rtype: L{ServiceProxy}
        """
        if not isinstance(name, basestring):
            raise TypeError('string type required')

        return ServiceProxy(self, name, auto_execute)

    def getRequest(self, id_):
        """
        Gets a request based on the id.
        @raise LookupError: Request not found.
        """
        for request in self.requests:
            if request.id == id_:
                return request

        raise LookupError("Request %s not found" % id_)

    def addRequest(self, service, *args):
        """
        Adds a request to be sent to the remoting gateway.
        @return: The L{RequestWrapper} tracking the pending request.
        """
        # Request ids are '/1', '/2', ... - the counter is never reused.
        wrapper = RequestWrapper(self, '/%d' % self.request_number,
            service, *args)

        self.request_number += 1
        self.requests.append(wrapper)

        if self.logger:
            self.logger.debug('Adding request %s%r' % (wrapper.service, args))

        return wrapper

    def removeRequest(self, service, *args):
        """
        Removes a request from the pending request list. Accepts either the
        L{RequestWrapper} itself or the original (service, args) pair.
        @raise LookupError: Request not found.
        """
        if isinstance(service, RequestWrapper):
            if self.logger:
                self.logger.debug('Removing request: %s' % (
                    self.requests[self.requests.index(service)]))
            del self.requests[self.requests.index(service)]

            return

        for request in self.requests:
            if request.service == service and request.args == args:
                if self.logger:
                    self.logger.debug('Removing request: %s' % (
                        self.requests[self.requests.index(request)]))
                del self.requests[self.requests.index(request)]

                return

        raise LookupError("Request not found")

    def getAMFRequest(self, requests):
        """
        Builds an AMF request L{Envelope<pyamf.remoting.Envelope>} from a
        supplied list of requests.
        @param requests: List of requests
        @type requests: C{list}
        @rtype: L{Envelope<pyamf.remoting.Envelope>}
        """
        envelope = remoting.Envelope(self.amf_version, self.client_type)

        if self.logger:
            self.logger.debug('AMF version: %s' % self.amf_version)
            self.logger.debug('Client type: %s' % self.client_type)

        for request in requests:
            service = request.service
            args = list(request.args)

            envelope[request.id] = remoting.Request(str(service), args)

        # Persistent headers are shared by every request in the envelope.
        envelope.headers = self.headers

        return envelope

    def _get_execute_headers(self):
        # Per-call HTTP headers; Content-Type/User-Agent always win over
        # any user-supplied values of the same name.
        headers = self.http_headers.copy()

        headers.update({
            'Content-Type': remoting.CONTENT_TYPE,
            'User-Agent': self.user_agent
        })

        if self.referer is not None:
            headers['Referer'] = self.referer

        return headers

    def execute_single(self, request):
        """
        Builds, sends and handles the response to a single request, returning
        the response.
        @param request: The request to dispatch.
        @type request: L{RequestWrapper}
        @return: The L{Response<remoting.Response>} for this request.
        """
        if self.logger:
            self.logger.debug('Executing single request: %s' % request)

        body = remoting.encode(self.getAMFRequest([request]), strict=self.strict)

        if self.logger:
            self.logger.debug('Sending POST request to %s' % self._root_url)

        self.connection.request('POST', self._root_url,
            body.getvalue(),
            self._get_execute_headers()
        )

        envelope = self._getResponse()
        # The request is no longer pending once a response envelope arrives.
        self.removeRequest(request)

        return envelope[request.id]

    def execute(self):
        """
        Builds, sends and handles the responses to all requests listed in
        C{self.requests}.
        """
        body = remoting.encode(self.getAMFRequest(self.requests), strict=self.strict)

        if self.logger:
            self.logger.debug('Sending POST request to %s' % self._root_url)

        self.connection.request('POST', self._root_url,
            body.getvalue(),
            self._get_execute_headers()
        )

        envelope = self._getResponse()

        # Match each response body back to its pending request by id.
        for response in envelope:
            request = self.getRequest(response[0])
            response = response[1]

            request.setResponse(response)

            self.removeRequest(request)

    def _getResponse(self):
        """
        Gets and handles the HTTP response from the remote gateway.
        @raise RemotingError: HTTP Gateway reported error status.
        @raise RemotingError: Incorrect MIME type received.
        @return: The decoded response L{Envelope<remoting.Envelope>}.
        """
        if self.logger:
            self.logger.debug('Waiting for response...')

        http_response = self.connection.getresponse()

        if self.logger:
            self.logger.debug('Got response status: %s' % http_response.status)
            self.logger.debug('Content-Type: %s' % http_response.getheader('Content-Type'))

        if http_response.status != HTTP_OK:
            if self.logger:
                self.logger.debug('Body: %s' % http_response.read())

            # httplib.responses (status -> reason phrase) only exists on
            # Python 2.5+.
            if hasattr(httplib, 'responses'):
                raise remoting.RemotingError("HTTP Gateway reported status %d %s" % (
                    http_response.status, httplib.responses[http_response.status]))

            raise remoting.RemotingError("HTTP Gateway reported status %d" % (
                http_response.status,))

        content_type = http_response.getheader('Content-Type')

        if content_type != remoting.CONTENT_TYPE:
            if self.logger:
                self.logger.debug('Body = %s' % http_response.read())

            raise remoting.RemotingError("Incorrect MIME type received. (got: %s)" % content_type)

        content_length = http_response.getheader('Content-Length')
        bytes = ''

        if self.logger:
            self.logger.debug('Content-Length: %s' % content_length)
            self.logger.debug('Server: %s' % http_response.getheader('Server'))

        # Without a Content-Length header, read until the connection closes.
        if content_length in (None, ''):
            bytes = http_response.read()
        else:
            bytes = http_response.read(int(content_length))

        if self.logger:
            self.logger.debug('Read %d bytes for the response' % len(bytes))

        response = remoting.decode(bytes, strict=self.strict)

        if self.logger:
            self.logger.debug('Response: %s' % response)

        # Honour gateway redirection headers by re-pointing the connection.
        if remoting.APPEND_TO_GATEWAY_URL in response.headers:
            self.original_url += response.headers[remoting.APPEND_TO_GATEWAY_URL]

            self._setUrl(self.original_url)
        elif remoting.REPLACE_GATEWAY_URL in response.headers:
            self.original_url = response.headers[remoting.REPLACE_GATEWAY_URL]

            self._setUrl(self.original_url)

        # Server-requested persistent headers are echoed on future requests.
        if remoting.REQUEST_PERSISTENT_HEADER in response.headers:
            data = response.headers[remoting.REQUEST_PERSISTENT_HEADER]

            for k, v in data.iteritems():
                self.headers[k] = v

        http_response.close()

        return response

    def setCredentials(self, username, password):
        """
        Sets authentication credentials for accessing the remote gateway.
        """
        self.addHeader('Credentials', dict(userid=unicode(username),
            password=unicode(password)), True)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Remoting support.
A Remoting request from the client consists of a short preamble, headers, and
bodies. The preamble contains basic information about the nature of the
request. Headers can be used to request debugging information, send
authentication info, tag transactions, etc. Bodies contain actual Remoting
requests and responses. A single Remoting envelope can contain several
requests; Remoting supports batching out of the box.
Client headers and bodies need not be responded to in a one-to-one manner.
That is, a body or header may not require a response. Debug information is
requested by a header but sent back as a body object. The response index is
essential for the Adobe Flash Player to understand the response therefore.
@see: U{Remoting Envelope on OSFlash (external)
<http://osflash.org/documentation/amf/envelopes/remoting>}
@see: U{Remoting Headers on OSFlash (external)
<http://osflash.org/amf/envelopes/remoting/headers>}
@see: U{Remoting Debug Headers on OSFlash (external)
<http://osflash.org/documentation/amf/envelopes/remoting/debuginfo>}
@since: 0.1.0
"""
import pyamf
from pyamf import util
__all__ = ['Envelope', 'Request', 'Response', 'decode', 'encode']
#: Successful call.
STATUS_OK = 0
#: Reserved for runtime errors.
STATUS_ERROR = 1
#: Debug information.
STATUS_DEBUG = 2
#: List of available status response codes, mapping each status to the
#: target-name suffix used on the wire.
STATUS_CODES = {
    STATUS_OK: '/onResult',
    STATUS_ERROR: '/onStatus',
    STATUS_DEBUG: '/onDebugEvents'
}
#: AMF mimetype.
CONTENT_TYPE = 'application/x-amf'
# Tuple-unpacked from range() so new error codes can be appended easily.
ERROR_CALL_FAILED, = range(1)
#: Maps internal error codes to the fault codes sent over the wire.
ERROR_CODES = {
    ERROR_CALL_FAILED: 'Server.Call.Failed'
}
#: Standard AMF response header names understood by the client.
APPEND_TO_GATEWAY_URL = 'AppendToGatewayUrl'
REPLACE_GATEWAY_URL = 'ReplaceGatewayUrl'
REQUEST_PERSISTENT_HEADER = 'RequestPersistentHeader'
class RemotingError(pyamf.BaseError):
    """
    Generic remoting error class. Base for all errors raised while talking
    to a remoting gateway.
    """
class RemotingCallFailed(RemotingError):
    """
    Raised if C{Server.Call.Failed} received.
    """

# Register the class so decoded faults with this code raise it directly.
pyamf.add_error_class(RemotingCallFailed, ERROR_CODES[ERROR_CALL_FAILED])
class HeaderCollection(dict):
    """
    Collection of AMF message headers.

    Behaves as a normal C{name -> value} dict but additionally tracks which
    headers the receiving endpoint is required to understand.

    @ivar required: Names of headers flagged as 'must understand'.
    @type required: C{list}
    """

    def __init__(self, raw_headers=()):
        """
        @param raw_headers: Iterable of C{(name, required, value)} triples.
            (Previously defaulted to C{{}} - a misleading mutable default,
            since the expected items are triples, not dict keys.)
        """
        self.required = []

        for (k, ig, v) in raw_headers:
            self[k] = v

            if ig:
                self.required.append(k)

    def is_required(self, idx):
        """
        Whether header C{idx} is flagged as 'must understand'.
        @raise KeyError: Unknown header found.
        """
        if not idx in self:
            raise KeyError("Unknown header %s" % str(idx))

        return idx in self.required

    def set_required(self, idx, value=True):
        """
        Sets or clears the 'must understand' flag for header C{idx}.

        Bug fix: the C{value} argument was previously ignored, so
        C{set_required(name, False)} still marked the header as required -
        meaning every header added via C{RemotingService.addHeader} was
        flagged 'must understand' regardless of C{must_understand}.

        @raise KeyError: Unknown header found.
        """
        if not idx in self:
            raise KeyError("Unknown header %s" % str(idx))

        if value:
            if idx not in self.required:
                self.required.append(idx)
        elif idx in self.required:
            self.required.remove(idx)

    def __len__(self):
        return len(self.keys())
class Envelope(object):
    """
    I wrap an entire request, encapsulating headers and bodies.

    There can be more than one request in a single transaction.

    @ivar amfVersion: AMF encoding version. See L{pyamf.ENCODING_TYPES}
    @type amfVersion: C{int} or C{None}
    @ivar clientType: Client type. See L{ClientTypes<pyamf.ClientTypes>}
    @type clientType: C{int} or C{None}
    @ivar headers: AMF headers, a list of name, value pairs. Global to each
        request.
    @type headers: L{HeaderCollection}
    @ivar bodies: A list of requests/response messages
    @type bodies: L{list} containing tuples of the key of the request and
        the instance of the L{Message}
    """

    def __init__(self, amfVersion=None, clientType=None):
        self.amfVersion = amfVersion
        self.clientType = clientType
        self.headers = HeaderCollection()
        self.bodies = []

    def __repr__(self):
        r = "<Envelope amfVersion=%s clientType=%s>\n" % (
            self.amfVersion, self.clientType)

        for h in self.headers:
            r += " " + repr(h) + "\n"

        for request in iter(self):
            r += " " + repr(request) + "\n"

        r += "</Envelope>"

        return r

    def __setitem__(self, name, value):
        """
        Stores a message under C{name}, replacing any existing entry with
        that name (insertion order of surviving entries is preserved).

        @raise TypeError: C{value} is not a L{Message} instance.
        """
        if not isinstance(value, Message):
            raise TypeError("Message instance expected")

        idx = 0
        found = False

        for body in self.bodies:
            if name == body[0]:
                self.bodies[idx] = (name, value)
                found = True

            idx = idx + 1

        if not found:
            self.bodies.append((name, value))

        value.envelope = self

    def __getitem__(self, name):
        for body in self.bodies:
            if name == body[0]:
                return body[1]

        raise KeyError("'%r'" % (name,))

    def __iter__(self):
        # Fix: the trailing 'raise StopIteration' was removed - a generator
        # stops by returning, and explicitly raising StopIteration inside a
        # generator is an error under PEP 479 (Python 3.7+).
        for body in self.bodies:
            yield body[0], body[1]

    def __len__(self):
        return len(self.bodies)

    def iteritems(self):
        for body in self.bodies:
            yield body

    def keys(self):
        return [body[0] for body in self.bodies]

    def items(self):
        return self.bodies

    def __contains__(self, name):
        for body in self.bodies:
            if name == body[0]:
                return True

        return False

    def __eq__(self, other):
        if isinstance(other, Envelope):
            return (self.amfVersion == other.amfVersion and
                self.clientType == other.clientType and
                self.headers == other.headers and
                self.bodies == other.bodies)

        if hasattr(other, 'keys') and hasattr(other, 'items'):
            keys, o_keys = self.keys(), other.keys()

            if len(o_keys) != len(keys):
                return False

            for k in o_keys:
                if k not in keys:
                    return False

                keys.remove(k)

            for k, v in other.items():
                if self[k] != v:
                    return False

            return True

        # Fix: previously fell off the end, implicitly returning None for
        # uncomparable types. NotImplemented lets Python fall back to the
        # reflected comparison / identity check.
        return NotImplemented

    def __ne__(self, other):
        # Fix: Python 2 does not derive __ne__ from __eq__, so inequality
        # previously fell back to identity comparison.
        result = self.__eq__(other)

        if result is NotImplemented:
            return result

        return not result
class Message(object):
    """
    A single request/response payload together with a reference back to the
    L{Envelope} carrying it.

    Headers are stored on the envelope and shared by every message in it;
    the C{headers} property simply delegates to the parent envelope.

    @ivar envelope: The parent envelope of this AMF Message.
    @type envelope: L{Envelope}
    @ivar body: The body of the message.
    @type body: C{mixed}
    @ivar headers: The message headers.
    @type headers: C{dict}
    """

    def __init__(self, envelope, body):
        self.envelope = envelope
        self.body = body

    def _get_headers(self):
        # Headers are global to the envelope, not stored per-message.
        return self.envelope.headers

    headers = property(_get_headers)
class Request(Message):
    """
    An AMF Request payload.

    @ivar target: The target of the request
    @type target: C{basestring}
    """

    def __init__(self, target, body=None, envelope=None):
        """
        @param body: The request arguments; defaults to a fresh empty list.
            (Fix: the previous default of C{[]} was a single mutable list
            shared by every instance constructed without an explicit body.)
        """
        if body is None:
            body = []

        Message.__init__(self, envelope, body)

        self.target = target

    def __repr__(self):
        return "<%s target=%s>%s</%s>" % (
            type(self).__name__, repr(self.target), repr(self.body), type(self).__name__)
class Response(Message):
    """
    An AMF Response.

    @ivar status: The status of the message. Default is L{STATUS_OK}.
    @type status: Member of L{STATUS_CODES}.
    """

    def __init__(self, body, status=STATUS_OK, envelope=None):
        Message.__init__(self, envelope, body)

        self.status = status

    def __repr__(self):
        cls_name = type(self).__name__

        return "<%s status=%s>%s</%s>" % (
            cls_name, _get_status(self.status), repr(self.body), cls_name)
class BaseFault(object):
    """
    I represent a C{Fault} message (C{mx.rpc.Fault}).
    @ivar level: The level of the fault.
    @type level: C{str}
    @ivar code: A simple code describing the fault.
    @type code: C{str}
    @ivar details: Any extra details of the fault.
    @type details: C{str}
    @ivar description: Text description of the fault.
    @type description: C{str}
    @see: U{mx.rpc.Fault on Livedocs (external)
        <http://livedocs.adobe.com/flex/201/langref/mx/rpc/Fault.html>}
    """
    # Overridden by subclasses to identify the fault severity.
    level = None

    class __amf__:
        # Attribute names encoded as static members when serialised by pyamf.
        static = ('level', 'code', 'type', 'details', 'description')

    def __init__(self, *args, **kwargs):
        # All fault fields default to the empty string when absent.
        self.code = kwargs.get('code', '')
        self.type = kwargs.get('type', '')
        self.details = kwargs.get('details', '')
        self.description = kwargs.get('description', '')

    def __repr__(self):
        x = '%s level=%s' % (self.__class__.__name__, self.level)

        # Only include fields that actually carry information.
        if self.code not in ('', None):
            x += ' code=%s' % repr(self.code)

        if self.type not in ('', None):
            x += ' type=%s' % repr(self.type)

        if self.description not in ('', None):
            x += ' description=%s' % repr(self.description)

        if self.details not in ('', None):
            x += '\nTraceback:\n%s' % (repr(self.details),)

        return x

    def raiseException(self):
        """
        Raises an exception based on the fault object. There is no traceback
        available.
        """
        # Python 2 three-argument raise: class, message, traceback (None).
        raise get_exception_from_fault(self), self.description, None
class ErrorFault(BaseFault):
    """
    I represent an error level fault.
    """
    # Marks this fault as 'error' severity; see get_fault_class().
    level = 'error'
def _read_header(stream, decoder, strict=False):
    """
    Read AMF L{Message} header.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param stream: AMF data.
    @type decoder: L{amf0.Decoder<pyamf.amf0.Decoder>}
    @param decoder: AMF decoder instance
    @type strict: C{bool}
    @param strict: Use strict decoding policy. Default is C{False}.
    @raise DecodeError: The data that was read from the stream
        does not match the header length.
    @rtype: C{tuple}
    @return:
        - Name of the header.
        - A C{bool} determining if understanding this header is
            required.
        - Value of the header.
    """
    # Header name is a length-prefixed UTF-8 string.
    name_len = stream.read_ushort()
    name = stream.read_utf8_string(name_len)

    # Non-zero byte flags the header as 'must understand'.
    required = bool(stream.read_uchar())

    data_len = stream.read_ulong()
    pos = stream.tell()

    data = decoder.readElement()

    # In strict mode, verify the decoder consumed exactly the number of
    # bytes the declared header length promised.
    if strict and pos + data_len != stream.tell():
        raise pyamf.DecodeError(
            "Data read from stream does not match header length")

    return (name, required, data)
def _write_header(name, header, required, stream, encoder, strict=False):
    """
    Write AMF message header.
    @type name: C{str}
    @param name: Name of the header.
    @type header:
    @param header: Raw header data.
    @type required: L{bool}
    @param required: Required header.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param stream: AMF data.
    @type encoder: L{amf0.Encoder<pyamf.amf0.Encoder>}
        or L{amf3.Encoder<pyamf.amf3.Encoder>}
    @param encoder: AMF encoder instance.
    @type strict: C{bool}
    @param strict: Use strict encoding policy. Default is C{False}.
    """
    stream.write_ushort(len(name))
    stream.write_utf8_string(name)

    stream.write_uchar(required)

    # Write a placeholder length of 0; in strict mode it is back-patched
    # with the real encoded byte count once the value has been written.
    write_pos = stream.tell()
    stream.write_ulong(0)
    old_pos = stream.tell()
    encoder.writeElement(header)
    new_pos = stream.tell()

    if strict:
        stream.seek(write_pos)
        stream.write_ulong(new_pos - old_pos)
        stream.seek(new_pos)
def _read_body(stream, decoder, strict=False, logger=None):
    """
    Read AMF message body.
    @param stream: AMF data.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param decoder: AMF decoder instance.
    @type decoder: L{amf0.Decoder<pyamf.amf0.Decoder>}
    @param strict: Use strict decoding policy. Default is C{False}.
    @type strict: C{bool}
    @raise DecodeError: Data read from stream does not match body length.
    @param logger: Used to log interesting events whilst reading a remoting
        body.
    @type logger: A L{logging.Logger} instance or C{None}.
    @rtype: C{tuple}
    @return: A C{tuple} containing:
        - ID of the request
        - L{Request} or L{Response}
    """
    def _read_args():
        """
        Reads the strict-array of call arguments for a request body.
        @raise pyamf.DecodeError: Array type required for request body.
        """
        # 0x0a is the AMF0 strict-array type marker.
        if stream.read(1) != '\x0a':
            raise pyamf.DecodeError("Array type required for request body")

        x = stream.read_ulong()

        return [decoder.readElement() for i in xrange(x)]

    target = stream.read_utf8_string(stream.read_ushort())
    response = stream.read_utf8_string(stream.read_ushort())

    status = STATUS_OK
    is_request = True

    # A body whose target ends with a known status suffix ('/onResult',
    # '/onStatus', ...) is a response; strip the suffix to recover the
    # original request id.
    for code, s in STATUS_CODES.iteritems():
        if not target.endswith(s):
            continue

        is_request = False
        status = code
        target = target[:0 - len(s)]

    if logger:
        logger.debug('Remoting target: %r' % (target,))

    data_len = stream.read_ulong()
    pos = stream.tell()

    if is_request:
        data = _read_args()
    else:
        data = decoder.readElement()

    # In strict mode the decoded payload must consume exactly the declared
    # number of bytes.
    if strict and pos + data_len != stream.tell():
        raise pyamf.DecodeError("Data read from stream does not match body "
            "length (%d != %d)" % (pos + data_len, stream.tell(),))

    if is_request:
        return response, Request(target, body=data)

    # Error responses carrying an object payload are converted to faults.
    if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
        data = get_fault(data)

    return target, Response(data, status)
def _write_body(name, message, stream, encoder, strict=False):
    """
    Write AMF message body.
    @param name: The name of the request.
    @type name: C{basestring}
    @param message: The AMF payload.
    @type message: L{Request} or L{Response}
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @type encoder: L{amf0.Encoder<pyamf.amf0.Encoder>}
    @param encoder: Encoder to use.
    @type strict: C{bool}
    @param strict: Use strict encoding policy. Default is C{False}.
    @raise TypeError: Unknown message type for C{message}.
    """
    def _encode_body(message):
        # Responses encode their body as a single element; requests encode
        # theirs as an AMF0 strict array (0x0a marker + count + elements).
        if isinstance(message, Response):
            encoder.writeElement(message.body)

            return

        stream.write('\x0a')
        stream.write_ulong(len(message.body))
        for x in message.body:
            encoder.writeElement(x)

    if not isinstance(message, (Request, Response)):
        raise TypeError("Unknown message type")

    target = None

    # Responses advertise their status by appending the status suffix
    # ('/onResult', '/onStatus', ...) to the request id.
    if isinstance(message, Request):
        target = unicode(message.target)
    else:
        target = u"%s%s" % (name, _get_status(message.status))

    target = target.encode('utf8')

    stream.write_ushort(len(target))
    stream.write_utf8_string(target)

    # Only requests carry a response id; responses write the literal 'null'.
    response = 'null'

    if isinstance(message, Request):
        response = name

    stream.write_ushort(len(response))
    stream.write_utf8_string(response)

    # Non-strict mode writes a dummy length of 0 and skips back-patching.
    if not strict:
        stream.write_ulong(0)
        _encode_body(message)

        return

    # Strict mode: reserve the length field, encode, then back-patch the
    # real byte count.
    write_pos = stream.tell()
    stream.write_ulong(0)
    old_pos = stream.tell()

    _encode_body(message)
    new_pos = stream.tell()

    stream.seek(write_pos)
    stream.write_ulong(new_pos - old_pos)
    stream.seek(new_pos)
def _get_status(status):
    """
    Get status code.
    Maps a member of L{STATUS_CODES} to its wire-format target suffix.
    @type status: C{str}
    @raise ValueError: The status code is unknown.
    @return: Status code.
    @see: L{STATUS_CODES}
    """
    if status not in STATUS_CODES.keys():
        # TODO print that status code..
        raise ValueError("Unknown status code")

    return STATUS_CODES[status]
def get_fault_class(level, **kwargs):
    """
    Picks the fault class matching C{level}; anything other than 'error'
    falls back to the generic L{BaseFault}.
    """
    return ErrorFault if level == 'error' else BaseFault
def get_fault(data):
    """
    Builds a fault instance from a decoded AMF status payload.

    The 'level' entry selects the fault class; every remaining entry is
    passed through as a keyword argument (unicode keys are coerced to
    C{str} so they are usable as keywords).
    """
    try:
        level = data['level']
        del data['level']
    except KeyError:
        level = 'error'

    attrs = {}

    for key, value in data.iteritems():
        if isinstance(key, unicode):
            attrs[str(key)] = value
        else:
            attrs[key] = value

    return get_fault_class(level, **attrs)(**attrs)
def decode(stream, context=None, strict=False, logger=None, timezone_offset=None):
    """
    Decodes the incoming stream as a remoting message.
    @param stream: AMF data.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @param context: Context.
    @type context: L{amf0.Context<pyamf.amf0.Context>} or
        L{amf3.Context<pyamf.amf3.Context>}
    @param strict: Enforce strict decoding. Default is C{False}.
    @type strict: C{bool}
    @param logger: Used to log interesting events whilst decoding a remoting
        message.
    @type logger: A L{logging.Logger} instance or C{None}.
    @param timezone_offset: The difference between the current timezone and
        UTC. Date/times should always be handled in UTC to avoid confusion but
        this is required for legacy systems.
    @type timezone_offset: L{datetime.timedelta}
    @raise DecodeError: Malformed stream.
    @raise RuntimeError: Decoder is unable to fully consume the
        stream buffer.
    @return: Message envelope.
    @rtype: L{Envelope}
    """
    if not isinstance(stream, util.BufferedByteStream):
        stream = util.BufferedByteStream(stream)

    if logger is not None:
        logger.debug('remoting.decode start')

    msg = Envelope()

    msg.amfVersion = stream.read_uchar()

    # see http://osflash.org/documentation/amf/envelopes/remoting#preamble
    # why we are doing this...
    if msg.amfVersion > 0x09:
        raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
            msg.amfVersion)

    if context is None:
        context = pyamf.get_context(pyamf.AMF0, exceptions=False)

    # The remoting envelope itself is always AMF0; AMF3 payloads are
    # embedded via AMF0 type markers.
    decoder = pyamf.get_decoder(pyamf.AMF0, stream, context=context,
        strict=strict, timezone_offset=timezone_offset)

    msg.clientType = stream.read_uchar()

    header_count = stream.read_ushort()

    for i in xrange(header_count):
        name, required, data = _read_header(stream, decoder, strict)
        msg.headers[name] = data

        if required:
            msg.headers.set_required(name)

    body_count = stream.read_short()

    for i in range(body_count):
        # Each body is decoded with a fresh reference context.
        context.clear()

        target, payload = _read_body(stream, decoder, strict, logger)
        msg[target] = payload

    if strict and stream.remaining() > 0:
        raise RuntimeError("Unable to fully consume the buffer")

    if logger is not None:
        logger.debug('remoting.decode end')

    return msg
def encode(msg, context=None, strict=False, logger=None, timezone_offset=None):
    """
    Encodes AMF stream and returns file object.
    @type msg: L{Envelope}
    @param msg: The message to encode.
    @type strict: C{bool}
    @param strict: Determines whether encoding should be strict. Specifically
        header/body lengths will be written correctly, instead of the default 0.
        Default is C{False}. Introduced in 0.4.
    @param logger: Used to log interesting events whilst encoding a remoting
        message.
    @type logger: A L{logging.Logger} instance or C{None}.
    @param timezone_offset: The difference between the current timezone and
        UTC. Date/times should always be handled in UTC to avoid confusion but
        this is required for legacy systems.
    @type timezone_offset: L{datetime.timedelta}
    @rtype: C{StringIO}
    @return: File object.
    """
    stream = util.BufferedByteStream()

    if context is None:
        context = pyamf.get_context(pyamf.AMF0, exceptions=False)

    # The envelope is written in AMF0; a Flash 9 client switches the encoder
    # into embedded-AMF3 mode.
    encoder = pyamf.get_encoder(pyamf.AMF0, stream, context=context,
        timezone_offset=timezone_offset, strict=strict)

    if msg.clientType == pyamf.ClientTypes.Flash9:
        encoder.use_amf3 = True

    # Preamble: AMF version, client type, then header and body counts.
    stream.write_uchar(msg.amfVersion)
    stream.write_uchar(msg.clientType)

    stream.write_short(len(msg.headers))

    for name, header in msg.headers.iteritems():
        _write_header(
            name, header, int(msg.headers.is_required(name)),
            stream, encoder, strict)

    stream.write_short(len(msg))

    for name, message in msg.iteritems():
        # Each body is encoded with a fresh reference context.
        encoder.context.clear()

        _write_body(name, message, stream, encoder, strict)

    # Rewind so callers can read the encoded payload from the start.
    stream.seek(0)

    return stream
def get_exception_from_fault(fault):
    """
    Maps a fault code to a registered exception class.
    @raise RemotingError: Default exception from fault.
    """
    # XXX nick: threading problems here?
    try:
        return pyamf.ERROR_CLASS_MAP[fault.code]
    except KeyError:
        # No specific class registered for this code - use the generic one.
        return RemotingError


pyamf.register_class(ErrorFault)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Tools for doing dynamic imports.
@since: 0.3
"""
import sys
__all__ = ['when_imported']
#: Mapping of module name -> list of callables to be executed when that
#: module is imported.
post_load_hooks = {}
#: List of module names that have already been loaded via L{ModuleFinder}.
loaded_modules = []
class ModuleFinder(object):
    """
    This is a special module finder object that executes a collection of
    callables when a specific module has been imported. An instance of this
    is placed in C{sys.meta_path}, which is consulted before C{sys.modules} -
    allowing us to provide this functionality.

    @see: L{when_imported}
    @since: 0.5
    """

    def find_module(self, name, path):
        """
        Called when an import is made. If there are hooks waiting for this
        module to be imported then we stop the normal import process and
        manually load the module.

        @param name: The name of the module being imported.
        @param path: The root path of the module (if a package). We ignore this.
        @return: If we want to hook this module, we return a C{loader}
            interface (which is this instance again). If not we return C{None}
            to allow the standard import process to continue.
        """
        if name in loaded_modules or name not in post_load_hooks:
            return None

        return self

    def load_module(self, name):
        """
        If we get this far, then there are hooks waiting to be called on
        import of this module. We manually load the module and then run the
        hooks.

        @param name: The name of the module to import.
        """
        # Mark the module as loaded up-front so the recursive __import__
        # below is not intercepted by find_module again.
        loaded_modules.append(name)

        # Fix: removed an unused 'parent, child = split_module(name)' call -
        # its result was never read.
        __import__(name, {}, {}, [])

        mod = sys.modules[name]
        run_hooks(name, mod)

        return mod
def run_hooks(name, module):
    """
    Invokes every hook registered for C{name}, passing it the freshly
    imported module object. The hook list is discarded afterwards, even
    if a hook raises.
    """
    try:
        for callback in post_load_hooks[name]:
            callback(module)
    finally:
        del post_load_hooks[name]
def split_module(name):
    """
    Splits a dotted module name into its parent and child parts.

    >>> split_module('foo.bar.baz')
    ('foo.bar', 'baz')
    >>> split_module('foo')
    (None, 'foo')
    """
    parent, sep, child = name.rpartition('.')

    if sep:
        return parent, child

    return None, child
def when_imported(name, hook):
    """
    Call C{hook(module)} when module named C{name} is first used.
    'hook' must accept one argument: the module object named by 'name', which
    must be a fully qualified (i.e. absolute) module name. The hook should
    not raise any exceptions, or it will prevent later hooks from running.
    If the module has already been imported normally, 'hook(module)' is
    called immediately, and the module object is returned from this function.
    If the module has not been imported, then the hook is called when the
    module is first imported.
    """
    # Already imported (by us or normally): fire the hook right away.
    if name in loaded_modules or name in sys.modules:
        hook(sys.modules[name])

        return

    # Otherwise queue the hook for ModuleFinder to run on first import.
    if name not in post_load_hooks:
        post_load_hooks[name] = []

    post_load_hooks[name].append(hook)

    # Ensure exactly one ModuleFinder is registered on sys.meta_path.
    # this is required for reloading this module
    for obj in sys.meta_path:
        if obj.__class__ is ModuleFinder:
            break
    else:
        sys.meta_path.insert(0, ModuleFinder())
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Utilities.
@since: 0.1.0
"""
import struct
import calendar
import datetime
import types
import inspect
import pyamf
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
set
except NameError:
from sets import Set as set
#: XML types.
xml_types = None
#: ElementTree implementation selected by L{find_xml_lib} at import time.
ET = None
#: On some Python versions retrieving a negative timestamp, like
#: C{datetime.datetime.utcfromtimestamp(-31536000.0)} is broken.
negative_timestamp_broken = False
# Build the integer/string type tuples at import time so membership checks
# elsewhere work on both Python 2 and Python 3.
int_types = [int]
str_types = [str]
# py3k support
try:
    int_types.append(long)  # Python 2 only; NameError on Python 3
except NameError:
    pass
try:
    str_types.append(unicode)  # Python 2 only; NameError on Python 3
except NameError:
    pass
#: Numeric types.
int_types = tuple(int_types)
#: String types.
str_types = tuple(str_types)
# Overflowing float literals evaluate to IEEE-754 infinities.
PosInf = 1e300000
NegInf = -1e300000
# we do this instead of float('nan') because windows throws a wobbler.
NaN = PosInf / PosInf
def find_xml_lib():
    """
    Run through a predefined order looking through the various C{ElementTree}
    implementations so that any type can be encoded but PyAMF will return
    elements as the first implementation found.

    We work through the C implementations first - then the pure Python
    versions. The downside to this is that a possible of three libraries will
    be loaded into memory that are not used but the libs are small
    (relatively) and the flexibility that this gives seems to outweigh the
    cost. Time will tell.

    @since: 0.4
    @return: Tuple of supported element types (also stored in the
        module-level C{xml_types}; the chosen module is stored in C{ET}).
    """
    global xml_types, ET
    xml_types = []
    # 1. C implementation bundled with the standard library
    try:
        import xml.etree.cElementTree as cET
        ET = cET
        xml_types.append(type(cET.Element('foo')))
    except ImportError:
        pass
    # 2. standalone cElementTree distribution
    try:
        import cElementTree as cET
        if ET is None:
            ET = cET
        xml_types.append(type(cET.Element('foo')))
    except ImportError:
        pass
    # 3. pure-Python implementation in the standard library
    try:
        import xml.etree.ElementTree as pET
        if ET is None:
            ET = pET
        xml_types.append(pET._ElementInterface)
    except ImportError:
        pass
    # 4. standalone elementtree distribution
    try:
        import elementtree.ElementTree as pET
        if ET is None:
            ET = pET
        xml_types.append(pET._ElementInterface)
    except ImportError:
        pass
    for x in xml_types[:]:
        # hack for jython
        if x.__name__ == 'instance':
            xml_types.remove(x)
    xml_types = tuple(xml_types)
    return xml_types
class StringIOProxy(object):
    """
    I am a C{StringIO} type object containing byte data from the AMF stream.

    The proxy caches the buffer length in C{_len}; mutating operations set
    C{_len_changed} so the length is lazily recomputed by C{__len__}.

    @see: U{ByteArray on OSFlash (external)
    <http://osflash.org/documentation/amf3#x0c_-_bytearray>}
    @see: U{Parsing ByteArrays on OSFlash (external)
    <http://osflash.org/documentation/amf3/parsing_byte_arrays>}
    """
    #: Underlying buffer implementation instantiated for storage.
    _wrapped_class = StringIO
    def __init__(self, buf=None):
        """
        @param buf: Initial data - a string, an object exposing
            C{getvalue}, a file-like object (read/seek/tell), or C{None}
            for an empty buffer.
        @raise TypeError: Unable to coerce C{buf} to C{StringIO}.
        """
        self._buffer = StringIOProxy._wrapped_class()
        if isinstance(buf, (str, unicode)):
            self._buffer.write(buf)
        elif hasattr(buf, 'getvalue'):
            self._buffer.write(buf.getvalue())
        elif hasattr(buf, 'read') and hasattr(buf, 'seek') and hasattr(buf, 'tell'):
            # copy a generic file-like object without disturbing its position
            old_pos = buf.tell()
            buf.seek(0)
            self._buffer.write(buf.read())
            buf.seek(old_pos)
        elif buf is None:
            pass
        else:
            raise TypeError("Unable to coerce buf->StringIO")
        self._get_len()
        self._len_changed = False
        self._buffer.seek(0, 0)
    def getvalue(self):
        """
        Get raw data from buffer.
        """
        return self._buffer.getvalue()
    def read(self, n=-1):
        """
        Reads C{n} bytes from the stream (all remaining bytes when C{n} is
        negative).
        """
        bytes = self._buffer.read(n)
        return bytes
    def seek(self, pos, mode=0):
        """
        Sets the file-pointer offset, measured from the beginning of this stream,
        at which the next write operation will occur.

        @param pos: Offset in bytes.
        @type pos: C{int}
        @param mode: Seek origin as for C{file.seek}: 0=start, 1=current,
            2=end.
        @type mode: C{int}
        """
        return self._buffer.seek(pos, mode)
    def tell(self):
        """
        Returns the position of the stream pointer.
        """
        return self._buffer.tell()
    def truncate(self, size=0):
        """
        Truncates the stream to the specified length, keeping the first
        C{size} bytes.

        @param size: The length of the stream, in bytes.
        @type size: C{int}
        """
        if size == 0:
            # fast path: discard the old buffer entirely
            self._buffer = StringIOProxy._wrapped_class()
            self._len_changed = True
            return
        # copy the first C{size} bytes into a fresh buffer
        cur_pos = self.tell()
        self.seek(0)
        buf = self.read(size)
        self._buffer = StringIOProxy._wrapped_class()
        self._buffer.write(buf)
        self.seek(cur_pos)
        self._len_changed = True
    def write(self, s):
        """
        Writes the content of the specified C{s} into this buffer at the
        current stream position.

        @param s: Raw bytes to write.
        """
        self._buffer.write(s)
        self._len_changed = True
    def _get_len(self):
        """
        Recompute and cache the total number of bytes in the buffer.
        """
        if hasattr(self._buffer, 'len'):
            # cStringIO exposes the length directly
            self._len = self._buffer.len
            return
        # otherwise measure by seeking to the end and restoring the position
        old_pos = self._buffer.tell()
        self._buffer.seek(0, 2)
        self._len = self._buffer.tell()
        self._buffer.seek(old_pos)
    def __len__(self):
        # serve the cached length unless a write/truncate invalidated it
        if not self._len_changed:
            return self._len
        self._get_len()
        self._len_changed = False
        return self._len
    def consume(self):
        """
        Chops the tail off the stream starting at 0 and ending at C{tell()}.
        The stream pointer is set to 0 at the end of this function.

        @since: 0.4
        """
        try:
            bytes = self.read()
        except IOError:
            # subclasses may raise when already at EOF; treat as empty tail
            bytes = ''
        self.truncate()
        if len(bytes) > 0:
            self.write(bytes)
        self.seek(0)
class DataTypeMixIn(object):
    """
    Provides methods for reading and writing basic data types for file-like
    objects.

    @ivar endian: Byte ordering used to represent the data. Default byte order
        is L{ENDIAN_NETWORK}.
    @type endian: C{str}
    """
    #: Network byte order
    ENDIAN_NETWORK = "!"
    #: Native byte order
    ENDIAN_NATIVE = "@"
    #: Little endian
    ENDIAN_LITTLE = "<"
    #: Big endian
    ENDIAN_BIG = ">"
    endian = ENDIAN_NETWORK
    def _read(self, length):
        """
        Reads C{length} bytes from the stream. If an attempt to read past the
        end of the buffer is made, L{IOError} is raised.
        """
        bytes = self.read(length)
        if len(bytes) != length:
            # rewind the partial read before reporting the failure
            self.seek(0 - len(bytes), 1)
            raise IOError("Tried to read %d byte(s) from the stream" % length)
        return bytes
    def _is_big_endian(self):
        """
        Whether the data in this stream is effectively big endian
        (C{ENDIAN_NATIVE} resolves against the detected system endianness).

        @rtype: C{bool}
        """
        if self.endian == DataTypeMixIn.ENDIAN_NATIVE:
            return DataTypeMixIn._system_endian == DataTypeMixIn.ENDIAN_BIG
        return self.endian in (DataTypeMixIn.ENDIAN_BIG, DataTypeMixIn.ENDIAN_NETWORK)
    def read_uchar(self):
        """
        Reads an C{unsigned char} from the stream.
        """
        return struct.unpack("B", self._read(1))[0]
    def write_uchar(self, c):
        """
        Writes an C{unsigned char} to the stream.

        @param c: Unsigned char
        @type c: C{int}
        @raise TypeError: Unexpected type for int C{c}.
        @raise OverflowError: Not in range.
        """
        if type(c) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(c),))
        if not 0 <= c <= 255:
            raise OverflowError("Not in range, %d" % c)
        self.write(struct.pack("B", c))
    def read_char(self):
        """
        Reads a C{char} from the stream.
        """
        return struct.unpack("b", self._read(1))[0]
    def write_char(self, c):
        """
        Write a C{char} to the stream.

        @param c: char
        @type c: C{int}
        @raise TypeError: Unexpected type for int C{c}.
        @raise OverflowError: Not in range.
        """
        if type(c) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(c),))
        if not -128 <= c <= 127:
            raise OverflowError("Not in range, %d" % c)
        self.write(struct.pack("b", c))
    def read_ushort(self):
        """
        Reads a 2 byte unsigned integer from the stream.
        """
        return struct.unpack("%sH" % self.endian, self._read(2))[0]
    def write_ushort(self, s):
        """
        Writes a 2 byte unsigned integer to the stream.

        @param s: 2 byte unsigned integer
        @type s: C{int}
        @raise TypeError: Unexpected type for int C{s}.
        @raise OverflowError: Not in range.
        """
        if type(s) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(s),))
        if not 0 <= s <= 65535:
            raise OverflowError("Not in range, %d" % s)
        self.write(struct.pack("%sH" % self.endian, s))
    def read_short(self):
        """
        Reads a 2 byte integer from the stream.
        """
        return struct.unpack("%sh" % self.endian, self._read(2))[0]
    def write_short(self, s):
        """
        Writes a 2 byte integer to the stream.

        @param s: 2 byte integer
        @type s: C{int}
        @raise TypeError: Unexpected type for int C{s}.
        @raise OverflowError: Not in range.
        """
        if type(s) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(s),))
        if not -32768 <= s <= 32767:
            raise OverflowError("Not in range, %d" % s)
        self.write(struct.pack("%sh" % self.endian, s))
    def read_ulong(self):
        """
        Reads a 4 byte unsigned integer from the stream.
        """
        return struct.unpack("%sL" % self.endian, self._read(4))[0]
    def write_ulong(self, l):
        """
        Writes a 4 byte unsigned integer to the stream.

        @param l: 4 byte unsigned integer
        @type l: C{int}
        @raise TypeError: Unexpected type for int C{l}.
        @raise OverflowError: Not in range.
        """
        if type(l) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(l),))
        if not 0 <= l <= 4294967295:
            raise OverflowError("Not in range, %d" % l)
        self.write(struct.pack("%sL" % self.endian, l))
    def read_long(self):
        """
        Reads a 4 byte integer from the stream.
        """
        return struct.unpack("%sl" % self.endian, self._read(4))[0]
    def write_long(self, l):
        """
        Writes a 4 byte integer to the stream.

        @param l: 4 byte integer
        @type l: C{int}
        @raise TypeError: Unexpected type for int C{l}.
        @raise OverflowError: Not in range.
        """
        if type(l) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(l),))
        if not -2147483648 <= l <= 2147483647:
            raise OverflowError("Not in range, %d" % l)
        self.write(struct.pack("%sl" % self.endian, l))
    def read_24bit_uint(self):
        """
        Reads a 24 bit unsigned integer from the stream, one octet at a
        time (struct has no native 3-byte format).

        @since: 0.4
        """
        order = None
        # bit shifts per octet: low-to-high for little endian, reversed
        # for big endian
        if not self._is_big_endian():
            order = [0, 8, 16]
        else:
            order = [16, 8, 0]
        n = 0
        for x in order:
            n += (self.read_uchar() << x)
        return n
    def write_24bit_uint(self, n):
        """
        Writes a 24 bit unsigned integer to the stream.

        @since: 0.4
        @param n: 24 bit unsigned integer
        @type n: C{int}
        @raise TypeError: Unexpected type for int C{n}.
        @raise OverflowError: Not in range.
        """
        if type(n) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(n),))
        if not 0 <= n <= 0xffffff:
            raise OverflowError("n is out of range")
        order = None
        # bit shifts per octet, mirroring read_24bit_uint
        if not self._is_big_endian():
            order = [0, 8, 16]
        else:
            order = [16, 8, 0]
        for x in order:
            self.write_uchar((n >> x) & 0xff)
    def read_24bit_int(self):
        """
        Reads a 24 bit integer from the stream.

        @since: 0.4
        """
        n = self.read_24bit_uint()
        if n & 0x800000 != 0:
            # the int is signed
            n -= 0x1000000
        return n
    def write_24bit_int(self, n):
        """
        Writes a 24 bit integer to the stream.

        @since: 0.4
        @param n: 24 bit integer
        @type n: C{int}
        @raise TypeError: Unexpected type for int C{n}.
        @raise OverflowError: Not in range.
        """
        if type(n) not in int_types:
            raise TypeError('expected an int (got:%r)' % (type(n),))
        if not -8388608 <= n <= 8388607:
            raise OverflowError("n is out of range")
        order = None
        if not self._is_big_endian():
            order = [0, 8, 16]
        else:
            order = [16, 8, 0]
        if n < 0:
            # two's-complement encode the negative value into 24 bits
            n += 0x1000000
        for x in order:
            self.write_uchar((n >> x) & 0xff)
    def read_double(self):
        """
        Reads an 8 byte float from the stream.
        """
        return struct.unpack("%sd" % self.endian, self._read(8))[0]
    def write_double(self, d):
        """
        Writes an 8 byte float to the stream.

        @param d: 8 byte float
        @type d: C{float}
        @raise TypeError: Unexpected type for float C{d}.
        """
        if not type(d) is float:
            raise TypeError('expected a float (got:%r)' % (type(d),))
        self.write(struct.pack("%sd" % self.endian, d))
    def read_float(self):
        """
        Reads a 4 byte float from the stream.
        """
        return struct.unpack("%sf" % self.endian, self._read(4))[0]
    def write_float(self, f):
        """
        Writes a 4 byte float to the stream.

        @param f: 4 byte float
        @type f: C{float}
        @raise TypeError: Unexpected type for float C{f}.
        """
        if type(f) is not float:
            raise TypeError('expected a float (got:%r)' % (type(f),))
        self.write(struct.pack("%sf" % self.endian, f))
    def read_utf8_string(self, length):
        """
        Reads C{length} bytes from the stream and decodes them as UTF-8.

        @rtype: C{unicode}
        """
        str = struct.unpack("%s%ds" % (self.endian, length), self.read(length))[0]
        return unicode(str, "utf8")
    def write_utf8_string(self, u):
        """
        Writes a unicode object to the stream in UTF-8.

        @param u: unicode object
        @raise TypeError: Unexpected type for str C{u}.
        """
        if type(u) not in str_types:
            raise TypeError('expected a str (got:%r)' % (type(u),))
        bytes = u.encode("utf8")
        self.write(struct.pack("%s%ds" % (self.endian, len(bytes)), bytes))
# Detect the host byte order once at import time by packing a native-order
# unsigned short and checking which byte comes out first.
if struct.pack('@H', 1)[0] == '\x01':
    DataTypeMixIn._system_endian = DataTypeMixIn.ENDIAN_LITTLE
else:
    DataTypeMixIn._system_endian = DataTypeMixIn.ENDIAN_BIG
class BufferedByteStream(StringIOProxy, DataTypeMixIn):
    """
    An extension of C{StringIO}.

    Features:
     - Raises L{IOError} if reading past end.
     - Allows you to C{peek()} at the next byte.

    @see: L{cBufferedByteStream<cpyamf.util.cBufferedByteStream>}
    """
    def __init__(self, buf=None):
        """
        @param buf: Initial byte stream.
        @type buf: C{str} or C{StringIO} instance
        """
        StringIOProxy.__init__(self, buf=buf)
        self.seek(0)
    def read(self, length=-1):
        """
        Reads up to the specified number of bytes from the stream into
        the specified byte array of specified length.

        @raise IOError: Attempted to read past the end of the buffer.
        """
        if length == -1 and self.at_eof():
            raise IOError('Attempted to read from the buffer but already at '
                'the end')
        elif length > 0 and self.tell() + length > len(self):
            raise IOError('Attempted to read %d bytes from the buffer but '
                'only %d remain' % (length, len(self) - self.tell()))
        return StringIOProxy.read(self, length)
    def peek(self, size=1):
        """
        Looks C{size} bytes ahead in the stream, returning what it finds,
        returning the stream pointer to its initial position.

        @param size: Number of bytes to look ahead; -1 means peek to the
            end of the stream. Default is 1.
        @type size: C{int}
        @raise ValueError: Trying to peek backwards.
        @rtype: C{str}
        @return: Bytes.
        """
        if size == -1:
            return self.peek(len(self) - self.tell())
        if size < -1:
            raise ValueError("Cannot peek backwards")
        bytes = ''
        pos = self.tell()
        # read one byte at a time; stops early at EOF instead of raising
        while not self.at_eof() and len(bytes) != size:
            bytes += self.read(1)
        self.seek(pos)
        return bytes
    def remaining(self):
        """
        Returns number of remaining bytes.

        @rtype: C{number}
        @return: Number of remaining bytes.
        """
        return len(self) - self.tell()
    def at_eof(self):
        """
        Returns C{True} if the internal pointer is at the end of the stream.

        @rtype: C{bool}
        """
        return self.tell() == len(self)
    def append(self, data):
        """
        Append data to the end of the stream. The pointer will not move if
        this operation is successful.

        @param data: The data to append to the stream.
        @type data: C{str} or C{unicode}
        @raise TypeError: data is not C{str} or C{unicode}
        """
        t = self.tell()
        # seek to the end of the stream
        self.seek(0, 2)
        # note: data is routed through write_utf8_string, so unicode input
        # is encoded as UTF-8 and non-string input raises TypeError
        if hasattr(data, 'getvalue'):
            self.write_utf8_string(data.getvalue())
        else:
            self.write_utf8_string(data)
        self.seek(t)
    def __add__(self, other):
        # Concatenate two streams into a brand new one, restoring both
        # source positions afterwards; the result's pointer is at 0.
        old_pos = self.tell()
        old_other_pos = other.tell()
        new = BufferedByteStream(self)
        other.seek(0)
        new.seek(0, 2)
        new.write(other.read())
        self.seek(old_pos)
        other.seek(old_other_pos)
        new.seek(0)
        return new
def hexdump(data):
    """
    Build a classic hexadecimal dump of C{data}, 16 bytes per line: an
    offset column, two 8-octet hex columns and a printable-character
    column where non-printable bytes show as '.'.

    @param data: Byte string to dump.
    @rtype: C{str}
    @return: Hexadecimal string.
    """
    import string
    hex_col = ""
    char_col = ""
    out = ""
    offset = 0
    for ch in data:
        hex_col += "%02x " % ord(ch)
        if ch in string.printable and ch not in string.whitespace:
            char_col += ch
        else:
            char_col += "."
        if len(char_col) == 16:
            # full row: both hex columns are exactly 24 chars already
            out += "%04x: %s %s %s\n" % (offset, hex_col[:24], hex_col[24:], char_col)
            hex_col = ""
            char_col = ""
            offset += 16
    if len(char_col):
        # partial final row: left-pad the hex columns to 24 characters
        out += "%04x: %-24s %-24s %s\n" % (offset, hex_col[:24], hex_col[24:], char_col)
    return out
def get_timestamp(d):
    """
    Returns a UTC POSIX timestamp for a C{datetime.datetime} (or
    C{datetime.date}) object, including fractional microseconds.

    @type d: C{datetime.datetime} or C{datetime.date}
    @param d: The date object.
    @return: UTC timestamp.
    @rtype: C{float}
    @note: Inspiration taken from the U{Intertwingly blog
    <http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
    """
    if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
        # promote a bare date to midnight so utctimetuple() applies
        d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
    # zero-pad the microseconds to six digits and use them as the
    # fractional part of the epoch seconds
    frac = str(d.microsecond).rjust(6).replace(' ', '0')
    return float('%s.%s' % (calendar.timegm(d.utctimetuple()), frac))
def get_datetime(secs):
    """
    Return a naive UTC C{datetime} for a POSIX timestamp.

    @type secs: C{long}
    @param secs: Seconds since 1970.
    @rtype: C{datetime.datetime}
    """
    if secs >= 0 or not negative_timestamp_broken:
        return datetime.datetime.utcfromtimestamp(secs)
    # work around platforms where utcfromtimestamp() rejects negative input
    return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=secs)
def get_properties(obj):
    """
    Return the property names of C{obj}: its keys when it is mapping-like,
    otherwise the names in its instance C{__dict__}, else an empty list.

    @since: 0.5
    """
    if hasattr(obj, 'keys'):
        return set(obj.keys())
    if hasattr(obj, '__dict__'):
        return obj.__dict__.keys()
    return []
def get_attrs(obj):
    """
    Gets a C{dict} of the attrs of an object in a predefined resolution
    order: mapping interface (C{iteritems}) first, then C{__dict__}, then
    C{__slots__}.

    @raise AttributeError: A duplicate attribute was already found in this
        collection, are you mixing different key types?
    @return: C{dict} of attributes, or C{None} when nothing applies.
    """
    if hasattr(obj, 'iteritems'):
        attrs = {}
        for key, value in obj.iteritems():
            str_key = str(key)
            if str_key in attrs:
                raise AttributeError('A duplicate attribute (%s) was '
                    'already found in this collection, are you mixing '
                    'different key types?' % (str_key,))
            attrs[str_key] = value
        return attrs
    if hasattr(obj, '__dict__'):
        return obj.__dict__.copy()
    if hasattr(obj, '__slots__'):
        return dict([(name, getattr(obj, name)) for name in obj.__slots__])
    return None
def set_attrs(obj, attrs):
    """
    Apply a collection of attributes C{attrs} to object C{obj}.

    @param obj: An instance implementing the C{__setattr__} function
    @param attrs: A collection implementing the C{iteritems} function
    @type attrs: Usually a dict
    """
    if isinstance(obj, (list, dict)):
        # container targets take values by subscript, not setattr
        for key, value in attrs.iteritems():
            obj[key] = value
    else:
        for key, value in attrs.iteritems():
            setattr(obj, key, value)
def get_class_alias(klass):
    """
    Returns an alias class suitable for C{klass}. Defaults to
    L{pyamf.ClassAlias}.
    """
    for alias_klass, matchers in pyamf.ALIAS_TYPES.iteritems():
        for matcher in matchers:
            if isinstance(matcher, types.FunctionType):
                # predicate matcher: must return exactly True
                if matcher(klass) is True:
                    return alias_klass
            elif isinstance(matcher, (type, (types.ClassType, types.ObjectType))):
                if issubclass(klass, matcher):
                    return alias_klass
    return pyamf.ClassAlias
def is_class_sealed(klass):
    """
    Whether the supplied class forbids dynamic properties: every class in
    its MRO (excluding C{object} for new-style classes) must declare
    C{__slots__}, and for new-style classes none may reintroduce an
    instance C{__dict__}.

    @rtype: C{bool}
    @since: 0.5
    """
    mro = inspect.getmro(klass)
    new_style = mro[-1] is object
    if new_style:
        # drop the object base itself from consideration
        mro = mro[:-1]
    for cls in mro:
        if new_style and '__dict__' in cls.__dict__:
            return False
        if not hasattr(cls, '__slots__'):
            return False
    return True
def get_class_meta(klass):
    """
    Returns a C{dict} containing meta data based on the supplied class,
    useful for class aliasing. The metadata is read from the class's
    optional C{__amf__} attribute, which may be a dict or any object.

    @rtype: C{dict}
    @since: 0.5
    """
    if not isinstance(klass, (type, types.ClassType)) or klass is object:
        raise TypeError('klass must be a class object, got %r' % type(klass))
    meta = {
        'static_attrs': None,
        'exclude_attrs': None,
        'readonly_attrs': None,
        'amf3': None,
        'dynamic': None,
        'alias': None,
        'external': None
    }
    if not hasattr(klass, '__amf__'):
        return meta
    data = klass.__amf__
    # normalise dict vs attribute access behind two callables
    if type(data) is dict:
        has_key = lambda key: key in data
        fetch = data.__getitem__
    else:
        has_key = lambda key: hasattr(data, key)
        fetch = lambda key: getattr(data, key)
    for prop in ['alias', 'amf3', 'dynamic', 'external']:
        if has_key(prop):
            meta[prop] = fetch(prop)
    for prop in ['static', 'exclude', 'readonly']:
        if has_key(prop):
            meta[prop + '_attrs'] = list(fetch(prop))
    return meta
class IndexedCollection(object):
    """
    A class that provides a quick and clean way to store references and
    referenced objects.

    @note: All attributes on the instance are private.
    @ivar exceptions: If C{True} then L{ReferenceError<pyamf.ReferenceError>}
        will be raised, otherwise C{None} will be returned.
    """
    def __init__(self, use_hash=False, exceptions=True):
        """
        @param use_hash: Key stored objects by C{hash()} instead of C{id()}.
        @param exceptions: Raise on missing lookups instead of returning
            C{None}.
        """
        if use_hash is True:
            self.func = hash
        else:
            self.func = id
        self.exceptions = exceptions
        self.clear()
    def clear(self):
        """
        Clears the index.
        """
        self.list = []
        self.dict = {}
    def getByReference(self, ref):
        """
        Returns an object based on the reference.

        @raise TypeError: Bad reference type.
        @raise pyamf.ReferenceError: Reference not found.
        """
        if not isinstance(ref, (int, long)):
            raise TypeError("Bad reference type")
        try:
            return self.list[ref]
        except IndexError:
            if self.exceptions is False:
                return None
            raise pyamf.ReferenceError("Reference %r not found" % (ref,))
    def getReferenceTo(self, obj):
        """
        Returns a reference to C{obj} if it is contained within this index.

        @raise pyamf.ReferenceError: Value not found.
        """
        try:
            return self.dict[self.func(obj)]
        except KeyError:
            if self.exceptions is False:
                return None
            raise pyamf.ReferenceError("Value %r not found" % (obj,))
    def append(self, obj):
        """
        Appends C{obj} to this index.

        @note: Uniqueness is not checked
        @return: The reference to C{obj} in this index.
        """
        h = self.func(obj)
        self.list.append(obj)
        idx = len(self.list) - 1
        self.dict[h] = idx
        return idx
    def __eq__(self, other):
        # compares against a plain list (insertion order) or a plain dict
        # (key -> reference map); anything else is unequal
        if isinstance(other, list):
            return self.list == other
        elif isinstance(other, dict):
            return self.dict == other
        return False
    def __len__(self):
        return len(self.list)
    def __getitem__(self, idx):
        return self.getByReference(idx)
    def __contains__(self, obj):
        # membership goes through getReferenceTo so use_hash is honoured
        try:
            r = self.getReferenceTo(obj)
        except pyamf.ReferenceError:
            r = None
        return r is not None
    def __repr__(self):
        return '<%s list=%r dict=%r>' % (self.__class__.__name__, self.list, self.dict)
    def __iter__(self):
        return iter(self.list)
class IndexedMap(IndexedCollection):
    """
    Like L{IndexedCollection}, but also maps to another object.

    @since: 0.4
    """
    def __init__(self, use_hash=False, exceptions=True):
        IndexedCollection.__init__(self, use_hash, exceptions)
    def clear(self):
        """
        Clears the index and mapping.
        """
        IndexedCollection.clear(self)
        self.mapped = []
    def getMappedByReference(self, ref):
        """
        Returns the mapped object by reference.

        @raise TypeError: Bad reference type.
        @raise pyamf.ReferenceError: Reference not found.
        """
        if not isinstance(ref, (int, long)):
            raise TypeError("Bad reference type.")
        try:
            return self.mapped[ref]
        except IndexError:
            if self.exceptions is False:
                return None
            raise pyamf.ReferenceError("Reference %r not found" % ref)
    def append(self, obj):
        """
        Appends C{obj} to this index.

        @return: The reference to C{obj} in this index.
        """
        idx = IndexedCollection.append(self, obj)
        # pad the mapped list with None placeholders so mapped[idx] exists
        diff = (idx + 1) - len(self.mapped)
        for i in range(0, diff):
            self.mapped.append(None)
        return idx
    def map(self, obj, mapped_obj):
        """
        Maps an object, returning the reference to C{obj}.
        """
        idx = self.append(obj)
        self.mapped[idx] = mapped_obj
        return idx
def is_ET_element(obj):
    """
    Determines if the supplied C{obj} param is a valid ElementTree element.

    @note: relies on the module-level C{xml_types} tuple populated by
        L{find_xml_lib} at import time.
    @rtype: C{bool}
    """
    return isinstance(obj, xml_types)
def is_float_broken():
    """
    Older versions of Python (<=2.5) and the Windows platform are renowned for
    mixing up 'special' floats. This function determines whether this is the
    case.

    @since: 0.4
    @rtype: C{bool}
    """
    global NaN
    # unpack a canonical IEEE-754 quiet-NaN bit pattern and compare its
    # string form with the module-level NaN; a mismatch means the platform
    # mangles special floats
    return str(NaN) != str(struct.unpack("!d", '\xff\xf8\x00\x00\x00\x00\x00\x00')[0])
def isNaN(val):
    """
    Whether C{val} converts to the platform NaN float (string comparison
    sidesteps NaN never comparing equal to itself).

    @since: 0.5
    """
    return str(NaN) == str(float(val))
def isPosInf(val):
    """
    Whether C{val} converts to positive infinity.

    @since: 0.5
    """
    return str(PosInf) == str(float(val))
def isNegInf(val):
    """
    Whether C{val} converts to negative infinity.

    @since: 0.5
    """
    return str(NegInf) == str(float(val))
# init the module from here ..
# populate xml_types/ET with whichever ElementTree flavours are installed
find_xml_lib()
try:
    # probe for platforms where utcfromtimestamp() rejects negative input
    datetime.datetime.utcfromtimestamp(-31536000.0)
except ValueError:
    negative_timestamp_broken = True
if is_float_broken():
    # This platform mangles IEEE-754 special floats: install replacements
    # on DataTypeMixIn that recognise/emit the NaN and infinity bit
    # patterns explicitly instead of trusting the native conversion.
    def read_double_workaround(self):
        global PosInf, NegInf, NaN
        """
        Override the L{DataTypeMixIn.read_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.
        """
        bytes = self.read(8)
        if self._is_big_endian():
            if bytes == '\xff\xf8\x00\x00\x00\x00\x00\x00':
                return NaN
            if bytes == '\xff\xf0\x00\x00\x00\x00\x00\x00':
                return NegInf
            if bytes == '\x7f\xf0\x00\x00\x00\x00\x00\x00':
                return PosInf
        else:
            if bytes == '\x00\x00\x00\x00\x00\x00\xf8\xff':
                return NaN
            if bytes == '\x00\x00\x00\x00\x00\x00\xf0\xff':
                return NegInf
            if bytes == '\x00\x00\x00\x00\x00\x00\xf0\x7f':
                return PosInf
        # not a special value - fall back to the normal unpack
        return struct.unpack("%sd" % self.endian, bytes)[0]
    DataTypeMixIn.read_double = read_double_workaround
    def write_double_workaround(self, d):
        """
        Override the L{DataTypeMixIn.write_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.
        """
        if type(d) is not float:
            raise TypeError('expected a float (got:%r)' % (type(d),))
        if isNaN(d):
            if self._is_big_endian():
                self.write('\xff\xf8\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf8\xff')
        elif isNegInf(d):
            if self._is_big_endian():
                self.write('\xff\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\xff')
        elif isPosInf(d):
            if self._is_big_endian():
                self.write('\x7f\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\x7f')
        else:
            # delegate ordinary values to the original implementation
            write_double_workaround.old_func(self, d)
    # stash the original write_double so the workaround can delegate to it
    x = DataTypeMixIn.write_double
    DataTypeMixIn.write_double = write_double_workaround
    write_double_workaround.old_func = x
try:
    # Prefer the optional C-accelerated implementations when the cpyamf
    # extension is installed; they replace the pure-Python classes above.
    from cpyamf.util import BufferedByteStream, IndexedCollection, IndexedMap
    class StringIOProxy(BufferedByteStream):
        # compatibility shim: the C BufferedByteStream plays both roles
        _wrapped_class = None
        def __init__(self, *args, **kwargs):
            BufferedByteStream.__init__(self, *args, **kwargs)
            self._buffer = self
    class DataTypeMixIn(BufferedByteStream):
        # re-expose the endian constants on the accelerated class
        #: Network byte order
        ENDIAN_NETWORK = "!"
        #: Native byte order
        ENDIAN_NATIVE = "@"
        #: Little endian
        ENDIAN_LITTLE = "<"
        #: Big endian
        ENDIAN_BIG = ">"
except ImportError:
    pass
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
File included to make the directory a Python package.
The test_*.py files are special in this directory in that they refer to the
top level module names of the adapter to test. An attempt will be made to
import that module but ignored if it fails (not available on the system).
"""
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Unit tests for Remoting gateways.
@since: 0.1.0
"""
| Python |
# Simple module-level value; presumably imported by tests as a fixture —
# TODO confirm against the callers.
spam = 'eggs'
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting tests.
@since: 0.1.0
"""
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Test utilities.
@since: 0.1.0
"""
import unittest
import copy
import pyamf
from pyamf.util import BufferedByteStream
# IEEE-754 specials created via literal overflow, mirroring pyamf.util.
PosInf = 1e300000
NegInf = -1e300000
NaN = PosInf / PosInf
class ClassicSpam:
    """
    Old-style (classic) class fixture with no-op C{__readamf__} /
    C{__writeamf__} hooks.
    """
    def __readamf__(self, input):
        """No-op read hook."""
        pass
    def __writeamf__(self, output):
        """No-op write hook."""
        pass
class Spam(object):
    """
    A generic object to use for object encoding.
    """
    def __init__(self, d=None):
        """
        @param d: Optional mapping of initial attribute names/values.
        """
        # d previously defaulted to a shared mutable {}; use None to avoid
        # the mutable-default-argument pitfall
        self.__dict__.update(d or {})
    def __readamf__(self, input):
        """No-op read hook."""
        pass
    def __writeamf__(self, output):
        """No-op write hook."""
        pass
class ClassCacheClearingTestCase(unittest.TestCase):
    """
    Base test case that snapshots pyamf's global class registries in
    C{setUp} and restores them in C{tearDown}, so registrations made by a
    test do not leak into other tests.
    """
    def setUp(self):
        unittest.TestCase.setUp(self)
        # snapshot the global registries
        self._class_cache = pyamf.CLASS_CACHE.copy()
        self._class_loaders = copy.copy(pyamf.CLASS_LOADERS)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # restore the registries captured in setUp
        pyamf.CLASS_CACHE = self._class_cache
        pyamf.CLASS_LOADERS = self._class_loaders
class EncoderTester(object):
    """
    A helper object that takes some input, runs over the encoder
    and checks the output.
    """
    def __init__(self, encoder, data):
        # data: sequence whose first element is the value to encode and
        # whose remainder describes the expected encoded bytes
        self.encoder = encoder
        self.buf = encoder.stream
        self.data = data
    def getval(self):
        """Return the bytes written so far and reset the stream."""
        t = self.buf.getvalue()
        self.buf.truncate(0)
        return t
    def run(self, testcase):
        """Encode each datum and assert the produced bytes on C{testcase}."""
        for n in self.data:
            s = n[1:]
            n = n[0]
            self.encoder.writeElement(n)
            if isinstance(s, basestring):
                testcase.assertEqual(self.getval(), s)
            elif isinstance(s, (tuple, list)):
                # multiple acceptable byte orderings - defer to check_buffer
                val = self.getval()
                if not check_buffer(val, s):
                    testcase.fail('%r != %r' % (val, s))
class DecoderTester(object):
    """
    A helper object that takes some input, runs over the decoder
    and checks the output.
    """
    def __init__(self, decoder, data):
        # data: sequence of (expected value, encoded bytes) pairs
        self.decoder = decoder
        self.buf = decoder.stream
        self.data = data
    def run(self, testcase):
        """Decode each byte string and assert the resulting value."""
        for n, s in self.data:
            # load the encoded bytes into the decoder's stream
            self.buf.truncate(0)
            self.buf.write(s)
            self.buf.seek(0)
            testcase.assertEqual(self.decoder.readElement(), n)
            if self.buf.remaining() != 0:
                # dump leftover bytes to aid debugging before failing below
                from pyamf.util import hexdump
                print hexdump(self.buf.getvalue())
            # make sure that the entire buffer was consumed
            testcase.assertEqual(self.buf.remaining(), 0)
def isNaN(val):
    # string comparison sidesteps NaN never comparing equal to itself
    return str(float(val)) == str(NaN)
def isPosInf(val):
    # True when val converts to positive infinity
    return str(float(val)) == str(PosInf)
def isNegInf(val):
    # True when val converts to negative infinity
    return str(float(val)) == str(NegInf)
def check_buffer(buf, parts, inner=False):
    """
    Check that C{buf} is composed of the byte chunks in C{parts}, in order.

    A nested tuple/list inside C{parts} is matched as a group whose chunks
    may appear in any order (useful where encoding order is not
    deterministic, e.g. dict keys).

    @return: outer call - C{True}/C{False}; inner (recursive) call - the
        remaining unmatched portion of C{buf}.
    """
    assert isinstance(parts, (tuple, list))
    orig = buf  # NOTE(review): unused; kept presumably for debugging
    parts = [p for p in parts]
    for part in parts:
        if inner is False:
            if isinstance(part, (tuple, list)):
                # unordered group: recurse and continue with the remainder
                buf = check_buffer(buf, part, inner=True)
            else:
                if not buf.startswith(part):
                    return False
                buf = buf[len(part):]
        else:
            # unordered matching: repeatedly strip whichever chunk matches
            # the head of the buffer until all chunks are consumed
            for k in parts[:]:
                for p in parts[:]:
                    if isinstance(p, (tuple, list)):
                        buf = check_buffer(buf, p, inner=True)
                    else:
                        if buf.startswith(p):
                            parts.remove(p)
                            buf = buf[len(p):]
            # returns after the first outer iteration; in inner mode the
            # enclosing 'for part' loop never advances past this point
            return buf
    return len(buf) == 0
def assert_buffer(testcase, val, s):
    """Fail C{testcase} unless C{val} matches the parts spec C{s}."""
    if check_buffer(val, s):
        return
    testcase.fail('%r != %r' % (val, s))
def replace_dict(src, dest):
    """
    Mutate C{dest} in place until it equals C{src}: keys missing from
    C{src} are deleted, and values that are not the identical object are
    copied over.
    """
    # iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live key view raises RuntimeError on Python 3
    for name in list(dest.keys()):
        if name not in src:
            del dest[name]
            continue
        if dest[name] is not src[name]:
            dest[name] = src[name]
class BaseCodecMixIn(object):
    """
    Shared setUp for codec tests: creates a pyamf context and an empty
    stream for the configured AMF version.
    """
    # AMF version under test; override in subclasses as needed
    amf_version = pyamf.AMF0
    def setUp(self):
        self.context = pyamf.get_context(self.amf_version)
        self.stream = BufferedByteStream()
class BaseDecoderMixIn(BaseCodecMixIn):
    """
    Extends L{BaseCodecMixIn} with a decoder bound to the shared
    stream/context.
    """
    def setUp(self):
        BaseCodecMixIn.setUp(self)
        self.decoder = pyamf.get_decoder(
            self.amf_version, data=self.stream, context=self.context)
class BaseEncoderMixIn(BaseCodecMixIn):
    """
    Extends L{BaseCodecMixIn} with an encoder bound to the shared
    stream/context.
    """
    def setUp(self):
        BaseCodecMixIn.setUp(self)
        self.encoder = pyamf.get_encoder(
            self.amf_version, stream=self.stream, context=self.context)
class NullFileDescriptor(object):
    """A file-like object whose C{write} silently discards its input."""
    def write(self, *args, **kwargs):
        """Accept and ignore any write call."""
        pass
def get_fqcn(klass):
    """Return the fully qualified (module-dotted) name of C{klass}."""
    module_name = klass.__module__
    class_name = klass.__name__
    return '%s.%s' % (module_name, class_name)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Unit tests.
@since: 0.1.0
"""
import unittest
# some Python 2.3 unittest compatibility fixes
# assertTrue/assertFalse only gained their modern names later; alias the
# old failUnless/failIf implementations when the modern names are missing.
if not hasattr(unittest.TestCase, 'assertTrue'):
    unittest.TestCase.assertTrue = unittest.TestCase.failUnless
if not hasattr(unittest.TestCase, 'assertFalse'):
    unittest.TestCase.assertFalse = unittest.TestCase.failIf
# package prefix prepended to every discovered test module name
mod_base = 'pyamf.tests'
def suite():
    """
    Build a C{unittest.TestSuite} aggregating the C{suite()} of every
    C{test_*.py} module located next to this file.
    """
    import os.path
    from glob import glob
    result = unittest.TestSuite()
    pattern = os.path.join(os.path.dirname(__file__), 'test_*.py')
    for path in glob(pattern):
        mod_name = os.path.basename(path).split('.')[0]
        full_name = '%s.%s' % (mod_base, mod_name)
        # __import__ returns the top-level package; walk down to the leaf
        module = __import__(full_name)
        for part in full_name.split('.')[1:]:
            module = getattr(module, part)
        result.addTest(module.suite())
    return result
def main():
    """
    Command-line entry point: run the aggregated test suite.
    """
    unittest.main(defaultTest='suite')
if __name__ == '__main__':
    main()
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Data Management Service implementation.
This module contains the message classes used with Flex Data Management
Service.
@since: 0.1.0
"""
import pyamf
from pyamf.flex.messaging import AsyncMessage, AcknowledgeMessage, ErrorMessage
__all__ = [
'DataMessage',
'SequencedMessage',
'PagedMessage',
'DataErrorMessage'
]
class DataMessage(AsyncMessage):
    """
    I am used to transport an operation that occurred on a managed object
    or collection.

    This class of message is transmitted between clients subscribed to a
    remote destination as well as between server nodes within a cluster.
    The payload of this message describes all of the relevant details of
    the operation. This information is used to replicate updates and detect
    conflicts.

    @see: U{DataMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/data/messages/DataMessage.html>}
    """
    def __init__(self):
        AsyncMessage.__init__(self)
        #: Provides access to the identity map which defines the
        #: unique identity of the item affected by this DataMessage
        #: (relevant for create/update/delete but not fill operations).
        self.identity = None
        #: Provides access to the operation/command of this DataMessage.
        #:
        #: Operations indicate how the remote destination should process
        #: this message.
        self.operation = None
class SequencedMessage(AcknowledgeMessage):
    """
    Response to L{DataMessage} requests.

    @see: U{SequencedMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/data/messages/SequencedMessage.html>}
    """
    def __init__(self):
        AcknowledgeMessage.__init__(self)
        #: Provides access to the sequence id for this message.
        #:
        #: The sequence id is a unique identifier for a sequence
        #: within a remote destination. This value is only unique for
        #: the endpoint and destination contacted.
        self.sequenceId = None
        #: Proxies associated with this sequence.
        self.sequenceProxies = None
        #: Provides access to the sequence size for this message.
        #:
        #: The sequence size indicates how many items reside in the
        #: remote sequence.
        self.sequenceSize = None
        #: The originating L{DataMessage}, when applicable.
        self.dataMessage = None
class PagedMessage(SequencedMessage):
    """
    This message provides information about a partial sequence result.

    @see: U{PagedMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/data/messages/PagedMessage.html>}
    """
    def __init__(self):
        SequencedMessage.__init__(self)
        #: Provides access to the number of total pages in a sequence
        #: based on the current page size.
        self.pageCount = None
        #: Provides access to the index of the current page in a sequence.
        self.pageIndex = None
class DataErrorMessage(ErrorMessage):
    """
    Special case of L{ErrorMessage<pyamf.flex.messaging.ErrorMessage>}
    sent when a data conflict occurs.

    Carries the conflict details in addition to the usual error
    information.

    @see: U{DataErrorMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/data/messages/DataErrorMessage.html>}
    """

    def __init__(self):
        super(DataErrorMessage, self).__init__()

        #: The client originated message which caused the conflict.
        self.cause = None
        #: Names of the properties found to be conflicting between the
        #: client and server objects.
        self.propertyNames = None
        #: The server's version of the object with the conflicting
        #: properties.
        self.serverObject = None
#: Namespace for C{flex.data} messages.
MESSAGES_NS = 'flex.data.messages'

# Register every message class defined above under the flex.data.messages
# namespace so the AMF en/decoders can map them by alias.
pyamf.register_package(globals(), MESSAGES_NS)
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Compatibility classes/functions for Flex.
@note: Not available in ActionScript 1.0 and 2.0.
@see: U{Flex on Wikipedia (external)
<http://en.wikipedia.org/wiki/Adobe_Flex>}
@since: 0.1.0
"""
import pyamf
__all__ = ['ArrayCollection', 'ObjectProxy']
class ArrayCollection(list):
    """
    I represent the ActionScript 3 based class
    C{flex.messaging.io.ArrayCollection} used in the Flex framework.

    The C{ArrayCollection} class is a wrapper class that exposes an Array
    as a collection that can be accessed and manipulated using the
    methods and properties of the C{ICollectionView} or C{IList}
    interfaces in the Flex framework.

    @see: U{ArrayCollection on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/collections/ArrayCollection.html>}

    @note: This class does not implement the RemoteObject part of the
        documentation.

    @ivar length: [read-only] The number of items in this collection.
        Introduced in 0.4.
    @type length: C{int}
    """

    class __amf__:
        external = True
        amf3 = True
        exclude = ('length',)

    def __init__(self, source=None):
        """
        @param source: Optional iterable used to populate the collection.
        @raise TypeError: C{source} is a C{dict} - only sequences are
            supported.
        """
        if source is not None:
            if isinstance(source, dict):
                raise TypeError('Cannot convert dicts to ArrayCollection')

            if hasattr(source, '__iter__'):
                self.extend(source)

    def __repr__(self):
        return "<flex.messaging.io.ArrayCollection %s>" % list.__repr__(self)

    def __readamf__(self, input):
        """
        Populate this collection from a decoded AMF payload.

        @raise pyamf.DecodeError: The decoded payload is not iterable.
        """
        data = input.readObject()

        if hasattr(data, 'source'):
            # mx.collections.ArrayCollection keeps its items in a `source`
            # attribute.
            data = data.source
        else:
            if not hasattr(data, '__iter__'):
                raise pyamf.DecodeError('Unable to read a list when decoding '
                    'ArrayCollection')

        self.extend(data)

    def __writeamf__(self, output):
        # Encoded as a plain list; use_proxies=False stops the encoder from
        # re-wrapping the list in a proxy.
        output.encoder.writeList(
            list(self), use_references=True, use_proxies=False)

    def _check_index(self, index, max_valid):
        """
        Raise C{IndexError} unless C{0 <= index <= max_valid}.
        """
        # Previously the guards used `index > len(self)` and relied on list
        # indexing to raise for index == length; this enforces the documented
        # bounds explicitly (same exception type for callers).
        if index < 0 or index > max_valid:
            raise IndexError

    def _get_length(self):
        return len(self)

    def _set_length(self, length):
        raise RuntimeError("Property length is read-only")

    length = property(_get_length, _set_length)

    def addItem(self, item):
        """
        Adds the specified item to the end of the list.

        @param item: The object to add to the collection.
        @type item: C{mixed}.
        @since: 0.4
        """
        self.append(item)

    def addItemAt(self, item, index):
        """
        Adds the item at the specified index.

        @param item: The object to add to the collection.
        @type item: C{mixed}.
        @param index: The index at which to place the item.
        @raise IndexError: If index is less than 0 or greater than the length
            of the list.
        @since: 0.4
        """
        # index == len(self) is valid here: it appends.
        self._check_index(index, len(self))

        self.insert(index, item)

    def getItemAt(self, index, prefetch=0):
        """
        Gets the item at the specified index.

        @param index: The index in the list from which to retrieve the item.
        @type index: C{int}
        @param prefetch: This param is ignored and is only here as part of the
            interface.
        @raise IndexError: if C{index < 0} or C{index >= length}
        @return: The item at index C{index}.
        @rtype: C{mixed}.
        @since: 0.4
        """
        self._check_index(index, len(self) - 1)

        return self[index]

    def getItemIndex(self, item):
        """
        Returns the index of the item if it is in the list such that
        C{getItemAt(index) == item}.

        @param item: The item to find.
        @type item: C{mixed}.
        @return: The index of the item or -1 if the item is not in the list.
        @rtype: C{int}
        @since: 0.4
        """
        try:
            return self.index(item)
        except ValueError:
            return -1

    def removeAll(self):
        """
        Removes all items from the list.

        @since: 0.4
        """
        # One slice deletion instead of popping item by item.
        del self[:]

    def removeItemAt(self, index):
        """
        Removes the item at the specified index and returns it. Any items that
        were after this index are now one index earlier.

        @param index: The index from which to remove the item.
        @return: The item that was removed.
        @rtype: C{mixed}.
        @raise IndexError: If C{index < 0} or C{index >= length}.
        @since: 0.4
        """
        self._check_index(index, len(self) - 1)

        item = self[index]
        del self[index]

        return item

    def setItemAt(self, item, index):
        """
        Places the item at the specified index. If an item was already at that
        index the new item will replace it and it will be returned.

        @param item: The new item to be placed at the specified index.
        @type item: C{mixed}.
        @param index: The index at which to place the item.
        @type index: C{int}
        @return: The item that was replaced, or C{None}.
        @rtype: C{mixed} or C{None}.
        @raise IndexError: If C{index < 0} or C{index >= length}.
        @since: 0.4
        """
        self._check_index(index, len(self) - 1)

        replaced = self[index]
        self[index] = item

        return replaced

    def toArray(self):
        """
        Returns an Array that is populated in the same order as the C{IList}
        implementation.

        @note: Returns the live collection itself, not a copy.
        @return: The array.
        @rtype: C{list}
        """
        return self
class ObjectProxy(object):
    """
    Python analogue of the ActionScript 3 class
    C{flex.messaging.io.ObjectProxy} from the Flex framework, which wraps an
    anonymous, dynamic Object so that it can be bindable and report change
    events.

    Attribute access is forwarded to the wrapped object; the wrapped object
    itself is always reachable as C{_amf_object}.

    @see: U{ObjectProxy on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/utils/ObjectProxy.html>}
    """

    class __amf__:
        external = True
        amf3 = True

    def __init__(self, object=None):
        if object is None:
            object = pyamf.ASObject()

        self._amf_object = object

    def __repr__(self):
        return "<flex.messaging.io.ObjectProxy %s>" % self._amf_object

    def __getattr__(self, name):
        proxied = self.__dict__['_amf_object']

        if name == '_amf_object':
            return proxied

        return getattr(proxied, name)

    def __setattr__(self, name, value):
        if name != '_amf_object':
            # forward everything except the payload slot itself
            setattr(self._amf_object, name, value)
        else:
            self.__dict__[name] = value

    def __readamf__(self, input):
        self._amf_object = input.readObject()

    def __writeamf__(self, output):
        output.writeObject(self._amf_object, use_proxies=False)
def unproxy_object(obj):
    """
    Return the raw value behind an L{ArrayCollection} or L{ObjectProxy};
    any other object is passed through untouched.
    """
    if isinstance(obj, ObjectProxy):
        return obj._amf_object

    if isinstance(obj, ArrayCollection):
        return list(obj)

    return obj
# Map ArrayCollection and ObjectProxy to their ActionScript aliases in the
# flex.messaging.io package.
pyamf.register_package(globals(), package='flex.messaging.io')
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@since: 0.1
"""
import uuid
import pyamf.util
from pyamf import amf3
__all__ = [
    'RemotingMessage',
    'CommandMessage',
    'AcknowledgeMessage',
    'ErrorMessage'
]

#: AMF package namespace for the Flex messaging classes.
NAMESPACE = 'flex.messaging.messages'

#: High bit of a small-message flag byte; when set, another flag byte follows.
SMALL_FLAG_MORE = 0x80
class AbstractMessage(object):
    """
    Abstract base class for all Flex messages.

    Messages have two customizable sections; headers and data. The headers
    property provides access to specialized meta information for a specific
    message instance. The data property contains the instance specific data
    that needs to be delivered and processed by the decoder.

    @see: U{AbstractMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AbstractMessage.html>}

    @ivar body: Specific data that needs to be delivered to the remote
        destination.
    @type body: C{mixed}
    @ivar clientId: Indicates which client sent the message.
    @type clientId: C{str}
    @ivar destination: Message destination.
    @type destination: C{str}
    @ivar headers: Message headers. Core header names start with DS.
    @type headers: C{dict}
    @ivar messageId: Unique Message ID.
    @type messageId: C{str}
    @ivar timeToLive: How long the message should be considered valid and
        deliverable.
    @type timeToLive: C{int}
    @ivar timestamp: Timestamp when the message was generated.
    @type timestamp: C{int}
    """

    class __amf__:
        amf3 = True
        static = ('body', 'clientId', 'destination', 'headers', 'messageId',
            'timestamp', 'timeToLive')
        dynamic = False

    #: Each message pushed from the server will contain this header identifying
    #: the client that will receive the message.
    DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
    #: Messages are tagged with the endpoint id for the channel they are sent
    #: over.
    ENDPOINT_HEADER = "DSEndpoint"
    #: Messages that need to set remote credentials for a destination carry the
    #: C{Base64} encoded credentials in this header.
    REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
    #: The request timeout value is set on outbound messages by services or
    #: channels and the value controls how long the responder will wait for an
    #: acknowledgement, result or fault response for the message before timing
    #: out the request.
    REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"

    # One bit per attribute in ``__amf__.static`` (same order); used by the
    # ISmallMessage wire format to flag which attributes are present.
    SMALL_ATTRIBUTE_FLAGS = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
    SMALL_ATTRIBUTES = dict(zip(
        SMALL_ATTRIBUTE_FLAGS,
        __amf__.static
    ))

    # Second flag byte: which of clientId/messageId follow as ByteArray
    # encoded UUIDs.
    SMALL_UUID_FLAGS = [0x01, 0x02]
    SMALL_UUIDS = dict(zip(
        SMALL_UUID_FLAGS,
        ['clientId', 'messageId']
    ))

    def __init__(self, *args, **kwargs):
        # All attributes default to None (headers defaults to a fresh dict)
        # unless supplied as keyword arguments.
        self.body = kwargs.get('body', None)
        self.clientId = kwargs.get('clientId', None)
        self.destination = kwargs.get('destination', None)
        self.headers = kwargs.get('headers', {})
        self.messageId = kwargs.get('messageId', None)
        self.timestamp = kwargs.get('timestamp', None)
        self.timeToLive = kwargs.get('timeToLive', None)

    def __repr__(self):
        m = '<%s ' % self.__class__.__name__

        for k in self.__dict__:
            m += ' %s=%r' % (k, getattr(self, k))

        return m + " />"

    def decodeSmallAttribute(self, attr, input):
        """
        Decode one small-message attribute value from C{input}.

        C{timestamp}/C{timeToLive} travel over the wire as milliseconds
        since the epoch and are converted to C{datetime} instances here.

        @since: 0.5
        """
        obj = input.readObject()

        if attr in ['timestamp', 'timeToLive']:
            return pyamf.util.get_datetime(obj / 1000.0)

        return obj

    def encodeSmallAttribute(self, attr):
        """
        Encode the value of C{attr} for the small-message format.

        Returns C{None} for C{clientId}/C{messageId} values that are
        C{uuid.UUID} instances - those are written separately, in the UUID
        section of L{__writeamf__}.

        @since: 0.5
        """
        obj = getattr(self, attr)

        if not obj:
            # NOTE(review): falsy values (e.g. a 0 timestamp) are encoded as
            # absent - confirm this round-trips as intended.
            return obj

        if attr in ['timestamp', 'timeToLive']:
            # datetimes go over the wire as milliseconds since the epoch
            return pyamf.util.get_timestamp(obj) * 1000.0
        elif attr in ['clientId', 'messageId']:
            if isinstance(obj, uuid.UUID):
                return None

        return obj

    def __readamf__(self, input):
        # The payload starts with a run of flag bytes; the first byte maps to
        # SMALL_ATTRIBUTES, the (optional) second one to SMALL_UUIDS.
        flags = read_flags(input)

        if len(flags) > 2:
            raise pyamf.DecodeError('Expected <=2 (got %d) flags for the '
                'AbstractMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        for index, byte in enumerate(flags):
            if index == 0:
                for flag in self.SMALL_ATTRIBUTE_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_ATTRIBUTES[flag]
                        setattr(self, attr, self.decodeSmallAttribute(attr, input))
            elif index == 1:
                for flag in self.SMALL_UUID_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_UUIDS[flag]
                        setattr(self, attr, decode_uuid(input.readObject()))

    def __writeamf__(self, output):
        flag_attrs = []
        uuid_attrs = []
        byte = 0

        # first flag byte + the attribute values it announces
        for flag in self.SMALL_ATTRIBUTE_FLAGS:
            value = self.encodeSmallAttribute(self.SMALL_ATTRIBUTES[flag])

            if value:
                byte |= flag
                flag_attrs.append(value)

        flags = byte
        byte = 0

        # second flag byte: UUID valued clientId/messageId attributes
        for flag in self.SMALL_UUID_FLAGS:
            attr = self.SMALL_UUIDS[flag]
            value = getattr(self, attr)

            if not value:
                continue

            byte |= flag
            uuid_attrs.append(amf3.ByteArray(value.bytes))

        if not byte:
            output.writeUnsignedByte(flags)
        else:
            # SMALL_FLAG_MORE signals that the UUID flag byte follows
            output.writeUnsignedByte(flags | SMALL_FLAG_MORE)
            output.writeUnsignedByte(byte)

        [output.writeObject(attr) for attr in flag_attrs]
        [output.writeObject(attr) for attr in uuid_attrs]

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this object. If one is not
        available, L{NotImplementedError} will be raised.

        @since: 0.5
        """
        raise NotImplementedError
class AsyncMessage(AbstractMessage):
    """
    I am the base class for all asynchronous Flex messages.

    @see: U{AsyncMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AsyncMessage.html>}

    @ivar correlationId: Correlation id of the message.
    @type correlationId: C{str}
    """

    #: Messages that were sent with a defined subtopic property indicate their
    #: target subtopic in this header.
    SUBTOPIC_HEADER = "DSSubtopic"

    class __amf__:
        static = ('correlationId',)

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        self.correlationId = kwargs.get('correlationId', None)

    def __readamf__(self, input):
        AbstractMessage.__readamf__(self, input)

        # One extra flag byte: 0x01 - a plain correlationId object follows,
        # 0x02 - correlationId follows as a ByteArray encoded UUID.
        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AsyncMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        byte = flags[0]

        if byte & 0x01:
            self.correlationId = input.readObject()

        if byte & 0x02:
            self.correlationId = decode_uuid(input.readObject())

    def __writeamf__(self, output):
        AbstractMessage.__writeamf__(self, output)

        if not isinstance(self.correlationId, uuid.UUID):
            output.writeUnsignedByte(0x01)
            output.writeObject(self.correlationId)
        else:
            # UUIDs are written in their compact 16 byte form
            output.writeUnsignedByte(0x02)
            output.writeObject(pyamf.amf3.ByteArray(self.correlationId.bytes))

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this async message.

        @since: 0.5
        """
        return AsyncMessageExt(**self.__dict__)
class AcknowledgeMessage(AsyncMessage):
    """
    I acknowledge the receipt of a message that was sent previously.

    Every message sent within the messaging system must receive an
    acknowledgement.

    @see: U{AcknowledgeMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AcknowledgeMessage.html>}
    """

    #: Used to indicate that the acknowledgement is for a message that
    #: generated an error.
    ERROR_HINT_HEADER = "DSErrorHint"

    def __readamf__(self, input):
        AsyncMessage.__readamf__(self, input)

        # This class adds no attributes of its own: the flag byte(s) are
        # consumed and validated but carry no data.
        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AcknowledgeMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

    def __writeamf__(self, output):
        AsyncMessage.__writeamf__(self, output)

        # no attributes at this level - write an empty flag byte
        output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this acknowledge message.

        @since: 0.5
        """
        return AcknowledgeMessageExt(**self.__dict__)
class CommandMessage(AsyncMessage):
    """
    Provides a mechanism for sending commands related to publish/subscribe
    messaging, ping, and cluster operations.

    @see: U{CommandMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/CommandMessage.html>}

    @ivar operation: The command; one of the C{*_OPERATION} constants below.
    @type operation: C{int}
    @ivar messageRefType: Remote destination message-type hint (see
        C{__init__}).
    @type messageRefType: C{str}
    """

    #: The server message type for authentication commands.
    AUTHENTICATION_MESSAGE_REF_TYPE = "flex.messaging.messages.AuthenticationMessage"
    #: This is used to test connectivity over the current channel to the remote
    #: endpoint.
    PING_OPERATION = 5
    #: This is used by a remote destination to sync missed or cached messages
    #: back to a client as a result of a client issued poll command.
    SYNC_OPERATION = 4
    #: This is used to request a list of failover endpoint URIs for the remote
    #: destination based on cluster membership.
    CLUSTER_REQUEST_OPERATION = 7
    #: This is used to send credentials to the endpoint so that the user can be
    #: logged in over the current channel. The credentials need to be C{Base64}
    #: encoded and stored in the body of the message.
    LOGIN_OPERATION = 8
    #: This is used to log the user out of the current channel, and will
    #: invalidate the server session if the channel is HTTP based.
    LOGOUT_OPERATION = 9
    #: This is used to poll a remote destination for pending, undelivered
    #: messages.
    POLL_OPERATION = 2
    #: Subscribe commands issued by a consumer pass the consumer's C{selector}
    #: expression in this header.
    SELECTOR_HEADER = "DSSelector"
    #: This is used to indicate that the client's session with a remote
    #: destination has timed out.
    SESSION_INVALIDATE_OPERATION = 10
    #: This is used to subscribe to a remote destination.
    SUBSCRIBE_OPERATION = 0
    #: This is the default operation for new L{CommandMessage} instances.
    UNKNOWN_OPERATION = 1000
    #: This is used to unsubscribe from a remote destination.
    UNSUBSCRIBE_OPERATION = 1
    #: This operation is used to indicate that a channel has disconnected.
    DISCONNECT_OPERATION = 12

    class __amf__:
        static = ('operation',)

    def __init__(self, *args, **kwargs):
        AsyncMessage.__init__(self, *args, **kwargs)

        self.operation = kwargs.get('operation', None)
        #: Remote destination belonging to a specific service, based upon
        #: whether this message type matches the message type the service
        #: handles.
        self.messageRefType = kwargs.get('messageRefType', None)

    def __readamf__(self, input):
        # The CommandMessage portion of a small message is an optional flag
        # byte where 0x01 announces an operation value.
        AsyncMessage.__readamf__(self, input)

        flags = read_flags(input)

        if not flags:
            # defensive: no flag byte means there is nothing more to read
            return

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'CommandMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        byte = flags[0]

        if byte & 0x01:
            self.operation = input.readObject()

    def __writeamf__(self, output):
        AsyncMessage.__writeamf__(self, output)

        if self.operation:
            # NOTE(review): a falsy operation (e.g. SUBSCRIBE_OPERATION == 0)
            # is encoded as absent - confirm this round-trips as intended.
            output.writeUnsignedByte(0x01)
            output.writeObject(self.operation)
        else:
            output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this command message.

        @since: 0.5
        """
        return CommandMessageExt(**self.__dict__)
class ErrorMessage(AcknowledgeMessage):
    """
    The Flex error message, returned to the client to report errors within
    the messaging system.

    @see: U{ErrorMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/ErrorMessage.html>}
    """

    #: If a message may not have been delivered, the faultCode will contain
    #: this constant.
    MESSAGE_DELIVERY_IN_DOUBT = "Client.Error.DeliveryInDoubt"
    #: Header name for the retryable hint header.
    #:
    #: This is used to indicate that the operation that generated the error may
    #: be retryable rather than fatal.
    RETRYABLE_HINT_HEADER = "DSRetryableErrorHint"

    class __amf__:
        static = ('extendedData', 'faultCode', 'faultDetail', 'faultString',
            'rootCause')

    def __init__(self, *args, **kwargs):
        AcknowledgeMessage.__init__(self, *args, **kwargs)

        #: Fault code for the error.
        self.faultCode = kwargs.get('faultCode', None)
        #: A simple description of the error.
        self.faultString = kwargs.get('faultString', None)
        #: Detailed description of what caused the error.
        self.faultDetail = kwargs.get('faultDetail', None)
        #: Should a traceback exist for the error, this property contains the
        #: message.
        self.rootCause = kwargs.get('rootCause', {})
        #: Extended data that the remote destination has chosen to associate
        #: with this error to facilitate custom error processing on the client.
        self.extendedData = kwargs.get('extendedData', {})

    def getSmallMessage(self):
        """
        Error messages have no ISmallMessage form.

        @raise NotImplementedError: Always.
        @since: 0.5
        """
        raise NotImplementedError
class RemotingMessage(AbstractMessage):
    """
    Carries an RPC request to a remote endpoint.

    @see: U{RemotingMessage on Livedocs (external)
    <http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/RemotingMessage.html>}
    """

    class __amf__:
        static = ('operation', 'source')

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        #: Name of the service to be called, including the package name.
        #: Provided for backwards compatibility.
        self.source = kwargs.get('source', None)
        #: Name of the remote method/operation that should be called.
        self.operation = kwargs.get('operation', None)
class AcknowledgeMessageExt(AcknowledgeMessage):
    """
    An L{AcknowledgeMessage}, but implementing C{ISmallMessage}.

    Serialized externally (C{__amf__.external}) using the compact
    small-message wire format; registered under the 'DSK' alias.

    @since: 0.5
    """

    class __amf__:
        external = True
class CommandMessageExt(CommandMessage):
    """
    A L{CommandMessage}, but implementing C{ISmallMessage}.

    Serialized externally (C{__amf__.external}) using the compact
    small-message wire format; registered under the 'DSC' alias.

    @since: 0.5
    """

    class __amf__:
        external = True
class AsyncMessageExt(AsyncMessage):
    """
    An L{AsyncMessage}, but implementing C{ISmallMessage}.

    Serialized externally (C{__amf__.external}) using the compact
    small-message wire format; registered under the 'DSA' alias.

    @since: 0.5
    """

    class __amf__:
        external = True
def read_flags(input):
    """
    Read a run of flag bytes from C{input}.

    Bytes are consumed for as long as the high bit (L{SMALL_FLAG_MORE}) is
    set; the returned list contains every byte read, with the continuation
    bit stripped.

    @since: 0.5
    """
    flags = []

    while True:
        byte = input.readUnsignedByte()

        if byte & SMALL_FLAG_MORE:
            flags.append(byte ^ SMALL_FLAG_MORE)
        else:
            flags.append(byte)

            break

    return flags
def decode_uuid(obj):
    """
    Decode the contents of a L{ByteArray} (or any byte string) to a
    C{uuid.UUID} instance.

    @param obj: 16 bytes of raw UUID data.
    @raise ValueError: C{obj} does not contain exactly 16 bytes.
    @since: 0.5
    """
    # ``bytes`` is an alias of ``str`` on Python 2, so this is equivalent to
    # the previous ``str(obj)`` call there, while also behaving correctly for
    # byte strings under Python 3 (where str() would mangle the data).
    return uuid.UUID(bytes=bytes(obj))
pyamf.register_package(globals(), package=NAMESPACE)

# Register the ISmallMessage implementations under their compact wire aliases.
pyamf.register_class(AcknowledgeMessageExt, 'DSK')
pyamf.register_class(CommandMessageExt, 'DSC')
pyamf.register_class(AsyncMessageExt, 'DSA')
| Python |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
B{PyAMF} provides B{A}ction B{M}essage B{F}ormat
(U{AMF<http://en.wikipedia.org/wiki/Action_Message_Format>}) support for
Python that is compatible with the Adobe
U{Flash Player<http://en.wikipedia.org/wiki/Flash_Player>}.
@copyright: Copyright (c) 2007-2009 The PyAMF Project. All Rights Reserved.
@contact: U{users@pyamf.org<mailto:users@pyamf.org>}
@see: U{http://pyamf.org}
@since: October 2007
@version: 0.5.1
@status: Production/Stable
"""
import types
import inspect
from pyamf import util
from pyamf.adapters import register_adapters
# ``set`` became a builtin in Python 2.4; fall back to the sets module for
# older interpreters.
try:
    set
except NameError:
    from sets import Set as set

__all__ = [
    'register_class',
    'register_class_loader',
    'encode',
    'decode',
    '__version__'
]

#: PyAMF version number.
__version__ = (0, 5, 1)

#: Class mapping support.
CLASS_CACHE = {}
#: Class loaders.
CLASS_LOADERS = []
#: Custom type map.
TYPE_MAP = {}
#: Maps error classes to string codes.
ERROR_CLASS_MAP = {}
#: Alias mapping support
ALIAS_TYPES = {}

#: Specifies that objects are serialized using AMF for ActionScript 1.0
#: and 2.0 that were introduced in the Adobe Flash Player 6.
AMF0 = 0
#: Specifies that objects are serialized using AMF for ActionScript 3.0
#: that was introduced in the Adobe Flash Player 9.
AMF3 = 3
#: Supported AMF encoding types.
ENCODING_TYPES = (AMF0, AMF3)

#: Default encoding
DEFAULT_ENCODING = AMF0
class ClientTypes:
    """
    Typecodes used to identify AMF clients and servers.

    The public (non-underscore) values defined here are collected into the
    module-level C{CLIENT_TYPES} list.

    @see: U{Adobe Flash Player on WikiPedia (external)
    <http://en.wikipedia.org/wiki/Flash_Player>}
    @see: U{Adobe Flash Media Server on WikiPedia (external)
    <http://en.wikipedia.org/wiki/Adobe_Flash_Media_Server>}
    """

    #: Specifies a Adobe Flash Player 6.0 - 8.0 client.
    Flash6 = 0
    #: Specifies a Adobe FlashCom / Flash Media Server client.
    FlashCom = 1
    #: Specifies a Adobe Flash Player 9.0 client or newer.
    Flash9 = 3
#: List of AMF client typecodes.
CLIENT_TYPES = []

# Collect the public typecode values defined on ClientTypes.
for x in ClientTypes.__dict__:
    if not x.startswith('_'):
        CLIENT_TYPES.append(ClientTypes.__dict__[x])

del x
class UndefinedType(object):
    """
    Sentinel type for the ActionScript C{undefined} value; see L{Undefined}.
    """

    def __repr__(self):
        return 'pyamf.Undefined'

#: Represents the C{undefined} value in a Adobe Flash Player client.
Undefined = UndefinedType()
class BaseError(Exception):
    """
    Base AMF Error.

    All AMF related errors should be subclassed from this class, so callers
    can catch any PyAMF error with a single C{except} clause.
    """
class DecodeError(BaseError):
    """
    Raised if there is an error in decoding an AMF data stream.

    @see: L{BaseError}
    """
class EOStream(BaseError):
    """
    Raised if the data stream has come to a natural end.

    @see: L{BaseError}
    """
class ReferenceError(BaseError):
    """
    Raised if an AMF data stream refers to a non-existent object
    or string reference.

    @note: This shadows the builtin C{ReferenceError} within this module's
        namespace; refer to it as C{pyamf.ReferenceError} from outside.
    """
class EncodeError(BaseError):
    """
    Raised if the element could not be encoded to the stream.

    @bug: See U{Docuverse blog (external)
    <http://www.docuverse.com/blog/donpark/2007/05/14/flash-9-amf3-bug>}
    for more info about the empty key string array bug.
    """
class ClassAliasError(BaseError):
    """
    Generic error for anything class alias related.

    @see: L{ClassAlias}
    """
class UnknownClassAlias(ClassAliasError):
    """
    Raised if the AMF stream specifies an Actionscript class that does not
    have a Python class alias.

    @see: L{register_class}
    """
class BaseContext(object):
    """
    I hold the AMF context for en/decoding streams.

    @ivar objects: An indexed collection of referencable objects encountered
        during en/decoding.
    @type objects: L{util.IndexedCollection}
    @ivar class_aliases: A L{dict} of C{class} to L{ClassAlias}
    @ivar exceptions: If C{True} then reference errors will be propagated.
    @type exceptions: C{bool}
    """

    def __init__(self, exceptions=True):
        # The collection itself never raises (exceptions=False); this class
        # decides whether to raise based on self.exceptions.
        self.objects = util.IndexedCollection(exceptions=False)

        self.clear()

        self.exceptions = exceptions

    def clear(self):
        """
        Completely clears the context.
        """
        self.objects.clear()
        self.class_aliases = {}

    def getObject(self, ref):
        """
        Gets an object based on a reference.

        @raise ReferenceError: Unknown object reference, if L{exceptions} is
            C{True}, otherwise C{None} will be returned.
        """
        o = self.objects.getByReference(ref)

        if o is None and self.exceptions:
            raise ReferenceError("Unknown object reference %r" % (ref,))

        return o

    def getObjectReference(self, obj):
        """
        Gets a reference for an object.

        @raise ReferenceError: Object not a valid reference, if L{exceptions}
            is C{True}, otherwise C{None} will be returned.
        """
        o = self.objects.getReferenceTo(obj)

        if o is None and self.exceptions:
            raise ReferenceError("Object %r not a valid reference" % (obj,))

        return o

    def addObject(self, obj):
        """
        Adds a reference to C{obj}.

        @type obj: C{mixed}
        @param obj: The object to add to the context.
        @rtype: C{int}
        @return: Reference to C{obj}.
        """
        return self.objects.append(obj)

    def getClassAlias(self, klass):
        """
        Gets a class alias based on the supplied C{klass}.

        Checks the per-context cache first, then the global registry; as a
        last resort an alias class is looked up via C{util.get_class_alias}
        and instantiated for C{klass}.
        """
        try:
            return self.class_aliases[klass]
        except KeyError:
            pass

        try:
            self.class_aliases[klass] = get_class_alias(klass)
        except UnknownClassAlias:
            # no alias has been found yet .. check subclasses
            alias = util.get_class_alias(klass)

            self.class_aliases[klass] = alias(klass)

        return self.class_aliases[klass]

    def __copy__(self):
        # copying a stateful context is deliberately unsupported here;
        # subclasses must implement it themselves
        raise NotImplementedError
class ASObject(dict):
    """
    Represents a Flash Actionscript Object (typed or untyped).

    Behaves like a plain C{dict} while also supporting attribute-style
    access: C{obj.foo} maps to C{obj['foo']} for both reads and writes.

    @raise AttributeError: Unknown attribute.
    """

    class __amf__:
        dynamic = True

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __repr__(self):
        return dict.__repr__(self)

    def __getattr__(self, k):
        # only called when normal attribute lookup fails
        if k in self:
            return self[k]

        raise AttributeError('Unknown attribute \'%s\'' % (k,))

    def __setattr__(self, k, v):
        self[k] = v

    def __hash__(self):
        # identity based hash; dict itself is unhashable
        return id(self)
class MixedArray(dict):
    """
    Used to be able to specify the C{mixedarray} type.

    A plain C{dict} subclass that exists purely as a distinct type marker.
    """
class ClassAlias(object):
"""
Class alias. Provides class/instance meta data to the En/Decoder to allow
fine grain control and some performance increases.
"""
    def __init__(self, klass, alias=None, **kwargs):
        """
        @param klass: The class being aliased.
        @param alias: The ActionScript alias for C{klass}; C{None} marks the
            alias as anonymous (encoded with an empty class name).
        @raise TypeError: C{klass} is not a class object.
        @raise ValueError: An explicit empty-string alias was supplied.
        """
        if not isinstance(klass, (type, types.ClassType)):
            raise TypeError('klass must be a class type, got %r' % type(klass))

        self.checkClass(klass)

        self.klass = klass
        self.alias = alias

        # behavioural overrides; None means 'inherit/decide at compile time'
        self.static_attrs = kwargs.get('static_attrs', None)
        self.exclude_attrs = kwargs.get('exclude_attrs', None)
        self.readonly_attrs = kwargs.get('readonly_attrs', None)
        self.proxy_attrs = kwargs.get('proxy_attrs', None)
        self.amf3 = kwargs.get('amf3', None)
        self.external = kwargs.get('external', None)
        self.dynamic = kwargs.get('dynamic', None)

        self._compiled = False
        self.anonymous = False
        self.sealed = None

        if self.alias is None:
            self.anonymous = True
            # we don't set this to None because AMF3 untyped objects have a
            # class name of ''
            self.alias = ''
        else:
            if self.alias == '':
                raise ValueError('Cannot set class alias as \'\'')

        # compilation can be deferred (e.g. while related classes are still
        # being registered) by passing defer=True
        if not kwargs.get('defer', False):
            self.compile()
    def _checkExternal(self):
        """
        Validate that an externalised class provides the C{__readamf__} and
        C{__writeamf__} hooks.

        @raise AttributeError: A hook is missing.
        @raise TypeError: A hook is not an (unbound) method.
        """
        if not hasattr(self.klass, '__readamf__'):
            raise AttributeError("An externalised class was specified, but"
                " no __readamf__ attribute was found for %r" % (self.klass,))

        if not hasattr(self.klass, '__writeamf__'):
            raise AttributeError("An externalised class was specified, but"
                " no __writeamf__ attribute was found for %r" % (self.klass,))

        # UnboundMethodType only exists on Python 2; this module targets
        # Python 2 throughout.
        if not isinstance(self.klass.__readamf__, types.UnboundMethodType):
            raise TypeError("%s.__readamf__ must be callable" % (
                self.klass.__name__,))

        if not isinstance(self.klass.__writeamf__, types.UnboundMethodType):
            raise TypeError("%s.__writeamf__ must be callable" % (
                self.klass.__name__,))
    def compile(self):
        """
        This compiles the alias into a form that can be of most benefit to the
        en/decoder.
        """
        if self._compiled:
            # compiling is a one-shot operation
            return

        self.decodable_properties = set()
        self.encodable_properties = set()
        self.inherited_dynamic = None
        self.inherited_sealed = None

        # normalise the user supplied attribute lists to working sets
        self.exclude_attrs = set(self.exclude_attrs or [])
        self.readonly_attrs = set(self.readonly_attrs or [])
        self.static_attrs = set(self.static_attrs or [])
        self.proxy_attrs = set(self.proxy_attrs or [])

        if self.external:
            self._checkExternal()
            self._finalise_compile()

            # this class is external so no more compiling is necessary
            return

        self.sealed = util.is_class_sealed(self.klass)

        if hasattr(self.klass, '__slots__'):
            # slotted attributes are both readable and writable
            self.decodable_properties.update(self.klass.__slots__)
            self.encodable_properties.update(self.klass.__slots__)

        # properties: encodable with a getter, decodable with a setter,
        # read-only when there is no setter
        for k, v in self.klass.__dict__.iteritems():
            if not isinstance(v, property):
                continue

            if v.fget:
                self.encodable_properties.update([k])

            if v.fset:
                self.decodable_properties.update([k])
            else:
                self.readonly_attrs.update([k])

        mro = inspect.getmro(self.klass)[1:]

        try:
            # fold in the configuration of the immediate base class
            self._compile_base_class(mro[0])
        except IndexError:
            pass

        self.getCustomProperties()
        self._finalise_compile()
    def _compile_base_class(self, klass):
        """
        Fold the compiled alias configuration of the base class C{klass} into
        this alias. The recursion stops at C{object}.
        """
        if klass is object:
            return

        try:
            alias = get_class_alias(klass)
        except UnknownClassAlias:
            # base classes are registered on the fly so their configuration
            # can be inherited
            alias = register_class(klass)

        alias.compile()

        if alias.exclude_attrs:
            self.exclude_attrs.update(alias.exclude_attrs)

        if alias.readonly_attrs:
            self.readonly_attrs.update(alias.readonly_attrs)

        if alias.static_attrs:
            self.static_attrs.update(alias.static_attrs)

        if alias.proxy_attrs:
            self.proxy_attrs.update(alias.proxy_attrs)

        if alias.encodable_properties:
            self.encodable_properties.update(alias.encodable_properties)

        if alias.decodable_properties:
            self.decodable_properties.update(alias.decodable_properties)

        if self.amf3 is None and alias.amf3:
            self.amf3 = alias.amf3

        if self.dynamic is None and alias.dynamic is not None:
            self.inherited_dynamic = alias.dynamic

        if alias.sealed is not None:
            self.inherited_sealed = alias.sealed
    def _finalise_compile(self):
        """
        Normalise the compiled state: resolve inherited flags, apply
        exclusions and convert the working sets to sorted lists (or C{None}
        when empty).
        """
        if self.dynamic is None:
            self.dynamic = True

            if self.inherited_dynamic is not None:
                if self.inherited_dynamic is False and not self.sealed and self.inherited_sealed:
                    self.dynamic = True
                else:
                    self.dynamic = self.inherited_dynamic

        if self.sealed:
            # sealed classes cannot grow attributes at runtime
            self.dynamic = False

        if self.amf3 is None:
            self.amf3 = False

        if self.external is None:
            self.external = False

        if not self.static_attrs:
            self.static_attrs = None
        else:
            # static attributes are always both encodable and decodable
            self.encodable_properties.update(self.static_attrs)
            self.decodable_properties.update(self.static_attrs)

        if self.static_attrs is not None:
            if self.exclude_attrs:
                # excluded attributes win over static ones
                self.static_attrs.difference_update(self.exclude_attrs)

            self.static_attrs = list(self.static_attrs)
            self.static_attrs.sort()

        if not self.exclude_attrs:
            self.exclude_attrs = None
        else:
            self.encodable_properties.difference_update(self.exclude_attrs)
            self.decodable_properties.difference_update(self.exclude_attrs)

        if self.exclude_attrs is not None:
            self.exclude_attrs = list(self.exclude_attrs)
            self.exclude_attrs.sort()

        if not self.readonly_attrs:
            self.readonly_attrs = None
        else:
            # read-only attributes can be encoded but never decoded into
            self.decodable_properties.difference_update(self.readonly_attrs)

        if self.readonly_attrs is not None:
            self.readonly_attrs = list(self.readonly_attrs)
            self.readonly_attrs.sort()

        if not self.proxy_attrs:
            self.proxy_attrs = None
        else:
            if not self.amf3:
                # proxying is an AMF3-only feature
                raise ClassAliasError('amf3 = True must be specified for '
                    'classes with proxied attributes. Attribute = %r, '
                    'Class = %r' % (self.proxy_attrs, self.klass,))

            self.proxy_attrs = list(self.proxy_attrs)
            self.proxy_attrs.sort()

        if len(self.decodable_properties) == 0:
            self.decodable_properties = None
        else:
            self.decodable_properties = list(self.decodable_properties)
            self.decodable_properties.sort()

        if len(self.encodable_properties) == 0:
            self.encodable_properties = None
        else:
            self.encodable_properties = list(self.encodable_properties)
            self.encodable_properties.sort()

        self.non_static_encodable_properties = None

        if self.encodable_properties:
            self.non_static_encodable_properties = set(self.encodable_properties)

            if self.static_attrs:
                self.non_static_encodable_properties.difference_update(self.static_attrs)

        # fast-path marker for the encoder: no attribute filtering required
        self.shortcut_encode = True

        if self.encodable_properties or self.static_attrs or self.exclude_attrs:
            self.shortcut_encode = False

        self._compiled = True
def is_compiled(self):
    """Whether L{compile} has already been run for this alias."""
    return self._compiled
def __str__(self):
return self.alias
def __repr__(self):
return '<ClassAlias alias=%s class=%s @ 0x%x>' % (
self.alias, self.klass, id(self))
def __eq__(self, other):
    """Compare against an alias string, another alias, or a class object."""
    # alias name comparison
    if isinstance(other, basestring):
        return self.alias == other

    # two aliases are equal when they wrap the same class
    if isinstance(other, self.__class__):
        return self.klass == other.klass

    # direct comparison against a (new- or old-style) class
    if isinstance(other, (type, types.ClassType)):
        return self.klass == other

    return False
def __hash__(self):
return id(self)
def checkClass(self, klass):
    """
    This function is used to check if the class being aliased fits certain
    criteria. The default is to check that the C{__init__} constructor does
    not pass in arguments.
    @since: 0.4
    @raise TypeError: C{__init__} doesn't support additional arguments
    """
    # Check that the constructor of the class doesn't require any additional
    # arguments.  Python 2 only: py3/builtin constructors have no im_func.
    if not (hasattr(klass, '__init__') and hasattr(klass.__init__, 'im_func')):
        return

    klass_func = klass.__init__.im_func

    # built-in classes don't have func_code
    if hasattr(klass_func, 'func_code') and (
            klass_func.func_code.co_argcount - len(klass_func.func_defaults or []) > 1):
        code = klass_func.func_code

        # BUG FIX: co_varnames lists the arguments first, then the local
        # variables - slice to co_argcount so locals don't leak into the
        # reported signature.
        args = list(code.co_varnames[:code.co_argcount])
        values = list(klass_func.func_defaults or [])

        if not values:
            sign = "%s.__init__(%s)" % (klass.__name__, ", ".join(args))
        else:
            named_args = zip(args[len(args) - len(values):], values)
            sign = "%s.%s.__init__(%s, %s)" % (
                klass.__module__, klass.__name__,
                ", ".join(args[:0 - len(values)]),
                ", ".join(map(lambda x: "%s=%s" % x, named_args)))

        raise TypeError("__init__ doesn't support additional arguments: %s"
            % sign)
def getEncodableAttributes(self, obj, codec=None):
    """
    Returns a C{tuple} containing a dict of static and dynamic attributes
    for an object to encode.
    @param obj: The object whose attributes are being collected.
    @param codec: An optional argument that will contain the en/decoder
        instance calling this function.
    @return: C{(static_attrs, dynamic_attrs)}; either element is C{None}
        when empty.
    @since: 0.5
    """
    if not self._compiled:
        self.compile()

    static_attrs = {}
    dynamic_attrs = {}

    # statically declared attributes: missing ones encode as Undefined
    # rather than raising
    if self.static_attrs:
        for attr in self.static_attrs:
            try:
                static_attrs[attr] = getattr(obj, attr)
            except AttributeError:
                static_attrs[attr] = Undefined

    # non-dynamic aliases only encode the precomputed non-static set
    if not self.dynamic:
        if self.non_static_encodable_properties:
            for attr in self.non_static_encodable_properties:
                dynamic_attrs[attr] = getattr(obj, attr)

        if not static_attrs:
            static_attrs = None

        if not dynamic_attrs:
            dynamic_attrs = None

        return static_attrs, dynamic_attrs

    dynamic_props = util.get_properties(obj)

    # shortcut_encode (set in compile()) is True when no filters apply,
    # so the set-based filtering can be skipped entirely
    if not self.shortcut_encode:
        dynamic_props = set(dynamic_props)

        if self.encodable_properties:
            dynamic_props.update(self.encodable_properties)

        if self.static_attrs:
            dynamic_props.difference_update(self.static_attrs)

        if self.exclude_attrs:
            dynamic_props.difference_update(self.exclude_attrs)

    # dict subclasses store values by key, not as attributes
    if self.klass is dict:
        for attr in dynamic_props:
            dynamic_attrs[attr] = obj[attr]
    else:
        for attr in dynamic_props:
            dynamic_attrs[attr] = getattr(obj, attr)

    # replace proxied values (e.g. list -> ArrayCollection); iterate a
    # copy because the dicts are mutated in place
    if self.proxy_attrs is not None:
        if static_attrs:
            for k, v in static_attrs.copy().iteritems():
                if k in self.proxy_attrs:
                    static_attrs[k] = self.getProxiedAttribute(k, v)

        if dynamic_attrs:
            for k, v in dynamic_attrs.copy().iteritems():
                if k in self.proxy_attrs:
                    dynamic_attrs[k] = self.getProxiedAttribute(k, v)

    if not static_attrs:
        static_attrs = None

    if not dynamic_attrs:
        dynamic_attrs = None

    return static_attrs, dynamic_attrs
def getDecodableAttributes(self, obj, attrs, codec=None):
    """
    Returns a dictionary of attributes for C{obj} that has been filtered,
    based on the supplied C{attrs}. This allows for fine grain control
    over what will finally end up on the object or not.
    @param obj: The reference object.
    @param attrs: The attrs dictionary that has been decoded.
    @param codec: An optional argument that will contain the codec
        instance calling this function.
    @return: A dictionary of attributes that can be applied to C{obj}
    @raise AttributeError: A declared static attribute is missing.
    @since: 0.5
    """
    if not self._compiled:
        self.compile()

    changed = False
    props = set(attrs.keys())

    # every statically declared attribute must be present in the payload
    if self.static_attrs:
        missing_attrs = [p for p in self.static_attrs if p not in props]

        if missing_attrs:
            raise AttributeError('Static attributes %r expected '
                'when decoding %r' % (missing_attrs, self.klass))

    # non-dynamic aliases only accept the precomputed decodable set
    if not self.dynamic:
        if not self.decodable_properties:
            props = set()
        else:
            props.intersection_update(self.decodable_properties)

        changed = True

    if self.readonly_attrs:
        props.difference_update(self.readonly_attrs)
        changed = True

    if self.exclude_attrs:
        props.difference_update(self.exclude_attrs)
        changed = True

    # unwrap proxied values (ArrayCollection/ObjectProxy) in place
    if self.proxy_attrs is not None:
        from pyamf import flex

        for k in self.proxy_attrs:
            try:
                v = attrs[k]
            except KeyError:
                continue

            attrs[k] = flex.unproxy_object(v)

    if not changed:
        return attrs

    # build the filtered result with a plain loop - the original abused a
    # list comprehension (calling a.__setitem__) purely for side effects
    filtered = {}

    for p in props:
        filtered[p] = attrs[p]

    return filtered
def getProxiedAttribute(self, attr, obj):
    """
    Returns the proxied equivalent for C{obj}.
    @param attr: The attribute of the proxy request. Useful for class
        introspection.
    @type attr: C{str}
    @param obj: The object to proxy.
    @return: The proxied object or the original object if it cannot be
        proxied.
    """
    # the default is to just check basic container types
    from pyamf import flex

    obj_type = type(obj)

    if obj_type is list:
        return flex.ArrayCollection(obj)

    if obj_type is dict:
        return flex.ObjectProxy(obj)

    return obj
def applyAttributes(self, obj, attrs, codec=None):
    """
    Applies the collection of attributes C{attrs} to aliased object C{obj}.
    Called when decoding reading aliased objects from an AMF byte stream.
    Override this to provide fine grain control of application of
    attributes to C{obj}.
    @param codec: An optional argument that will contain the en/decoder
        instance calling this function.
    """
    decoded = self.getDecodableAttributes(obj, attrs, codec=codec)

    util.set_attrs(obj, decoded)
def getCustomProperties(self):
    """
    Override this to provide known static properties based on the aliased
    class.  The default implementation is deliberately a no-op hook.
    @since: 0.5
    """
def createInstance(self, codec=None, *args, **kwargs):
    """
    Creates an instance of the klass.
    @param codec: Unused by the default implementation.
    @return: Instance of C{self.klass}.
    """
    factory = self.klass

    return factory(*args, **kwargs)
class TypedObject(dict):
    """
    This class is used when a strongly typed object is decoded but there is no
    registered class to apply it to.
    This object can only be used for 'simple' streams - i.e. not externalized
    data. If encountered, a L{DecodeError} will be raised.
    @ivar alias: The alias of the typed object.
    @type alias: C{unicode}
    @since: 0.4
    """

    def __init__(self, alias):
        dict.__init__(self)

        self.alias = alias

    def __readamf__(self, o):
        raise DecodeError('Unable to decode an externalised stream with '
            'class alias \'%s\'.\n\nThe class alias was found and because '
            'strict mode is False an attempt was made to decode the object '
            'automatically. To decode this stream, a registered class with '
            'the alias and a corresponding __readamf__ method will be '
            'required.' % (self.alias,))

    def __writeamf__(self, o):
        # BUG FIX: the message previously asked for a '__readamf__' method
        # (copy-paste from the decode error) - encoding needs __writeamf__.
        raise EncodeError('Unable to encode an externalised stream with '
            'class alias \'%s\'.\n\nThe class alias was found and because '
            'strict mode is False an attempt was made to encode the object '
            'automatically. To encode this stream, a registered class with '
            'the alias and a corresponding __writeamf__ method will be '
            'required.' % (self.alias,))
class TypedObjectClassAlias(ClassAlias):
    """
    The alias class paired with L{TypedObject}.  The class supplied by the
    caller is ignored - instances are always L{TypedObject}s.
    @since: 0.4
    """

    klass = TypedObject

    def __init__(self, klass, alias, *args, **kwargs):
        # klass attr is ignored
        ClassAlias.__init__(self, self.klass, alias)

    def createInstance(self, codec=None):
        return self.klass(self.alias)

    def checkClass(kls, klass):
        # any class is acceptable here
        pass
class ErrorAlias(ClassAlias):
    """
    Adapts Python exception objects to Adobe Flash Player error objects.
    @since: 0.5
    """

    def getCustomProperties(self):
        # 'args' is an Exception implementation detail - never encode it
        self.exclude_attrs.update(['args'])

    def getEncodableAttributes(self, obj, **kwargs):
        sa, da = ClassAlias.getEncodableAttributes(self, obj, **kwargs)

        # always ship 'message' and 'name' so the Flash side can build an
        # Error object
        da = da or {}
        da['message'] = str(obj)
        da['name'] = obj.__class__.__name__

        return sa, da
class BaseDecoder(object):
    """
    Base AMF decoder.
    @ivar context_class: The context for the decoding.
    @type context_class: An instance of C{BaseDecoder.context_class}
    @ivar type_map: Maps type markers (single bytes) to method names.
    @type type_map: C{dict}
    @ivar stream: The underlying data stream.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @ivar strict: Defines how strict the decoding should be. For the time
        being this relates to typed objects in the stream that do not have a
        registered alias. Introduced in 0.4.
    @type strict: C{bool}
    @ivar timezone_offset: The offset from UTC for any datetime objects being
        decoded. Default to C{None} means no offset.
    @type timezone_offset: L{datetime.timedelta}
    """

    context_class = BaseContext
    type_map = {}

    def __init__(self, stream=None, context=None, strict=False, timezone_offset=None):
        # accept a ready-made stream or anything BufferedByteStream wraps
        if isinstance(stream, util.BufferedByteStream):
            self.stream = stream
        else:
            self.stream = util.BufferedByteStream(stream)

        if context is None:
            self.context = self.context_class()
        else:
            self.context = context

        self.context.exceptions = False
        self.strict = strict
        self.timezone_offset = timezone_offset

    def readElement(self):
        """
        Reads an AMF3 element from the data stream.
        @raise DecodeError: The ActionScript type is unsupported.
        @raise EOStream: No more data left to decode.
        """
        pos = self.stream.tell()

        try:
            t = self.stream.read(1)
        except IOError:
            raise EOStream

        try:
            func = getattr(self, self.type_map[t])
        except KeyError:
            raise DecodeError("Unsupported ActionScript type %r" % (t,))

        try:
            return func()
        except IOError:
            # rewind so a partial read can be retried when more data arrives
            self.stream.seek(pos)

            raise

    def __iter__(self):
        """
        Yields decoded elements until the stream is exhausted.
        """
        try:
            while 1:
                yield self.readElement()
        except EOStream:
            # BUG FIX (PEP 479): raising StopIteration inside a generator
            # becomes a RuntimeError on modern Python - a plain return ends
            # iteration identically on all versions.
            return
class CustomTypeFunc(object):
    """
    Adapter for custom type mappings: converts the value with the mapped
    function, then writes the result through the encoder.
    """

    def __init__(self, encoder, func):
        self.encoder = encoder
        self.func = func

    def __call__(self, data, *args, **kwargs):
        converted = self.func(data, encoder=self.encoder)

        self.encoder.writeElement(converted)
class BaseEncoder(object):
    """
    Base AMF encoder.
    @ivar type_map: A list of types -> functions. The types is a list of
        possible instances or functions to call (that return a C{bool}) to
        determine the correct function to call to encode the data.
    @type type_map: C{list}
    @ivar context_class: Holds the class that will create context objects for
        the implementing C{Encoder}.
    @type context_class: C{type} or C{types.ClassType}
    @ivar stream: The underlying data stream.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @ivar context: The context for the encoding.
    @type context: An instance of C{BaseEncoder.context_class}
    @ivar strict: Whether the encoder should operate in 'strict' mode. Nothing
        is really affected by this for the time being - its just here for
        flexibility.
    @type strict: C{bool}, default is False.
    @ivar timezone_offset: The offset from UTC for any datetime objects being
        encoded. Default to C{None} means no offset.
    @type timezone_offset: L{datetime.timedelta}
    """
    context_class = BaseContext
    type_map = []

    def __init__(self, stream=None, context=None, strict=False, timezone_offset=None):
        # accept a ready-made stream or anything BufferedByteStream wraps
        if isinstance(stream, util.BufferedByteStream):
            self.stream = stream
        else:
            self.stream = util.BufferedByteStream(stream)

        if context is None:
            self.context = self.context_class()
        else:
            self.context = context

        self.context.exceptions = False
        # per-class cache of encoder functions, filled lazily by
        # _writeElementFunc
        self._write_elem_func_cache = {}
        self.strict = strict
        self.timezone_offset = timezone_offset

    def writeFunc(self, obj, **kwargs):
        """
        Not possible to encode functions.
        @raise EncodeError: Unable to encode function/methods.
        """
        raise EncodeError("Unable to encode function/methods")

    def _getWriteElementFunc(self, data):
        """
        Gets a function used to encode the data.
        @type data: C{mixed}
        @param data: Python data.
        @rtype: callable or C{None}.
        @return: The function used to encode data to the stream.
        """
        # user-registered custom types (module-level TYPE_MAP) take
        # precedence over the encoder's own type_map
        for type_, func in TYPE_MAP.iteritems():
            try:
                if isinstance(data, type_):
                    return CustomTypeFunc(self, func)
            except TypeError:
                # type_ was not a class: treat it as a predicate callable
                if callable(type_) and type_(data):
                    return CustomTypeFunc(self, func)

        for tlist, method in self.type_map:
            for t in tlist:
                try:
                    if isinstance(data, t):
                        return getattr(self, method)
                except TypeError:
                    if callable(t) and t(data):
                        return getattr(self, method)

        return None

    def _writeElementFunc(self, data):
        """
        Gets a function used to encode the data.
        @type data: C{mixed}
        @param data: Python data.
        @rtype: callable or C{None}.
        @return: The function used to encode data to the stream.
        """
        try:
            key = data.__class__
        except AttributeError:
            # no class to key the cache on - resolve on every call
            return self._getWriteElementFunc(data)

        try:
            return self._write_elem_func_cache[key]
        except KeyError:
            self._write_elem_func_cache[key] = self._getWriteElementFunc(data)

        return self._write_elem_func_cache[key]

    def writeElement(self, data):
        """
        Writes the data. Overridden in subclass.
        @type data: C{mixed}
        @param data: The data to be encoded to the data stream.
        """
        raise NotImplementedError
def register_class(klass, alias=None):
    """
    Registers a class to be used in the data streaming.
    @return: The registered L{ClassAlias}.
    """
    meta = util.get_class_meta(klass)

    if alias is not None:
        meta['alias'] = alias

    alias_instance = util.get_class_alias(klass)(klass, defer=True, **meta)

    # anonymous aliases are only reachable by class, not by alias name
    if not alias_instance.anonymous:
        CLASS_CACHE[alias_instance.alias] = alias_instance

    CLASS_CACHE[klass] = alias_instance

    return alias_instance
def unregister_class(alias):
    """
    Deletes a class from the cache.
    If C{alias} is a class, the matching alias is found.
    @type alias: C{class} or C{str}
    @param alias: Alias for class to delete.
    @raise UnknownClassAlias: Unknown alias.
    """
    try:
        registered = CLASS_CACHE[alias]
    except KeyError:
        raise UnknownClassAlias('Unknown alias %r' % (alias,))

    # anonymous aliases were never stored under their alias name
    if not registered.anonymous:
        del CLASS_CACHE[registered.alias]

    del CLASS_CACHE[registered.klass]

    return registered
def get_class_alias(klass):
    """
    Finds the alias registered to the class.
    @type klass: C{object} or class object.
    @return: The class alias linked to C{klass}.
    @rtype: L{ClassAlias}
    @raise UnknownClassAlias: Class not found.
    """
    # lookup by alias name, falling back to on-demand loading
    if isinstance(klass, basestring):
        if klass in CLASS_CACHE:
            return CLASS_CACHE[klass]

        return load_class(klass)

    # an instance was supplied - normalise to its class
    if not isinstance(klass, (type, types.ClassType)):
        if isinstance(klass, types.InstanceType):
            klass = klass.__class__
        elif isinstance(klass, types.ObjectType):
            klass = type(klass)

    try:
        return CLASS_CACHE[klass]
    except KeyError:
        raise UnknownClassAlias('Unknown alias for %r' % (klass,))
def register_class_loader(loader):
    """
    Registers a loader that is called to provide the C{Class} for a specific
    alias.
    The L{loader} is provided with one argument, the C{Class} alias. If the
    loader succeeds in finding a suitable class then it should return that
    class, otherwise it should return C{None}.
    @type loader: C{callable}
    @raise TypeError: The C{loader} is not callable.
    @raise ValueError: The C{loader} is already registered.
    """
    if not callable(loader):
        raise TypeError("loader must be callable")

    # duplicate registrations would call the loader twice per alias
    if loader in CLASS_LOADERS:
        raise ValueError("loader has already been registered")

    CLASS_LOADERS.append(loader)
def unregister_class_loader(loader):
    """
    Unregisters a class loader.
    @type loader: C{callable}
    @param loader: The object to be unregistered
    @raise LookupError: The C{loader} was not registered.
    """
    try:
        CLASS_LOADERS.remove(loader)
    except ValueError:
        raise LookupError("loader not found")
def get_module(mod_name):
    """
    Load a module based on C{mod_name}.
    @type mod_name: C{str}
    @param mod_name: The dotted module name, e.g. C{'os.path'}.
    @return: The imported (sub)module object.
    @raise ImportError: Unable to import an empty module.
    """
    # BUG FIX: the original used "mod_name is ''" - identity comparison
    # against a literal is implementation-dependent; compare by value.
    if mod_name == '':
        raise ImportError("Unable to import empty module")

    # __import__ returns the top-level package; walk down to the
    # requested submodule.
    mod = __import__(mod_name)

    for comp in mod_name.split('.')[1:]:
        mod = getattr(mod, comp)

    return mod
def load_class(alias):
    """
    Finds the class registered to the alias.
    The search is done in order:
    1. Checks if the class name has been registered via L{register_class} or
    L{register_package}.
    2. Checks all functions registered via L{register_class_loader}.
    3. Attempts to load the class via standard module loading techniques.
    @type alias: C{str}
    @param alias: The class name.
    @raise UnknownClassAlias: The C{alias} was not found.
    @raise TypeError: Expecting class type or L{ClassAlias} from loader.
    @return: Class registered to the alias.
    """
    alias = str(alias)

    # Try the CLASS_CACHE first
    try:
        return CLASS_CACHE[alias]
    except KeyError:
        pass

    # Check each CLASS_LOADERS in turn
    for loader in CLASS_LOADERS:
        klass = loader(alias)

        if klass is None:
            continue

        if isinstance(klass, (type, types.ClassType)):
            return register_class(klass, alias)
        elif isinstance(klass, ClassAlias):
            CLASS_CACHE[str(alias)] = klass
            CLASS_CACHE[klass.klass] = klass

            # NOTE(review): this branch returns the ClassAlias instance,
            # while the module-loading branch below returns klass.klass -
            # confirm whether the asymmetry is intentional.
            return klass
        else:
            raise TypeError("Expecting class type or ClassAlias from loader")

    # XXX nick: Are there security concerns for loading classes this way?
    mod_class = alias.split('.')

    # str.split always returns a non-empty list, so this guard is
    # effectively always true
    if mod_class:
        module = '.'.join(mod_class[:-1])
        klass = mod_class[-1]

        try:
            module = get_module(module)
        except (ImportError, AttributeError):
            # XXX What to do here?
            pass
        else:
            klass = getattr(module, klass)

            if isinstance(klass, (type, types.ClassType)):
                return register_class(klass, alias)
            elif isinstance(klass, ClassAlias):
                CLASS_CACHE[str(alias)] = klass
                CLASS_CACHE[klass.klass] = klass

                return klass.klass
            else:
                raise TypeError("Expecting class type or ClassAlias from loader")

    # All available methods for finding the class have been exhausted
    raise UnknownClassAlias("Unknown alias for %r" % (alias,))
def decode(*args, **kwargs):
    """
    A generator function to decode a datastream.
    @kwarg stream: AMF data.
    @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    @type encoding: C{int}
    @kwarg encoding: AMF encoding type.
    @type context: L{AMF0 Context<pyamf.amf0.Context>} or
        L{AMF3 Context<pyamf.amf3.Context>}
    @kwarg context: Context.
    @return: Each element in the stream.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    decoder = _get_decoder_class(encoding)(*args, **kwargs)

    # yield elements until the stream signals exhaustion
    while True:
        try:
            yield decoder.readElement()
        except EOStream:
            return
def encode(*args, **kwargs):
    """
    A helper function to encode an element.
    @type args: C{mixed}
    @keyword element: Python data.
    @type encoding: C{int}
    @keyword encoding: AMF encoding type.
    @type context: L{amf0.Context<pyamf.amf0.Context>} or
        L{amf3.Context<pyamf.amf3.Context>}
    @keyword context: Context.
    @rtype: C{StringIO}
    @return: File-like object.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    encoder = _get_encoder_class(encoding)(**kwargs)

    for element in args:
        encoder.writeElement(element)

    # rewind so callers can read the encoded bytes immediately
    stream = encoder.stream
    stream.seek(0)

    return stream
def get_decoder(encoding, *args, **kwargs):
    """
    Returns a subclassed instance of L{pyamf.BaseDecoder}, based on C{encoding}
    """
    decoder_class = _get_decoder_class(encoding)

    return decoder_class(*args, **kwargs)
def _get_decoder_class(encoding):
    """
    Get compatible decoder.
    @type encoding: C{int}
    @param encoding: AMF encoding version.
    @raise ValueError: AMF encoding version is unknown.
    @rtype: L{amf0.Decoder<pyamf.amf0.Decoder>} or
        L{amf3.Decoder<pyamf.amf3.Decoder>}
    @return: AMF0 or AMF3 decoder.
    """
    # imports are deferred until needed to avoid a circular import
    if encoding == AMF0:
        from pyamf import amf0

        return amf0.Decoder

    if encoding == AMF3:
        from pyamf import amf3

        return amf3.Decoder

    raise ValueError("Unknown encoding %s" % (encoding,))
def get_encoder(encoding, *args, **kwargs):
    """
    Returns a subclassed instance of L{pyamf.BaseEncoder}, based on C{encoding}
    """
    encoder_class = _get_encoder_class(encoding)

    return encoder_class(*args, **kwargs)
def _get_encoder_class(encoding):
    """
    Get compatible encoder.
    @type encoding: C{int}
    @param encoding: AMF encoding version.
    @raise ValueError: AMF encoding version is unknown.
    @rtype: L{amf0.Encoder<pyamf.amf0.Encoder>} or
        L{amf3.Encoder<pyamf.amf3.Encoder>}
    @return: AMF0 or AMF3 encoder.
    """
    # imports are deferred until needed to avoid a circular import
    if encoding == AMF0:
        from pyamf import amf0

        return amf0.Encoder

    if encoding == AMF3:
        from pyamf import amf3

        return amf3.Encoder

    raise ValueError("Unknown encoding %s" % (encoding,))
def get_context(encoding, **kwargs):
    """Return a new context instance compatible with C{encoding}."""
    context_class = _get_context_class(encoding)

    return context_class(**kwargs)
def _get_context_class(encoding):
    """
    Gets a compatible context class.
    @type encoding: C{int}
    @param encoding: AMF encoding version.
    @raise ValueError: AMF encoding version is unknown.
    @rtype: L{amf0.Context<pyamf.amf0.Context>} or
        L{amf3.Context<pyamf.amf3.Context>}
    @return: AMF0 or AMF3 context class.
    """
    # imports are deferred until needed to avoid a circular import
    if encoding == AMF0:
        from pyamf import amf0

        return amf0.Context

    if encoding == AMF3:
        from pyamf import amf3

        return amf3.Context

    raise ValueError("Unknown encoding %s" % (encoding,))
def blaze_loader(alias):
    """
    Loader for BlazeDS framework compatibility classes, specifically
    implementing C{ISmallMessage}.
    @see: U{BlazeDS (external)<http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>}
    @since: 0.5
    """
    if alias not in ('DSC', 'DSK'):
        return

    # importing the module registers the aliases in CLASS_CACHE
    import pyamf.flex.messaging

    return CLASS_CACHE[alias]
def flex_loader(alias):
    """
    Loader for L{Flex<pyamf.flex>} framework compatibility classes.
    @raise UnknownClassAlias: Trying to load unknown Flex compatibility class.
    """
    if not alias.startswith('flex.'):
        return

    # importing the matching module registers its aliases in CLASS_CACHE
    prefix_modules = (
        ('flex.messaging.messages', 'pyamf.flex.messaging'),
        ('flex.messaging.io', 'pyamf.flex'),
        ('flex.data.messages', 'pyamf.flex.data'),
    )

    try:
        for prefix, module_name in prefix_modules:
            if alias.startswith(prefix):
                __import__(module_name)

                break

        return CLASS_CACHE[alias]
    except KeyError:
        raise UnknownClassAlias(alias)
def add_type(type_, func=None):
    """
    Adds a custom type to L{TYPE_MAP}. A custom type allows fine grain control
    of what to encode to an AMF data stream.
    @raise TypeError: Unable to add as a custom type (expected a class or callable).
    @raise KeyError: Type already exists.
    """
    def _check_type(type_):
        # reject anything that is neither a class nor a predicate callable
        if not (isinstance(type_, (type, types.ClassType)) or callable(type_)):
            # BUG FIX: the original message was built from mis-quoted string
            # literals ('Unable to add '%r' ...'), which made the %-format
            # itself fail with "not all arguments converted".
            raise TypeError('Unable to add %r as a custom type (expected a '
                'class or callable)' % (type_,))

    # lists are unhashable - normalise to a tuple so it can be a dict key
    if isinstance(type_, list):
        type_ = tuple(type_)

    if type_ in TYPE_MAP:
        raise KeyError('Type %r already exists' % (type_,))

    if isinstance(type_, types.TupleType):
        for x in type_:
            _check_type(x)
    else:
        _check_type(type_)

    TYPE_MAP[type_] = func
def get_type(type_):
    """
    Gets the declaration for the corresponding custom type.
    @raise KeyError: Unknown type.
    """
    # normalise unhashable lists the same way add_type does
    if isinstance(type_, list):
        type_ = tuple(type_)

    for registered, declaration in TYPE_MAP.iteritems():
        if registered == type_:
            return declaration

    raise KeyError("Unknown type %r" % (type_,))
def remove_type(type_):
    """
    Removes the custom type declaration.
    @return: Custom type declaration.
    """
    # BUG FIX: normalise lists to tuples exactly like add_type/get_type do;
    # otherwise the 'del' below fails with an unhashable-key TypeError even
    # though get_type succeeded.
    if isinstance(type_, list):
        type_ = tuple(type_)

    declaration = get_type(type_)

    del TYPE_MAP[type_]

    return declaration
def add_error_class(klass, code):
    """
    Maps an exception class to a string code. Used to map remoting C{onStatus}
    objects to an exception class so that an exception can be built to
    represent that error::
        class AuthenticationError(Exception):
            pass
    An example: C{add_error_class(AuthenticationError, 'Auth.Failed')}
    @type code: C{str}
    @raise TypeError: C{klass} must be a C{class} type.
    @raise TypeError: Error classes must subclass the C{__builtin__.Exception} class.
    @raise ValueError: Code is already registered.
    """
    if not isinstance(code, basestring):
        code = str(code)

    if not isinstance(klass, (type, types.ClassType)):
        raise TypeError("klass must be a class type")

    mro = inspect.getmro(klass)

    if Exception not in mro:
        raise TypeError('Error classes must subclass the __builtin__.Exception class')

    # membership test directly on the dict - materialising keys() first is
    # a pointless O(n) detour
    if code in ERROR_CLASS_MAP:
        raise ValueError('Code %s is already registered' % (code,))

    ERROR_CLASS_MAP[code] = klass
def remove_error_class(klass):
    """
    Removes a class from C{ERROR_CLASS_MAP}.
    @raise ValueError: Code is not registered.
    @raise ValueError: Class is not registered.
    @raise TypeError: Invalid type, expected C{class} or C{string}.
    """
    if isinstance(klass, basestring):
        if klass not in ERROR_CLASS_MAP:
            raise ValueError('Code %s is not registered' % (klass,))
    elif isinstance(klass, (type, types.ClassType)):
        # reverse lookup: find the code this class was registered under
        for code, registered in ERROR_CLASS_MAP.items():
            if registered == klass:
                klass = code

                break
        else:
            raise ValueError('Class %s is not registered' % (klass,))
    else:
        raise TypeError("Invalid type, expected class or string")

    del ERROR_CLASS_MAP[klass]
def register_alias_type(klass, *args):
    """
    This function allows you to map subclasses of L{ClassAlias} to classes
    listed in C{args}.
    When an object is read/written from/to the AMF stream, a paired
    L{ClassAlias} instance is created (or reused), based on the Python class
    of that object. L{ClassAlias} provides important metadata for the class
    and can also control how the equivalent Python object is created, how the
    attributes are applied etc.
    Use this function if you need to do something non-standard.
    @see: L{pyamf.adapters._google_appengine_ext_db.DataStoreClassAlias} for a
        good example.
    @since: 0.4
    @raise RuntimeError: Type is already registered.
    @raise TypeError: C{klass} must be a class.
    @raise ValueError: New aliases must subclass L{pyamf.ClassAlias}.
    @raise ValueError: At least one type must be supplied.
    """
    def check_type_registered(arg):
        # FIXME: Create a reverse index of registered types and do a quicker lookup
        for k, v in ALIAS_TYPES.iteritems():
            for kl in v:
                if arg is kl:
                    raise RuntimeError('%r is already registered under %r' % (arg, k))

    if not isinstance(klass, (type, types.ClassType)):
        raise TypeError('klass must be class')

    if not issubclass(klass, ClassAlias):
        raise ValueError('New aliases must subclass pyamf.ClassAlias')

    if len(args) == 0:
        raise ValueError('At least one type must be supplied')

    # NOTE(review): a single callable argument bypasses the per-arg class
    # check below - presumably to allow predicate functions as matchers;
    # confirm against the alias-resolution code.
    if len(args) == 1 and callable(args[0]):
        c = args[0]

        check_type_registered(c)
    else:
        for arg in args:
            if not isinstance(arg, (type, types.ClassType)):
                raise TypeError('%r must be class' % (arg,))

            check_type_registered(arg)

    ALIAS_TYPES[klass] = args
def register_package(module=None, package=None, separator='.', ignore=[], strict=True):
    """
    This is a helper function that takes the concept of Actionscript packages
    and registers all the classes in the supplied Python module under that
    package. It auto-aliased all classes in C{module} based on C{package}.
    e.g. C{mymodule.py}::
        class User(object):
            pass
        class Permission(object):
            pass
    >>> import mymodule
    >>> pyamf.register_package(mymodule, 'com.example.app')
    Now all instances of C{mymodule.User} will appear in Actionscript under the
    alias 'com.example.app.User'. Same goes for C{mymodule.Permission} - the
    Actionscript alias is 'com.example.app.Permission'. The reverse is also
    true, any objects with the correct aliases will now be instances of the
    relevant Python class.
    This function respects the C{__all__} attribute of the module but you can
    have further control of what not to auto alias by populating the C{ignore}
    argument.
    This function provides the ability to register the module it is being
    called in, an example:
    >>> class Foo:
    ...     pass
    ...
    >>> class Bar:
    ...     pass
    ...
    >>> import pyamf
    >>> pyamf.register_package('foo')
    You can also supply a list of classes to register. An example, taking the
    above classes:
    >>> import pyamf
    >>> pyamf.register_package([Foo, Bar], 'foo')
    @param module: The Python module that will contain all the classes to
        auto alias.
    @type module: C{module} or C{dict}
    @param package: The base package name. e.g. 'com.example.app'. If this
        is C{None} then the value is inferred from module.__name__.
    @type package: C{str} or C{unicode} or C{None}
    @param separator: The separator used to append to C{package} to form the
        complete alias.
    @type separator: C{str}
    @param ignore: To give fine grain control over what gets aliased and what
        doesn't, supply a list of classes that you B{do not} want to be aliased.
    @type ignore: C{iterable}
    @param strict: If this value is C{True} then only classes that originate
        from C{module} will be registered, all others will be left in peace.
    @type strict: C{bool}
    @return: A collection of all the classes that were registered and their
        respective L{ClassAlias} objects.
    @since: 0.5
    """
    # a single string argument means "register the caller's own module
    # under this package name"
    if isinstance(module, basestring):
        if module == '':
            raise TypeError('Cannot get list of classes from %r' % (module,))

        package = module
        module = None

    if module is None:
        import inspect

        # grab the caller's local namespace so classes defined there can
        # be discovered
        prev_frame = inspect.stack()[1][0]
        module = prev_frame.f_locals

    # build uniform has/get accessors over dict / list / module inputs
    if type(module) is dict:
        has = lambda x: x in module.keys()
        get = module.__getitem__
    elif type(module) is list:
        has = lambda x: x in module
        get = module.__getitem__
        # a bare list of classes carries no __module__ context to enforce
        strict = False
    else:
        has = lambda x: hasattr(module, x)
        get = lambda x: getattr(module, x)

    if package is None:
        if has('__name__'):
            package = get('__name__')
        else:
            raise TypeError('Cannot get list of classes from %r' % (module,))

    # candidate names: __all__ wins, then module dict, mapping keys, or
    # list indices
    if has('__all__'):
        keys = get('__all__')
    elif hasattr(module, '__dict__'):
        keys = module.__dict__.keys()
    elif hasattr(module, 'keys'):
        keys = module.keys()
    elif isinstance(module, list):
        keys = range(len(module))
    else:
        raise TypeError('Cannot get list of classes from %r' % (module,))

    def check_attr(attr):
        # keep only classes (old- or new-style) that are not ignored and,
        # when strict, were actually defined in this module
        if not isinstance(attr, (types.ClassType, types.TypeType)):
            return False

        if attr.__name__ in ignore:
            return False

        try:
            if strict and attr.__module__ != get('__name__'):
                return False
        except AttributeError:
            return False

        return True

    # gotta love python
    classes = filter(check_attr, [get(x) for x in keys])

    registered = {}

    for klass in classes:
        alias = '%s%s%s' % (package, separator, klass.__name__)

        registered[klass] = register_class(klass, alias)

    return registered
# init module here

# register the default anonymous object alias and the framework loaders;
# loader order matters - loaders are consulted in registration order
register_class(ASObject)
register_class_loader(flex_loader)
register_class_loader(blaze_loader)

# typed-but-unregistered objects and exceptions get dedicated alias classes
register_alias_type(TypedObjectClassAlias, TypedObject)
register_alias_type(ErrorAlias, Exception)

register_adapters()
| Python |
"""Flickr API.
Public class: FlickrAgent
Public functions: convertDate
photoURL
"""
# Author: Eitan Isaacson <eitan@ascender.com> November 2005.
# Maintained by: Bret Walker
#
# Copyright 2005 Eitan Isaacson <eitan@ascender.com>
# Copyright 2007 Bret Walker
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__version__ = "0.5"
__all__ = ['FlickrAgent', 'convertDate', 'photoURL']
import xml.dom.minidom, md5, os, httplib, time
from xmlrpclib import ServerProxy
_host='www.flickr.com'
_rpc_path='/services/xmlrpc/'
_upload_path='/services/upload/'
_auth_path='/services/auth/'
class FlickrAgent:
'''FlickrAgent():
api_key: (required)
shared_secret: (required)
token: A valid token
user: The token's owner
rcdir: Directory where the tokens are stored automatically by saveToken()
(default: ~/.flickrlib/)
rcfile: File name for token storage (default: API key)
If no token or username is provided FlickrAgent will try to load one from disk.
If this fails the FlickrAgent instance is not authenticated'''
def __init__(self, api_key, ssecret, token=None, user=None, rcdir='~/.flickrlib/', rcfile=None):
    """Set up credentials; falls back to a token stored on disk when the
    caller does not supply both a token and its owning user."""
    self.api_key = api_key
    self.ssecret = ssecret
    self._rcdir = os.path.expanduser(rcdir)
    self._rcfile = rcfile or self.api_key

    # BUG FIX: the original condition 'not token or user' reloaded from
    # disk even when both a token AND a user were explicitly supplied.
    # Use the supplied credentials only when both are present, matching
    # the class docstring.
    if not (token and user):
        self._token, self._user = self.loadToken()
    else:
        self._token, self._user = token, user

    self._serverProxy = ServerProxy('http://' + _host + _rpc_path)
def _createSig(self, kwargs):
    '''Builds the Flickr API signature: md5 of the shared secret followed
    by every argument name and value, concatenated in key order.'''
    # BUG FIX: sort by *key* before concatenating.  The original sorted
    # the already-joined 'keyvalue' strings, which can produce a different
    # ordering (and thus a wrong signature) whenever one key is a prefix
    # of another.
    str_sig = []

    for key in sorted(kwargs.keys()):
        str_sig.append('%s%s' % (key, kwargs[key]))

    return md5.new(self.ssecret + ''.join(str_sig)).hexdigest()
def authLoginURL(self, perms):
    '''Issues a frob and returns a URL for the user to authorize a program
    perms: read, write, delete'''
    base = 'http://' + _host + _auth_path

    kwargs = {
        'api_key': self.api_key,
        'perms': perms,
    }
    # the frob is kept for the later getToken() exchange
    self.frob = kwargs['frob'] = self.flickr.auth.getFrob()['text']

    api_sig = self._createSig(kwargs)

    str_arg = ['%s=%s' % item for item in kwargs.items()]
    str_arg.append('api_sig=%s' % api_sig)

    return base + '?' + '&'.join(str_arg)
def retrieveToken(self):
    '''Retrieves a token after a user authorizes a program via the Flickr website.
    The token is automatically applied to the running instance of FlickrAgent'''
    reply = self.flickr.auth.getToken(frob=self.frob)

    self._user = reply['user'][0]['username']
    self._token = reply['token'][0]['text']
def saveToken(self):
'''Saves the token in the running instance to disk'''
if not self._user or not self._token:
raise StandardError, 'No token or username in this instance'
if not os.path.exists(self._rcdir):
os.makedirs(self._rcdir)
f = file(os.path.join(self._rcdir, self._rcfile), 'w')
f.write('token='+self._token+'\n')
f.write('user='+self._user+'\n')
f.close()
def loadToken(self):
'''Loads a token from disk to the running instance'''
try:
f = file(os.path.join(self._rcdir, self._rcfile))
values = dict([s.strip().split('=') for s in f.readlines()])
f.close()
return values['token'], values['user']
except:
return None,None
def parseData(self, xmlStr):
'''Parses an XML string in to a nested dictionary'''
if xmlStr.strip() is '':
return
dom = xml.dom.minidom.parseString(xmlStr.strip().encode("utf-8))
return self.__parseRecursively(dom.firstChild)
def __parseRecursively(self,element):
flickrDict = {}
flickrDict[u'type'] = element.nodeName
flickrDict[u'text'] = ''
for attrib in element.attributes.values():
flickrDict[attrib.nodeName] = attrib.nodeValue
for node in element.childNodes:
if node.nodeType == xml.dom.Node.TEXT_NODE:
flickrDict[u'text'] += node.nodeValue.strip()
elif node.nodeType == xml.dom.Node.ELEMENT_NODE:
try:
flickrDict[node.nodeName].append(self.__parseRecursively(node))
except KeyError:
flickrDict[node.nodeName] = [self.__parseRecursively(node)]
except AttributeError:
flickrDict[node.nodeName] = [flickrDict[node.nodeName],
self.__parseRecursively(node)]
return flickrDict
def __call__(self,**kwargs):
return FlickrAgent.__dict__['_stop'](self,self.n,self.pid,**kwargs)
def __getattr__(self,name):
if name in ('__str__','__repr__'): return lambda:'instance of %s at %s' % (str(self.__class__),id(self))
if not self.__dict__.has_key('n'):
self.n=[]
self.pid = []
self.n.append(name)
self.pid.append(str(os.getpid()))
return self
def _stop(self,n,pid,**kwargs):
self.n=[]
self.pid=[]
return self._rpc_call(n,pid,**kwargs)
def _rpc_call(self,n,pid,**kwargs):
if '.'.join(n) == 'flickr.photos.upload':
return self.parseData(self._upload(kwargs))
kwargs['api_key'] = self.api_key
if self._token:
kwargs['auth_token'] = self._token
kwargs['api_sig'] = self._createSig(kwargs)
return self.parseData(getattr(self._serverProxy, '.'.join(n))(kwargs))
def _upload(self,kwargs):
if not kwargs.get('filename'):
raise ValueError, 'Must provide a file name'
filename = kwargs.pop('filename')
if kwargs.get('title'):
title = kwargs.pop('title')
else:
title = filename
kwargs['api_key'] = self.api_key
kwargs['auth_token'] = self._token
kwargs['api_sig'] = self._createSig(kwargs)
boundary = '--===Have=a=nice=day=========7d45e178b0434\r\nContent-Disposition: form-data; '
headers = {"Content-Type": "multipart/form-data; boundary=%s" % boundary[2:-34],
"Host": "www.flickr.com"}
body = ''
for key in kwargs.keys():
body += '%sname=%s\r\n\r\n%s\r\n' % (boundary,key,kwargs[key])
body += '%sname="photo"; filename="%s"\r\n' % (boundary, title)
body += 'Content-Type: image/jpeg\r\n\r\n'
f = file(filename, 'rb')
image = f.read()
f.close()
post_data = body.encode("utf_8") + image + ("--%s--" % (boundary[:34])).encode("utf_8")
conn = httplib.HTTPConnection(_host)
conn.request("POST", _upload_path, post_data, headers)
response = conn.getresponse().read()
conn.close()
return response
def convertDate(date_str):
    """Convert a Flickr formatted date ('YYYY-MM-DD HH:MM:SS', local time)
    to epoch seconds.

    The six parsed fields are padded with (weekday, yearday, isdst) =
    (0, 0, 0) and handed to time.mktime, which requires a 9-item tuple.
    The old code passed a bare map() result, which mktime does not accept.
    """
    date_part, time_part = date_str.split(' ')
    fields = [int(v) for v in date_part.split('-') + time_part.split(':')]
    return time.mktime(tuple(fields + [0, 0, 0]))
def photoURL(photo,size=None,format='jpg'):
    '''Build the static.flickr.com URL for a photo.
    photo: dictionary {'server': <server>, 'id': <id>, 'secret': <secret>}
    size: s=square t=thumbnail m=small b=large o=original (omitted -> default size)
    format: jpg (default), png, gif'''
    stem = 'http://static.flickr.com/%(server)s/%(id)s_%(secret)s' % photo
    if size:
        return '%s_%s.%s' % (stem, size, format)
    return '%s.%s' % (stem, format)
| Python |
#!/usr/bin/env python
from distutils.core import setup

# Package metadata for flickrlib, gathered in one mapping and splatted into
# distutils' setup() entry point.
# NOTE(review): url 'montonous.org' may be a typo for 'monotonous.org'
# (see author_email) -- confirm before changing.
_METADATA = dict(
    name='flickrlib',
    version='0.5',
    description='Flickr API',
    author='Eitan Isaacson',
    author_email='eitan@monotonous.org',
    url='http://montonous.org',
    license='LGPL',
    platforms=['any'],
    long_description='A class and some functions for writing Python applications that interface with Flickr.com',
    py_modules=['flickrlib'],
)
setup(**_METADATA)
#!/usr/bin/env python
from distutils.core import setup

# Package metadata for flickrlib (duplicate copy of the sibling setup
# script), splatted into distutils' setup() entry point.
# NOTE(review): url 'montonous.org' may be a typo for 'monotonous.org'
# (see author_email) -- confirm before changing.
_METADATA = dict(
    name='flickrlib',
    version='0.5',
    description='Flickr API',
    author='Eitan Isaacson',
    author_email='eitan@monotonous.org',
    url='http://montonous.org',
    license='LGPL',
    platforms=['any'],
    long_description='A class and some functions for writing Python applications that interface with Flickr.com',
    py_modules=['flickrlib'],
)
setup(**_METADATA)
#!/usr/bin/env python
import flickrlib
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-k", "--api-key", dest="api_key", action="store",
type="string", help="Flickr API key", metavar="KEY")
parser.add_option("-s", "--shared-secret", dest="ssecret", action="store",
type="string", help="Flickr API key's shared secret", metavar="SECRET")
(options, args) = parser.parse_args()
if not options.api_key or not options.ssecret:
print 'Must provide API key and shared secret'
parser.exit()
agent = flickrlib.FlickrAgent(options.api_key, options.ssecret)
url = agent.authLoginURL('delete')
print '''Authorizing is a simple process which takes place in your web browser.
When you're finished, return to this terminal to complete authorization and begin developing with this library'''
print 'Go to: %s' % url
print 'Once done, press Enter.',
sys.stdin.readline()
agent.retrieveToken()
try:
agent.flickr.test.login()
print 'Token works! Saving it...'
agent.saveToken()
print 'Next time you create an instance of FlickrAgent with your API key this token will be loaded from disk'
print '...Don\'t forget to create an aquivalent interface for your app\'s users'
print 'Read more about it at: http://www.flickr.com/services/api/misc.userauth.html'
except:
raise
| Python |
# Version triple: (major, minor, release-tag); an empty tag means a final release.
VERSION = (0, 1, 'pre')

def get_version():
    """Render VERSION as a human-readable string, e.g. '0.1-pre'."""
    numeric = '.'.join(str(part) for part in VERSION[:-1])
    suffix = VERSION[-1]
    return numeric + '-' + suffix if suffix else numeric
# from photo_services import *
import sys
# Abort with a helpful message if Beej's flickrapi package is missing.
try:
    from flickrapi import FlickrAPI
except ImportError:
    sys.exit('Beej\'s Python Flickr API not found: http://beej.us/flickr/flickrapi/')
    # NOTE(review): the following `raise` is unreachable -- sys.exit()
    # raises SystemExit on the line above.
    raise
# class FlickrPhotoService(AbstractPhotoService):
class FlickrPhotoService:
    """Flickr photo service built on Beej's FlickrAPI.

    Authenticates on construction, then exposes set/photo listing and a
    bulk download helper.
    """
    def __init__(self):
        self.service = self._login()
    def _login(self):
        """
        Performs user authentication and returns service object if successful
        """
        # flickr auth information:
        # NOTE(review): shipping an API key/secret in source is a security
        # smell -- anyone with the source can impersonate this application.
        self.fapikey = "f3f460f0c95607acf33fc1296b3e9645" # API key
        self.fapisecret = "cc2ede1bd2305581" # shared "secret"
        # make a new FlickrAPI instance
        self.fapi = FlickrAPI(self.fapikey, self.fapisecret)
        # do the whole whatever-it-takes to get a valid token:
        self.fapitoken = self.fapi.getToken(browser="firefox")
        # BUGFIX: the docstring promises a service object but nothing was
        # returned, leaving self.service permanently None.
        return self.fapi
    def get_sets(self):
        """Return the user's photosets as a list of dicts."""
        rsp = self.fapi.photosets_getList(api_key=self.fapikey, auth_token=self.fapitoken)
        self.fapi.testFailure(rsp)
        sets = [
            {
                'id': s['id'],
                'count': int(s['photos']),
                'title': s.title[0].elementText,
                'description': s.description[0].elementText,
            }
            for s in rsp.photosets[0].photoset]
        return sets
    def get_photos(self, current_set):
        """Return the photos of `current_set` (a dict with an 'id' key),
        each enriched with description, tags and the original-size URL."""
        rspPhotos = self.fapi.photosets_getPhotos(api_key=self.fapikey, photoset_id=current_set['id'])
        self.fapi.testFailure(rspPhotos)
        photos = []
        for p in rspPhotos.photoset[0].photo:
            photo = {
                'id': p['id'],
                'title': p['title'],
            }
            # now to get info about the picture
            rspPhoto = self.fapi.photos_getInfo(api_key=self.fapikey, photo_id=photo['id'])
            self.fapi.testFailure(rspPhoto)
            photo.update(
                description = rspPhoto.photo[0].description[0].elementText,
                tags = [tag['raw'] for tag in rspPhoto.photo[0].tags[0].tag],
            )
            # now to get url to the original image
            rspPhoto = self.fapi.photos_getSizes(api_key=self.fapikey, photo_id=photo['id'])
            self.fapi.testFailure(rspPhoto)
            photo.update(
                url = [size['url'] for size in rspPhoto.sizes[0].size if size['label'] == 'Original'][0],
            )
            photos.append(photo)
        return photos
    def download_photos(self):
        """Return all sets with their photos attached under the 'photos' key."""
        # BUGFIX: this method referred to nonexistent self._get_sets /
        # self._get_photos (AttributeError) and assigned an attribute on a
        # dict (`s.photos = ...`, also an error).
        sets = self.get_sets()
        for s in sets:
            s['photos'] = self.get_photos(s)
        return sets
    def upload_photos(self, sets):
        raise NotImplementedError
if __name__ == "__main__":
fps = FlickrPhotoService()
sets = fps.get_sets()
photos = fps.get_photos(sets[0])
print sets
print photos | Python |
from photo_services import *
import sys
# Abort with a helpful message if Google's GData client is missing.
try:
    import gdata.service
except ImportError:
    sys.exit('GData Python Client Library is not installed: http://code.google.com/p/gdata-python-client/')
    # NOTE(review): unreachable -- sys.exit() raises SystemExit above.
    raise
class PicasaPhotoService(AbstractPhotoService):
def __init__(self, username, password):
self.username = username
self.service = self._login(password)
def _login(self, password):
"""
Performs user authentication and returns service object if successful
"""
gds = gdata.service.GDataService()
gds.email = self.username
gds.password = password
gds.service = 'lh2'
try:
gds.ProgrammaticLogin()
except gdata.service.Error, e:
sys.exit("Can't log on to Picasa: %s" % e)
return gds | Python |
class AbstractPhotoService: #IGNORE:W0232
    """Interface for photo services.

    Concrete backends implement both download_photos() and upload_photos().
    """
    def download_photos(self):
        """Fetch all photos from the service; must be overridden."""
        raise NotImplementedError
    def upload_photos(self, photos):
        """Push the given photos to the service; must be overridden."""
        raise NotImplementedError
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
# Add the library location to the path
import sys
sys.path.insert(0, 'lib')
import os
import httplib2
import sessions
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from apiclient.http import MediaUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
# OAuth 2.0 scopes this app requests: Drive per-file access plus the user's
# basic profile/email (the userinfo id keys the stored credentials).
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')
def SibPath(name):
  """Return the path of a file that lives next to this module.

  Args:
    name: Base name of the sibling file.
  Returns:
    Path joining this module's directory with name.
  """
  here = os.path.dirname(__file__)
  return os.path.join(here, name)
# Load the secret that is used for client side sessions
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session-secret
# NOTE(review): both files are read once at import time; a missing file
# aborts module load with IOError.
SESSION_SECRET = open(SibPath('session.secret')).read()
INDEX_HTML = open(SibPath('index.html')).read()
class Credentials(db.Model):
  """Datastore entity for storing OAuth2.0 credentials.
  The CredentialsProperty is provided by the Google API Python Client, and is
  used by the Storage classes to store OAuth 2.0 credentials in the data store."""
  # OAuth 2.0 credential blob, (de)serialized by oauth2client's property type.
  credentials = CredentialsProperty()
def CreateService(service, version, creds):
  """Build an authorized Google API service from a discovery document.

  Args:
    service: Service name (e.g 'drive', 'oauth2').
    version: Service version (e.g 'v1').
    creds: Credentials used to authorize service.
  Returns:
    Authorized Google API service.
  """
  # Wrap an Http transport with the credentials so every request is signed.
  transport = httplib2.Http()
  creds.authorize(transport)
  return build(service, version, http=transport)
class DriveState(object):
  """Parsed representation of the 'state' payload sent by the Drive UI."""

  def __init__(self, state):
    """Decode the JSON state string.

    An empty/missing value means the user is creating a brand-new file;
    otherwise the payload carries an action and the affected file ids.

    Args:
      state: State query parameter as a string.
    """
    if not state:
      self.action = 'create'
      self.ids = []
    else:
      payload = json.loads(state)
      self.action = payload['action']
      self.ids = [str(item) for item in payload.get('ids', [])]

  @classmethod
  def FromRequest(cls, request):
    """Build a DriveState from the 'state' query parameter of a request.

    Args:
      cls: Type this class method is called against.
      request: HTTP request.
    """
    return DriveState(request.get('state'))
class BaseDriveHandler(webapp.RequestHandler):
  """Base request handler for drive applications.

  Adds Authorization support for Drive.
  """

  def CreateOAuthFlow(self):
    """Create an OAuth2.0 flow controller for this request.

    Returns:
      OAuth2.0 Flow instance suitable for performing OAuth2.0.
    """
    flow = flow_from_clientsecrets('client_secrets.json', scope='')
    # Derive the redirect URI from the incoming URL (query string and last
    # path segment stripped) so debugging against an alternative host needs
    # no manual configuration.
    flow.redirect_uri = self.request.url.split('?', 1)[0].rsplit('/', 1)[0]
    return flow

  def GetCodeCredentials(self):
    """Exchange an OAuth2.0 authorization code for credentials.

    Returns None when the request carries no 'code' parameter or the
    exchange fails.  On success, the userinfo service supplies the user id,
    which is stored in a secure cookie and used as the datastore key for
    the credentials.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients, or None.
    """
    auth_code = self.request.get('code')
    if not auth_code:
      return None
    try:
      creds = self.CreateOAuthFlow().step2_exchange(auth_code)
    except FlowExchangeError:
      return None
    # Ask the userinfo service who this token belongs to.
    userinfo_service = CreateService('oauth2', 'v2', creds)
    userid = userinfo_service.userinfo().get().execute().get('id')
    # Remember the user in the cookie session and persist the credentials
    # in the datastore under that id.
    cookie_session = sessions.LilCookies(self, SESSION_SECRET)
    cookie_session.set_secure_cookie(name='userid', value=userid)
    StorageByKeyName(Credentials, userid, 'credentials').put(creds)
    return creds

  def GetSessionCredentials(self):
    """Load stored OAuth2.0 credentials for the cookie session's user.

    Returns:
      OAuth2.0 credentials, or None when the session has no user or the
      stored credentials are invalid.
    """
    cookie_session = sessions.LilCookies(self, SESSION_SECRET)
    userid = cookie_session.get_secure_cookie(name='userid')
    if not userid:
      return None
    creds = StorageByKeyName(Credentials, userid, 'credentials').get()
    if creds and creds.invalid:
      return None
    return creds

  def RedirectAuth(self):
    """Send the user to the OAuth2.0 authorization approval page."""
    flow = self.CreateOAuthFlow()
    # This redirect does not originate from the Drive UI, so the scopes
    # configured in the API Console are not applied automatically.
    flow.scope = ALL_SCOPES
    self.redirect(flow.step1_get_authorize_url(flow.redirect_uri))

  def RespondJSON(self, data):
    """Serialize data as JSON and write it to the response.

    Args:
      data: The data that will be converted to JSON to return.
    """
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(data))

  def CreateAuthorizedService(self, service, version):
    """Create a service authorized from the session, or start the auth flow.

    Args:
      service: Service name (e.g 'drive', 'oauth2').
      version: Service version (e.g 'v1').
    Returns:
      Authorized service, or None after redirecting to authorization.
    """
    creds = self.GetSessionCredentials()
    if not creds:
      # Without session credentials the user must (re)authorize first.
      self.RedirectAuth()
      return
    return CreateService(service, version, creds)

  def CreateDrive(self):
    """Create a drive client instance."""
    return self.CreateAuthorizedService('drive', 'v2')

  def CreateUserInfo(self):
    """Create a user info client instance."""
    return self.CreateAuthorizedService('oauth2', 'v2')
class MainPage(BaseDriveHandler):
  """Web handler for the main page.

  Handles requests and returns the user interface for Open With and Create
  cases. Responsible for parsing the state provided from the Drive UI and acting
  appropriately.
  """
  def get(self):
    """Handle GET for Create New and Open With.

    This creates an authorized client, and checks whether a resource id has
    been passed or not. If a resource ID has been passed, this is the Open
    With use-case, otherwise it is the Create New use-case.
    """
    # Parse the state (action and file ids) sent by the Drive UI.
    drive_state = DriveState.FromRequest(self.request)
    if drive_state.action == 'open' and len(drive_state.ids) > 0:
      code = self.request.get('code')
      if code:
        code = '?code=%s' % code
      self.redirect('/#edit/%s%s' % (drive_state.ids[0], code))
      return
    # Exchange the OAuth 2.0 authorization code in the URL for credentials;
    # if none is present, send the user to the authorization page.
    creds = self.GetCodeCredentials()
    if not creds:
      return self.RedirectAuth()
    # BUGFIX: a dead `client_id = self.CreateOAuthFlow()...` assignment was
    # removed here -- the value was never used and building the flow
    # needlessly re-read client_secrets.json on every request.
    self.RenderTemplate()

  def RenderTemplate(self):
    """Write the pre-loaded index page as the HTML response."""
    self.response.headers['Content-Type'] = 'text/html'
    self.response.out.write(INDEX_HTML)
class ServiceHandler(BaseDriveHandler):
  """Web handler for the service to read and write to Drive."""

  def post(self):
    """Create a new Drive file from a JSON request body.

    The body supplies title, description, mimeType and optional content;
    the new file's id is returned as JSON.  The access token for this
    action is retrieved from the data store.
    """
    service = self.CreateDrive()
    if service is None:
      return
    data = self.RequestJSON()
    metadata = {
        'title': data['title'],
        'description': data['description'],
        'mimeType': data['mimeType'],
    }
    try:
      # Insert metadata and upload the body in one request; the body is
      # carried by a MediaInMemoryUpload (defined later in this module).
      created = service.files().insert(
          body=metadata,
          media_body=MediaInMemoryUpload(
              data.get('content', ''),
              data['mimeType'],
              resumable=True)
      ).execute()
      self.RespondJSON(created['id'])
    except AccessTokenRefreshError:
      # The access token expired or was revoked and could not be
      # refreshed; the user must re-authorize.
      self.RedirectAuth()

  def get(self):
    """Return a file's metadata and content as JSON.

    Expects a file_id query parameter.  DrEdit handles text files, so the
    content is embedded directly in the JSON response; binary data would
    need something like Base64 encoding instead.
    """
    service = self.CreateDrive()
    if service is None:
      return
    try:
      file_id = self.request.get('file_id')
      if file_id:
        f = service.files().get(fileId=file_id).execute()
        downloadUrl = f.get('downloadUrl')
        if downloadUrl:
          # Fetch the content over the service's authorized transport and
          # attach it; without a downloadUrl the content is empty.
          resp, f['content'] = service._http.request(downloadUrl)
        else:
          f['content'] = ''
      else:
        f = None
      self.RespondJSON(f)
    except AccessTokenRefreshError:
      # Refresh failed (e.g. revoked refresh token); re-authorize.
      self.RedirectAuth()

  def put(self):
    """Update an existing Drive file from a JSON request body.

    The access token for this action is retrieved from the data store.
    The updated file's id is returned as JSON.
    """
    service = self.CreateDrive()
    if service is None:
      return
    data = self.RequestJSON()
    try:
      content = data.get('content')
      if 'content' in data:
        data.pop('content')
      if content is not None:
        # Update metadata and body together via MediaInMemoryUpload.
        updated = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data,
            media_body=MediaInMemoryUpload(
                content, data['mimeType'], resumable=True)
        ).execute()
      else:
        # Metadata-only update; a patch request would be preferable but is
        # not yet supported on Google App Engine, see
        # http://code.google.com/p/googleappengine/issues/detail?id=6316.
        updated = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data).execute()
      self.RespondJSON(updated['id'])
    except AccessTokenRefreshError:
      # Token could not be refreshed; re-authorize.
      self.RedirectAuth()

  def RequestJSON(self):
    """Deserialize and return the request body as JSON, or None if empty."""
    if self.request.body:
      return json.loads(self.request.body)
class UserHandler(BaseDriveHandler):
  """Web handler returning the authorized user's userinfo record."""

  def get(self):
    """Fetch the userinfo record and return it as JSON."""
    userinfo = self.CreateUserInfo()
    if userinfo is None:
      # CreateUserInfo already redirected to authorization.
      return
    try:
      self.RespondJSON(userinfo.userinfo().get().execute())
    except AccessTokenRefreshError:
      # Token refresh failed (e.g. the refresh token was revoked);
      # send the user back through authorization.
      self.RedirectAuth()
class AboutHandler(BaseDriveHandler):
  """Web handler returning the Drive 'about' resource for the user."""

  def get(self):
    """Fetch the Drive about record and return it as JSON."""
    drive = self.CreateDrive()
    if drive is None:
      # CreateDrive already redirected to authorization.
      return
    try:
      self.RespondJSON(drive.about().get().execute())
    except AccessTokenRefreshError:
      # Token refresh failed (e.g. the refresh token was revoked);
      # send the user back through authorization.
      self.RedirectAuth()
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload backed by an in-memory byte string.

  Pass an instance as the media_body parameter of an API method, e.g. for
  a service that accepts plain-text bodies.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Return the chunk size (bytes) used for resumable uploads."""
    return self._chunksize

  def mimetype(self):
    """Return the mime type of the body."""
    return self._mimetype

  def size(self):
    """Return the total size of the body in bytes."""
    return len(self._body)

  def resumable(self):
    """Return True when this upload should be performed resumably."""
    return self._resumable

  def getbytes(self, begin, length):
    """Return up to `length` bytes of the body starting at `begin`.

    The result may be shorter than `length` when the end of the body is
    reached first.
    """
    end = begin + length
    return self._body[begin:end]
# WSGI application mapping URL routes to their handlers, suitable for
# running on App Engine.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/svc', ServiceHandler),
     ('/about', AboutHandler),
     ('/user', UserHandler)],
    # XXX Set to False in production.
    debug=True
)

def main():
  """Main entry point for executing a request with this handler."""
  run_wsgi_app(application)

if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
import os
import httplib2
import sessions
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build_from_document
from apiclient.http import MediaUpload
from oauth2client import client
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
# Root endpoint used when instantiating services from discovery documents.
APIS_BASE = 'https://www.googleapis.com'
# OAuth 2.0 scopes: Drive per-file access plus the user's basic
# profile/email (the userinfo id keys the stored credentials).
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')
# Names of the query parameters the Drive UI passes to this app.
CODE_PARAMETER = 'code'
STATE_PARAMETER = 'state'
# NOTE(review): all three files are read once at import time; a missing
# file aborts module load with IOError.
SESSION_SECRET = open('session.secret').read()
DRIVE_DISCOVERY_DOC = open('drive.json').read()
USERS_DISCOVERY_DOC = open('users.json').read()
class Credentials(db.Model):
  """Datastore entity for storing OAuth2.0 credentials."""
  # OAuth 2.0 credential blob, (de)serialized by oauth2client's property type.
  credentials = CredentialsProperty()
def CreateOAuthFlow(request):
  """Create an OAuth2.0 flow controller for the given request.

  Args:
    request: HTTP request to create OAuth2.0 flow for
  Returns:
    OAuth2.0 Flow instance suitable for performing OAuth2.0.
  """
  flow = client.flow_from_clientsecrets('client-debug.json', scope='')
  # Redirect back to the request URL itself, minus any query string and
  # trailing slashes.
  base_url = request.url.split('?', 1)[0]
  flow.redirect_uri = base_url.rstrip('/')
  return flow
def GetCodeCredentials(request):
  """Exchange an OAuth2.0 authorization code for credentials.

  Args:
    request: HTTP request used for extracting an authorization code.
  Returns:
    OAuth2.0 credentials, or None when no code parameter is present.
  """
  auth_code = request.get(CODE_PARAMETER)
  if not auth_code:
    return
  # Exchange the code, then resolve the owning user via the userinfo API.
  creds = CreateOAuthFlow(request).step2_exchange(auth_code)
  users_service = CreateService(USERS_DISCOVERY_DOC, creds)
  userid = users_service.userinfo().get().execute().get('id')
  # Remember the user in the session and persist the credentials under
  # that id in the datastore.
  request.session.set_secure_cookie(name='userid', value=userid)
  StorageByKeyName(Credentials, userid, 'credentials').put(creds)
  return creds
def GetSessionCredentials(request):
  """Load stored OAuth2.0 credentials for the session's user.

  Args:
    request: HTTP request to use session from.
  Returns:
    Valid OAuth2.0 credentials, or None.
  """
  userid = request.session.get_secure_cookie(name='userid')
  if not userid:
    # No user in the session, hence nothing stored to look up.
    return None
  creds = StorageByKeyName(Credentials, userid, 'credentials').get()
  if not creds or creds.invalid:
    return None
  return creds
def CreateService(discovery_doc, creds):
  """Build an authorized Google API service from a discovery document.

  Args:
    discovery_doc: Discovery doc used to configure service.
    creds: Credentials used to authorize service.
  Returns:
    Authorized Google API service.
  """
  # Wrap an Http transport with the credentials so every request is signed.
  transport = httplib2.Http()
  creds.authorize(transport)
  return build_from_document(discovery_doc, APIS_BASE, http=transport)
def RedirectAuth(handler):
  """Send a handler's user to the OAuth2.0 authorization approval page.

  Args:
    handler: webapp.RequestHandler to redirect.
  """
  flow = CreateOAuthFlow(handler.request)
  # Scopes must be set manually; this redirect does not come from the
  # Drive UI, which would otherwise supply them.
  flow.scope = ALL_SCOPES
  handler.redirect(flow.step1_get_authorize_url(flow.redirect_uri))
def CreateDrive(handler):
  """Create a fully authorized drive service for this handler.

  Args:
    handler: RequestHandler from which drive service is generated.
  Returns:
    Authorized drive service, or None after redirecting to authorization.
  """
  request = handler.request
  # Attach a cookie-backed session; credentials come either from a fresh
  # OAuth code exchange or from the stored session user.
  request.session = sessions.LilCookies(handler, SESSION_SECRET)
  creds = GetCodeCredentials(request) or GetSessionCredentials(request)
  if not creds:
    RedirectAuth(handler)
    return
  return CreateService(DRIVE_DISCOVERY_DOC, creds)
def ServiceEnabled(view):
  """Decorator to inject an authorized service into an HTTP handler.

  Args:
    view: HTTP request handler method.
  Returns:
    Decorated handler which accepts the service as a parameter.
  """
  def ServiceDecoratedView(handler, view=view):
    # Authorize (or redirect) first, then let the view build the HTML body.
    service = CreateDrive(handler)
    body = view(handler, service)
    handler.response.headers['Content-Type'] = 'text/html'
    handler.response.out.write(body)
  return ServiceDecoratedView
def ServiceEnabledJson(view):
  """Decorator to inject an authorized service into a JSON HTTP handler.

  Args:
    view: HTTP request handler method taking (handler, service, data).
  Returns:
    Decorated handler which accepts the service and parsed JSON body.
  """
  def ServiceDecoratedView(handler, view=view):
    service = CreateDrive(handler)
    if service is None:
      # CreateDrive already redirected to the authorization page; calling
      # the view with a missing service would raise an AttributeError.
      return
    if handler.request.body:
      data = json.loads(handler.request.body)
    else:
      data = None
    response_data = json.dumps(view(handler, service, data))
    handler.response.headers['Content-Type'] = 'application/json'
    handler.response.out.write(response_data)
  return ServiceDecoratedView
class DriveState(object):
  """Store state provided by Drive ('open with' / 'create new' actions)."""

  def __init__(self, state):
    """Parse *state* and initialize self.action and self.ids."""
    self.ParseState(state)

  @classmethod
  def FromRequest(cls, request):
    """Create a Drive State instance from an HTTP request.

    Args:
      cls: Type this class method is called against.
      request: HTTP request.
    """
    # Construct via cls (not DriveState) so subclasses get instances of
    # their own type.
    return cls(request.get(STATE_PARAMETER))

  def ParseState(self, state):
    """Parse a state parameter and set internal values.

    Args:
      state: State parameter to parse; a JSON object or a plain id.
    """
    if state.startswith('{'):
      self.ParseJsonState(state)
    else:
      self.ParsePlainState(state)

  def ParseJsonState(self, state):
    """Parse a state parameter that is JSON.

    Args:
      state: State parameter to parse.
    """
    state_data = json.loads(state)
    self.action = state_data['action']
    # List comprehension keeps ids a real list of strings on both Python 2
    # and Python 3 (map() returns an iterator on Python 3).
    self.ids = [str(file_id) for file_id in state_data.get('ids', [])]

  def ParsePlainState(self, state):
    """Parse a state parameter that is a plain resource id or missing.

    Args:
      state: State parameter to parse; empty means 'create'.
    """
    if state:
      self.action = 'open'
      self.ids = [state]
    else:
      self.action = 'create'
      self.ids = []
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes already held in memory.

  Construct an instance and pass it as the media_body parameter of an API
  method call.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new MediaInMemoryUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file, defaulting to
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means
        upload in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Return the chunk size in bytes for resumable uploads."""
    return self._chunksize

  def mimetype(self):
    """Return the mime type of the body."""
    return self._mimetype

  def size(self):
    """Return the size of the body in bytes."""
    return len(self._body)

  def resumable(self):
    """Return True when this is a resumable upload."""
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from the beginning of the body.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes; may be shorter than length if the end of the
      body is reached first.
    """
    return self._body[begin:begin + length]
def RenderTemplate(name, **context):
  """Render a named template in a context.

  Args:
    name: Template name.
    context: Keyword arguments exposed as template variables.
  Returns:
    The rendered template text.
  """
  rendered = template.render(name, context)
  return rendered
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Base exception for OAuth failures."""
    def __init__(self, message='OAuth error occurred.'):
        self._message = message
    @property
    def message(self):
        """Message accessor kept as a property to sidestep the
        BaseException.message deprecation warnings in Python 2.6."""
        return self._message
    def __str__(self):
        return self.message
class MissingSignature(Error):
    """Raised when a request lacks the oauth_signature parameter."""
    pass
def build_authenticate_header(realm=''):
    """Build the optional WWW-Authenticate header for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    # Sign a GET request for *url* with HMAC-SHA1, then serialize the
    # sorted request parameters as comma-joined key="escaped value" pairs.
    request = Request.from_consumer_and_token(consumer, token,
        "GET", url)
    signing_method = SignatureMethod_HMAC_SHA1()
    request.sign_request(signing_method, consumer, token)
    params = []
    for k, v in sorted(request.iteritems()):
        if v is not None:
            params.append('%s="%s"' % (k, escape(v)))
    # XOAUTH challenge response format: "<method> <url> <params>".
    return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
    """Convert *s* to unicode (Python 2 semantics).

    Unicode objects are returned unchanged; str objects are decoded as
    UTF-8. Raises TypeError for non-string input or for str input that is
    not valid UTF-8.
    """
    if not isinstance(s, unicode):
        if not isinstance(s, str):
            raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError, le:
            # Re-raise as TypeError with the decode failure details attached.
            raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
    return s
def to_utf8(s):
    """Encode *s* as UTF-8 bytes, validating it via to_unicode() first."""
    decoded = to_unicode(s)
    return decoded.encode('utf-8')
def to_unicode_if_string(s):
    """Convert *s* to unicode when it is a string; pass through otherwise."""
    if isinstance(s, basestring):
        return to_unicode(s)
    else:
        return s
def to_utf8_if_string(s):
    """Encode *s* as UTF-8 when it is a string; pass through otherwise."""
    if isinstance(s, basestring):
        return to_utf8(s)
    else:
        return s
def to_unicode_optional_iterator(x):
    """
    Convert *x* to unicode: a string directly, an iterable element-wise
    (returned as a list). Non-iterable, non-string values pass through.
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
    """
    if isinstance(x, basestring):
        return to_unicode(x)
    try:
        l = list(x)
    except TypeError, e:
        # Only "not iterable" failures mean pass-through; anything else
        # would be a genuine bug, hence the assert.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
    """
    Encode *x* as UTF-8: a string directly, an iterable element-wise
    (returned as a list). Non-iterable, non-string values pass through.
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
    """
    # NOTE(review): elements go through to_utf8_if_string, so non-string
    # elements pass through unchanged -- unlike to_unicode_optional_iterator,
    # which converts every element. Presumably intentional; confirm.
    if isinstance(x, basestring):
        return to_utf8(x)
    try:
        l = list(x)
    except TypeError, e:
        # Only "not iterable" failures mean pass-through; anything else
        # would be a genuine bug, hence the assert.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_utf8_if_string(e) for e in l ]
def escape(s):
    """Percent-encode *s* (as UTF-8) for OAuth, escaping '/' but not '~'."""
    encoded = s.encode('utf-8')
    return urllib.quote(encoded, safe='~')
def generate_timestamp():
    """Return the current time as whole seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return *length* pseudorandom decimal digits as a string."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom verifier of *length* decimal digits."""
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class Consumer(object):
    """A consumer of OAuth-protected services.

    The consumer is the "third-party" client that accesses protected
    resources on behalf of an end user. The service provider issues it a
    *key*, sent with every request to identify the consumer, and a
    *secret*, used only when signing requests to prove the request really
    comes from that registered consumer. With these credentials the
    consumer can ask the provider for a request token and start the OAuth
    authorization dance.
    """
    key = None
    secret = None
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
        # Both credentials are mandatory; fail fast on missing ones.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")
    def __str__(self):
        return urllib.urlencode({
            'oauth_consumer_key': self.key,
            'oauth_consumer_secret': self.secret,
        })
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.
    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.
    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """
    key = None
    secret = None
    callback = None
    # 'true' once a callback has been set (OAuth 1.0a), else None.
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        """Store key and secret; both are mandatory."""
        self.key = key
        self.secret = secret
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")
    def set_callback(self, callback):
        """Set the callback URL and mark it as confirmed."""
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        """Set the verifier; generates a pseudorandom one when omitted."""
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()
    def get_callback_url(self):
        """Return the callback URL, with oauth_verifier appended to the
        query string when both callback and verifier are set."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback
    def to_string(self):
        """Returns this token as a plain string, suitable for storage.
        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)
    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`. Raises ValueError when the string is empty or lacks
        oauth_token / oauth_token_secret."""
        if not len(s):
            raise ValueError("Invalid parameter string.")
        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")
        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")
        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                "OAuth request.")
        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token
    def __str__(self):
        """Same serialized form as to_string(); includes the secret."""
        return self.to_string()
def setter(attr):
    """Property factory: use *attr* as the setter of a property whose
    getter and deleter operate on self.__dict__ under attr's name."""
    name = attr.__name__
    def _get(self):
        if name not in self.__dict__:
            raise AttributeError(name)
        return self.__dict__[name]
    def _del(self):
        del self.__dict__[name]
    return property(_get, attr, _del)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.
    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.
    """
    version = OAUTH_VERSION
    def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
                 body='', is_form_encoded=False):
        """Build a request; keys/values of *parameters* become dict items
        after unicode coercion."""
        if url is not None:
            self.url = to_unicode(url)
        self.method = method
        if parameters is not None:
            for k, v in parameters.iteritems():
                k = to_unicode(k)
                v = to_unicode_optional_iterator(v)
                self[k] = v
        self.body = body
        self.is_form_encoded = is_form_encoded
    @setter
    def url(self, value):
        """Store the URL and derive self.normalized_url (no default port,
        no params/query/fragment) for use in the signature base string."""
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None
    @setter
    def method(self, value):
        """Store the HTTP method upper-cased."""
        self.__dict__['method'] = value.upper()
    def _get_timestamp_nonce(self):
        # Raises KeyError when either parameter is absent.
        return self['oauth_timestamp'], self['oauth_nonce']
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                    if not k.startswith('oauth_')])
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        oauth_params = ((k, v) for k, v in self.items()
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)
        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        d = {}
        for k, v in self.iteritems():
            d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(d, True).replace('+', '%20')
    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urlparse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        # Merge the request's parameters into the URL's existing query.
        for k, v in self.items():
            query.setdefault(k, []).append(v)
        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]
        url = (scheme, netloc, path, params,
               urllib.urlencode(query, True), fragment)
        return urlparse.urlunparse(url)
    def get_parameter(self, parameter):
        """Return the named parameter; raise Error when missing or None."""
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)
        return ret
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, basestring):
                items.append((to_utf8_if_string(key), to_utf8(value)))
            else:
                try:
                    value = list(value)
                except TypeError, e:
                    assert 'is not iterable' in str(e)
                    items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
                else:
                    items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
        items.extend(url_items)
        items.sort()
        encoded_str = urllib.urlencode(items)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if not self.is_form_encoded:
            # according to
            # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
            # section 4.1.1 "OAuth Consumers MUST NOT include an
            # oauth_body_hash parameter on requests with form-encoded
            # request bodies."
            self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key
        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key
        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)
    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))
    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))
    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return cls(http_method, http_url, parameters)
        return None
    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None,
            body='', is_form_encoded=False):
        """Build a request pre-populated with the standard OAuth parameters
        (consumer key, timestamp, nonce, version, and token if given)."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }
        # Caller-supplied parameters win over the generated defaults.
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier
        return Request(http_method, http_url, parameters, body=body,
                       is_form_encoded=is_form_encoded)
    @classmethod
    def from_token_and_callback(cls, token, callback=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a request carrying the token key and optional callback."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return cls(http_method, http_url, parameters)
    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
        # parse_qs yields lists; keep only the first value for each key.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    An httplib2.Http subclass that signs every outgoing request with the
    configured consumer/token using HMAC-SHA1 by default.
    """
    def __init__(self, consumer, token=None, cache=None, timeout=None,
        proxy_info=None):
        """Validate credential types and configure the httplib2 base."""
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")
        self.consumer = consumer
        self.token = token
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
    def set_signature_method(self, method):
        """Replace the signing strategy; must be a SignatureMethod."""
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")
        self.method = method
    def request(self, uri, method="GET", body='', headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        """Sign the request and delegate to httplib2.Http.request.

        The signed OAuth parameters travel in the form body (form-encoded
        POST), the query string (GET), or an Authorization header
        (everything else).
        """
        DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
        if not isinstance(headers, dict):
            headers = {}
        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_POST_CONTENT_TYPE)
        is_form_encoded = \
            headers.get('Content-Type') == 'application/x-www-form-urlencoded'
        if is_form_encoded and body:
            # Form fields participate in the signature.
            parameters = parse_qs(body)
        else:
            parameters = None
        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters, body=body, is_form_encoded=is_form_encoded)
        req.sign_request(self.method, self.consumer, self.token)
        # Derive the realm ("scheme://host") from the request URI.
        schema, rest = urllib.splittype(uri)
        if rest.startswith('//'):
            hierpart = '//'
        else:
            hierpart = ''
        host, rest = urllib.splithost(rest)
        realm = schema + ':' + hierpart + host
        if is_form_encoded:
            body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header(realm=realm))
        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.
    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """
    timestamp_threshold = 300 # In seconds, five minutes.
    version = OAUTH_VERSION
    signature_methods = None
    def __init__(self, signature_methods=None):
        """Start with the given {name: SignatureMethod} map, or empty."""
        self.signature_methods = signature_methods or {}
    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its name; returns the map."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods
    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters."""
        self._check_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters
    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _check_version(self, request):
        """Verify the correct version of the request for this server."""
        version = self._get_version(request)
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
    def _get_version(self, request):
        """Return the version of the request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # Missing oauth_version defaults to the library version.
            version = OAUTH_VERSION
        return version
    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_verifier(self, request):
        """Return oauth_verifier; raises Error when absent."""
        return request.get_parameter('oauth_verifier')
    def _check_signature(self, request, consumer, token):
        """Validate freshness (timestamp) and the oauth_signature value."""
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')
        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)
    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                    self.timestamp_threshold))
class SignatureMethod(object):
    """Abstract strategy for signing OAuth requests.

    Consumers and providers negotiate a signing scheme; subclasses provide
    one by implementing signing_base() and sign().
    """
    def signing_base(self, request, consumer, token):
        """Return a 2-tuple (key, message) for the signature.

        The message part may be surfaced in error output to help clients
        debug signing mismatches.
        """
        raise NotImplementedError
    def sign(self, request, consumer, token):
        """Return the signature for *request* under *consumer*/*token*.

        Implementations should derive the message from signing_base() so
        that debugging output stays meaningful.
        """
        raise NotImplementedError
    def check(self, request, consumer, token, signature):
        """Return True when *signature* matches what sign() produces."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 request signing (OAuth Core 1.0, section 9.2)."""
    name = 'HMAC-SHA1'
    def signing_base(self, request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        if getattr(request, 'normalized_url', None) is None:
            raise ValueError("Base URL for request is not set.")
        # Base string: method & normalized URL & normalized parameters.
        raw = '&'.join((
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        ))
        # Key: consumer secret & token secret (token part may be empty).
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return key, raw
    def sign(self, request, consumer, token):
        """Return the base64-encoded HMAC-SHA1 digest of the base string."""
        key, raw = self.signing_base(request, consumer, token)
        hashed = hmac.new(key, raw, sha)
        # b2a_base64 appends a newline; strip it.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """PLAINTEXT signing: the secrets themselves are the signature."""
    name = 'PLAINTEXT'
    def signing_base(self, request, consumer, token):
        """Return (key, raw); both are 'consumer_secret&[token_secret]'."""
        base = '%s&' % escape(consumer.secret)
        if token:
            base = base + escape(token.secret)
        return base, base
    def sign(self, request, consumer, token):
        """The PLAINTEXT signature is just the signing base string."""
        key, raw = self.signing_base(request, consumer, token)
        return raw
| Python |
# This is the version of this source code.
# Version string format: "<manual version>.<automated build counter>",
# e.g. "1.5.211".
manual_verstr = "1.5"
auto_build_num = "211"
verstr = manual_verstr + "." + auto_build_num
try:
    # Prefer pyutil's rich Version object when available.
    from pyutil.version_class import Version as pyutil_Version
    __version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
    # Maybe there is no pyutil installed.
    from distutils.version import LooseVersion as distutils_Version
    __version__ = distutils_Version(verstr)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""
    def authenticate(self, url, consumer, token):
        """Authenticate with the XOAUTH mechanism.

        Raises ValueError when consumer/token are not the oauth2 types.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        # The challenge is ignored; the XOAUTH string is the response.
        xoauth_response = lambda challenge: oauth2.build_xoauth_string(
            url, consumer, token)
        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth_response)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""
    def authenticate(self, url, consumer, token):
        """Authenticate with the XOAUTH mechanism via AUTH.

        Raises ValueError when consumer/token are not the oauth2 types.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        xoauth_string = oauth2.build_xoauth_string(url, consumer, token)
        self.docmd('AUTH', 'XOAUTH %s' % base64.b64encode(xoauth_string))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import socket
import sys
import webbrowser
from client import FlowExchangeError
from client import OOB_CALLBACK_URN
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS
# Command-line flags controlling how the OAuth 2.0 dance receives its
# redirect: via a temporary local web server on one of auth_host_port
# (preferred), or by falling back to manual out-of-band copy/paste.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.
  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Filled in by ClientRedirectHandler.do_GET, which assigns a fresh dict on
  # the server instance. Note this default is a mutable class attribute.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 2.0 redirects back to localhost.

  Stores the query parameters of the single redirect request on the owning
  server (as query_params) and responds with a static completion page.
  """

  def do_GET(self):
    """Handle a GET request.

    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' is the query string; with no '?', the
    # whole path is parsed (yielding no parameters).
    raw_query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(raw_query))
    for fragment in ("<html><head><title>Authentication Status</title></head>",
                     "<body><p>The authentication flow has completed.</p>",
                     "</body></html>"):
      self.wfile.write(fragment)

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage, http=None):
"""Core code for a command-line application.
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
if FLAGS.auth_local_webserver:
success = False
port_number = 0
for port in FLAGS.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
FLAGS.auth_local_webserver = success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if FLAGS.auth_local_webserver:
oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
else:
oauth_callback = OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url(oauth_callback)
if FLAGS.auth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if FLAGS.auth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
from locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
# All errors raised by this module derive from this base class so callers
# can catch them with a single except clause.
class Error(Exception):
  """Base error for this module."""
  pass
class NewerCredentialStoreError(Error):
  """The credential store is a newer version than supported."""
  pass
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.

  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or list of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Normalize the path so distinct spellings of the same file share one
  # _MultiStore instance.
  path = os.path.expanduser(filename)
  path = os.path.realpath(path)
  with _multistores_lock:
    multistore = _multistores.setdefault(
        path, _MultiStore(path, warn_on_readonly))
  if type(scope) is list:
    scope = ' '.join(scope)
  return multistore._get_storage(client_id, user_agent, scope)
class _MultiStore(object):
  """A file backed store for multiple credentials."""

  def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.
    This will create the file if necessary.
    """
    # LockedFile opens read/write ('r+b') when possible, falling back to
    # read-only ('rb') — see _lock() for how the fallback is detected.
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly
    self._create_file_if_needed()
    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # (client_id, user_agent, scope) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None

  class _Storage(BaseStorage):
    """A Storage object that knows how to read/write a single credential."""

    def __init__(self, multistore, client_id, user_agent, scope):
      self._multistore = multistore
      self._client_id = client_id
      self._user_agent = user_agent
      self._scope = scope

    def acquire_lock(self):
      """Acquires any lock necessary to access this Storage.
      This lock is not reentrant.
      """
      self._multistore._lock()

    def release_lock(self):
      """Release the Storage lock.
      Trying to release a lock that isn't held will result in a
      RuntimeError.
      """
      self._multistore._unlock()

    def locked_get(self):
      """Retrieve credential.
      The Storage lock must be held when this is called.
      Returns:
        oauth2client.client.Credentials
      """
      credential = self._multistore._get_credential(
          self._client_id, self._user_agent, self._scope)
      if credential:
        # Point the credential back at this Storage so refreshed access
        # tokens get written through to the multistore file.
        credential.set_store(self)
      return credential

    def locked_put(self, credentials):
      """Write a credential.
      The Storage lock must be held when this is called.
      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._update_credential(credentials, self._scope)

    def locked_delete(self):
      """Delete a credential.
      The Storage lock must be held when this is called.
      """
      self._multistore._delete_credential(self._client_id, self._user_agent,
          self._scope)

  def _create_file_if_needed(self):
    """Create an empty file if necessary.
    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created.
    """
    if not os.path.exists(self._file.filename()):
      # Umask 0177 makes the new file readable/writable by the owner only,
      # since it holds secrets.
      old_umask = os.umask(0177)
      try:
        open(self._file.filename(), 'a+b').close()
      finally:
        os.umask(old_umask)

  def _lock(self):
    """Lock the entire multistore."""
    # Thread lock first, then the OS-level file lock; _unlock releases in
    # the reverse order.
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
      self._read_only = True
      if self._warn_on_readonly:
        logger.warn('The credentials file (%s) is not writable. Opening in '
                    'read-only mode. Any refreshed credentials will only be '
                    'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
      logger.debug('Initializing empty multistore file')
      # The multistore is empty so write out an empty file.
      self._data = {}
      self._write()
    elif not self._read_only or self._data is None:
      # Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
      # changing out from under us and that we only have to read it
      # once. This prevents us from whacking any new access keys that
      # we have cached in memory but were unable to write out.
      self._refresh_data_cache()

  def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()

  def _locked_json_read(self):
    """Get the raw content of the multistore file.
    The multistore must be locked when this is called.
    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())

  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.
    The multistore must be locked when this is called.
    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      # Writes are silently dropped when the file could not be opened
      # writable; the caller was warned at lock time.
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    # Drop any stale trailing bytes left over from a previously longer file.
    self._file.file_handle().truncate()

  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.
    The multistore must be locked when this is called.
    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return
    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)
    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass
    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)

  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.
    Args:
      cred_entry: A dict entry from the data member of our format
    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    client_id = raw_key['clientId']
    user_agent = raw_key['userAgent']
    scope = raw_key['scope']
    key = (client_id, user_agent, scope)
    credential = None
    # Round-trip through JSON so Credentials.new_from_json can dispatch to
    # the right subclass' from_json().
    credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
    return (key, credential)

  def _write(self):
    """Write the cached data back out.
    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in self._data.items():
      raw_key = {
          'clientId': cred_key[0],
          'userAgent': cred_key[1],
          'scope': cred_key[2]
          }
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)

  def _get_credential(self, client_id, user_agent, scope):
    """Get a credential from the multistore.
    The multistore must be locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      The credential specified or None if not present
    """
    key = (client_id, user_agent, scope)
    return self._data.get(key, None)

  def _update_credential(self, cred, scope):
    """Update a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      cred: The OAuth2Credential to update/set
      scope: The scope(s) that this credential covers
    """
    key = (cred.client_id, cred.user_agent, scope)
    self._data[key] = cred
    self._write()

  def _delete_credential(self, client_id, user_agent, scope):
    """Delete a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: The scope(s) that this credential covers
    """
    key = (client_id, user_agent, scope)
    try:
      del self._data[key]
    except KeyError:
      pass
    self._write()

  def _get_storage(self, client_id, user_agent, scope):
    """Get a Storage object to get/set a credential.
    This Storage is a 'view' into the multistore.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, client_id, user_agent, scope)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from anyjson import simplejson
HAS_OPENSSL = False
try:
from oauth2client.crypt import Signer
from oauth2client.crypt import make_signed_jwt
from oauth2client.crypt import verify_signed_jwt_with_certs
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Which certs to use to validate id_tokens received.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
# Root of this module's exception hierarchy; all errors below derive from it.
class Error(Exception):
  """Base error for this module."""
  pass
class FlowExchangeError(Error):
  """Error trying to exchange an authorization grant for an access token."""
  pass
class AccessTokenRefreshError(Error):
  """Error trying to refresh an expired access token."""
  pass
class UnknownClientSecretsFlowError(Error):
  """The client secrets file called for an unknown type of OAuth 2.0 flow."""
  pass
class AccessTokenCredentialsError(Error):
  """Having only the access_token means no refresh is possible."""
  pass
class VerifyJwtTokenError(Error):
  """Could not retrieve certificates for validation."""
  pass
def _abstract():
  # Shared helper for base classes below: marks a method that subclasses
  # are required to override.
  raise NotImplementedError('You need to override this function')
class MemoryCache(object):
  """httplib2 Cache implementation which only caches locally."""

  def __init__(self):
    # Backing store: a plain dict mapping cache keys to cached values.
    self.cache = {}

  def get(self, key):
    """Return the cached value for key, or None if there is no entry."""
    try:
      return self.cache[key]
    except KeyError:
      return None

  def set(self, key, value):
    """Store value under key, replacing any existing entry."""
    self.cache[key] = value

  def delete(self, key):
    """Remove the entry for key; a missing key is not an error."""
    self.cache.pop(key, None)
class Credentials(object):
  """Base class for all Credentials objects.
  Subclasses must define an authorize() method that applies the credentials to
  an HTTP transport.
  Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
  """

  # Instance members that must never be serialized by _to_json().
  NON_SERIALIZED_MEMBERS = ['store']

  def authorize(self, http):
    """Take an httplib2.Http instance (or equivalent) and
    authorizes it for the set of credentials, usually by
    replacing http.request() with a method that adds in
    the appropriate headers and then delegates to the original
    Http.request() method.
    """
    _abstract()

  def refresh(self, http):
    """Forces a refresh of the access_token.
    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    _abstract()

  def apply(self, headers):
    """Add the authorization to the headers.
    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    _abstract()

  def _to_json(self, strip):
    """Utility function for creating a JSON representation of an instance of Credentials.
    Args:
      strip: array, An array of names of members to not include in the JSON.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    for member in strip:
      if member in d:
        del d[member]
    if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
      # Serialize datetimes as RFC3339 UTC strings (see EXPIRY_FORMAT).
      d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Creating a JSON representation of an instance of Credentials.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a Credentials subclass from a JSON
    representation produced by to_json().
    Args:
      s: string, JSON from to_json().
    Returns:
      An instance of the subclass of Credentials that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    try:
      m = __import__(module)
    except ImportError:
      # In case there's an object from the old package structure, update it
      module = module.replace('.apiclient', '')
      m = __import__(module)
    # Re-import with fromlist so 'm' is the leaf module, not the top package.
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)

  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it.
    The JSON should have been produced by calling .to_json() on the object.
    Args:
      s: string, JSON from to_json().
    Returns:
      An instance of a Credentials subclass.
    """
    # Base-class implementation ignores the data; subclasses override this.
    return Credentials()
# Marker base class; concrete OAuth 2.0 flows derive from it.
class Flow(object):
  """Base class for all Flow objects."""
  pass
class Storage(object):
  """Base class for all Storage objects.
  Store and retrieve a single credential. This class supports locking
  such that multiple processes and threads can operate on a single
  store.
  """

  def acquire_lock(self):
    """Acquires any lock necessary to access this Storage.
    This lock is not reentrant.
    """
    pass

  def release_lock(self):
    """Release the Storage lock.
    Trying to release a lock that isn't held will result in a
    RuntimeError.
    """
    pass

  def locked_get(self):
    """Retrieve credential.
    The Storage lock must be held when this is called.
    Returns:
      oauth2client.client.Credentials
    """
    _abstract()

  def locked_put(self, credentials):
    """Write a credential.
    The Storage lock must be held when this is called.
    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()

  def locked_delete(self):
    """Delete a credential.
    The Storage lock must be held when this is called.
    """
    _abstract()

  def get(self):
    """Retrieve credential.
    The Storage lock must *not* be held when this is called.
    Returns:
      oauth2client.client.Credentials
    """
    self.acquire_lock()
    try:
      return self.locked_get()
    finally:
      self.release_lock()

  def put(self, credentials):
    """Write a credential.
    The Storage lock must *not* be held when this is called; this method
    acquires and releases the lock itself (like get() and delete()).
    Args:
      credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
      self.locked_put(credentials)
    finally:
      self.release_lock()

  def delete(self):
    """Delete credential.
    Frees any resources associated with storing the credential.
    The Storage lock must *not* be held when this is called.
    Returns:
      None
    """
    self.acquire_lock()
    try:
      return self.locked_delete()
    finally:
      self.release_lock()
class OAuth2Credentials(Credentials):
  """Credentials object for OAuth 2.0.
  Credentials can be applied to an httplib2.Http object using the authorize()
  method, which then adds the OAuth 2.0 access token to each request.
  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, access_token, client_id, client_secret, refresh_token,
               token_expiry, token_uri, user_agent, id_token=None):
    """Create an instance of OAuth2Credentials.
    This constructor is not usually called by the user, instead
    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
    Args:
      access_token: string, access token.
      client_id: string, client identifier.
      client_secret: string, client secret.
      refresh_token: string, refresh token.
      token_expiry: datetime, when the access_token expires.
      token_uri: string, URI of token endpoint.
      user_agent: string, The HTTP User-Agent to provide for this application.
      id_token: object, The identity of the resource owner.
    Notes:
      store: callable, A callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has expired and been refreshed.
    """
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.refresh_token = refresh_token
    # Set via set_store(); deliberately excluded from serialization/pickling.
    self.store = None
    self.token_expiry = token_expiry
    self.token_uri = token_uri
    self.user_agent = user_agent
    self.id_token = id_token
    # True if the credentials have been revoked or expired and can't be
    # refreshed.
    self.invalid = False

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these credentials.
    The modified http.request method will add authentication headers to each
    request and will refresh access_tokens when a 401 is received on a
    request. In addition the http.request method has a credentials property,
    http.request.credentials, which is the Credentials object that authorized
    it.
    Args:
      http: An instance of httplib2.Http
        or something that acts like it.
    Returns:
      A modified instance of http that was passed in.
    Example:
      h = httplib2.Http()
      h = credentials.authorize(h)
    You can't create a new OAuth subclass of httplib2.Authenication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      if not self.access_token:
        logger.info('Attempting refresh to obtain initial access_token')
        self._refresh(request_orig)
      # Modify the request headers to add the appropriate
      # Authorization header.
      if headers is None:
        headers = {}
      self.apply(headers)
      if self.user_agent is not None:
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
      resp, content = request_orig(uri, method, body, headers,
                                   redirections, connection_type)
      if resp.status == 401:
        logger.info('Refreshing due to a 401')
        self._refresh(request_orig)
        self.apply(headers)
        # Retry the request exactly once with the refreshed token.
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)
      else:
        return (resp, content)

    # Replace the request method with our own closure.
    http.request = new_request
    # Set credentials as a property of the request method.
    setattr(http.request, 'credentials', self)
    return http

  def refresh(self, http):
    """Forces a refresh of the access_token.
    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    self._refresh(http.request)

  def apply(self, headers):
    """Add the authorization to the headers.
    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    headers['Authorization'] = 'Bearer ' + self.access_token

  def to_json(self):
    # Serialize everything except NON_SERIALIZED_MEMBERS (the store).
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)

  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it. The JSON
    should have been produced by calling .to_json() on the object.
    Args:
      s: string, JSON from to_json().
    Returns:
      An instance of a Credentials subclass.
    """
    data = simplejson.loads(s)
    if 'token_expiry' in data and not isinstance(data['token_expiry'],
        datetime.datetime):
      try:
        data['token_expiry'] = datetime.datetime.strptime(
            data['token_expiry'], EXPIRY_FORMAT)
      except:
        # An unparsable expiry is treated as "never expires".
        data['token_expiry'] = None
    retval = OAuth2Credentials(
        data['access_token'],
        data['client_id'],
        data['client_secret'],
        data['refresh_token'],
        data['token_expiry'],
        data['token_uri'],
        data['user_agent'],
        data.get('id_token', None))
    retval.invalid = data['invalid']
    return retval

  @property
  def access_token_expired(self):
    """True if the credential is expired or invalid.
    If the token_expiry isn't set, we assume the token doesn't expire.
    """
    if self.invalid:
      return True
    if not self.token_expiry:
      return False
    now = datetime.datetime.utcnow()
    if now >= self.token_expiry:
      logger.info('access_token is expired. Now: %s, token_expiry: %s',
                  now, self.token_expiry)
      return True
    return False

  def set_store(self, store):
    """Set the Storage for the credential.
    Args:
      store: Storage, an implementation of Storage object.
        This is needed to store the latest access_token if it
        has expired and been refreshed. This implementation uses
        locking to check for updates before updating the
        access_token.
    """
    self.store = store

  def _updateFromCredential(self, other):
    """Update this Credential from another instance."""
    self.__dict__.update(other.__getstate__())

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store may hold unpicklable resources (files, locks), so drop it.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def _generate_refresh_request_body(self):
    """Generate the body that will be used in the refresh request."""
    body = urllib.urlencode({
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token': self.refresh_token,
        })
    return body

  def _generate_refresh_request_headers(self):
    """Generate the headers that will be used in the refresh request."""
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
        }
    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent
    return headers

  def _refresh(self, http_request):
    """Refreshes the access_token.
    This method first checks by reading the Storage object if available.
    If a refresh is still needed, it holds the Storage lock until the
    refresh is completed.
    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.
    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    if not self.store:
      self._do_refresh_request(http_request)
    else:
      self.store.acquire_lock()
      try:
        # Another process/thread may have refreshed already; re-read the
        # store under the lock and only hit the network if still stale.
        new_cred = self.store.locked_get()
        if (new_cred and not new_cred.invalid and
            new_cred.access_token != self.access_token):
          logger.info('Updated access_token read from Storage')
          self._updateFromCredential(new_cred)
        else:
          self._do_refresh_request(http_request)
      finally:
        self.store.release_lock()

  def _do_refresh_request(self, http_request):
    """Refresh the access_token using the refresh_token.
    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.
    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()
    logger.info('Refreshing access_token')
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if loads fails?
      d = simplejson.loads(content)
      self.access_token = d['access_token']
      self.refresh_token = d.get('refresh_token', self.refresh_token)
      if 'expires_in' in d:
        self.token_expiry = datetime.timedelta(
            seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
      else:
        self.token_expiry = None
      if self.store:
        self.store.locked_put(self)
    else:
      # An {'error':...} response body means the token is expired or revoked,
      # so we flag the credentials as such.
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
          self.invalid = True
          if self.store:
            self.store.locked_put(self)
      except:
        pass
      raise AccessTokenRefreshError(error_msg)
class AccessTokenCredentials(OAuth2Credentials):
  """Credentials object for OAuth 2.0.
  Credentials can be applied to an httplib2.Http object using the
  authorize() method, which then signs each request from that object
  with the OAuth 2.0 access token. This set of credentials is for the
  use case where you have acquired an OAuth 2.0 access_token from
  another place such as a JavaScript client or another web
  application, and wish to use it from Python. Because only the
  access_token is present it can not be refreshed and will in time
  expire.
  AccessTokenCredentials objects may be safely pickled and unpickled.
  Usage:
    credentials = AccessTokenCredentials('<an access token>',
      'my-user-agent/1.0')
    http = httplib2.Http()
    http = credentials.authorize(http)
  Exceptions:
    AccessTokenCredentialsError: raised when the access_token expires or is
      revoked.
  """

  def __init__(self, access_token, user_agent):
    """Create an instance of OAuth2Credentials
    This is one of the few types of Credentials that you should construct,
    Credentials objects are usually instantiated by a Flow.
    Args:
      access_token: string, access token.
      user_agent: string, The HTTP User-Agent to provide for this application.
    Notes:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
    """
    # All token-refresh-related fields are None: an access token on its own
    # cannot be refreshed (see _refresh below).
    super(AccessTokenCredentials, self).__init__(
        access_token,
        None,
        None,
        None,
        None,
        None,
        user_agent)

  @classmethod
  def from_json(cls, s):
    data = simplejson.loads(s)
    retval = AccessTokenCredentials(
        data['access_token'],
        data['user_agent'])
    return retval

  def _refresh(self, http_request):
    # There is no refresh_token, so any refresh attempt is an error.
    raise AccessTokenCredentialsError(
        "The access_token is expired or invalid and can't be refreshed.")
class AssertionCredentials(OAuth2Credentials):
  """Abstract Credentials object used for OAuth 2.0 assertion grants.
  This credential does not require a flow to instantiate because it
  represents a two legged flow, and therefore has all of the required
  information to generate and refresh its own access tokens. It must
  be subclassed to generate the appropriate assertion string.
  AssertionCredentials objects may be safely pickled and unpickled.
  """

  def __init__(self, assertion_type, user_agent,
               token_uri='https://accounts.google.com/o/oauth2/token',
               **unused_kwargs):
    """Constructor for AssertionFlowCredentials.
    Args:
      assertion_type: string, assertion type that will be declared to the auth
        server
      user_agent: string, The HTTP User-Agent to provide for this application.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
    """
    super(AssertionCredentials, self).__init__(
        None,
        None,
        None,
        None,
        None,
        token_uri,
        user_agent)
    self.assertion_type = assertion_type

  def _generate_refresh_request_body(self):
    # Assertion grants send a freshly generated assertion instead of a
    # refresh_token (compare OAuth2Credentials._generate_refresh_request_body).
    assertion = self._generate_assertion()
    body = urllib.urlencode({
        'assertion_type': self.assertion_type,
        'assertion': assertion,
        'grant_type': 'assertion',
        })
    return body

  def _generate_assertion(self):
    """Generate the assertion string that will be used in the access token
    request.
    """
    _abstract()
if HAS_OPENSSL:
  # PyOpenSSL is not a prerequisite for oauth2client, so if it is missing then
  # don't create the SignedJwtAssertionCredentials or the verify_id_token()
  # method.

  class SignedJwtAssertionCredentials(AssertionCredentials):
    """Credentials object used for OAuth 2.0 Signed JWT assertion grants.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.
    """

    MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds

    def __init__(self,
        service_account_name,
        private_key,
        scope,
        private_key_password='notasecret',
        user_agent=None,
        token_uri='https://accounts.google.com/o/oauth2/token',
        **kwargs):
      """Constructor for SignedJwtAssertionCredentials.

      Args:
        service_account_name: string, id for account, usually an email
          address.
        private_key: string, private key in P12 format.
        scope: string or list of strings, scope(s) of the credentials being
          requested.
        private_key_password: string, password for private_key.
        user_agent: string, HTTP User-Agent to provide for this application.
        token_uri: string, URI for token endpoint. For convenience
          defaults to Google's endpoints but any OAuth 2.0 provider can be
          used.
        kwargs: kwargs, Additional parameters to add to the JWT token, for
          example prn=joe@xample.org.
      """
      super(SignedJwtAssertionCredentials, self).__init__(
          'http://oauth.net/grant_type/jwt/1.0/bearer',
          user_agent,
          token_uri=token_uri,
          )

      if type(scope) is list:
        scope = ' '.join(scope)
      self.scope = scope

      self.private_key = private_key
      self.private_key_password = private_key_password
      self.service_account_name = service_account_name
      self.kwargs = kwargs

    @classmethod
    def from_json(cls, s):
      """Deserialize a SignedJwtAssertionCredentials from JSON.

      Args:
        s: string, JSON as produced by to_json().

      Returns:
        SignedJwtAssertionCredentials, rebuilt from the serialized fields.
      """
      data = simplejson.loads(s)
      # BUG FIX: the original passed the fields positionally in the wrong
      # order ('private_key_password' landed in the 'scope' slot and vice
      # versa) and passed the kwargs dict as an extra positional argument,
      # which raised TypeError.  Arguments now match __init__'s order and
      # the stored kwargs dict is re-expanded as keyword arguments.
      retval = SignedJwtAssertionCredentials(
          data['service_account_name'],
          data['private_key'],
          data['scope'],
          private_key_password=data['private_key_password'],
          user_agent=data['user_agent'],
          token_uri=data['token_uri'],
          **data['kwargs'])
      retval.invalid = data['invalid']
      return retval

    def _generate_assertion(self):
      """Generate the signed JWT assertion used in the token request."""
      now = long(time.time())
      payload = {
          'aud': self.token_uri,
          'scope': self.scope,
          'iat': now,
          # Request the maximum allowed lifetime for the minted token.
          'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
          'iss': self.service_account_name
      }
      # Caller-supplied JWT claims (e.g. prn=...) override nothing above
      # unless they use the same keys.
      payload.update(self.kwargs)
      logger.debug(str(payload))

      return make_signed_jwt(
          Signer.from_string(self.private_key, self.private_key_password),
          payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs.
_cached_http = httplib2.Http(MemoryCache())


def verify_id_token(id_token, audience, http=None,
   cert_uri=ID_TOKEN_VERIFICATON_CERTS):
  """Verifies a signed JWT id_token.

  Args:
    id_token: string, A Signed JWT.
    audience: string, The audience 'aud' that the token should be for.
    http: httplib2.Http, instance to use to make the HTTP request. Callers
      should supply an instance that has caching enabled.
    cert_uri: string, URI of the certificates in JSON format to
      verify the JWT against.

  Returns:
    The deserialized JSON in the JWT.

  Raises:
    VerifyJwtTokenError: if the certificate endpoint does not answer 200.
    oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
  """
  # Fall back to the module-level caching Http so repeated verifications
  # do not refetch the same certificate document.
  fetcher = http if http is not None else _cached_http
  resp, content = fetcher.request(cert_uri)
  if resp.status != 200:
    raise VerifyJwtTokenError('Status code: %d' % resp.status)
  certs = simplejson.loads(content)
  return verify_signed_jwt_with_certs(id_token, certs, audience)
def _urlsafe_b64decode(b64string):
  # Decode a base64url string whose trailing '=' padding may have been
  # stripped (as is conventional for JWT segments).
  # Guard against unicode strings, which base64 can't handle.
  b64string = b64string.encode('ascii')
  # Restore the length to a multiple of 4.  NOTE(review): when the length is
  # already a multiple of 4 this appends four '=' characters; binascii
  # appears to tolerate the surplus padding, so the decoded result is
  # unchanged -- confirm on the target interpreter.
  padded = b64string + '=' * (4 - len(b64string) % 4)
  return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
  """Extract the JSON payload from a JWT.

  Does the extraction w/o checking the signature.

  Args:
    id_token: string, OAuth 2.0 id_token.

  Returns:
    object, The deserialized JSON payload.

  Raises:
    VerifyJwtTokenError: if the token is not made of exactly three segments.
  """
  parts = id_token.split('.')
  if len(parts) != 3:
    raise VerifyJwtTokenError(
        'Wrong number of segments in token: %s' % id_token)
  # A JWT is header.payload.signature; the payload is the middle segment.
  return simplejson.loads(_urlsafe_b64decode(parts[1]))
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri = 'postmessage',
                          http=None, user_agent=None,
                          token_uri='https://accounts.google.com/o/oauth2/token'):
  """Exchanges an authorization code for an OAuth2Credentials object.

  Args:
    client_id: string, client identifier.
    client_secret: string, client secret.
    scope: string or list of strings, scope(s) to request.
    code: string, An authroization code, most likely passed down from
      the client
    redirect_uri: string, this is generally set to 'postmessage' to match
      the redirect_uri that the client specified
    http: httplib2.Http, optional http instance to use to do the fetch
    user_agent: string, HTTP User-Agent to provide for this application.
    token_uri: string, URI for token endpoint. For convenience
      defaults to Google's endpoints but any OAuth 2.0 provider can be used.

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError if the authorization code cannot be exchanged for an
      access token
  """
  flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                             'https://accounts.google.com/o/oauth2/auth',
                             token_uri)
  # step1_get_authorize_url() is invoked purely for its side effect of
  # recording redirect_uri on the flow; the returned URL is discarded.
  flow.step1_get_authorize_url(redirect_uri)
  return flow.step2_exchange(code, http)
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message = None,
                                            redirect_uri = 'postmessage',
                                            http=None):
  """Returns OAuth2Credentials from a clientsecrets file and an auth code.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of clientsecrets.
    scope: string or list of strings, scope(s) to request.
    code: string, An authroization code, most likely passed down from
      the client
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message in not
      provided then clientsecrets.InvalidClientSecretsError will be raised.
    redirect_uri: string, this is generally set to 'postmessage' to match
      the redirect_uri that the client specified
    http: httplib2.Http, optional http instance to use to do the fetch

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError if the authorization code cannot be exchanged for an
      access token
    UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
  """
  flow = flow_from_clientsecrets(filename, scope, message)
  # Called only for its side effect of storing redirect_uri on the flow;
  # the authorize URL itself is not needed here.
  flow.step1_get_authorize_url(redirect_uri)
  return flow.step2_exchange(code, http)
class OAuth2WebServerFlow(Flow):
  """Does the Web Server Flow for OAuth 2.0.

  OAuth2WebServerFlow objects may be safely pickled and unpickled.
  """

  def __init__(self, client_id, client_secret, scope, user_agent=None,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               **kwargs):
    """Constructor for OAuth2WebServerFlow.

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      **kwargs: dict, The keyword arguments are all optional and required
        parameters for the OAuth calls.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    if type(scope) is list:
      scope = ' '.join(scope)
    self.scope = scope
    self.user_agent = user_agent
    self.auth_uri = auth_uri
    self.token_uri = token_uri
    # Default to 'offline' so a refresh_token is issued; callers may
    # override through kwargs.
    self.params = {
        'access_type': 'offline',
    }
    self.params.update(kwargs)
    # Set by step1_get_authorize_url() and reused by step2_exchange().
    self.redirect_uri = None

  def step1_get_authorize_url(self, redirect_uri=OOB_CALLBACK_URN):
    """Returns a URI to redirect to the provider.

    Args:
      redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
        for a non-web-based application, or a URI that handles the callback
        from the authorization server.

    If redirect_uri is 'urn:ietf:wg:oauth:2.0:oob' then pass in the
    generated verification code to step2_exchange, otherwise pass in the
    query parameters received at the callback uri to step2_exchange.
    """
    self.redirect_uri = redirect_uri
    query = {
        'response_type': 'code',
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'scope': self.scope,
    }
    query.update(self.params)
    parts = list(urlparse.urlparse(self.auth_uri))
    # Query parameters already present on auth_uri take precedence over
    # duplicates in the dict built above.
    query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)

  def step2_exchange(self, code, http=None):
    """Exhanges a code for OAuth2Credentials.

    Args:
      code: string or dict, either the code as a string, or a dictionary
        of the query parameters to the redirect_uri, which contains
        the code.
      http: httplib2.Http, optional http instance to use to do the fetch

    Returns:
      An OAuth2Credentials object that can be used to authorize requests.

    Raises:
      FlowExchangeError if a problem occured exchanging the code for a
        refresh_token.
    """
    if not isinstance(code, basestring):
      # 'code' is a dict of the redirect_uri query parameters.
      if 'code' not in code:
        if 'error' in code:
          error_msg = code['error']
        else:
          error_msg = 'No code was supplied in the query parameters.'
        raise FlowExchangeError(error_msg)
      else:
        code = code['code']

    body = urllib.urlencode({
        'grant_type': 'authorization_code',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
        'redirect_uri': self.redirect_uri,
        'scope': self.scope,
        })
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }
    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent
    if http is None:
      http = httplib2.Http()
    resp, content = http.request(self.token_uri, method='POST', body=body,
                                 headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if simplejson.loads fails?
      d = simplejson.loads(content)
      access_token = d['access_token']
      refresh_token = d.get('refresh_token', None)
      token_expiry = None
      if 'expires_in' in d:
        token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=int(d['expires_in']))

      if 'id_token' in d:
        d['id_token'] = _extract_id_token(d['id_token'])

      logger.info('Successfully retrieved access token: %s' % content)
      return OAuth2Credentials(access_token, self.client_id,
                               self.client_secret, refresh_token, token_expiry,
                               self.token_uri, self.user_agent,
                               id_token=d.get('id_token', None))
    else:
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        # Prefer the server's own error description if the body is JSON.
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
      except (TypeError, ValueError):
        # BUG FIX: the original bare 'except:' swallowed every exception,
        # including KeyboardInterrupt/SystemExit; only JSON-decoding
        # failures are expected and tolerated here.
        pass
      raise FlowExchangeError(error_msg)
def flow_from_clientsecrets(filename, scope, message=None):
  """Create a Flow from a clientsecrets file.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) to request.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message in not
      provided then clientsecrets.InvalidClientSecretsError will be raised.

  Returns:
    A Flow object.

  Raises:
    UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
  """
  try:
    client_type, client_info = clientsecrets.loadfile(filename)
    if client_type in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
      return OAuth2WebServerFlow(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          None, # user_agent
          client_info['auth_uri'],
          client_info['token_uri'])
  except clientsecrets.InvalidClientSecretsError:
    if message:
      sys.exit(message)
    else:
      raise
  else:
    # Reached only when loadfile succeeded but the type was not handled
    # above.  BUG FIX: the original used '*' (string repetition) instead of
    # '%' interpolation, which raised TypeError instead of the intended
    # UnknownClientSecretsFlowError.
    raise UnknownClientSecretsFlowError(
        'This OAuth 2.0 flow is unsupported: "%s"' % client_type)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from a file."""
def __init__(self, filename):
self._filename = filename
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
try:
f = open(self._filename, 'rb')
content = f.read()
f.close()
except IOError:
return credentials
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
self._create_file_if_needed()
f = open(self._filename, 'wb')
f.write(credentials.to_json())
f.close()
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
os.unlink(self._filename)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
  """Django model field storing a pickled oauth2client Credentials.

  The value is persisted as a base64-encoded pickle in a text column.
  """

  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    return "TextField"

  def to_python(self, value):
    # None and already-deserialized Credentials pass through unchanged;
    # anything else is assumed to be the base64-encoded pickle from the DB.
    if value is None or isinstance(value, oauth2client.client.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    return None if value is None else base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
  """Django model field storing a pickled oauth2client Flow.

  The value is persisted as a base64-encoded pickle in a text column.
  """

  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    return "TextField"

  def to_python(self, value):
    # None and already-deserialized Flow objects pass through unchanged;
    # anything else is assumed to be the base64-encoded pickle from the DB.
    if value is None or isinstance(value, oauth2client.client.Flow):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    return None if value is None else base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from
  the datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredenialsField
  on a db model class.
  """

  def __init__(self, model_class, key_name, key_value, property_name):
    """Constructor for Storage.

    Args:
      model_class: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      key_value: string, key value for the entity that has the credentials
      property_name: string, name of the property that is a
        CredentialsProperty
    """
    self.model_class = model_class
    self.key_name = key_name
    self.key_value = key_value
    self.property_name = property_name

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials, or None if no matching entity exists.
    """
    credential = None
    query = {self.key_name: self.key_value}
    entities = self.model_class.objects.filter(**query)
    if len(entities) > 0:
      credential = getattr(entities[0], self.property_name)
      if credential and hasattr(credential, 'set_store'):
        # Let the credential write itself back here when it is refreshed.
        credential.set_store(self)
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    # NOTE(review): this always instantiates a fresh model object; unless
    # key_name is the primary key this can create duplicate rows rather
    # than updating the existing one -- confirm against the model class.
    args = {self.key_name: self.key_value}
    entity = self.model_class(**args)
    setattr(entity, self.property_name, credentials)
    entity.save()

  def locked_delete(self):
    """Delete Credentials from the datastore."""
    query = {self.key_name: self.key_value}
    # The original bound the result to an unused local; delete() is a
    # side-effecting queryset call, so the binding is dropped.
    self.model_class.objects.filter(**query).delete()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from anyjson import simplejson
from client import AccessTokenRefreshError
from client import AssertionCredentials
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import app_identity
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
# Memcache namespace used to stash pickled Flow objects between the
# authorize redirect and the /oauth2callback handler.
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'


class InvalidClientSecretsError(Exception):
  """The client_secrets.json file is malformed or missing required fields."""
  pass
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for
the purpose of accessing data stored under an account assigned to the App
Engine application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being requested.
"""
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
None,
None,
None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching we can skip all the storage hoops and just to a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Utility property that allows easy storage and retreival of an
  oauth2client.Flow
  """

  # Tell what the user type is.
  data_type = Flow

  def get_value_for_datastore(self, model_instance):
    # Serialize the Flow with pickle before handing it to the datastore.
    flow = super(FlowProperty, self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  def make_value_from_datastore(self, value):
    # Stored as a pickled blob; None means no flow was saved.
    return None if value is None else pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, Flow):
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowProperty, self).validate(value)

  def empty(self, value):
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Utility property that allows easy storage and retrieval of
  oath2client.Credentials
  """

  # Tell what the user type is.
  data_type = Credentials

  def get_value_for_datastore(self, model_instance):
    # Credentials are persisted in their JSON form; an unset value is
    # stored as the empty string.
    logging.info("get: Got type " + str(type(model_instance)))
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    serialized = '' if cred is None else cred.to_json()
    return db.Blob(serialized)

  def make_value_from_datastore(self, value):
    logging.info("make: Got type " + str(type(value)))
    # A missing or empty blob means no credential was stored.
    if not value:
      return None
    try:
      return Credentials.new_from_json(value)
    except ValueError:
      # Unparseable JSON is treated as an absent credential.
      return None

  def validate(self, value):
    value = super(CredentialsProperty, self).validate(value)
    logging.info("validate: Got type " + str(type(value)))
    if value is not None and not isinstance(value, Credentials):
      raise db.BadValueError('Property %s must be convertible '
                             'to a Credentials instance (%s)' %
                             (self.name, value))
    return value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredenialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name, cache=None):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a
        CredentialsProperty
      cache: memcache, a write-through cache to put in front of the
        datastore
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name
    self._cache = cache

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials, or None if no credential is stored.
    """
    if self._cache:
      json = self._cache.get(self._key_name)
      if json:
        return Credentials.new_from_json(json)

    credential = None
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      credential = getattr(entity, self._property_name)
    if credential:
      if hasattr(credential, 'set_store'):
        credential.set_store(self)
      # BUG FIX: only populate the cache when a credential was actually
      # found; the original called credential.to_json() unconditionally,
      # raising AttributeError on a datastore miss when a cache was
      # configured.
      if self._cache:
        self._cache.set(self._key_name, credential.to_json())
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._model.get_or_insert(self._key_name)
    setattr(entity, self._property_name, credentials)
    entity.put()
    if self._cache:
      self._cache.set(self._key_name, credentials.to_json())

  def locked_delete(self):
    """Delete Credential from datastore."""
    if self._cache:
      self._cache.delete(self._key_name)
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      entity.delete()
class CredentialsModel(db.Model):
  """Storage for OAuth 2.0 Credentials

  Storage of the model is keyed by the user.user_id().
  """
  # Single property holding the serialized Credentials for one user.
  credentials = CredentialsProperty()
class OAuth2Decorator(object):
  """Utility for making OAuth 2.0 easier.

  Instantiate and then use with oauth_required or oauth_aware
  as decorators on webapp.RequestHandler methods.

  Example:

    decorator = OAuth2Decorator(
        client_id='837...ent.com',
        client_secret='Qh...wwI',
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  # NOTE(review): credentials and _request_handler are stored on the
  # decorator instance itself, which is shared by every request handled in
  # this process; this looks unsafe if requests are served concurrently --
  # confirm against the runtime's concurrency model.

  def __init__(self, client_id, client_secret, scope,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               user_agent=None,
               message=None, **kwargs):
    """Constructor for OAuth2Decorator

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      user_agent: string, User agent of your application, default to None.
      message: Message to display if there are problems with the OAuth 2.0
        configuration. The message may contain HTML and will be presented
        on the web interface for any method that uses the decorator.
      **kwargs: dict, Keyword arguments are be passed along as kwargs to
        the OAuth2WebServerFlow constructor.
    """
    self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                                    auth_uri, token_uri, **kwargs)
    self.credentials = None
    self._request_handler = None
    self._message = message
    # When True (set by subclasses on configuration failure), every
    # decorated method serves _message instead of running.
    self._in_error = False

  def _display_error_message(self, request_handler):
    # Render the configured error message in place of the handler output.
    request_handler.response.out.write('<html><body>')
    request_handler.response.out.write(self._message)
    request_handler.response.out.write('</body></html>')

  def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    def check_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return
      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return
      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # No stored credential: send the user to the provider's consent page.
      if not self.has_credentials():
        return request_handler.redirect(self.authorize_url())
      try:
        method(request_handler, *args, **kwargs)
      except AccessTokenRefreshError:
        # Stored credential could not be refreshed; restart the dance.
        return request_handler.redirect(self.authorize_url())
    return check_oauth

  def oauth_aware(self, method):
    """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

    Does all the setup for the OAuth dance, but doesn't initiate it.
    This decorator is useful if you want to create a page that knows
    whether or not the user has granted access to this application.
    From within a method decorated with @oauth_aware the has_credentials()
    and authorize_url() methods can be called.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    def setup_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return
      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return
      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # Unlike oauth_required, always run the method; it can inspect
      # has_credentials() itself.
      method(request_handler, *args, **kwargs)
    return setup_oauth

  def has_credentials(self):
    """True if for the logged in user there are valid access Credentials.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    return self.credentials is not None and not self.credentials.invalid

  def authorize_url(self):
    """Returns the URL to start the OAuth dance.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    callback = self._request_handler.request.relative_url('/oauth2callback')
    url = self.flow.step1_get_authorize_url(callback)
    user = users.get_current_user()
    # Stash the flow in memcache so OAuth2Handler can complete the
    # exchange when the provider redirects back.
    memcache.set(user.user_id(), pickle.dumps(self.flow),
                 namespace=OAUTH2CLIENT_NAMESPACE)
    return str(url)

  def http(self):
    """Returns an authorized http instance.

    Must only be called from within an @oauth_required decorated method, or
    from within an @oauth_aware decorated method where has_credentials()
    returns True.
    """
    return self.credentials.authorize(httplib2.Http())
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
  """An OAuth2Decorator that builds from a clientsecrets file.

  Uses a clientsecrets file as the source for all the information when
  constructing an OAuth2Decorator.

  Example:

    decorator = OAuth2DecoratorFromClientSecrets(
      os.path.join(os.path.dirname(__file__), 'client_secrets.json')
      scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, filename, scope, message=None):
    """Constructor

    Args:
      filename: string, File name of client secrets.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. The message may contain
        HTML and will be presented on the web interface for any method that
        uses the decorator.
    """
    try:
      client_type, client_info = clientsecrets.loadfile(filename)
      if client_type not in [
          clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
        raise InvalidClientSecretsError(
            'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
      # BUG FIX: the original passed 'message' positionally after
      # token_uri, so it was bound to OAuth2Decorator's 'user_agent'
      # parameter and the configured error message was silently dropped.
      # Keyword arguments make the binding explicit and correct.
      super(OAuth2DecoratorFromClientSecrets, self).__init__(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          auth_uri=client_info['auth_uri'],
          token_uri=client_info['token_uri'],
          message=message)
    except clientsecrets.InvalidClientSecretsError:
      # Fall into the error state; decorated methods will render _message.
      self._in_error = True
      if message is not None:
        self._message = message
      else:
        self._message = "Please configure your application for OAuth 2.0"
def oauth2decorator_from_clientsecrets(filename, scope, message=None):
  """Creates an OAuth2Decorator populated from a clientsecrets file.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) of the credentials being
      requested.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. The message may contain
      HTML and will be presented on the web interface for any method that
      uses the decorator.

  Returns: An OAuth2Decorator
  """
  # Thin convenience wrapper around the class constructor.
  return OAuth2DecoratorFromClientSecrets(filename, scope, message)
class OAuth2Handler(webapp.RequestHandler):
  """Handler for the redirect_uri of the OAuth 2.0 dance."""

  @login_required
  def get(self):
    error = self.request.get('error')
    if error:
      errormsg = self.request.get('error_description', error)
      self.response.out.write(
          'The authorization request failed: %s' % errormsg)
    else:
      user = users.get_current_user()
      # memcache.get() returns None on a miss; guard BEFORE unpickling so a
      # missing/expired flow doesn't crash pickle.loads with a TypeError.
      # (Previously the `if flow:` check ran after pickle.loads, making the
      # guard useless.)
      pickled_flow = memcache.get(user.user_id(),
                                  namespace=OAUTH2CLIENT_NAMESPACE)
      # This code should be amended with application specific error
      # handling. The following cases should be considered:
      # 1. What if the flow doesn't exist in memcache? Or is corrupt?
      # 2. What if the step2_exchange fails?
      if pickled_flow:
        # NOTE: pickle on data from memcache is trusted here because the
        # application wrote it; never unpickle user-supplied input.
        flow = pickle.loads(pickled_flow)
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            CredentialsModel, user.user_id(), 'credentials').put(credentials)
        # 'state' carries the original URL the user was visiting.
        self.redirect(str(self.request.get('state')))
      else:
        # TODO Add error handling here.
        pass
# WSGI routing: the OAuth 2.0 dance redirects the user back to
# /oauth2callback, which OAuth2Handler completes.
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])


def main():
  """CGI entry point that serves the callback application."""
  run_wsgi_app(application)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from OpenSSL import crypto
from anyjson import simplejson
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
class AppIdentityError(Exception):
  """Raised when a JWT fails validation (format, signature, time or aud)."""
  pass
class Verifier(object):
  """Verifies the signature on a message."""

  def __init__(self, pubkey):
    """Constructor.

    Args:
      pubkey: OpenSSL.crypto.PKey, The public key to verify with.
    """
    self._pubkey = pubkey

  def verify(self, message, signature):
    """Verifies a message against a signature.

    Args:
      message: string, The message to verify.
      signature: string, The signature on the message.

    Returns:
      True if message was signed by the private key associated with the
      public key that this object was constructed with.
    """
    try:
      crypto.verify(self._pubkey, signature, message, 'sha256')
      return True
    except crypto.Error:
      # Only a verification failure means "bad signature"; other exceptions
      # (e.g. programming errors) should propagate instead of being
      # swallowed by the old bare except.
      return False

  @staticmethod
  def from_string(key_pem, is_x509_cert):
    """Construct a Verifier instance from a string.

    Args:
      key_pem: string, public key in PEM format.
      is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
        expected to be an RSA key in PEM format.

    Returns:
      Verifier instance.

    Raises:
      OpenSSL.crypto.Error: if the key_pem can't be parsed.
    """
    if is_x509_cert:
      pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
    else:
      # NOTE(review): load_privatekey is used because older pyOpenSSL has no
      # loader for bare public keys; this branch therefore presumably
      # expects an RSA *private* key PEM — confirm with callers.
      pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
    return Verifier(pubkey)
class Signer(object):
  """Signs messages with a private key."""

  def __init__(self, pkey):
    """Constructor.

    Args:
      pkey: OpenSSL.crypto.PKey, The private key to sign with.
    """
    self._key = pkey

  def sign(self, message):
    """Signs a message.

    Args:
      message: string, Message to be signed.

    Returns:
      string, The signature of the message for the given key.
    """
    return crypto.sign(self._key, message, 'sha256')

  @staticmethod
  def from_string(key, password='notasecret'):
    """Construct a Signer instance from a string.

    Args:
      key: string, private key in P12 format.
      password: string, password for the private key file.

    Returns:
      Signer instance.

    Raises:
      OpenSSL.crypto.Error: if the key can't be parsed.
    """
    p12 = crypto.load_pkcs12(key, password)
    return Signer(p12.get_privatekey())
def _urlsafe_b64encode(raw_bytes):
return base64.urlsafe_b64encode(raw_bytes).rstrip('=')
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _json_encode(data):
return simplejson.dumps(data, separators = (',', ':'))
def make_signed_jwt(signer, payload):
  """Make a signed JWT.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    signer: crypt.Signer, Cryptographic signer.
    payload: dict, Dictionary of data to convert to JSON and then sign.

  Returns:
    string, The JWT for the payload.
  """
  header = {'typ': 'JWT', 'alg': 'RS256'}
  # Signing input is header.payload, each segment base64url-encoded with
  # padding stripped.
  segments = [_urlsafe_b64encode(_json_encode(header)),
              _urlsafe_b64encode(_json_encode(payload))]
  signature = signer.sign('.'.join(segments))
  segments.append(_urlsafe_b64encode(signature))
  logging.debug(str(segments))
  return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
  """Verify a JWT against public certs.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    jwt: string, A JWT.
    certs: dict, Dictionary where values of public keys in PEM format.
    audience: string, The audience, 'aud', that this JWT should contain. If
      None then the JWT's 'aud' parameter is not verified.

  Returns:
    dict, The deserialized JSON payload in the JWT.

  Raises:
    AppIdentityError: if any checks are failed.
  """
  segments = jwt.split('.')
  if len(segments) != 3:
    raise AppIdentityError(
        'Wrong number of segments in token: %s' % jwt)
  signed = '%s.%s' % (segments[0], segments[1])
  signature = _urlsafe_b64decode(segments[2])

  # Parse token.
  json_body = _urlsafe_b64decode(segments[1])
  try:
    parsed = simplejson.loads(json_body)
  except ValueError:
    # simplejson raises ValueError on malformed JSON; the old bare except
    # also hid unrelated programming errors.
    raise AppIdentityError('Can\'t parse token: %s' % json_body)

  # Check signature: accept the token if any supplied cert verifies it.
  verified = False
  for pem in certs.values():
    verifier = Verifier.from_string(pem, True)
    if verifier.verify(signed, signature):
      verified = True
      break
  if not verified:
    raise AppIdentityError('Invalid token signature: %s' % jwt)

  # Check creation timestamp, allowing CLOCK_SKEW_SECS of clock drift.
  iat = parsed.get('iat')
  if iat is None:
    raise AppIdentityError('No iat field in token: %s' % json_body)
  earliest = iat - CLOCK_SKEW_SECS

  # Check expiration timestamp.
  now = long(time.time())
  exp = parsed.get('exp')
  if exp is None:
    raise AppIdentityError('No exp field in token: %s' % json_body)
  if exp >= now + MAX_TOKEN_LIFETIME_SECS:
    raise AppIdentityError(
        'exp field too far in future: %s' % json_body)
  latest = exp + CLOCK_SKEW_SECS

  if now < earliest:
    raise AppIdentityError('Token used too early, %d < %d: %s' %
                           (now, earliest, json_body))
  if now > latest:
    raise AppIdentityError('Token used too late, %d > %d: %s' %
                           (now, latest, json_body))

  # Check audience.
  if audience is not None:
    aud = parsed.get('aud')
    if aud is None:
      raise AppIdentityError('No aud field in token: %s' % json_body)
    if aud != audience:
      raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                             (aud, audience, json_body))

  return parsed
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
# Properties that make a client_secrets.json file valid.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'
VALID_CLIENT = {
TYPE_WEB: {
'required': [
'client_id',
'client_secret',
'redirect_uris',
'auth_uri',
'token_uri'],
'string': [
'client_id',
'client_secret'
]
},
TYPE_INSTALLED: {
'required': [
'client_id',
'client_secret',
'redirect_uris',
'auth_uri',
'token_uri'],
'string': [
'client_id',
'client_secret'
]
}
}
class Error(Exception):
  """Base error for this module; all clientsecrets errors derive from it."""
  pass
class InvalidClientSecretsError(Error):
  """Format of ClientSecrets file is invalid (or the file is missing)."""
  pass
def _validate_clientsecrets(obj):
  """Validate the structure of a parsed clientsecrets object.

  Args:
    obj: dict, deserialized contents of a client_secrets file; must contain
      exactly one top-level key naming the client type.

  Returns:
    (client_type, client_info) tuple, where client_type is one of TYPE_WEB
    or TYPE_INSTALLED and client_info is its property dictionary.

  Raises:
    InvalidClientSecretsError: if the object is malformed, of an unknown
      type, missing a required property, or still holds a template
      placeholder value.
  """
  if obj is None or len(obj) != 1:
    raise InvalidClientSecretsError('Invalid file format.')
  client_type = obj.keys()[0]
  # Membership test directly on the dict; building .keys() was wasteful.
  if client_type not in VALID_CLIENT:
    raise InvalidClientSecretsError('Unknown client type: %s.' % client_type)
  client_info = obj[client_type]
  for prop_name in VALID_CLIENT[client_type]['required']:
    if prop_name not in client_info:
      raise InvalidClientSecretsError(
          'Missing property "%s" in a client type of "%s".' % (prop_name,
                                                               client_type))
  for prop_name in VALID_CLIENT[client_type]['string']:
    # Template files ship with '[[...]]' placeholders; treat those as
    # unconfigured.
    if client_info[prop_name].startswith('[['):
      raise InvalidClientSecretsError(
          'Property "%s" is not configured.' % prop_name)
  return client_type, client_info
def load(fp):
  """Parse client secrets JSON from an open file object and validate it."""
  return _validate_clientsecrets(simplejson.load(fp))
def loads(s):
  """Parse client secrets JSON from a string and validate it."""
  return _validate_clientsecrets(simplejson.loads(s))
def loadfile(filename):
  """Read client secrets from the named file and validate them.

  Args:
    filename: string, path of the client_secrets file.

  Returns:
    (client_type, client_info) tuple from _validate_clientsecrets.

  Raises:
    InvalidClientSecretsError: if the file cannot be read or is malformed.
  """
  try:
    # open() replaces the deprecated file() builtin.
    fp = open(filename, 'r')
    try:
      obj = simplejson.load(fp)
    finally:
      fp.close()
  except IOError:
    raise InvalidClientSecretsError('File not found: "%s"' % filename)
  return _validate_clientsecrets(obj)
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Acquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
logger = logging.getLogger(__name__)
class AlreadyLockedException(Exception):
  """Trying to lock a file that has already been locked by the LockedFile.

  Locking is not re-entrant: a second open_and_lock() on the same opener
  raises this instead of deadlocking.
  """
  pass
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT|os.O_EXCL|os.O_RDWR)
self._locked = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds' % (
lock_filename, timeout))
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.unlink(lock_filename)
os.close(self._lock_fd)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError, e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise e
if e.errno != errno.EACCES:
raise e
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
  import pywintypes
  import win32con
  import win32file

  class _Win32Opener(_Opener):
    """Open, lock, and unlock a file using windows primitives."""

    # Error #33:
    #  'The process cannot access the file because another process'
    FILE_IN_USE_ERROR = 33

    # Error #158:
    #  'The segment is already unlocked.'
    FILE_ALREADY_UNLOCKED_ERROR = 158

    def open_and_lock(self, timeout, delay):
      """Open the file and lock it.

      Args:
        timeout: float, How long to try to lock for.
        delay: float, How long to wait between retries.

      Raises:
        AlreadyLockedException: if the lock is already acquired.
        IOError: if the open fails.
      """
      if self._locked:
        raise AlreadyLockedException('File %s is already locked' %
                                     self._filename)
      start_time = time.time()
      try:
        self._fh = open(self._filename, self._mode)
      except IOError, e:
        # If we can't access with _mode, try _fallback_mode and don't lock.
        if e.errno == errno.EACCES:
          self._fh = open(self._filename, self._fallback_mode)
          return
        # NOTE(review): IOErrors other than EACCES are silently swallowed
        # here, leaving self._fh unset while the lock loop below still
        # runs — confirm whether they should be re-raised instead.

      # We opened in _mode, try to lock the file.
      while True:
        try:
          hfile = win32file._get_osfhandle(self._fh.fileno())
          # The -0x10000 length argument follows the original upstream
          # code; presumably it locks the relevant byte range of the whole
          # file — confirm against the LockFileEx documentation.
          win32file.LockFileEx(
              hfile,
              (win32con.LOCKFILE_FAIL_IMMEDIATELY|
               win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
              pywintypes.OVERLAPPED())
          self._locked = True
          return
        except pywintypes.error, e:
          if timeout == 0:
            raise e
          # If the error is not that the file is already in use, raise.
          if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
            raise
          # We could not acquire the lock. Try again.
          if (time.time() - start_time) >= timeout:
            logger.warn('Could not lock %s in %s seconds' % (
                self._filename, timeout))
            if self._fh:
              self._fh.close()
            self._fh = open(self._filename, self._fallback_mode)
            return
          time.sleep(delay)

    def unlock_and_close(self):
      """Close and unlock the file using the win32 primitive."""
      if self._locked:
        try:
          hfile = win32file._get_osfhandle(self._fh.fileno())
          win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
        except pywintypes.error, e:
          # Unlocking an already-unlocked segment is benign; anything else
          # propagates.
          if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
            raise
        self._locked = False
      if self._fh:
        self._fh.close()
except ImportError:
  _Win32Opener = None
class LockedFile(object):
  """Represent a file that has exclusive access."""

  def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
    """Construct a LockedFile.

    Args:
      filename: string, The path of the file to open.
      mode: string, The mode to try to open the file with.
      fallback_mode: string, The mode to use if locking fails.
      use_native_locking: bool, Whether or not fcntl/win32 locking is used.
    """
    opener = None
    if use_native_locking:
      # Prefer a platform-native locking primitive when one was importable;
      # the fcntl opener wins if (improbably) both are available.
      if _Win32Opener:
        opener = _Win32Opener(filename, mode, fallback_mode)
      if _FcntlOpener:
        opener = _FcntlOpener(filename, mode, fallback_mode)
    if opener is None:
      opener = _PosixOpener(filename, mode, fallback_mode)
    self._opener = opener

  def filename(self):
    """Return the filename we were constructed with."""
    return self._opener.filename()

  def file_handle(self):
    """Return the file_handle to the opened file."""
    return self._opener.file_handle()

  def is_locked(self):
    """Return whether we successfully locked the file."""
    return self._opener.is_locked()

  def open_and_lock(self, timeout=0, delay=0.05):
    """Open the file, trying to lock it.

    Args:
      timeout: float, The number of seconds to try to acquire the lock.
      delay: float, The number of seconds to wait between retry attempts.

    Raises:
      AlreadyLockedException: if the lock is already acquired.
      IOError: if the open fails.
    """
    self._opener.open_and_lock(timeout, delay)

  def unlock_and_close(self):
    """Unlock and close a file."""
    self._opener.unlock_and_close()
| Python |
__version__ = "1.0c2"
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
| Python |
import Cookie
import datetime
import time
import email.utils
import calendar
import base64
import hashlib
import hmac
import re
import logging
# Ripped from the Tornado Framework's web.py
# http://github.com/facebook/tornado/commit/39ac6d169a36a54bb1f6b9bf1fdebb5c9da96e09
#
# Tornado is licensed under the Apache Licence, Version 2.0
# (http://www.apache.org/licenses/LICENSE-2.0.html).
#
# Example:
# from vendor.prayls.lilcookies import LilCookies
# cookieutil = LilCookies(self, application_settings['cookie_secret'])
# cookieutil.set_secure_cookie(name = 'mykey', value = 'myvalue', expires_days= 365*100)
# cookieutil.get_secure_cookie(name = 'mykey')
class LilCookies:
  """Cookie helper for webapp handlers, with HMAC-signed 'secure' cookies.

  Ripped from the Tornado Framework's web.py; see module header for the
  provenance and usage example.
  """

  @staticmethod
  def _utf8(s):
    """Return s as a UTF-8 encoded byte string."""
    if isinstance(s, unicode):
      return s.encode("utf-8")
    assert isinstance(s, str)
    return s

  @staticmethod
  def _time_independent_equals(a, b):
    """Compare two strings in constant time to resist timing attacks."""
    if len(a) != len(b):
      return False
    result = 0
    for x, y in zip(a, b):
      result |= ord(x) ^ ord(y)
    return result == 0

  @staticmethod
  def _signature_from_secret(cookie_secret, *parts):
    """Takes a secret salt value to create a signature for values in the `parts` param."""
    # 'mac' rather than 'hash' to avoid shadowing the builtin.
    mac = hmac.new(cookie_secret, digestmod=hashlib.sha1)
    for part in parts:
      mac.update(part)
    return mac.hexdigest()

  @staticmethod
  def _signed_cookie_value(cookie_secret, name, value):
    """Returns a signed value for use in a cookie.

    This is helpful to have in its own method if you need to re-use this
    function for other needs.
    """
    timestamp = str(int(time.time()))
    value = base64.b64encode(value)
    signature = LilCookies._signature_from_secret(cookie_secret, name, value, timestamp)
    return "|".join([value, timestamp, signature])

  @staticmethod
  def _verified_cookie_value(cookie_secret, name, signed_value):
    """Returns the un-encrypted value given the signed value if it validates, or None."""
    value = signed_value
    if not value: return None
    parts = value.split("|")
    if len(parts) != 3: return None
    # Recompute the signature over payload+timestamp and compare in
    # constant time.
    signature = LilCookies._signature_from_secret(cookie_secret, name, parts[0], parts[1])
    if not LilCookies._time_independent_equals(parts[2], signature):
      logging.warning("Invalid cookie signature %r", value)
      return None
    timestamp = int(parts[1])
    # Reject cookies older than 31 days regardless of the client-controlled
    # Expires attribute.
    if timestamp < time.time() - 31 * 86400:
      logging.warning("Expired cookie %r", value)
      return None
    try:
      return base64.b64decode(parts[0])
    except:
      # Payload isn't valid base64; treat as tampered.
      return None

  def __init__(self, handler, cookie_secret):
    """You must specify the cookie_secret to use any of the secure methods.

    It should be a long, random sequence of bytes to be used as the HMAC
    secret for the signature.

    Args:
      handler: webapp.RequestHandler whose request/response carry cookies.
      cookie_secret: string of at least 45 characters used as the HMAC key.
    """
    if len(cookie_secret) < 45:
      raise ValueError("LilCookies cookie_secret should at least be 45 characters long, but got `%s`" % cookie_secret)
    self.handler = handler
    self.request = handler.request
    self.response = handler.response
    self.cookie_secret = cookie_secret

  def cookies(self):
    """A dictionary of Cookie.Morsel objects."""
    if not hasattr(self, "_cookies"):
      self._cookies = Cookie.BaseCookie()
      if "Cookie" in self.request.headers:
        try:
          self._cookies.load(self.request.headers["Cookie"])
        except:
          # Unparsable Cookie header: drop everything the client sent.
          self.clear_all_cookies()
    return self._cookies

  def get_cookie(self, name, default=None):
    """Gets the value of the cookie with the given name, else default."""
    if name in self.cookies():
      return self._cookies[name].value
    return default

  def set_cookie(self, name, value, domain=None, expires=None, path="/",
                 expires_days=None, **kwargs):
    """Sets the given cookie name/value with the given options.

    Additional keyword arguments are set on the Cookie.Morsel directly.
    See http://docs.python.org/library/cookie.html#morsel-objects
    for available attributes.
    """
    name = LilCookies._utf8(name)
    value = LilCookies._utf8(value)
    if re.search(r"[\x00-\x20]", name + value):
      # Don't let us accidentally inject bad stuff
      raise ValueError("Invalid cookie %r: %r" % (name, value))
    if not hasattr(self, "_new_cookies"):
      self._new_cookies = []
    new_cookie = Cookie.BaseCookie()
    self._new_cookies.append(new_cookie)
    new_cookie[name] = value
    if domain:
      new_cookie[name]["domain"] = domain
    if expires_days is not None and not expires:
      expires = datetime.datetime.utcnow() + datetime.timedelta(days=expires_days)
    if expires:
      timestamp = calendar.timegm(expires.utctimetuple())
      new_cookie[name]["expires"] = email.utils.formatdate(
          timestamp, localtime=False, usegmt=True)
    if path:
      new_cookie[name]["path"] = path
    for k, v in kwargs.iteritems():
      new_cookie[name][k] = v

    # The 2 lines below were not in Tornado. Instead, they output all their
    # cookies to the headers at once before a response flush.
    for vals in new_cookie.values():
      self.response.headers._headers.append(('Set-Cookie', vals.OutputString(None)))

  def clear_cookie(self, name, path="/", domain=None):
    """Deletes the cookie with the given name."""
    expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
    self.set_cookie(name, value="", path=path, expires=expires,
                    domain=domain)

  def clear_all_cookies(self):
    """Deletes all the cookies the user sent with this request."""
    for name in self.cookies().iterkeys():
      self.clear_cookie(name)

  def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
    """Signs and timestamps a cookie so it cannot be forged.

    To read a cookie set with this method, use get_secure_cookie().
    """
    value = LilCookies._signed_cookie_value(self.cookie_secret, name, value)
    self.set_cookie(name, value, expires_days=expires_days, **kwargs)

  def get_secure_cookie(self, name, value=None):
    """Returns the given signed cookie if it validates, or None."""
    if value is None: value = self.get_cookie(name)
    return LilCookies._verified_cookie_value(self.cookie_secret, name, value)

  def _cookie_signature(self, *parts):
    """Sign *parts with this instance's secret.

    Bug fix: *parts was previously dropped, so every input produced the
    same signature.
    """
    return LilCookies._signature_from_secret(self.cookie_secret, *parts)
| Python |
# Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
  """Parses a mime-type into its component parts.

  Carves up a mime-type and returns a tuple of the (type, subtype, params)
  where 'params' is a dictionary of all the parameters for the media range.
  For example, the media range 'application/xhtml;q=0.5' would get parsed
  into:

     ('application', 'xhtml', {'q': '0.5'})
  """
  pieces = mime_type.split(';')
  # Each parameter is 'key=value'; a parameter without '=' yields a
  # 1-tuple, which dict() rejects with ValueError — same as the original.
  params = dict(tuple(s.strip() for s in piece.split('=', 1))
                for piece in pieces[1:])
  full_type = pieces[0].strip()
  # Java URLConnection class sends an Accept header that includes a
  # single '*'. Turn it into a legal wildcard.
  if full_type == '*':
    full_type = '*/*'
  major, minor = full_type.split('/')
  return (major.strip(), minor.strip(), params)
def parse_media_range(range):
  """Parse a media-range into its component parts.

  Carves up a media range and returns a tuple of the (type, subtype,
  params) where 'params' is a dictionary of all the parameters for the media
  range.  For example, the media range 'application/*;q=0.5' would get
  parsed into:

     ('application', '*', {'q': '0.5'})

  In addition this function also guarantees that there is a value for 'q'
  in the params dictionary, filling it in with a proper default if
  necessary.
  """
  (type, subtype, params) = parse_mime_type(range)
  # Normalize 'q': missing, empty, zero, or out-of-range values all
  # collapse to the HTTP default of 1.  ('in' replaces the Python-2-only
  # has_key().)
  if 'q' not in params or not params['q'] or \
     not float(params['q']) or float(params['q']) > 1 or \
     float(params['q']) < 0:
    params['q'] = '1'
  return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
  """Find the best match for a mime-type amongst parsed media-ranges.

  Find the best match for a given mime-type against a list of media_ranges
  that have already been parsed by parse_media_range(). Returns a tuple of
  the fitness value and the value of the 'q' quality parameter of the best
  match, or (-1, 0) if no match was found. Just as for quality_parsed(),
  'parsed_ranges' must be a list of parsed media ranges.
  """
  best_fitness = -1
  best_fit_q = 0
  (target_type, target_subtype, target_params) = parse_media_range(mime_type)
  for (type, subtype, params) in parsed_ranges:
    type_match = (type == target_type or
                  type == '*' or
                  target_type == '*')
    subtype_match = (subtype == target_subtype or
                     subtype == '*' or
                     target_subtype == '*')
    if type_match and subtype_match:
      # Count target parameters (other than 'q') that the range matches
      # exactly; sum() replaces the old reduce()/has_key() construction.
      param_matches = sum(1 for (key, value) in target_params.items()
                          if key != 'q' and key in params
                          and value == params[key])
      # Exact type/subtype matches dominate parameter matches.
      fitness = (type == target_type) and 100 or 0
      fitness += (subtype == target_subtype) and 10 or 0
      fitness += param_matches
      if fitness > best_fitness:
        best_fitness = fitness
        best_fit_q = params['q']
  return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
  """Find the best match for a mime-type amongst parsed media-ranges.

  Returns the 'q' quality parameter of the best match of mime_type amongst
  'parsed_ranges' (which must already have been run through
  parse_media_range()), or 0 if no match was found.  Otherwise behaves the
  same as quality().
  """
  return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
  """Return the quality ('q') of a mime-type against a list of media-ranges.

  Returns the quality 'q' of a mime-type when compared against the
  media-ranges in ranges. For example:

  >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
                text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
  0.7
  """
  parsed = [parse_media_range(part) for part in ranges.split(',')]
  return quality_parsed(mime_type, parsed)
def best_match(supported, header):
  """Return mime-type with the highest quality ('q') from list of candidates.

  Takes a list of supported mime-types and finds the best match for all the
  media-ranges listed in header. The value of header must be a string that
  conforms to the format of the HTTP Accept: header. The value of 'supported'
  is a list of mime-types. The list of supported mime-types should be sorted
  in order of increasing desirability, in case of a situation where there is
  a tie.

  >>> best_match(['application/xbel+xml', 'text/xml'],
                 'text/*;q=0.5,*/*; q=0.1')
  'text/xml'
  """
  parsed_header = [parse_media_range(r)
                   for r in _filter_blank(header.split(','))]
  # Rank each candidate by (fitness, q), with its list position as a
  # tie-breaker so later (more desirable) entries win.
  weighted_matches = [
      (fitness_and_quality_parsed(mime_type, parsed_header), pos, mime_type)
      for pos, mime_type in enumerate(supported)]
  weighted_matches.sort()
  # Winner is last after sorting; return '' if its quality is 0.
  return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actuall HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import mimeparse
import mimetypes
import os
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
class MediaUploadProgress(object):
  """Status of a resumable upload."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of upload completed, as a float.

    Returns:
      the fraction complete as a float; 0.0 when the total size of the
      upload is unknown.
    """
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Status of a resumable download."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download, or None if not yet
        known.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Fraction of download completed, as a float.

    Returns:
      the fraction complete as a float in [0.0, 1.0], returning 0.0 if the
      total size of the download is unknown or zero.
    """
    # Guard against both None (unknown size) and 0 (empty download), which
    # would otherwise raise ZeroDivisionError.
    if self.total_size:
      return float(self.resumable_progress) / float(self.total_size)
    return 0.0
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when uploading a media object. It is important to keep the size of the chunk
  as large as possible to keep the upload efficient. Other factors may
  influence the size of the chunk you use, particularly if you are working in
  an environment where individual HTTP requests may have a hardcoded time
  limit, such as under certain classes of requests under Google App Engine.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  def getbytes(self, begin, length):
    """Get bytes from the media.

    The second parameter was named 'end' but documented and implemented by
    every subclass as a byte count; it is named 'length' here to match.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    raise NotImplementedError()

  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    if strip is not None:
      for member in strip:
        del d[member]
    # Record the concrete class and module so new_from_json() can locate the
    # right from_json() classmethod to rebuild the object.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
class MediaFileUpload(MediaUpload):
  """A MediaUpload that reads its media from a named file.

  Construct a MediaFileUpload and pass it as the media_body parameter of the
  method. For example, if we had a service that allowed uploading images:

    media = MediaFileUpload('cow.png', mimetype='image/png',
                            chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file. If None then a mime-type will
        be guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._filename = filename
    self._fd = None  # Opened lazily on the first getbytes() call.
    self._size = os.path.getsize(filename)
    if mimetype is None:
      mimetype = mimetypes.guess_type(filename)[0]
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the file in bytes.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    fd = self._fd
    if fd is None:
      fd = self._fd = open(self._filename, 'rb')
    fd.seek(begin)
    return fd.read(length)

  def to_json(self):
    """Create a JSON representation of an instance of MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json(). The open file handle, if any, is not serialized.
    """
    return self._to_json(strip=['_fd'])

  @staticmethod
  def from_json(s):
    """Reconstruct a MediaFileUpload from the JSON produced by to_json()."""
    d = simplejson.loads(s)
    return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
                           chunksize=d['_chunksize'],
                           resumable=d['_resumable'])
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload that reads its media from an io.Base-compatible object.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.

    fh = io.BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
                              chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, fh, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      fh: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking
        mode.
      mimetype: string, Mime-type of the data being uploaded.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._fh = fh
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    self._size = None
    try:
      if hasattr(self._fh, 'fileno'):
        # Pipes and such show up as 0 length files; only a non-zero stat
        # size is trusted.
        size = os.fstat(self._fh.fileno()).st_size
        if size:
          self._size = size
    except IOError:
      pass

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    self._fh.seek(begin)
    return self._fh.read(length)

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes held in memory.

  Construct a MediaInMemoryUpload and pass it as the media_body parameter of
  the method.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaInMemoryUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body in bytes.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    end = begin + length
    return self._body[begin:end]

  def to_json(self):
    """Create a JSON representation of a MediaInMemoryUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    cls = type(self)
    d = copy.copy(self.__dict__)
    del d['_body']
    d['_class'] = cls.__name__
    d['_module'] = cls.__module__
    # Base64-encode the payload so arbitrary bytes survive JSON encoding.
    d['_b64body'] = base64.b64encode(self._body)
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s):
    """Reconstruct a MediaInMemoryUpload serialized with to_json()."""
    d = simplejson.loads(s)
    return MediaInMemoryUpload(base64.b64decode(d['_b64body']),
                               mimetype=d['_mimetype'],
                               chunksize=d['_chunksize'],
                               resumable=d['_resumable'])
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
def __init__(self, fh, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fh: io.Base or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self.fh_ = fh
self.request_ = request
self.uri_ = request.uri
self.chunksize_ = chunksize
self.progress_ = 0
self.total_size_ = None
self.done_ = False
def next_chunk(self):
"""Get the next chunk of the download.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.Error if a transport error has occured.
"""
headers = {
'range': 'bytes=%d-%d' % (
self.progress_, self.progress_ + self.chunksize_)
}
http = self.request_.http
http.follow_redirects = False
resp, content = http.request(self.uri_, headers=headers)
if resp.status in [301, 302, 303, 307, 308] and 'location' in resp:
self.uri_ = resp['location']
resp, content = http.request(self.uri_, headers=headers)
if resp.status in [200, 206]:
self.progress_ += len(content)
self.fh_.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self.total_size_ = int(length)
if self.progress_ == self.total_size_:
self.done_ = True
return MediaDownloadProgress(self.progress_, self.total_size_), self.done_
else:
raise HttpError(resp, content, self.uri_)
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
        it into a data object before returning, or raising an exception
        on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    # Set to True after a chunk upload attempt raises; next_chunk() then
    # probes the server for how many bytes it already has before resuming.
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    # NOTE(review): this reads the 'headers' parameter directly (not
    # self.headers), so passing headers=None would raise AttributeError here
    # — confirm callers always supply a dict.
    major, minor, params = mimeparse.parse_mime_type(
        headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')
    # The resumable URI to send chunks to.
    self.resumable_uri = None
    # The bytes that have been uploaded.
    self.resumable_progress = 0

  def execute(self, http=None):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
        one the HttpRequest request object was constructed with.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occurred.
    """
    if http is None:
      http = self.http
    if self.resumable:
      body = None
      while body is None:
        # Keep uploading chunks until the final response body is returned.
        _, body = self.next_chunk(http)
      return body
    else:
      if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)
      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=self.headers)
      if resp.status >= 300:
        raise HttpError(resp, content, self.uri)
    return self.postproc(resp, content)

  def next_chunk(self, http=None):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:
      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)

    Args:
      http: httplib2.Http, an http object to be used in place of the
        one the HttpRequest request object was constructed with.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occurred.
    """
    if http is None:
      http = self.http

    # '*' tells the server the total upload size is not yet known.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First call: start the resumable session and record the URI the
      # server assigns for subsequent chunk PUTs.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=start_headers)
      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError("Failed to retrieve starting URI.")
    elif self._in_error_state:
      # If we are in an error state then query the server for current state of
      # the upload by sending an empty PUT and reading the 'range' header in
      # the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    data = self.resumable.getbytes(
        self.resumable_progress, self.resumable.chunksize())

    # A short read implies that we are at EOF, so finish the upload.
    if len(data) < self.resumable.chunksize():
      size = str(self.resumable_progress + len(data))

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, self.resumable_progress + len(data) - 1,
            size)
        }
    try:
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   body=data,
                                   headers=headers)
    except:
      # Remember the failure so the next call resynchronizes with the server
      # before sending more data.
      self._in_error_state = True
      raise

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      # The 'range' header reports the last byte received, inclusive.
      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # The transport and postproc callables are not serializable; callers must
    # supply them again in from_json().
    del d['http']
    del d['postproc']
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = simplejson.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
class BatchHttpRequest(object):
  """Batches multiple HttpRequest objects into a single HTTP request.

  Example:
    from apiclient.http import BatchHttpRequest

    def list_animals(request_id, response):
      \"\"\"Do something with the animals list response.\"\"\"
      pass

    def list_farmers(request_id, response):
      \"\"\"Do something with the farmers list response.\"\"\"
      pass

    service = build('farm', 'v2')

    batch = BatchHttpRequest()

    batch.add(service.animals().list(), list_animals)
    batch.add(service.farmers().list(), list_farmers)
    batch.execute(http)
  """

  def __init__(self, callback=None, batch_uri=None):
    """Constructor for a BatchHttpRequest.

    Args:
      callback: callable, A callback to be called for each response, of the
        form callback(id, response). The first parameter is the request id, and
        the second is the deserialized response object.
      batch_uri: string, URI to send batch requests to.
    """
    if batch_uri is None:
      batch_uri = 'https://www.googleapis.com/batch'
    self._batch_uri = batch_uri

    # Global callback to be called for each individual response in the batch.
    self._callback = callback

    # A map from id to request.
    self._requests = {}

    # A map from id to callback.
    self._callbacks = {}

    # List of request ids, in the order in which they were added.
    self._order = []

    # The last auto generated id.
    self._last_auto_id = 0

    # Unique ID on which to base the Content-ID headers.
    self._base_id = None

    # A map from request id to (headers, content) response pairs.
    self._responses = {}

    # A map of id(Credentials) that have been refreshed, so each Credentials
    # object is refreshed at most once per execute().
    self._refreshed_credentials = {}

  def _refresh_and_apply_credentials(self, request, http):
    """Refresh the credentials and apply to the request.

    Args:
      request: HttpRequest, the request.
      http: httplib2.Http, the global http object for the batch.
    """
    # Find the credentials to refresh: prefer the per-request http object,
    # falling back to the http passed in via execute().
    creds = None
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      creds = request.http.request.credentials
    elif http is not None and hasattr(http.request, 'credentials'):
      creds = http.request.credentials
    if creds is not None:
      if id(creds) not in self._refreshed_credentials:
        creds.refresh(http)
        self._refreshed_credentials[id(creds)] = 1

      # Only apply the credentials if we are using the http object passed in,
      # otherwise apply() will get called during _serialize_request().
      if request.http is None or not hasattr(request.http.request,
          'credentials'):
        creds.apply(request.headers)

  def _id_to_header(self, id_):
    """Convert an id to a Content-ID header value.

    Args:
      id_: string, identifier of individual request.

    Returns:
      A Content-ID header with the id_ encoded into it. A UUID is prepended to
      the value because Content-ID headers are supposed to be universally
      unique.
    """
    if self._base_id is None:
      self._base_id = uuid.uuid4()

    return '<%s+%s>' % (self._base_id, urllib.quote(id_))

  def _header_to_id(self, header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that _id_to_header()
    returns.

    Args:
      header: string, Content-ID header value.

    Returns:
      The extracted id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    if header[0] != '<' or header[-1] != '>':
      raise BatchError("Invalid value for Content-ID: %s" % header)
    if '+' not in header:
      raise BatchError("Invalid value for Content-ID: %s" % header)
    # Split on the last '+' so ids containing '+' (percent-encoded or not)
    # don't truncate the base part.
    base, id_ = header[1:-1].rsplit('+', 1)

    return urllib.unquote(id_)

  def _serialize_request(self, request):
    """Convert an HttpRequest object into a string.

    Args:
      request: HttpRequest, the request to serialize.

    Returns:
      The request as a string in application/http format.
    """
    # Construct status line
    parsed = urlparse.urlparse(request.uri)
    # Strip scheme and host; only the path portion is sent in the batch part.
    request_line = urlparse.urlunparse(
        (None, None, parsed.path, parsed.params, parsed.query, None)
        )
    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
    major, minor = request.headers.get('content-type', 'application/json').split('/')
    msg = MIMENonMultipart(major, minor)
    headers = request.headers.copy()

    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      request.http.request.credentials.apply(headers)

    # MIMENonMultipart adds its own Content-Type header.
    if 'content-type' in headers:
      del headers['content-type']

    for key, value in headers.iteritems():
      msg[key] = value
    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
      msg.set_payload(request.body)
      msg['content-length'] = str(len(request.body))

    # Serialize the mime message.
    fp = StringIO.StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    g = Generator(fp, maxheaderlen=0)
    g.flatten(msg, unixfrom=False)
    body = fp.getvalue()

    # Strip off the \n\n that the MIME lib tacks onto the end of the payload.
    if request.body is None:
      body = body[:-2]

    return status_line.encode('utf-8') + body

  def _deserialize_response(self, payload):
    """Convert string into httplib2 response and content.

    Args:
      payload: string, headers and body as a string.

    Returns:
      A pair (resp, content) like would be returned from httplib2.request.
    """
    # Strip off the status line
    status_line, payload = payload.split('\n', 1)
    protocol, status, reason = status_line.split(' ', 2)

    # Parse the rest of the response
    parser = FeedParser()
    parser.feed(payload)
    msg = parser.close()
    msg['status'] = status

    # Create httplib2.Response from the parsed headers.
    resp = httplib2.Response(msg)
    resp.reason = reason
    # e.g. 'HTTP/1.1' -> version 11.
    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

    content = payload.split('\r\n\r\n', 1)[1]

    return resp, content

  def _new_id(self):
    """Create a new id.

    Auto incrementing number that avoids conflicts with ids already used.

    Returns:
      string, a new unique id.
    """
    self._last_auto_id += 1
    while str(self._last_auto_id) in self._requests:
      self._last_auto_id += 1

    return str(self._last_auto_id)

  def add(self, request, callback=None, request_id=None):
    """Add a new request.

    Every callback added will be paired with a unique id, the request_id. That
    unique id will be passed back to the callback when the response comes back
    from the server. The default behavior is to have the library generate its
    own unique id. If the caller passes in a request_id then they must ensure
    uniqueness for each request_id, and if they are not unique an exception is
    raised. Callers should either supply all request_ids or never supply a
    request id, to avoid such an error.

    Args:
      request: HttpRequest, Request to add to the batch.
      callback: callable, A callback to be called for this response, of the
        form callback(id, response). The first parameter is the request id, and
        the second is the deserialized response object.
      request_id: string, A unique id for the request. The id will be passed to
        the callback with the response.

    Returns:
      None

    Raises:
      BatchError if a media request is added to a batch.
      KeyError is the request_id is not unique.
    """
    if request_id is None:
      request_id = self._new_id()
    if request.resumable is not None:
      raise BatchError("Media requests cannot be used in a batch request.")
    if request_id in self._requests:
      raise KeyError("A request with this ID already exists: %s" % request_id)
    self._requests[request_id] = request
    self._callbacks[request_id] = callback
    self._order.append(request_id)

  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      requests: dict, map of request id to request object to send.

    Raises:
      httplib2.Error if a transport error has occurred.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    body = message.as_string()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, 'POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, self._batch_uri)

    # Now break out the individual responses and store each one.
    boundary, _ = content.split(None, 1)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp,
          content)

    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      headers, content = self._deserialize_response(part.get_payload())
      self._responses[request_id] = (headers, content)

  def execute(self, http=None):
    """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with. If one isn't supplied
        then use a http object from the requests in this batch.

    Returns:
      None

    Raises:
      httplib2.Error if a transport error has occurred.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    # If http is not supplied use the first valid one given in the requests.
    if http is None:
      for request_id in self._order:
        request = self._requests[request_id]
        if request is not None:
          http = request.http
          break

    if http is None:
      raise ValueError("Missing a valid http object.")

    self._execute(http, self._order, self._requests)

    # Loop over all the requests and check for 401s. For each 401 request the
    # credentials should be refreshed and then sent again in a separate batch.
    redo_requests = {}
    redo_order = []

    for request_id in self._order:
      headers, content = self._responses[request_id]
      if headers['status'] == '401':
        redo_order.append(request_id)
        request = self._requests[request_id]
        self._refresh_and_apply_credentials(request, http)
        redo_requests[request_id] = request

    if redo_requests:
      self._execute(http, redo_order, redo_requests)

    # TODO: Now process all callbacks that are erroring, and raise an
    # exception for ones that return a non-2xx response? Or add extra
    # parameter to callback that contains an HttpError?
    for request_id in self._order:
      headers, content = self._responses[request_id]

      request = self._requests[request_id]
      callback = self._callbacks[request_id]

      response = None
      exception = None
      try:
        r = httplib2.Response(headers)
        response = request.postproc(r, content)
      except HttpError, e:
        exception = e

      # Per-request callback first, then the batch-wide callback.
      if callback is not None:
        callback(request_id, response, exception)
      if self._callback is not None:
        self._callback(request_id, response, exception)
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
        the model class. See model.JsonModel.response() as an example.
    """
    if resp is None:
      # No response supplied: emulate a plain 200 OK.
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    self.content = content
    self.postproc = postproc
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content or raise an exception
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    check_unexpected - A boolean setting whether or not UnexpectedMethodError
                       should be raised on unsupplied method.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    if methodId not in self.responses:
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId)
      # No canned response registered: fall back to an empty 200 OK.
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    canned = self.responses[methodId]
    resp, content = canned[:2]
    if len(canned) > 2:
      # Test the body against the supplied expected_body.
      expected_body = canned[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      if isinstance(expected_body, str):
        expected_body = simplejson.loads(expected_body)
      body = simplejson.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
  """Mock of httplib2.Http that replays a canned response read from disk."""

  def __init__(self, filename, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response; defaults to a plain
        200 OK status.
    """
    if headers is None:
      headers = {'status': '200 OK'}
    # Use open() rather than the deprecated file() builtin (removed in
    # Python 3), and close the handle even if read() raises.
    f = open(filename, 'r')
    try:
      self.data = f.read()
    finally:
      f.close()
    self.headers = headers

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Return the canned (httplib2.Response, content) pair for any request."""
    return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
  """Mock of httplib2.Http

  Mocks a sequence of calls to request, returning a different response
  for each call. Create an instance initialized with the desired response
  headers and content, then use it as if it were an httplib2.Http instance:

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behaviours that are helpful in testing:

    'echo_request_headers'         return the request headers in the body
    'echo_request_headers_as_json' return the request headers, JSON-encoded
    'echo_request_body'            return the request body in the body
    'echo_request_uri'             return the request uri in the body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable
    self.follow_redirects = True

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Pop and return the next scripted response, applying echo magic."""
    resp_headers, payload = self._iterable.pop(0)
    if payload == 'echo_request_uri':
      payload = uri
    elif payload == 'echo_request_body':
      payload = body
    elif payload == 'echo_request_headers_as_json':
      payload = simplejson.dumps(headers)
    elif payload == 'echo_request_headers':
      payload = headers
    return httplib2.Response(resp_headers), payload
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
    http - An instance of httplib2.Http
        or something that acts like it.
    user_agent: string, the value for the user-agent header.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the
  rare cases where you are accessing an unauthenticated endpoint.
  """
  wrapped_request = http.request

  # Closure that stands in for 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Inject the user-agent header, then delegate to the original request."""
    if headers is None:
      headers = {}
    if 'user-agent' in headers:
      # Prepend our agent string to whatever the caller already set.
      headers['user-agent'] = '%s %s' % (user_agent, headers['user-agent'])
    else:
      headers['user-agent'] = user_agent
    return wrapped_request(uri, method, body, headers, redirections,
                           connection_type)

  http.request = new_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
    http - An instance of httplib2.Http
        or something that acts like it.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Rewrite PATCH into a POST carrying an x-http-method-override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        # An OAuth 1.0 signature computed over 'PATCH' will not match the
        # POST we actually send; warn so the wrappers can be reordered.
        logging.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    resp, content = request_orig(uri, method, body, headers,
                                 redirections, connection_type)
    return resp, content

  http.request = new_request
  return http
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Base error for this module."""
  pass


class RequestError(Error):
  """Error occurred during request."""
  pass


class MissingParameter(Error):
  """A required parameter was not supplied."""
  pass


class CredentialsInvalidError(Error):
  """The credentials are no longer valid, for example after revocation."""
  pass
def _abstract():
  """Raised by base-class methods that subclasses are required to override."""
  raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
  """Look up an OAuth URI in the discovery document and append a query.

  name - The name of the OAuth URI to lookup, one
      of 'request', 'access', or 'authorize'.
  discovery - Portion of discovery document the describes
      the OAuth endpoints.
  params - Dictionary that is used to form the query parameters
      for the specified URI.

  Raises:
    KeyError: if name is not one of the three known endpoint names.
  """
  if name not in ('request', 'access', 'authorize'):
    raise KeyError(name)
  endpoint = discovery[name]
  # Keep only the parameters this endpoint declares.
  query = dict((key, params[key])
               for key in endpoint['parameters'].keys() if key in params)
  return endpoint['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
  """Abstract base class for all Credentials objects.

  Subclasses must define an authorize() method
  that applies the credentials to an HTTP transport.
  """

  def authorize(self, http):
    """Authorize an httplib2.Http instance (or equivalent) for this set of
    credentials, usually by replacing http.request() with a wrapper that
    injects the appropriate headers and then delegates to the original
    Http.request() method.
    """
    _abstract()
class Flow(object):
  """Abstract base class for all Flow objects."""
  pass
class Storage(object):
  """Abstract base class for storing and retrieving a single credential."""

  def get(self):
    """Retrieve credential.

    Returns:
      apiclient.oauth.Credentials
    """
    _abstract()

  def put(self, credentials):
    """Write a credential.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
class OAuthCredentials(Credentials):
  """Credentials object for OAuth 1.0a
  """

  def __init__(self, consumer, token, user_agent):
    """
    consumer - An instance of oauth.Consumer.
    token - An instance of oauth.Token constructed with
      the access token and secret.
    user_agent - The HTTP User-Agent to provide for this application.
    """
    self.consumer = consumer
    self.token = token
    self.user_agent = user_agent
    # Set later via set_store(); used to persist this credential when it
    # is discovered to be invalid.
    self.store = None

    # True if the credentials have been revoked
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr() guards against instances unpickled from an older version
    # of this class that never set _invalid.
    return getattr(self, "_invalid", False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store callable is not picklable; drop it and restore on unpickle.
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      response_code = 302
      # Redirects are followed manually here so that each hop is
      # re-signed against its own absolute URI.
      http.follow_redirects = False
      while response_code in [301, 302]:
        req = oauth.Request.from_consumer_and_token(
            self.consumer, self.token, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, self.token)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      # Update the stored credential if it becomes invalid.
      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        self._invalid = True
        if self.store is not None:
          self.store(self)
        raise CredentialsInvalidError("Credentials are no longer valid.")

      return resp, content

    http.request = new_request
    return http
class TwoLeggedOAuthCredentials(Credentials):
  """Two Legged Credentials object for OAuth 1.0a.

  The Two Legged object is created directly, not from a flow. Once you
  authorize an httplib2.Http instance you can change the requestor and that
  change will propagate to the authorized httplib2.Http instance. For example:

    http = httplib2.Http()
    http = credentials.authorize(http)

    credentials.requestor = 'foo@example.info'
    http.request(...)
    credentials.requestor = 'bar@example.info'
    http.request(...)
  """

  def __init__(self, consumer_key, consumer_secret, user_agent):
    """
    Args:
      consumer_key: string, An OAuth 1.0 consumer key
      consumer_secret: string, An OAuth 1.0 consumer secret
      user_agent: string, The HTTP User-Agent to provide for this application.
    """
    self.consumer = oauth.Consumer(consumer_key, consumer_secret)
    self.user_agent = user_agent
    # Kept for API symmetry with OAuthCredentials; 2LO credentials never
    # become invalid, so the store is never actually called.
    self.store = None

    # email address of the user to act on the behalf of.
    self._requestor = None

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked.

    Always returns False for Two Legged Credentials.
    """
    return False

  def getrequestor(self):
    # Accessor backing the 'requestor' property below.
    return self._requestor

  def setrequestor(self, email):
    # Mutator backing the 'requestor' property below.
    self._requestor = email

  requestor = property(getrequestor, setrequestor, None,
      'The email address of the user to act on behalf of')

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store callable is not picklable; drop it and restore on unpickle.
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      response_code = 302
      # Redirects are followed manually here so that each hop is
      # re-signed against its own absolute URI.
      http.follow_redirects = False
      while response_code in [301, 302]:
        # add in xoauth_requestor_id=self._requestor to the uri
        if self._requestor is None:
          raise MissingParameter(
              'Requestor must be set before using TwoLeggedOAuthCredentials')
        parsed = list(urlparse.urlparse(uri))
        q = parse_qsl(parsed[4])
        q.append(('xoauth_requestor_id', self._requestor))
        parsed[4] = urllib.urlencode(q)
        uri = urlparse.urlunparse(parsed)

        req = oauth.Request.from_consumer_and_token(
            self.consumer, None, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, None)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        # Do not store the invalid state of the Credentials because
        # being 2LO they could be reinstated in the future.
        raise CredentialsInvalidError("Credentials are invalid.")

      return resp, content

    http.request = new_request
    return http
class FlowThreeLegged(Flow):
  """Does the Three Legged Dance for OAuth 1.0a.
  """

  def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
               **kwargs):
    """
    discovery - Section of the API discovery document that describes
      the OAuth endpoints.
    consumer_key - OAuth consumer key
    consumer_secret - OAuth consumer secret
    user_agent - The HTTP User-Agent that identifies the application.
    **kwargs - The keyword arguments are all optional and required
      parameters for the OAuth calls.

    Raises:
      MissingParameter: if a parameter the discovery document marks as
        required was not supplied in kwargs.
    """
    self.discovery = discovery
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.user_agent = user_agent
    self.params = kwargs
    # Populated by step1_get_authorize_url(), consumed by step2_exchange().
    self.request_token = {}
    required = {}
    # Collect every required non-oauth_ parameter across all endpoints so
    # a missing one fails fast here instead of mid-dance.
    for uriinfo in discovery.itervalues():
      for name, value in uriinfo['parameters'].iteritems():
        if value['required'] and not name.startswith('oauth_'):
          required[name] = 1
    for key in required.iterkeys():
      if key not in self.params:
        raise MissingParameter('Required parameter %s not supplied' % key)

  def step1_get_authorize_url(self, oauth_callback='oob'):
    """Returns a URI to redirect to the provider.

    oauth_callback - Either the string 'oob' for a non-web-based application,
      or a URI that handles the callback from the authorization
      server.

    If oauth_callback is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    body = urllib.urlencode({'oauth_callback': oauth_callback})
    uri = _oauth_uri('request', self.discovery, self.params)

    resp, content = client.request(uri, 'POST', headers=headers,
                                   body=body)
    if resp['status'] != '200':
      logging.error('Failed to retrieve temporary authorization: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    # Remember the request token; step2_exchange needs it to trade for an
    # access token.
    self.request_token = dict(parse_qsl(content))

    auth_params = copy.copy(self.params)
    auth_params['oauth_token'] = self.request_token['oauth_token']

    return _oauth_uri('authorize', self.discovery, auth_params)

  def step2_exchange(self, verifier):
    """Exchanges an authorized request token
    for OAuthCredentials.

    Args:
      verifier: string, dict - either the verifier token, or a dictionary
        of the query parameters to the callback, which contains
        the oauth_verifier.

    Returns:
      The Credentials object.

    Raises:
      RequestError: if the access-token request does not return a 200.
    """
    if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
      verifier = verifier['oauth_verifier']

    token = oauth.Token(
        self.request_token['oauth_token'],
        self.request_token['oauth_token_secret'])
    token.set_verifier(verifier)
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer, token)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }

    uri = _oauth_uri('access', self.discovery, self.params)
    resp, content = client.request(uri, 'POST', headers=headers)
    if resp['status'] != '200':
      logging.error('Failed to retrieve access token: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    oauth_params = dict(parse_qsl(content))
    token = oauth.Token(
        oauth_params['oauth_token'],
        oauth_params['oauth_token_secret'])

    return OAuthCredentials(consumer, token, self.user_agent)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from errors import HttpError
from oauth2client.anyjson import simplejson
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
)
def _abstract():
  """Raised by base-class methods that subclasses are required to override."""
  raise NotImplementedError('You need to override this function')
class Model(object):
  """Model base class.

  All Model classes should implement this interface. A Model serializes
  and de-serializes between a wire format such as JSON and a Python
  object representation.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
                  serializable.
    Returns:
      A tuple of (headers, path_params, query, body)

      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query: string, query part of the request URI
      body: string, the body serialized in the desired wire format.
    """
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response
    Returns:
      The body de-serialized as a Python object.
    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """
  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
                  serializable.
    Returns:
      A tuple of (headers, path_params, query, body)

      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query: string, query part of the request URI
      body: string, the body serialized in the desired wire format.
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'

    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    if self.alt_param is not None:
      params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if isinstance(value, list):
        for x in value:
          # Guard the encode call just like the scalar branch below:
          # repeated parameters need not be strings (ints, bools, ...),
          # and calling .encode() unconditionally raised AttributeError
          # for such values.
          if getattr(x, 'encode', False) and callable(x.encode):
            x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response
    Returns:
      The body de-serialized as a Python object.
    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request and response bodies.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    # When wrapping is enabled, nest a plain dict body under 'data'
    # unless the caller already supplied the wrapper.
    needs_wrap = (self._data_wrapper and isinstance(body_value, dict)
                  and 'data' not in body_value)
    if needs_wrap:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    # Unwrap a 'data' envelope if the server supplied one.
    body = simplejson.loads(content)
    if isinstance(body, dict) and 'data' in body:
      return body['data']
    return body

  @property
  def no_content_response(self):
    # A 204 deserializes to an empty object.
    return {}
class RawModel(JsonModel):
  """Model class for requests that don't return JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request, and returns the raw bytes
  of the response body.
  """
  accept = '*/*'
  content_type = 'application/json'
  alt_param = None

  def deserialize(self, content):
    # The response body is handed back untouched.
    return content

  @property
  def no_content_response(self):
    # A 204 has no body; represent it as the empty string.
    return ''
class MediaModel(JsonModel):
  """Model class for requests that return Media.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request, and returns the raw bytes
  of the response body.
  """
  accept = '*/*'
  content_type = 'application/json'
  alt_param = 'media'

  def deserialize(self, content):
    # Media bytes are handed back untouched.
    return content

  @property
  def no_content_response(self):
    # A 204 has no body; represent it as the empty string.
    return ''
class ProtocolBufferModel(BaseModel):
  """Model class for protocol buffers.

  Serializes and de-serializes the binary protocol buffer sent in the HTTP
  request and response bodies.
  """
  accept = 'application/x-protobuf'
  content_type = 'application/x-protobuf'
  alt_param = 'proto'

  def __init__(self, protocol_buffer):
    """Constructs a ProtocolBufferModel.

    The serialized protocol buffer returned in an HTTP response will be
    de-serialized using the given protocol buffer class.

    Args:
      protocol_buffer: The protocol buffer class used to de-serialize a
          response from the API.
    """
    self._protocol_buffer = protocol_buffer

  def serialize(self, body_value):
    # Delegate to the message's own binary encoder.
    return body_value.SerializeToString()

  def deserialize(self, content):
    # Parse the raw bytes with the configured message class.
    return self._protocol_buffer.FromString(content)

  @property
  def no_content_response(self):
    # An empty message of the configured type stands in for "no body".
    return self._protocol_buffer()
def makepatch(original, modified):
  """Create a patch object.

  Some methods support PATCH, an efficient way to send updates to a resource.
  This method allows the easy construction of patch bodies by looking at the
  differences between a resource before and after it was modified.

  Args:
    original: object, the original deserialized resource
    modified: object, the modified deserialized resource
  Returns:
    An object that contains only the changes from original to modified, in a
    form suitable to pass to a PATCH method.

  Example usage:
    item = service.activities().get(postid=postid, userid=userid).execute()
    original = copy.deepcopy(item)
    item['object']['content'] = 'This is updated.'
    service.activities.patch(postid=postid, userid=userid,
      body=makepatch(original, item)).execute()
  """
  patch = {}
  # .items() (not .iteritems()) keeps this working on both Python 2 and 3.
  for key, original_value in original.items():
    modified_value = modified.get(key, None)
    if modified_value is None:
      # Use None to signal that the element is deleted
      patch[key] = None
    elif original_value != modified_value:
      if isinstance(original_value, dict) and isinstance(modified_value, dict):
        # Recursively descend objects. Both sides must be dicts: when a
        # dict value is replaced by a scalar, recursing would crash, so
        # the replacement branch below handles it instead.
        patch[key] = makepatch(original_value, modified_value)
      else:
        # In the case of simple types or arrays we just replace
        patch[key] = modified_value
    else:
      # Don't add anything to patch if there's no change
      pass
  # Anything present only in the modified object is an addition.
  for key in modified:
    if key not in original:
      patch[key] = modified[key]

  return patch
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build',
'build_from_document'
'fix_method_name',
'key2param'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
# Module-level logger for this package.
logger = logging.getLogger(__name__)

# Matches a URI Template expression such as '{userId}'.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a run of identifier-like characters (used for variable names).
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template for the location of a service's discovery document.
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods with no description.
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']

# Python reserved words.
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
                  'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
                  'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
                  'pass', 'print', 'raise', 'return', 'try', 'while' ]


def fix_method_name(name):
  """Fix method names to avoid reserved word conflicts.

  Args:
    name: string, method name.

  Returns:
    The name with a '_' appended if the name is a reserved word.
  """
  if name in RESERVED_WORDS:
    return name + '_'
  return name
def _add_query_parameter(url, name, value):
  """Adds a query parameter to a url.

  Replaces the current value if it already exists in the URL.

  Args:
    url: string, url to add the query parameter to.
    name: string, query parameter name.
    value: string, query parameter value.

  Returns:
    The updated url. The url is returned unchanged if value is None.
  """
  if value is None:
    return url
  parts = list(urlparse.urlparse(url))
  # Re-encode the query with the new (or replaced) parameter.
  query = dict(parse_qsl(parts[4]))
  query[name] = value
  parts[4] = urllib.urlencode(query)
  return urlparse.urlunparse(parts)
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results"

  Args:
    key: string, the method key name.

  Returns:
    A safe method name based on the key name.
  """
  chars = []
  if not key[0].isalpha():
    # Identifiers must start with a letter; prefix with 'x' otherwise.
    chars.append('x')
  for ch in key:
    if ch.isalnum():
      chars.append(ch)
    else:
      chars.append('_')
  return ''.join(chars)
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Fetches the discovery document for the named service and version, then
  delegates to build_from_document() to construct the Resource.

  Args:
    serviceName: string, name of the service.
    version: string, the version of the service.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to the location of
      the discovery service. It should have two parameters {api} and
      {apiVersion} that when filled in produce an absolute URI to the discovery
      document for that service.
    developerKey: string, key obtained from
      https://code.google.com/apis/console.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
      request.

  Returns:
    A Resource object with methods for interacting with the service.

  Raises:
    UnknownApiNameOrVersion: if the discovery service returns 404.
    HttpError: on any other HTTP error response.
    InvalidJsonError: if the discovery document is not valid JSON.
  """
  requested_url = uritemplate.expand(discoveryServiceUrl,
                                     {'api': serviceName,
                                      'apiVersion': version})

  if http is None:
    http = httplib2.Http()

  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  remote_addr = os.environ.get('REMOTE_ADDR')
  if remote_addr is not None:
    requested_url = _add_query_parameter(requested_url, 'userIp', remote_addr)
  logger.info('URL being requested: %s' % requested_url)

  resp, content = http.request(requested_url)

  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)

  # Validate up front that the document parses, so callers get a clear
  # error rather than a failure deep inside build_from_document().
  try:
    simplejson.loads(content)
  except ValueError:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()

  return build_from_document(content, discoveryServiceUrl, http=http,
      developerKey=developerKey, model=model, requestBuilder=requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.

  Args:
    service: string, discovery document.
    base: string, base URI for all HTTP requests, usually the discovery URI.
    future: string, discovery document with future capabilities (deprecated).
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and de-serializes requests and
      responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with the service.
  """
  # The 'future' argument is ignored; it is kept only for backward
  # compatibility with older callers.
  future = {}

  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  schema = Schemas(service)

  if model is None:
    # 'dataWrapper' in features means responses are wrapped in a 'data' dict.
    model = JsonModel('dataWrapper' in service.get('features', []))

  return _createResource(http, base, model, requestBuilder, developerKey,
                         service, service, schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
def _createResource(http, baseUrl, model, requestBuilder,
                    developerKey, resourceDesc, rootDesc, schema):
  """Build a Resource from the API description.

  Dynamically constructs a Resource class, attaches one generated method per
  discovery method (plus optional '_media' and '_next' variants and nested
  resource accessors), and returns an instance of it. Called recursively for
  nested resources via createResourceMethod.

  Args:
    http: httplib2.Http, Object to make http requests with.
    baseUrl: string, base URL for the API. All requests are relative to this
        URI.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: class or callable that instantiates an
        apiclient.HttpRequest object.
    developerKey: string, key obtained from
        https://code.google.com/apis/console
    resourceDesc: object, section of deserialized discovery document that
        describes a resource. Note that the top level discovery document
        is considered a resource.
    rootDesc: object, the entire deserialized discovery document.
    schema: object, mapping of schema names to schema descriptions.

  Returns:
    An instance of Resource with all the methods attached for interacting with
    that resource.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      # Captured from the enclosing _createResource call.
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates a method for attaching to a Resource.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
          describes the method.
      rootDesc: object, the entire deserialized discovery document.
    """
    methodName = fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    # Media-upload settings; only populated when the method supports upload.
    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      # TODO(jcgregorio) Use URLs from discovery once it is updated.
      parsed = list(urlparse.urlparse(baseUrl))
      basePath = parsed[2]
      mediaPathUrl = '/upload' + basePath + pathUrl
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    # NOTE(review): methodDesc is a fragment of the shared discovery dict and
    # is mutated in place below (parameters are added to it).
    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    # Add in the parameters common to all methods.
    for name, desc in rootDesc.get('parameters', {}).iteritems():
      methodDesc['parameters'][name] = desc
    # Add in undocumented query parameters.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }
    if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      # NOTE(review): this inner check is always true given the outer
      # condition; the else branch is unreachable.
      if 'request' in methodDesc:
        methodDesc['parameters']['body'].update(methodDesc['request'])
      else:
        methodDesc['parameters']['body']['type'] = 'object'
    if 'mediaUpload' in methodDesc:
      methodDesc['parameters']['media_body'] = {
          'description': 'The filename of the media request body.',
          'type': 'string',
          'required': False,
          }
      # A body is optional when media can be uploaded instead.
      if 'body' in methodDesc['parameters']:
        methodDesc['parameters']['body']['required'] = False

    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {}  # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Any variable that appears in the URI Template path is a path parameter,
    # even if the discovery doc marked it as a query parameter.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Don't bother with doc string, it will be over-written by createMethod.

      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      # Remove args that have a value of None.
      keys = kwargs.keys()
      for name in keys:
        if kwargs[name] is None:
          del kwargs[name]

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      # Validate pattern-constrained parameters (each value of a repeated
      # parameter is checked individually).
      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          # We need to handle the case of a repeated enum
          # name differently, since we want to handle both
          # arg='value' and arg=['value1', 'value2']
          if (name in repeated_params and
              not isinstance(kwargs[name], basestring)):
            values = kwargs[name]
          else:
            values = [kwargs[name]]
          for value in values:
            if value not in enums:
              raise TypeError(
                  'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                  (name, value, str(enums)))

      # Split the surviving kwargs into query-string and path parameters,
      # stringifying each per its declared JSON Schema type.
      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      model = self._model
      # If there is no schema for the response then presume a binary blob.
      if methodName.endswith('_media'):
        model = MediaModel()
      elif 'response' not in methodDesc:
        model = RawModel()

      headers = {}
      headers, params, query, body = model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
          url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
          # This is all we need to do for resumable, if the body exists it gets
          # sent in the first request, otherwise an empty body is sent.
          resumable = media_upload
        else:
          # A non-resumable upload
          if body is None:
            # This is a simple media upload
            headers['content-type'] = media_upload.mimetype()
            body = media_upload.getbytes(0, media_upload.size())
            url = _add_query_parameter(url, 'uploadType', 'media')
          else:
            # This is a multipart/related upload.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out its own headers
            setattr(msgRoot, '_write_headers', lambda self: None)

            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)

            # attach the media as the second part
            msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)

            body = msgRoot.as_string()

            multipart_boundary = msgRoot.get_boundary()
            headers['content-type'] = ('multipart/related; '
                                       'boundary="%s"') % multipart_boundary
            url = _add_query_parameter(url, 'uploadType', 'multipart')

      logger.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    # Build the generated method's docstring from the discovery description.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')

    # Skip undocumented params and params common to all methods.
    skip_parameters = rootDesc.get('parameters', {}).keys()
    # NOTE(review): this appends the STACK_QUERY_PARAMETERS list as a single
    # element rather than extending; 'trace' etc. are therefore not actually
    # skipped. Likely should be extend() — confirm before changing.
    skip_parameters.append(STACK_QUERY_PARAMETERS)

    for arg in argmap.iterkeys():
      if arg in skip_parameters:
        continue
      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      if '$ref' in paramdesc:
        docs.append(
            (' %s: object, %s%s%s\n The object takes the'
            ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
              schema.prettyPrintByName(paramdesc['$ref'])))
      else:
        paramtype = paramdesc.get('type', 'string')
        docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                           repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append(' Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append(' %s - %s\n' % (name, desc))
    if 'response' in methodDesc:
      if methodName.endswith('_media'):
        docs.append('\nReturns:\n The media object as a string.\n\n ')
      else:
        docs.append('\nReturns:\n An object of the form:\n\n ')
        docs.append(schema.prettyPrintSchema(methodDesc['response']))

    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates any _next methods for attaching to a Resource.

    The _next methods allow for easy iteration through list() responses.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
          describes the method.
      rootDesc: object, the entire deserialized discovery document.
          (Accepted for signature symmetry with createMethod but unused here.)
    """
    methodName = fix_method_name(methodName)
    # NOTE(review): methodId is computed but never used by methodNext below.
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.

      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logger.info('URL being requested: %s' % uri)
      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      createMethod(Resource, methodName, methodDesc, rootDesc)
      # Add in _media methods. The functionality of the attached method will
      # change when it sees that the method name ends in _media.
      if methodDesc.get('supportsMediaDownload', False):
        createMethod(Resource, methodName + '_media', methodDesc, rootDesc)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
      """Create a method on the Resource to access a nested Resource.

      Args:
        theclass: type, the class to attach methods to.
        methodName: string, name of the method to use.
        methodDesc: object, fragment of deserialized discovery document that
            describes the method.
        rootDesc: object, the entire deserialized discovery document.
      """
      methodName = fix_method_name(methodName)

      def methodResource(self):
        # Recurse: build a child Resource for the nested resource description.
        return _createResource(self._http, self._baseUrl, self._model,
                               self._requestBuilder, self._developerKey,
                               methodDesc, rootDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      createResourceMethod(Resource, methodName, methodDesc, rootDesc)

  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema.get(responseSchema['$ref'])
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          # NOTE(review): the last argument here is methodName, but the
          # createNextMethod parameter in that position is rootDesc (unused,
          # so currently harmless) — looks like it should pass rootDesc.
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           methodName)

  return Resource()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file.

  Access to the file is serialized with a threading.Lock so concurrent
  get()/put() calls from multiple threads do not interleave reads and writes.
  """

  def __init__(self, filename):
    # Path of the file holding the pickled credential.
    self._filename = filename
    # Guards all reads and writes of the credential file.
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials if the file exists and unpickles cleanly,
      otherwise None (a missing, unreadable or corrupt file is treated as
      "no stored credential", not an error).
    """
    self._lock.acquire()
    try:
      # Binary mode: pickle data is bytes, and text mode corrupts it on
      # platforms that translate line endings.
      f = open(self._filename, 'rb')
      try:
        credentials = pickle.loads(f.read())
      finally:
        f.close()
      # Wire the credential back to this store so refreshes are persisted.
      credentials.set_store(self.put)
    except Exception:
      # Deliberate best-effort: any failure means "no stored credential".
      credentials = None
    finally:
      # Release in a finally block so an unexpected error cannot leave the
      # lock held and deadlock later calls.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'wb')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        f.close()
    finally:
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  """Django model field that persists apiclient OAuth Credentials.

  The credential is pickled and base64-encoded into a VARCHAR column;
  reading the field transparently decodes and unpickles it.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    # Stored as an opaque base64 string.
    return 'VARCHAR'

  def to_python(self, value):
    """Convert a database value back into a Credentials object."""
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.Credentials):
      return value
    decoded = base64.b64decode(value)
    return pickle.loads(decoded)

  def get_db_prep_value(self, value):
    """Serialize a Credentials object for storage."""
    pickled = pickle.dumps(value)
    return base64.b64encode(pickled)
class FlowThreeLeggedField(models.Field):
  """Django model field that persists an apiclient FlowThreeLegged object.

  The flow is pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    # Stored as an opaque base64 string.
    return 'VARCHAR'

  def to_python(self, value):
    """Convert a database value back into a FlowThreeLegged object.

    Args:
      value: None, an apiclient.oauth.FlowThreeLegged instance, or a
          base64-encoded pickle of one.

    Returns:
      The deserialized FlowThreeLegged, or None.
    """
    # Removed a leftover debugging print that wrote to stdout on every
    # field access.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.FlowThreeLegged):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Serialize a FlowThreeLegged object for storage."""
    return base64.b64encode(pickle.dumps(value))
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.