code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
"""
Utilities for creating dot output from a MachOGraph
XXX: need to rewrite this based on altgraph.Dot
"""
from collections import deque
try:
from itertools import imap
except ImportError:
imap = map
__all__ = ['itergraphreport']
def itergraphreport(nodes, describe_edge, name='G'):
    """
    Yield the lines of a GraphViz ``dot`` digraph describing *nodes*.

    nodes         -- iterable of (node, data, outgoing, incoming) tuples
    describe_edge -- callable mapping an outgoing entry to an
                     (edge, data, head, tail) tuple
    name          -- name of the generated digraph

    Only edges whose data is 'run_file' or 'load_dylib' are emitted.
    """
    edges = deque()
    nodetoident = {}

    def nodevisitor(node, data, outgoing, incoming):
        # Attributes attached to each emitted node.
        return {'label': str(node)}

    def edgevisitor(edge, data, head, tail):
        # Attributes attached to each emitted edge (none by default).
        return {}

    yield 'digraph %s {\n' % (name,)
    attr = dict(rankdir='LR', concentrate='true')
    cpatt = '%s="%s"'
    # BUG FIX: dict.iteritems() does not exist on Python 3; .items()
    # behaves identically here (and on Python 2).
    for item in attr.items():
        yield '\t%s;\n' % (cpatt % item,)
    # BUG FIX: ``nodes`` is iterated twice below; snapshot it so a
    # one-shot iterator does not come up empty on the second pass.
    nodes = list(nodes)
    # find all packages (subgraphs)
    for (node, data, outgoing, incoming) in nodes:
        nodetoident[node] = getattr(data, 'identifier', node)
    # create sets for subgraph, write out descriptions
    for (node, data, outgoing, incoming) in nodes:
        # update edges (builtin ``map`` replaces the Py2-only ``imap``)
        for edge in map(describe_edge, outgoing):
            edges.append(edge)
        # describe node
        yield '\t"%s" [%s];\n' % (
            node,
            ','.join([
                (cpatt % item) for item in
                nodevisitor(node, data, outgoing, incoming).items()
            ]),
        )
    graph = []
    while edges:
        edge, data, head, tail = edges.popleft()
        if data in ('run_file', 'load_dylib'):
            graph.append((edge, data, head, tail))

    def do_graph(edges, tabs):
        edgestr = tabs + '"%s" -> "%s" [%s];\n'
        # describe edge
        for (edge, data, head, tail) in edges:
            attribs = edgevisitor(edge, data, head, tail)
            yield edgestr % (
                head,
                tail,
                ','.join([(cpatt % item) for item in attribs.items()]),
            )

    for s in do_graph(graph, '\t'):
        yield s
    yield '}\n'
| Python |
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
# Pattern for the four recognized dylib name shapes; verbose mode, so
# the layout whitespace is insignificant.
_DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
    """
    Parse *filename* as a dylib path.

    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    Returns None when the name does not match, otherwise a mapping
    with the keys ``location``, ``name``, ``shortname``, ``version``
    and ``suffix`` (the latter two may be None when absent).
    """
    match = _DYLIB_RE.match(filename)
    return match.groupdict() if match else None
| Python |
import os
import sys
import stat
import operator
import struct
import shutil
from macholib import mach_o
MAGIC = [
struct.pack('!L', getattr(mach_o, 'MH_' + _))
for _ in ['MAGIC', 'CIGAM', 'MAGIC_64', 'CIGAM_64']
]
FAT_MAGIC_BYTES = struct.pack('!L', mach_o.FAT_MAGIC)
MAGIC_LEN = 4
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
def fsencoding(s, encoding=sys.getfilesystemencoding()):
    """
    Ensure the given argument is in filesystem encoding (not unicode).

    Text strings are encoded with *encoding*; byte strings (and any
    non-string value) are returned unchanged.
    """
    # BUG FIX: the original tested ``isinstance(s, unicode)``, which is
    # a NameError on Python 3.  On Python 2 ``str is bytes``, so only
    # ``unicode`` values have .encode and are not bytes -- the same
    # values are encoded as before.
    if not isinstance(s, bytes) and hasattr(s, 'encode'):
        s = s.encode(encoding)
    return s
def move(src, dst):
    """
    ``shutil.move`` that first coerces both paths to filesystem encoding.
    """
    encoded_src = fsencoding(src)
    encoded_dst = fsencoding(dst)
    shutil.move(encoded_src, encoded_dst)
def copy2(src, dst):
    """
    ``shutil.copy2`` that first coerces both paths to filesystem encoding.
    """
    encoded_src = fsencoding(src)
    encoded_dst = fsencoding(dst)
    shutil.copy2(encoded_src, encoded_dst)
def flipwritable(fn, mode=None):
    """
    Flip the writability of a file and return the old mode. Returns None
    if the file is already writable.
    """
    if os.access(fn, os.W_OK):
        return None
    previous = os.stat(fn).st_mode
    os.chmod(fn, previous | stat.S_IWRITE)
    return previous
class fileview(object):
    """
    A proxy for file-like objects that exposes a given view of a file.

    Position 0 of the view corresponds to byte ``start`` of the
    underlying file; reads, writes and seeks are refused outside the
    window [start, start + size].
    """
    def __init__(self, fileobj, start, size):
        self._fileobj = fileobj
        self._start = start
        self._end = start + size
    def __repr__(self):
        return '<fileview [%d, %d] %r>' % (
            self._start, self._end, self._fileobj)
    def tell(self):
        # Position relative to the start of the window.
        return self._fileobj.tell() - self._start
    def _checkwindow(self, seekto, op):
        # Raise IOError when the *absolute* offset falls outside the view.
        if not (self._start <= seekto <= self._end):
            raise IOError("%s to offset %d is outside window [%d, %d]" % (
                op, seekto, self._start, self._end))
    def seek(self, offset, whence=0):
        # Translate whence-relative offsets to absolute file offsets,
        # then validate against the window before seeking.
        seekto = offset
        if whence == 0:
            # relative to the start of the view
            seekto += self._start
        elif whence == 1:
            # relative to the current (absolute) position
            seekto += self._fileobj.tell()
        elif whence == 2:
            # relative to the end of the view
            seekto += self._end
        else:
            raise IOError("Invalid whence argument to seek: %r" % (whence,))
        self._checkwindow(seekto, 'seek')
        self._fileobj.seek(seekto)
    def write(self, bytes):
        # Both the current position and the end of the written range
        # must stay inside the window.
        here = self._fileobj.tell()
        self._checkwindow(here, 'write')
        self._checkwindow(here + len(bytes), 'write')
        self._fileobj.write(bytes)
    def read(self, size=sys.maxsize):
        # Reads are clamped so they never run past the end of the view.
        assert size >= 0
        here = self._fileobj.tell()
        self._checkwindow(here, 'read')
        bytes = min(size, self._end - here)
        return self._fileobj.read(bytes)
def mergecopy(src, dest):
    """
    ``copy2``, but only when *dest* is missing or older than *src*.
    """
    if not os.path.exists(dest) or os.stat(dest).st_mtime < os.stat(src).st_mtime:
        copy2(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy, srcbase=None):
    """
    Recursively merge a directory tree using mergecopy().

    condition -- optional predicate; source paths for which it returns
                 a false value are skipped
    copyfn    -- function used to copy regular files
    srcbase   -- root of the original source tree (carried through the
                 recursion)

    Raises IOError carrying a list of (src, dst, exception) tuples when
    any entry could not be copied.
    """
    src = fsencoding(src)
    dst = fsencoding(dst)
    if srcbase is None:
        srcbase = src
    names = map(fsencoding, os.listdir(src))
    try:
        os.makedirs(dst)
    except OSError:
        # Destination already exists (or cannot be created; in that
        # case the failure surfaces below when copying into it).
        pass
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        if condition is not None and not condition(srcname):
            continue
        try:
            if os.path.islink(srcname):
                # XXX: This is naive at best, should check srcbase(?)
                realsrc = os.readlink(srcname)
                os.symlink(realsrc, dstname)
            elif os.path.isdir(srcname):
                mergetree(srcname, dstname,
                    condition=condition, copyfn=copyfn, srcbase=srcbase)
            else:
                copyfn(srcname, dstname)
        except (IOError, os.error) as why:
            # Collect errors so the rest of the tree is still processed.
            errors.append((srcname, dstname, why))
    if errors:
        raise IOError(errors)
def sdk_normalize(filename):
    """
    Strip a '/Developer/SDKs/<sdk>' prefix from *filename* so the path
    can be compared against regular system paths.
    """
    if not filename.startswith('/Developer/SDKs/'):
        return filename
    parts = filename.split('/')
    # parts[1:4] are 'Developer', 'SDKs' and the SDK directory itself.
    return '/'.join(parts[:1] + parts[4:])
# Paths under /System or /usr that should nevertheless not be treated
# as system files (populated by callers).
NOT_SYSTEM_FILES = []
def in_system_path(filename):
    """
    Return True if the file is in a system path.
    """
    fn = sdk_normalize(os.path.realpath(filename))
    if fn.startswith('/usr/local/'):
        return False
    if not (fn.startswith('/System/') or fn.startswith('/usr/')):
        return False
    return fn not in NOT_SYSTEM_FILES
def has_filename_filter(module):
    """
    Return True when *module* carries a non-None ``filename`` attribute,
    False otherwise.
    """
    filename = getattr(module, 'filename', None)
    return filename is not None
def get_magic():
    """
    Get a list of valid Mach-O header signatures, not including the fat header

    Returns the module-level MAGIC list: the packed big-endian values
    of MH_MAGIC, MH_CIGAM, MH_MAGIC_64 and MH_CIGAM_64.
    """
    return MAGIC
def is_platform_file(path):
    """
    Return True if the file is Mach-O

    Symbolic links and missing files return False.  For fat
    (universal) binaries the first architecture slice is examined.
    """
    if not os.path.exists(path) or os.path.islink(path):
        return False
    # If the header is fat, we need to read into the first arch
    with open(path, 'rb') as fileobj:
        bytes = fileobj.read(MAGIC_LEN)
        if bytes == FAT_MAGIC_BYTES:
            # Read in the fat header
            fileobj.seek(0)
            header = mach_o.fat_header.from_fileobj(fileobj, _endian_='>')
            if header.nfat_arch < 1:
                return False
            # Read in the first fat arch header
            arch = mach_o.fat_arch.from_fileobj(fileobj, _endian_='>')
            fileobj.seek(arch.offset)
            # Read magic off the first header
            bytes = fileobj.read(MAGIC_LEN)
    # Accept any of the four 32/64-bit byte-order variants.
    for magic in MAGIC:
        if bytes == magic:
            return True
    return False
def iter_platform_files(dst):
    """
    Walk the directory *dst* and yield the full path of every Mach-O
    file found.
    """
    for dirpath, _dirnames, filenames in os.walk(dst):
        for entry in filenames:
            path = os.path.join(dirpath, entry)
            if is_platform_file(path):
                yield path
def strip_files(files, argv_max=(256 * 1024)):
    """
    Strip a list of files with /usr/bin/strip.

    Files are temporarily made writable while being stripped, and the
    strip invocations are batched so each command line stays under
    *argv_max* characters.
    """
    # Pair every file with its saved mode (None when already writable).
    tostrip = [(fn, flipwritable(fn)) for fn in files]
    while tostrip:
        cmd = list(STRIPCMD)
        flips = []
        pathlen = sum([len(s) + 1 for s in cmd])
        while pathlen < argv_max:
            if not tostrip:
                break
            added, flip = tostrip.pop()
            pathlen += len(added) + 1
            cmd.append(added)
            flips.append((added, flip))
        else:
            # The batch overflowed argv_max: put the last file back so
            # it goes into the next batch instead.
            cmd.pop()
            tostrip.append(flips.pop())
        os.spawnv(os.P_WAIT, cmd[0], cmd)
        # Restore the original permissions of this batch.
        for args in flips:
            flipwritable(*args)
| Python |
"""
Other than changing the load commands in such a way that they do not
contain the load command itself, this is largely a by-hand conversion
of the C headers. Hopefully everything in here should be at least as
obvious as the C headers, and you should be using the C headers as a real
reference because the documentation didn't come along for the ride.
Doing much of anything with the symbol tables or segments is really
not covered at this point.
See /usr/include/mach-o and friends.
"""
import time
from macholib.ptypes import *
_CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_NAMES = {
-1: 'ANY',
1: 'VAX',
6: 'MC680x0',
7: 'i386',
_CPU_ARCH_ABI64 | 7: 'x86_64',
8: 'MIPS',
10: 'MC98000',
11: 'HPPA',
12: 'ARM',
13: 'MC88000',
14: 'SPARC',
15: 'i860',
16: 'Alpha',
18: 'PowerPC',
_CPU_ARCH_ABI64 | 18: 'PowerPC64',
}
_MH_EXECUTE_SYM = "__mh_execute_header"
MH_EXECUTE_SYM = "_mh_execute_header"
_MH_BUNDLE_SYM = "__mh_bundle_header"
MH_BUNDLE_SYM = "_mh_bundle_header"
_MH_DYLIB_SYM = "__mh_dylib_header"
MH_DYLIB_SYM = "_mh_dylib_header"
_MH_DYLINKER_SYM = "__mh_dylinker_header"
MH_DYLINKER_SYM = "_mh_dylinker_header"
(
MH_OBJECT, MH_EXECUTE, MH_FVMLIB, MH_CORE, MH_PRELOAD, MH_DYLIB,
MH_DYLINKER, MH_BUNDLE, MH_DYLIB_STUB, MH_DSYM
) = range(0x1, 0xb)
(
MH_NOUNDEFS, MH_INCRLINK, MH_DYLDLINK, MH_BINDATLOAD, MH_PREBOUND,
MH_SPLIT_SEGS, MH_LAZY_INIT, MH_TWOLEVEL, MH_FORCE_FLAT, MH_NOMULTIDEFS,
MH_NOFIXPREBINDING
) = map((1).__lshift__, range(11))
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
integer_t = p_int32
cpu_type_t = integer_t
cpu_subtype_t = integer_t
MH_FILETYPE_NAMES = {
MH_OBJECT: 'relocatable object',
MH_EXECUTE: 'demand paged executable',
MH_FVMLIB: 'fixed vm shared library',
MH_CORE: 'core',
MH_PRELOAD: 'preloaded executable',
MH_DYLIB: 'dynamically bound shared library',
MH_DYLINKER: 'dynamic link editor',
MH_BUNDLE: 'dynamically bound bundle',
MH_DYLIB_STUB: 'shared library stub for static linking',
MH_DSYM: 'symbol information',
}
MH_FILETYPE_SHORTNAMES = {
MH_OBJECT: 'object',
MH_EXECUTE: 'execute',
MH_FVMLIB: 'fvmlib',
MH_CORE: 'core',
MH_PRELOAD: 'preload',
MH_DYLIB: 'dylib',
MH_DYLINKER: 'dylinker',
MH_BUNDLE: 'bundle',
MH_DYLIB_STUB: 'dylib_stub',
MH_DSYM: 'dsym',
}
# Human-readable descriptions for the mach_header ``flags`` bits.
MH_FLAGS_NAMES = {
    MH_NOUNDEFS: 'no undefined references',
    MH_INCRLINK: 'output of an incremental link',
    MH_DYLDLINK: 'input for the dynamic linker',
    MH_BINDATLOAD: 'undefined references bound dynamically when loaded',
    MH_PREBOUND: 'dynamic undefined references prebound',
    MH_SPLIT_SEGS: 'split read-only and read-write segments',
    MH_LAZY_INIT: '(obsolete)',
    MH_TWOLEVEL: 'using two-level name space bindings',
    # BUG FIX: 'imagges' -> 'images' in the description text.
    MH_FORCE_FLAT: 'forcing all images to use flat name space bindings',
    MH_NOMULTIDEFS: 'umbrella guarantees no multiple definitions',
    MH_NOFIXPREBINDING: 'do not notify prebinding agent about this executable',
}
class mach_version_helper(Structure):
_fields_ = (
('major', p_ushort),
('minor', p_uint8),
('rev', p_uint8),
)
def __str__(self):
return '%s.%s.%s' % (self.major, self.minor, self.rev)
class mach_timestamp_helper(p_uint32):
def __str__(self):
return time.ctime(self)
def read_struct(f, s, **kw):
return s.from_fileobj(f, **kw)
class mach_header(Structure):
    # 32-bit Mach-O file header; see <mach-o/loader.h>.
    _fields_ = (
        ('magic', p_uint32),
        ('cputype', cpu_type_t),
        ('cpusubtype', cpu_subtype_t),
        ('filetype', p_uint32),
        ('ncmds', p_uint32),
        ('sizeofcmds', p_uint32),
        ('flags', p_uint32),
    )
    def _describe(self):
        """Return (field-name, human-readable value) pairs for display."""
        # Decompose the flags bitmask into individual flag names.
        bit = 1
        flags = self.flags
        dflags = []
        while flags and bit < (1<<32):
            if flags & bit:
                # Fall back to the numeric value for unknown bits.
                dflags.append(MH_FLAGS_NAMES.get(bit, str(bit)))
            flags = flags ^ bit
            bit <<= 1
        return (
            ('magic', '0x%08X' % self.magic),
            ('cputype', CPU_TYPE_NAMES.get(self.cputype, self.cputype)),
            ('cpusubtype', self.cpusubtype),
            ('filetype', MH_FILETYPE_NAMES.get(self.filetype, self.filetype)),
            ('ncmds', self.ncmds),
            ('sizeofcmds', self.sizeofcmds),
            ('flags', dflags),
        )
class mach_header_64(mach_header):
_fields_ = mach_header._fields_ + (('reserved', p_uint32),)
class load_command(Structure):
_fields_ = (
('cmd', p_uint32),
('cmdsize', p_uint32),
)
LC_REQ_DYLD = 0x80000000
(
LC_SEGMENT, LC_SYMTAB, LC_SYMSEG, LC_THREAD, LC_UNIXTHREAD, LC_LOADFVMLIB,
LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_DYSYMTAB, LC_LOAD_DYLIB,
LC_ID_DYLIB, LC_LOAD_DYLINKER, LC_ID_DYLINKER, LC_PREBOUND_DYLIB,
LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT,
LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM
) = range(0x1, 0x18)
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_SEGMENT_64 = 0x19
LC_ROUTINES_64 = 0x1a
LC_UUID = 0x1b
LC_RPATH = (0x1c | LC_REQ_DYLD)
LC_CODE_SIGNATURE = 0x1d
LC_CODE_SEGMENT_SPLIT_INFO = 0x1e
LC_REEXPORT_DYLIB = 0x1f | LC_REQ_DYLD
LC_LAZY_LOAD_DYLIB = 0x20
LC_ENCRYPTION_INFO = 0x21
LC_DYLD_INFO = 0x22
LC_DYLD_INFO_ONLY = 0x22 | LC_REQ_DYLD
LC_LOAD_UPWARD_DYLIB = 0x23 | LC_REQ_DYLD
LC_VERSION_MIN_MACOSX = 0x24
LC_VERSION_MIN_IPHONEOS = 0x25
LC_FUNCTION_STARTS = 0x26
LC_DYLD_ENVIRONMENT = 0x27
LC_MAIN = 0x28 | LC_REQ_DYLD
LC_DATA_IN_CODE = 0x29
LC_SOURCE_VERSION = 0x2a
LC_DYLIB_CODE_SIGN_DRS = 0x2b
# this is really a union.. but whatever
class lc_str(p_uint32):
pass
p_str16 = pypackable('p_str16', bytes, '16s')
vm_prot_t = p_int32
class segment_command(Structure):
_fields_ = (
('segname', p_str16),
('vmaddr', p_uint32),
('vmsize', p_uint32),
('fileoff', p_uint32),
('filesize', p_uint32),
('maxprot', vm_prot_t),
('initprot', vm_prot_t),
('nsects', p_uint32), # read the section structures ?
('flags', p_uint32),
)
class segment_command_64(Structure):
_fields_ = (
('segname', p_str16),
('vmaddr', p_uint64),
('vmsize', p_uint64),
('fileoff', p_uint64),
('filesize', p_uint64),
('maxprot', vm_prot_t),
('initprot', vm_prot_t),
('nsects', p_uint32), # read the section structures ?
('flags', p_uint32),
)
SG_HIGHVM = 0x1
SG_FVMLIB = 0x2
SG_NORELOC = 0x4
class section(Structure):
_fields_ = (
('sectname', p_str16),
('segname', p_str16),
('addr', p_uint32),
('size', p_uint32),
('offset', p_uint32),
('align', p_uint32),
('reloff', p_uint32),
('nreloc', p_uint32),
('flags', p_uint32),
('reserved1', p_uint32),
('reserved2', p_uint32),
)
class section_64(Structure):
_fields_ = (
('sectname', p_str16),
('segname', p_str16),
('addr', p_uint64),
('size', p_uint64),
('offset', p_uint32),
('align', p_uint32),
('reloff', p_uint32),
('nreloc', p_uint32),
('flags', p_uint32),
('reserved1', p_uint32),
('reserved2', p_uint32),
('reserved3', p_uint32),
)
SECTION_TYPE = 0xff
SECTION_ATTRIBUTES = 0xffffff00
S_REGULAR = 0x0
S_ZEROFILL = 0x1
S_CSTRING_LITERALS = 0x2
S_4BYTE_LITERALS = 0x3
S_8BYTE_LITERALS = 0x4
S_LITERAL_POINTERS = 0x5
S_NON_LAZY_SYMBOL_POINTERS = 0x6
S_LAZY_SYMBOL_POINTERS = 0x7
S_SYMBOL_STUBS = 0x8
S_MOD_INIT_FUNC_POINTERS = 0x9
S_MOD_TERM_FUNC_POINTERS = 0xa
S_COALESCED = 0xb
SECTION_ATTRIBUTES_USR = 0xff000000
S_ATTR_PURE_INSTRUCTIONS = 0x80000000
S_ATTR_NO_TOC = 0x40000000
S_ATTR_STRIP_STATIC_SYMS = 0x20000000
SECTION_ATTRIBUTES_SYS = 0x00ffff00
S_ATTR_SOME_INSTRUCTIONS = 0x00000400
S_ATTR_EXT_RELOC = 0x00000200
S_ATTR_LOC_RELOC = 0x00000100
SEG_PAGEZERO = "__PAGEZERO"
SEG_TEXT = "__TEXT"
SECT_TEXT = "__text"
SECT_FVMLIB_INIT0 = "__fvmlib_init0"
SECT_FVMLIB_INIT1 = "__fvmlib_init1"
SEG_DATA = "__DATA"
SECT_DATA = "__data"
SECT_BSS = "__bss"
SECT_COMMON = "__common"
SEG_OBJC = "__OBJC"
SECT_OBJC_SYMBOLS = "__symbol_table"
SECT_OBJC_MODULES = "__module_info"
SECT_OBJC_STRINGS = "__selector_strs"
SECT_OBJC_REFS = "__selector_refs"
SEG_ICON = "__ICON"
SECT_ICON_HEADER = "__header"
SECT_ICON_TIFF = "__tiff"
SEG_LINKEDIT = "__LINKEDIT"
SEG_UNIXSTACK = "__UNIXSTACK"
#
# I really should remove all these _command classes because they
# are no different. I decided to keep the load commands separate,
# so classes like fvmlib and fvmlib_command are equivalent.
#
class fvmlib(Structure):
_fields_ = (
('name', lc_str),
('minor_version', mach_version_helper),
('header_addr', p_uint32),
)
class fvmlib_command(Structure):
_fields_ = fvmlib._fields_
class dylib(Structure):
_fields_ = (
('name', lc_str),
('timestamp', mach_timestamp_helper),
('current_version', mach_version_helper),
('compatibility_version', mach_version_helper),
)
# merged dylib structure
class dylib_command(Structure):
_fields_ = dylib._fields_
class sub_framework_command(Structure):
_fields_ = (
('umbrella', lc_str),
)
class sub_client_command(Structure):
_fields_ = (
('client', lc_str),
)
class sub_umbrella_command(Structure):
_fields_ = (
('sub_umbrella', lc_str),
)
class sub_library_command(Structure):
_fields_ = (
('sub_library', lc_str),
)
class prebound_dylib_command(Structure):
_fields_ = (
('name', lc_str),
('nmodules', p_uint32),
('linked_modules', lc_str),
)
class dylinker_command(Structure):
_fields_ = (
('name', lc_str),
)
class thread_command(Structure):
_fields_ = (
)
class entry_point_command(Structure):
_fields_ = (
('entryoff', p_uint64),
('stacksize', p_uint64),
)
class routines_command(Structure):
_fields_ = (
('init_address', p_uint32),
('init_module', p_uint32),
('reserved1', p_uint32),
('reserved2', p_uint32),
('reserved3', p_uint32),
('reserved4', p_uint32),
('reserved5', p_uint32),
('reserved6', p_uint32),
)
class routines_command_64(Structure):
_fields_ = (
('init_address', p_uint64),
('init_module', p_uint64),
('reserved1', p_uint64),
('reserved2', p_uint64),
('reserved3', p_uint64),
('reserved4', p_uint64),
('reserved5', p_uint64),
('reserved6', p_uint64),
)
class symtab_command(Structure):
_fields_ = (
('symoff', p_uint32),
('nsyms', p_uint32),
('stroff', p_uint32),
('strsize', p_uint32),
)
class dysymtab_command(Structure):
_fields_ = (
('ilocalsym', p_uint32),
('nlocalsym', p_uint32),
('iextdefsym', p_uint32),
('nextdefsym', p_uint32),
('iundefsym', p_uint32),
('nundefsym', p_uint32),
('tocoff', p_uint32),
('ntoc', p_uint32),
('modtaboff', p_uint32),
('nmodtab', p_uint32),
('extrefsymoff', p_uint32),
('nextrefsyms', p_uint32),
('indirectsymoff', p_uint32),
('nindirectsyms', p_uint32),
('extreloff', p_uint32),
('nextrel', p_uint32),
('locreloff', p_uint32),
('nlocrel', p_uint32),
)
INDIRECT_SYMBOL_LOCAL = 0x80000000
INDIRECT_SYMBOL_ABS = 0x40000000
class dylib_table_of_contents(Structure):
_fields_ = (
('symbol_index', p_uint32),
('module_index', p_uint32),
)
class dylib_module(Structure):
_fields_ = (
('module_name', p_uint32),
('iextdefsym', p_uint32),
('nextdefsym', p_uint32),
('irefsym', p_uint32),
('nrefsym', p_uint32),
('ilocalsym', p_uint32),
('nlocalsym', p_uint32),
('iextrel', p_uint32),
('nextrel', p_uint32),
('iinit_iterm', p_uint32),
('ninit_nterm', p_uint32),
('objc_module_info_addr', p_uint32),
('objc_module_info_size', p_uint32),
)
class dylib_module_64(Structure):
_fields_ = (
('module_name', p_uint32),
('iextdefsym', p_uint32),
('nextdefsym', p_uint32),
('irefsym', p_uint32),
('nrefsym', p_uint32),
('ilocalsym', p_uint32),
('nlocalsym', p_uint32),
('iextrel', p_uint32),
('nextrel', p_uint32),
('iinit_iterm', p_uint32),
('ninit_nterm', p_uint32),
('objc_module_info_size', p_uint32),
('objc_module_info_addr', p_uint64),
)
class dylib_reference(Structure):
_fields_ = (
# XXX - ick, fix
('isym_flags', p_uint32),
#('isym', p_uint8 * 3),
#('flags', p_uint8),
)
class twolevel_hints_command(Structure):
_fields_ = (
('offset', p_uint32),
('nhints', p_uint32),
)
class twolevel_hint(Structure):
_fields_ = (
# XXX - ick, fix
('isub_image_itoc', p_uint32),
#('isub_image', p_uint8),
#('itoc', p_uint8 * 3),
)
class prebind_cksum_command(Structure):
_fields_ = (
('cksum', p_uint32),
)
class symseg_command(Structure):
_fields_ = (
('offset', p_uint32),
('size', p_uint32),
)
class ident_command(Structure):
_fields_ = (
)
class fvmfile_command(Structure):
_fields_ = (
('name', lc_str),
('header_addr', p_uint32),
)
class uuid_command (Structure):
_fields_ = (
('uuid', p_str16),
)
class rpath_command (Structure):
_fields_ = (
('path', lc_str),
)
class linkedit_data_command (Structure):
    # Generic reference to a blob in the __LINKEDIT segment, used by
    # LC_CODE_SIGNATURE, LC_FUNCTION_STARTS and friends.
    _fields_ = (
        ('dataoff', p_uint32),
        # NOTE(review): the C header names this field 'datasize'; the
        # extra 's' here looks like a typo, but renaming would change
        # the public attribute name -- confirm against callers first.
        ('datassize', p_uint32),
    )
class version_min_command (Structure):
_fields_ = (
('version', p_uint32), # X.Y.Z is encoded in nibbles xxxx.yy.zz
('reserved', p_uint32),
)
class source_version_command (Structure):
_fields_ = (
('version', p_uint64),
)
LC_REGISTRY = {
LC_SEGMENT: segment_command,
LC_IDFVMLIB: fvmlib_command,
LC_LOADFVMLIB: fvmlib_command,
LC_ID_DYLIB: dylib_command,
LC_LOAD_DYLIB: dylib_command,
LC_LOAD_WEAK_DYLIB: dylib_command,
LC_SUB_FRAMEWORK: sub_framework_command,
LC_SUB_CLIENT: sub_client_command,
LC_SUB_UMBRELLA: sub_umbrella_command,
LC_SUB_LIBRARY: sub_library_command,
LC_PREBOUND_DYLIB: prebound_dylib_command,
LC_ID_DYLINKER: dylinker_command,
LC_LOAD_DYLINKER: dylinker_command,
LC_THREAD: thread_command,
LC_UNIXTHREAD: thread_command,
LC_ROUTINES: routines_command,
LC_SYMTAB: symtab_command,
LC_DYSYMTAB: dysymtab_command,
LC_TWOLEVEL_HINTS: twolevel_hints_command,
LC_PREBIND_CKSUM: prebind_cksum_command,
LC_SYMSEG: symseg_command,
LC_IDENT: ident_command,
LC_FVMFILE: fvmfile_command,
LC_SEGMENT_64: segment_command_64,
LC_ROUTINES_64: routines_command_64,
LC_UUID: uuid_command,
LC_RPATH: rpath_command,
LC_CODE_SIGNATURE: linkedit_data_command,
LC_CODE_SEGMENT_SPLIT_INFO: linkedit_data_command,
LC_REEXPORT_DYLIB: dylib_command,
LC_LAZY_LOAD_DYLIB: dylib_command,
LC_ENCRYPTION_INFO: dylib_command,
LC_DYLD_INFO: dylib_command,
LC_DYLD_INFO_ONLY: dylib_command,
LC_LOAD_UPWARD_DYLIB: dylib_command,
LC_VERSION_MIN_MACOSX: version_min_command,
LC_VERSION_MIN_IPHONEOS: version_min_command,
LC_FUNCTION_STARTS: linkedit_data_command,
LC_DYLD_ENVIRONMENT: dylinker_command,
LC_MAIN: entry_point_command,
LC_DATA_IN_CODE: dylib_command,
LC_SOURCE_VERSION: source_version_command,
LC_DYLIB_CODE_SIGN_DRS: linkedit_data_command,
}
#this is another union.
class n_un(p_int32):
pass
class nlist(Structure):
_fields_ = (
('n_un', n_un),
('n_type', p_uint8),
('n_sect', p_uint8),
('n_desc', p_short),
('n_value', p_uint32),
)
class nlist_64(Structure):
_fields_ = [
('n_un', n_un),
('n_type', p_uint8),
('n_sect', p_uint8),
('n_desc', p_short),
('n_value', p_int64),
]
N_STAB = 0xe0
N_PEXT = 0x10
N_TYPE = 0x0e
N_EXT = 0x01
N_UNDF = 0x0
N_ABS = 0x2
N_SECT = 0xe
N_PBUD = 0xc
N_INDR = 0xa
NO_SECT = 0
MAX_SECT = 255
REFERENCE_TYPE = 0xf
REFERENCE_FLAG_UNDEFINED_NON_LAZY = 0
REFERENCE_FLAG_UNDEFINED_LAZY = 1
REFERENCE_FLAG_DEFINED = 2
REFERENCE_FLAG_PRIVATE_DEFINED = 3
REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY = 4
REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY = 5
REFERENCED_DYNAMICALLY = 0x0010
def GET_LIBRARY_ORDINAL(n_desc):
return (((n_desc) >> 8) & 0xff)
def SET_LIBRARY_ORDINAL(n_desc, ordinal):
return (((n_desc) & 0x00ff) | (((ordinal & 0xff) << 8)))
SELF_LIBRARY_ORDINAL = 0x0
MAX_LIBRARY_ORDINAL = 0xfd
DYNAMIC_LOOKUP_ORDINAL = 0xfe
EXECUTABLE_ORDINAL = 0xff
N_DESC_DISCARDED = 0x0020
N_WEAK_REF = 0x0040
N_WEAK_DEF = 0x0080
# /usr/include/mach-o/fat.h
FAT_MAGIC = 0xcafebabe
class fat_header(Structure):
_fields_ = (
('magic', p_uint32),
('nfat_arch', p_uint32),
)
class fat_arch(Structure):
_fields_ = (
('cputype', cpu_type_t),
('cpusubtype', cpu_subtype_t),
('offset', p_uint32),
('size', p_uint32),
('align', p_uint32),
)
| Python |
"""
dyld emulation
"""
from itertools import chain
import os, sys
from macholib.framework import framework_info
from macholib.dylib import dylib_info
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
_DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
_DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
# XXX: Is this function still needed?
if sys.version_info[0] == 2:
    def _ensure_utf8(s):
        """Not all of PyObjC and Python understand unicode paths very well yet"""
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s
else:
    def _ensure_utf8(s):
        # On Python 3 paths must already be text; None passes through
        # unchanged, anything else that is not ``str`` is rejected.
        if s is not None and not isinstance(s, str):
            raise ValueError(s)
        return s
def _dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None or rval == '':
return []
return rval.split(':')
def dyld_image_suffix(env=None):
    """
    Return DYLD_IMAGE_SUFFIX from *env* (default ``os.environ``), or
    None when it is not set.
    """
    return (os.environ if env is None else env).get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
    # DYLD_FRAMEWORK_PATH: directories searched for frameworks first.
    return _dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
    # DYLD_LIBRARY_PATH: directories searched for dylibs first.
    return _dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
    # DYLD_FALLBACK_FRAMEWORK_PATH: consulted when the normal framework
    # search finds nothing.
    return _dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
    # DYLD_FALLBACK_LIBRARY_PATH: consulted when the normal library
    # search finds nothing.
    return _dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
    """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
    suffix = dyld_image_suffix(env)
    if suffix is None:
        return iterator

    def _inject(paths=iterator, suffix=suffix):
        # For every candidate, first try the suffixed variant, then
        # the original path.
        for path in paths:
            if path.endswith('.dylib'):
                stem = path[:-len('.dylib')]
                yield stem + suffix + '.dylib'
            else:
                yield path + suffix
            yield path

    return _inject()
def dyld_override_search(name, env=None):
    """
    Yield override candidate paths for *name* built from the
    DYLD_FRAMEWORK_PATH and DYLD_LIBRARY_PATH environment variables.
    """
    # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
    # framework name, use the first file that exists in the framework
    # path if any. If there is none go on to search the DYLD_LIBRARY_PATH
    # if any.
    framework = framework_info(name)
    if framework is not None:
        for path in dyld_framework_path(env):
            yield os.path.join(path, framework['name'])
    # If DYLD_LIBRARY_PATH is set then use the first file that exists
    # in the path. If none use the original name.
    for path in dyld_library_path(env):
        yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
    """
    Yield *name* resolved relative to *executable_path* when it uses
    the '@executable_path/' prefix; yield nothing otherwise.
    """
    prefix = '@executable_path/'
    if executable_path is None or not name.startswith(prefix):
        return
    yield os.path.join(executable_path, name[len(prefix):])
def dyld_default_search(name, env=None):
    """
    Yield the default dyld candidates for *name*: the name itself,
    then fallback framework locations (when *name* is a framework)
    and fallback library locations, taken from the DYLD_FALLBACK_*
    variables or the built-in defaults from dyld(1).
    """
    yield name
    framework = framework_info(name)
    if framework is not None:
        fallback_framework_path = dyld_fallback_framework_path(env)
        if fallback_framework_path:
            for path in fallback_framework_path:
                yield os.path.join(path, framework['name'])
        else:
            # No override set -- use the documented defaults.
            for path in _DEFAULT_FRAMEWORK_FALLBACK:
                yield os.path.join(path, framework['name'])
    fallback_library_path = dyld_fallback_library_path(env)
    if fallback_library_path:
        for path in fallback_library_path:
            yield os.path.join(path, os.path.basename(name))
    else:
        for path in _DEFAULT_LIBRARY_FALLBACK:
            yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
    """
    Find a library or framework using dyld semantics

    Candidates from the override, @executable_path and default
    searches are probed in order (with DYLD_IMAGE_SUFFIX variants
    injected); the first existing file wins.

    Raises ValueError when no candidate exists.
    """
    name = _ensure_utf8(name)
    executable_path = _ensure_utf8(executable_path)
    for path in dyld_image_suffix_search(chain(
                dyld_override_search(name, env),
                dyld_executable_path_search(name, executable_path),
                dyld_default_search(name, env),
            ), env):
        if os.path.isfile(path):
            return path
    raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.
    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current

    Raises ValueError (from dyld_find) when nothing matches.
    """
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        pass
    # Append '.framework' when missing, then retry with the framework
    # binary path (bundle-name/basename) inside the bundle.
    fmwk_index = fn.rfind('.framework')
    if fmwk_index == -1:
        fmwk_index = len(fn)
        fn += '.framework'
    fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
    return dyld_find(fn, executable_path=executable_path, env=env)
| Python |
"""
Enough Mach-O to make your head spin.
See the relevant header files in /usr/include/mach-o
And also Apple's documentation.
"""
__version__ = '1.4.3'
| Python |
"""
Internal helpers for basic commandline tools
"""
from __future__ import print_function, absolute_import
import os
import sys
from macholib.util import is_platform_file
def check_file(fp, path, callback):
    """
    Invoke ``callback(fp, path)`` when *path* is a Mach-O file.

    Returns 0 on success; prints a diagnostic to stderr and returns 1
    when the file is missing or unreadable.
    """
    if not os.path.exists(path):
        print('%s: %s: No such file or directory' % (sys.argv[0], path), file=sys.stderr)
        return 1
    try:
        is_plat = is_platform_file(path)
    except IOError as msg:
        print('%s: %s: %s' % (sys.argv[0], path, msg), file=sys.stderr)
        return 1
    else:
        if is_plat:
            callback(fp, path)
    return 0
def main(callback):
    """
    Shared command-line driver: run check_file() with *callback* over
    every path named on the command line, walking directories
    recursively.  Returns a non-zero status when any file failed.
    """
    args = sys.argv[1:]
    name = os.path.basename(sys.argv[0])
    err = 0
    if not args:
        print("Usage: %s filename..."%(name,), file=sys.stderr)
        return 1
    for base in args:
        if os.path.isdir(base):
            for root, dirs, files in os.walk(base):
                for fn in files:
                    # OR the statuses together so one failure is enough
                    # to make the exit status non-zero.
                    err |= check_file(sys.stdout, os.path.join(root, fn), callback)
        else:
            err |= check_file(sys.stdout, base, callback)
    return err
| Python |
"""
This module defines packable types, that is types than can be easily converted to a binary format
as used in MachO headers.
"""
import struct
import sys
try:
from itertools import izip, imap
except ImportError:
izip, imap = zip, map
from itertools import chain, starmap
import warnings
__all__ = """
sizeof
BasePackable
Structure
pypackable
p_char
p_byte
p_ubyte
p_short
p_ushort
p_int
p_uint
p_long
p_ulong
p_longlong
p_ulonglong
p_int8
p_uint8
p_int16
p_uint16
p_int32
p_uint32
p_int64
p_uint64
p_float
p_double
""".split()
def sizeof(s):
    """
    Return the size of an object when packed

    Packable types expose ``_size_``; plain byte strings use their
    length.  Anything else raises ValueError.
    """
    try:
        return s._size_
    except AttributeError:
        pass
    if isinstance(s, bytes):
        return len(s)
    raise ValueError(s)
class MetaPackable(type):
    """
    Fixed size struct.unpack-able types use from_tuple as their designated initializer
    """
    def from_mmap(cls, mm, ptr, **kw):
        # Unpack from a buffer/mmap slice starting at byte offset *ptr*.
        return cls.from_str(mm[ptr:ptr+cls._size_], **kw)
    def from_fileobj(cls, f, **kw):
        # Unpack by reading exactly _size_ bytes from file object *f*.
        return cls.from_str(f.read(cls._size_), **kw)
    def from_str(cls, s, **kw):
        # The byte order may be overridden per call via '_endian_'.
        endian = kw.get('_endian_', cls._endian_)
        return cls.from_tuple(struct.unpack(endian + cls._format_, s), **kw)
    def from_tuple(cls, tpl, **kw):
        # Scalar packables unpack to a 1-tuple; pass the value through.
        return cls(tpl[0], **kw)
class BasePackable(object):
    """Common serialization interface for all packable types."""
    # Default byte order: big-endian.
    _endian_ = '>'
    def to_str(self):
        # Subclasses must produce their packed byte representation.
        raise NotImplementedError
    def to_fileobj(self, f):
        f.write(self.to_str())
    def to_mmap(self, mm, ptr):
        # Write the packed representation into a buffer at offset *ptr*.
        mm[ptr:ptr+self._size_] = self.to_str()
# This defines a class with a custom metaclass, we'd normally
# use "class Packable(BasePackable, metaclass=MetaPackage)",
# but that syntax is not valid in Python 2 (and likewise the
# python 2 syntax is not valid in Python 3)
def _make():
    def to_str(self):
        # Pack self (a subclass of a python scalar type) according to
        # its declared struct format.
        cls = type(self)
        endian = getattr(self, '_endian_', cls._endian_)
        return struct.pack(endian + cls._format_, self)
    return MetaPackable("Packable", (BasePackable,), {'to_str': to_str})
Packable = _make()
del _make
def pypackable(name, pytype, format):
    """
    Create a "mix-in" class with a python type and a
    Packable with the given struct format

    name   -- name of the new class
    pytype -- python base type of the value (int, bytes, float, ...)
    format -- struct format string for a single value
    """
    size, items = _formatinfo(format)
    # Use Packable's metaclass so the new type inherits the
    # from_str/from_fileobj constructors.
    return type(Packable)(name, (pytype, Packable), {
        '_format_': format,
        '_size_': size,
        '_items_': items,
    })
def _formatinfo(format):
"""
Calculate the size and number of items in a struct format.
"""
size = struct.calcsize(format)
return size, len(struct.unpack(format, b'\x00' * size))
class MetaStructure(MetaPackable):
    """
    The metaclass of Structure objects that does all the magic.
    Since we can assume that all Structures have a fixed size,
    we can do a bunch of calculations up front and pack or
    unpack the whole thing in one struct call.
    """
    def __new__(cls, clsname, bases, dct):
        fields = dct['_fields_']
        names = []
        types = []
        structmarks = []
        format = ''
        items = 0
        size = 0
        def struct_property(name, typ):
            # Each declared field becomes a property backed by
            # self._objects_, coercing assigned values to the declared
            # packable type.
            def _get(self):
                return self._objects_[name]
            def _set(self, obj):
                if type(obj) is not typ:
                    obj = typ(obj)
                self._objects_[name] = obj
            return property(_get, _set, typ.__name__)
        for name, typ in fields:
            dct[name] = struct_property(name, typ)
            names.append(name)
            types.append(typ)
            # Accumulate the combined struct format and total size.
            format += typ._format_
            size += typ._size_
            if (typ._items_ > 1):
                # Remember where multi-item (nested) fields live in the
                # flat unpacked tuple so from_tuple can regroup them.
                structmarks.append((items, typ._items_, typ))
            items += typ._items_
        dct['_structmarks_'] = structmarks
        dct['_names_'] = names
        dct['_types_'] = types
        dct['_size_'] = size
        dct['_items_'] = items
        dct['_format_'] = format
        return super(MetaStructure, cls).__new__(cls, clsname, bases, dct)
    def from_tuple(cls, tpl, **kw):
        # Rebuild nested structures from the flat struct.unpack tuple
        # using the _structmarks_ recorded at class-creation time.
        values = []
        current = 0
        for begin, length, typ in cls._structmarks_:
            if begin > current:
                values.extend(tpl[current:begin])
            current = begin + length
            values.append(typ.from_tuple(tpl[begin:current], **kw))
        values.extend(tpl[current:])
        return cls(*values, **kw)
# See metaclass discussion earlier in this file
def _make():
    class_dict={}
    class_dict['_fields_'] = ()
    def as_method(function):
        # Decorator that collects functions into the class dictionary.
        class_dict[function.__name__] = function
    @as_method
    def __init__(self, *args, **kwargs):
        # Structure(other) acts as a copy constructor; otherwise
        # positional args map onto fields in declaration order and
        # keyword args must name declared fields (or '_endian_').
        if len(args) == 1 and not kwargs and type(args[0]) is type(self):
            kwargs = args[0]._objects_
            args = ()
        self._objects_ = {}
        iargs = chain(izip(self._names_, args), kwargs.items())
        for key, value in iargs:
            if key not in self._names_ and key != "_endian_":
                raise TypeError
            setattr(self, key, value)
        # Default-construct any field that was not given a value.
        for key, typ in izip(self._names_, self._types_):
            if key not in self._objects_:
                self._objects_[key] = typ()
    @as_method
    def _get_packables(self):
        # Yield the scalar packables of all fields in declaration
        # order, flattening nested structures.
        for obj in imap(self._objects_.__getitem__, self._names_):
            if obj._items_ == 1:
                yield obj
            else:
                for obj in obj._get_packables():
                    yield obj
    @as_method
    def to_str(self):
        return struct.pack(self._endian_ + self._format_, *self._get_packables())
    @as_method
    def __cmp__(self, other):
        # NOTE(review): comparing against a different type raises
        # TypeError, so ``==`` with an unrelated object raises rather
        # than returning False/NotImplemented -- confirm callers rely
        # on this before changing it.
        if type(other) is not type(self):
            raise TypeError('Cannot compare objects of type %r to objects of type %r' % (type(other), type(self)))
        if sys.version_info[0] == 2:
            _cmp = cmp
        else:
            # Python 3 has no builtin cmp(); emulate it.
            def _cmp(a, b):
                if a < b:
                    return -1
                elif a > b:
                    return 1
                elif a == b:
                    return 0
                else:
                    raise TypeError()
        # Compare field-by-field; the first difference decides.
        for cmpval in starmap(_cmp, izip(self._get_packables(), other._get_packables())):
            if cmpval != 0:
                return cmpval
        return 0
    @as_method
    def __eq__(self, other):
        r = self.__cmp__(other)
        return r == 0
    @as_method
    def __ne__(self, other):
        r = self.__cmp__(other)
        return r != 0
    @as_method
    def __lt__(self, other):
        r = self.__cmp__(other)
        return r < 0
    @as_method
    def __le__(self, other):
        r = self.__cmp__(other)
        return r <= 0
    @as_method
    def __gt__(self, other):
        r = self.__cmp__(other)
        return r > 0
    @as_method
    def __ge__(self, other):
        r = self.__cmp__(other)
        return r >= 0
    return MetaStructure("Structure", (BasePackable,), class_dict)
Structure = _make()
del _make
try:
    long
except NameError:
    # Python 3 unified int/long; keep the name 'long' usable below.
    long = int

# export common packables with predictable names
p_char = pypackable('p_char', bytes, 'c')
p_int8 = pypackable('p_int8', int, 'b')
p_uint8 = pypackable('p_uint8', int, 'B')
p_int16 = pypackable('p_int16', int, 'h')
p_uint16 = pypackable('p_uint16', int, 'H')
p_int32 = pypackable('p_int32', int, 'i')
p_uint32 = pypackable('p_uint32', long, 'I')
p_int64 = pypackable('p_int64', long, 'q')
p_uint64 = pypackable('p_uint64', long, 'Q')
p_float = pypackable('p_float', float, 'f')
p_double = pypackable('p_double', float, 'd')

# Deprecated names, need trick to emit deprecation warning.
p_byte = p_int8
p_ubyte = p_uint8
p_short = p_int16
p_ushort = p_uint16
p_int = p_long = p_int32
p_uint = p_ulong = p_uint32
p_longlong = p_int64
p_ulonglong = p_uint64
| Python |
"""
Generic framework path manipulation
"""
import re
__all__ = ['framework_info']
_STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>[-_A-Za-z0-9]+).framework/
(?:Versions/(?P<version>[^/]+)/)?
(?P=shortname)
(?:_(?P<suffix>[^_]+))?
)$
""")
def framework_info(filename):
    """
    A framework name can take one of the following four forms:
        Location/Name.framework/Versions/SomeVersion/Name_Suffix
        Location/Name.framework/Versions/SomeVersion/Name
        Location/Name.framework/Name_Suffix
        Location/Name.framework/Name

    returns None if not found, or a mapping equivalent to:
        dict(
            location='Location',
            name='Name.framework/Versions/SomeVersion/Name_Suffix',
            shortname='Name',
            version='SomeVersion',
            suffix='Suffix',
        )

    Note that SomeVersion and Suffix are optional and may be None
    if not present
    """
    match = _STRICT_FRAMEWORK_RE.match(filename)
    if match is None:
        return None
    return match.groupdict()
| Python |
# -*- coding: latin-1 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names
as attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against the limits of valid PE headers, that is, malware.
Lots of packed malware attempt to abuse the format way beyond its standard use.
To the best of my knowledge most of the abuses are handled gracefully.
Copyright (c) 2005-2011 Ero Carrera <ero.carrera@gmail.com>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
__revision__ = "$LastChangedRevision: 102 $"
__author__ = 'Ero Carrera'
__version__ = '1.2.10-%d' % int( __revision__[21:-2] )
__contact__ = 'ero.carrera@gmail.com'
import os
import struct
import time
import math
import re
import exceptions
import string
import array
import mmap
sha1, sha256, sha512, md5 = None, None, None, None
try:
import hashlib
sha1 = hashlib.sha1
sha256 = hashlib.sha256
sha512 = hashlib.sha512
md5 = hashlib.md5
except ImportError:
try:
import sha
sha1 = sha.new
except ImportError:
pass
try:
import md5
md5 = md5.new
except ImportError:
pass
try:
    enumerate
except NameError:
    # Compatibility shim for very old Pythons lacking the enumerate()
    # builtin.
    def enumerate(iter):
        L = list(iter)
        return zip(range(0, len(L)), L)

# Module-level default for the fast_load flag.
fast_load = False

# This will set a maximum length of a string to be retrieved from the file.
# It's there to prevent loading massive amounts of data from memory mapped
# files. Strings longer than 1MB should be rather rare.
MAX_STRING_LENGTH = 0x100000 # 2^20

# Magic signatures of the several executable container formats.
IMAGE_DOS_SIGNATURE             = 0x5A4D
IMAGE_DOSZM_SIGNATURE           = 0x4D5A
IMAGE_NE_SIGNATURE              = 0x454E
IMAGE_LE_SIGNATURE              = 0x454C
IMAGE_LX_SIGNATURE              = 0x584C
IMAGE_NT_SIGNATURE              = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
# Bit set in an import thunk when the import is by ordinal (32/64-bit).
IMAGE_ORDINAL_FLAG              = 0x80000000L
IMAGE_ORDINAL_FLAG64            = 0x8000000000000000L
# Optional-header magic values for PE32 and PE32+.
OPTIONAL_HEADER_MAGIC_PE        = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS   = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000),
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000L) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_AM33', 0x1d3),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_ARM', 0x1c0),
('IMAGE_FILE_MACHINE_EBC', 0xebc),
('IMAGE_FILE_MACHINE_I386', 0x14c),
('IMAGE_FILE_MACHINE_IA64', 0x200),
('IMAGE_FILE_MACHINE_MR32', 0x9041),
('IMAGE_FILE_MACHINE_MIPS16', 0x266),
('IMAGE_FILE_MACHINE_MIPSFPU', 0x366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x466),
('IMAGE_FILE_MACHINE_POWERPC', 0x1f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x1f1),
('IMAGE_FILE_MACHINE_R4000', 0x166),
('IMAGE_FILE_MACHINE_SH3', 0x1a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x1a3),
('IMAGE_FILE_MACHINE_SH4', 0x1a6),
('IMAGE_FILE_MACHINE_SH5', 0x1a8),
('IMAGE_FILE_MACHINE_THUMB', 0x1c2),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x169),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0001', 0x0001),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0002', 0x0002),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0004', 0x0004),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0008', 0x0008),
('IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLL_CHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLL_CHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLL_CHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x1000', 0x1000),
('IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
# Initialize the dictionary with all the name->value pairs.
# (A redundant pre-assignment that was immediately overwritten here has
# been removed.)
SUBLANG = dict( sublang )
# Now add all the value->name information, handling duplicates appropriately:
# several sublanguages share the same numeric value (e.g. 0x01), so each
# value maps to the *list* of names carrying it.
for sublang_name, sublang_value in sublang:
    if sublang_value in SUBLANG:
        SUBLANG[ sublang_value ].append( sublang_name )
    else:
        SUBLANG[ sublang_value ] = [ sublang_name ]
# Resolve a sublang name given the main lang name
#
def get_sublang_name_for_lang( lang_value, sublang_value ):
    """Resolve a sublanguage name given the main language's value."""
    lang_name = LANG.get(lang_value, '*unknown*')
    candidates = SUBLANG.get(sublang_value, [])
    # Prefer the sublang whose name embeds the main language's name.
    for candidate in candidates:
        if lang_name in candidate:
            return candidate
    # Otherwise fall back to the first known sublang name, if any.
    if candidates:
        return candidates[0]
    return '*unknown*'
# Ange Albertini's code to process resources' strings
#
def parse_strings(data, counter, l):
    """Parse a resource strings blob.

    'data' is a sequence of <little-endian int16 length><UTF-16 chars>
    records. Each decoded string is stored into the dict 'l' under the
    key 'counter', which is incremented per record. Decoding stops after
    three undecodable records.
    """
    i = 0
    error_count = 0
    while i < len(data):
        data_slice = data[i:i + 2]
        if len(data_slice) < 2:
            break
        # '<h' is signed, so corrupt data can yield a negative length.
        len_ = struct.unpack("<h", data_slice)[0]
        i += 2
        if len_ < 0:
            # A negative length would move 'i' backwards below and could
            # make this loop run forever on crafted input; bail out.
            break
        if len_ != 0 and 0 <= len_ * 2 <= len(data):
            try:
                l[counter] = data[i: i + len_ * 2].decode('utf-16')
            except UnicodeDecodeError:
                error_count += 1
            # Give up once too many records failed to decode.
            if error_count >= 3:
                break
        i += len_ * 2
        counter += 1
def retrieve_flags(flag_dict, flag_filter):
    """Read the flags from a dictionary and return them in a usable form.

    Returns a list of (flag_name, value) tuples for every string key in
    "flag_dict" whose name starts with the prefix "flag_filter".
    """
    matching = []
    for key, value in flag_dict.items():
        # The flag dictionaries map both name->value and value->name, so
        # keep only the string-keyed direction.
        if isinstance(key, str) and key.startswith(flag_filter):
            matching.append((key, value))
    return matching
def set_flags(obj, flag_field, flags):
    """Will process the flags and set attributes in the object accordingly.

    The object "obj" gains one boolean attribute per (name, mask) pair in
    "flags": True when the mask is present in flag_field, False otherwise.
    """
    for name, mask in flags:
        # Write through __dict__ rather than setattr() so no custom
        # __setattr__ hook on obj is triggered.
        obj.__dict__[name] = bool(flag_field & mask)
# According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html
# if PointerToRawData is less that 0x200 it's rounded to zero. Loading the test file
# in a debugger it's easy to verify that the PointerToRawData value of 1 is rounded
# to zero. Hence we reproduce the behabior
#
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment factor (in bytes) that is used to align the raw data of sections in
# the image file. The value should be a power of 2 between 512 and 64 K, inclusive.
# The default is 512. If the SectionAlignment is less than the architecture’s page
# size, then FileAlignment must match SectionAlignment."
#
def adjust_FileAlignment( val, file_aligment ):
    # Historically this rounded 'val' down to a multiple of the file
    # alignment (see the disabled code below); the current behavior is to
    # return the offset unmodified. Kept as a hook so all callers go
    # through a single adjustment point.
    #if file_aligment and val % file_aligment:
    #    return file_aligment * ( val / file_aligment )
    return val
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment (in bytes) of sections when they are loaded into memory. It must be
# greater than or equal to FileAlignment. The default is the page size for the
# architecture."
#
def adjust_SectionAlignment( val, section_alignment, file_aligment ):
    """Round the address 'val' down to a multiple of the section alignment.

    A SectionAlignment below the page size is replaced with the
    FileAlignment before aligning (per the quoted spec text above, the
    two must match in that case).
    """
    if section_alignment < 0x1000: # page size
        section_alignment = file_aligment
    # 0x200 is the minimum valid FileAlignment according to the documentation
    # although ntoskrnl.exe has an alignment of 0x80 in some Windows versions
    #
    #elif section_alignment < 0x80:
    #    section_alignment = 0x80
    if section_alignment and val % section_alignment:
        # Floor division keeps the result an integer under both Python 2
        # and Python 3 (the old '/' would yield a float on Python 3).
        return section_alignment * ( val // section_alignment )
    return val
class UnicodeStringWrapperPostProcessor:
    """This class attempts to help the process of identifying strings
    that might be plain Unicode or Pascal. A list of strings will be
    wrapped on it with the hope the overlappings will help make the
    decision about their type."""

    def __init__(self, pe, rva_ptr):
        self.pe = pe
        self.rva_ptr = rva_ptr
        self.string = None

    def get_rva(self):
        """Get the RVA of the string."""
        return self.rva_ptr

    def __str__(self):
        """Return the escaped ASCII representation of the string."""

        def convert_char(char):
            if char in string.printable:
                return char
            else:
                return r'\x%02x' % ord(char)

        if self.string:
            return ''.join([convert_char(c) for c in self.string])
        return ''

    def invalidate(self):
        """Make this instance None, to express it's no known string type."""
        # NOTE(review): rebinding the local name 'self' does not affect the
        # object or the caller's reference — this method is a no-op. Kept
        # for interface compatibility.
        self = None

    def render_pascal_16(self):
        """Load the string as Pascal-style UTF-16 (length word first)."""
        self.string = self.pe.get_string_u_at_rva(
            self.rva_ptr + 2,
            max_length=self.__get_pascal_16_length())

    def ask_pascal_16(self, next_rva_ptr):
        """The next RVA is taken to be the one immediately following this one.
        Such RVA could indicate the natural end of the string and will be checked
        with the possible length contained in the first word.
        """
        length = self.__get_pascal_16_length()
        if length == (next_rva_ptr - (self.rva_ptr + 2)) // 2:
            self.length = length
            return True
        return False

    def __get_pascal_16_length(self):
        return self.__get_word_value_at_rva(self.rva_ptr)

    def __get_word_value_at_rva(self, rva):
        """Read the little-endian 16-bit word at 'rva'; False on failure."""
        try:
            # BUGFIX: read at the requested 'rva' instead of always at
            # self.rva_ptr — ask_unicode_16() relies on this to inspect
            # the word immediately *before* the next string.
            data = self.pe.get_data(rva, 2)
        except PEFormatError:
            return False
        if len(data) < 2:
            return False
        return struct.unpack('<H', data)[0]

    #def render_pascal_8(self):
    #    """"""

    def ask_unicode_16(self, next_rva_ptr):
        """The next RVA is taken to be the one immediately following this one.
        Such RVA could indicate the natural end of the string and will be checked
        to see if there's a Unicode NULL character there.
        """
        if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
            self.length = next_rva_ptr - self.rva_ptr
            return True
        return False

    def render_unicode_16(self):
        """Load the string as NUL-terminated UTF-16."""
        self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
    """Generic PE format error exception."""

    def __init__(self, value):
        # Keep the offending value around for callers that inspect it.
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
class Dump:
    """Convenience class for dumping the PE information."""

    def __init__(self):
        self.text = []

    def add_lines(self, txt, indent=0):
        """Add each line from the list 'txt', optionally indented."""
        for line in txt:
            self.add_line(line, indent)

    def add_line(self, txt, indent=0):
        """Add a single line (newline appended), optionally indented."""
        self.add(txt + '\n', indent)

    def add(self, txt, indent=0):
        """Add text verbatim (no newline appended), optionally indented."""
        # Python 2: coerce unicode text to a byte string, escaping each
        # character that cannot be converted on its own.
        if isinstance(txt, unicode):
            try:
                txt = str(txt)
            except UnicodeEncodeError:
                chunks = []
                for ch in txt:
                    try:
                        chunks.append(str(ch))
                    except UnicodeEncodeError:
                        chunks.append(repr(ch))
                txt = ''.join(chunks)
        self.text.append(' ' * indent + txt)

    def add_header(self, txt):
        """Adds a header element."""
        self.add_line('-' * 10 + txt + '-' * 10 + '\n')

    def add_newline(self):
        """Adds a newline."""
        self.text.append('\n')

    def get_text(self):
        """Get the text in its current state."""
        return ''.join(self.text)
# Size in bytes of each struct format character (see the 'struct' module
# documentation); used by Structure.sizeof_type() to compute field offsets.
STRUCT_SIZEOF_TYPES = {
    'x': 1, 'c': 1, 'b': 1, 'B': 1,
    'h': 2, 'H': 2,
    'i': 4, 'I': 4, 'l': 4, 'L': 4, 'f': 4,
    'q': 8, 'Q': 8, 'd': 8,
    's': 1 }
class Structure:
    """Prepare structure object to extract members from data.

    Format is a list containing definitions for the elements
    of the structure.
    """

    def __init__(self, format, name=None, file_offset=None):
        # 'format' is a (struct_name, field_spec_list) pair.
        # Format is forced little endian, for big endian non Intel platforms
        self.__format__ = '<'
        self.__keys__ = []
        #self.values = {}
        self.__format_length__ = 0
        self.__field_offsets__ = dict()
        self.__set_format__(format[1])
        self.__all_zeroes__ = False
        self.__unpacked_data_elms__ = None
        self.__file_offset__ = file_offset
        if name:
            self.name = name
        else:
            self.name = format[0]

    def __get_format__(self):
        return self.__format__

    def get_field_absolute_offset(self, field_name):
        """Return the offset within the field for the requested field in the structure."""
        return self.__file_offset__ + self.__field_offsets__[field_name]

    def get_field_relative_offset(self, field_name):
        """Return the offset within the structure for the requested field."""
        return self.__field_offsets__[field_name]

    def get_file_offset(self):
        return self.__file_offset__

    def set_file_offset(self, offset):
        self.__file_offset__ = offset

    def all_zeroes(self):
        """Returns true is the unpacked data is all zeroes."""
        return self.__all_zeroes__

    def sizeof_type(self, t):
        # A spec like '8s' carries a repeat count before the type letter.
        count = 1
        _t = t
        if t[0] in string.digits:
            # extract the count
            count = int( ''.join([d for d in t if d in string.digits]) )
            _t = ''.join([d for d in t if d not in string.digits])
        return STRUCT_SIZEOF_TYPES[_t] * count

    def __set_format__(self, format):
        # Each spec is 'type,name' — possibly 'type,name1,name2' when
        # several (union) members share the same storage. Builds the
        # struct format string and the per-field offset table.
        offset = 0
        for elm in format:
            if ',' in elm:
                elm_type, elm_name = elm.split(',', 1)
                self.__format__ += elm_type
                elm_names = elm_name.split(',')
                names = []
                for elm_name in elm_names:
                    if elm_name in self.__keys__:
                        # Disambiguate duplicated member names with a
                        # numeric suffix.
                        search_list = [x[:len(elm_name)] for x in self.__keys__]
                        occ_count = search_list.count(elm_name)
                        elm_name = elm_name+'_'+str(occ_count)
                    names.append(elm_name)
                    self.__field_offsets__[elm_name] = offset
                offset += self.sizeof_type(elm_type)
                # Some PE header structures have unions on them, so a certain
                # value might have different names, so each key has a list of
                # all the possible members referring to the data.
                self.__keys__.append(names)
        self.__format_length__ = struct.calcsize(self.__format__)

    def sizeof(self):
        """Return size of the structure."""
        return self.__format_length__

    def __unpack__(self, data):
        if len(data) > self.__format_length__:
            data = data[:self.__format_length__]
        # OC Patch:
        # Some malware have incorrect header lengths.
        # Fail gracefully if this occurs
        # Buggy malware: a29b0118af8b7408444df81701ad5a7f
        #
        elif len(data) < self.__format_length__:
            raise PEFormatError('Data length less than expected header length.')
        if data.count(chr(0)) == len(data):
            self.__all_zeroes__ = True
        self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
        for i in xrange(len(self.__unpacked_data_elms__)):
            for key in self.__keys__[i]:
                # Every union alias gets the same unpacked value.
                #self.values[key] = self.__unpacked_data_elms__[i]
                setattr(self, key, self.__unpacked_data_elms__[i])

    def __pack__(self):
        new_values = []
        for i in xrange(len(self.__unpacked_data_elms__)):
            for key in self.__keys__[i]:
                new_val = getattr(self, key)
                old_val = self.__unpacked_data_elms__[i]
                # In the case of Unions, when the first changed value
                # is picked the loop is exited
                if new_val != old_val:
                    break
            new_values.append(new_val)
        return struct.pack(self.__format__, *new_values)

    def __str__(self):
        return '\n'.join( self.dump() )

    def __repr__(self):
        return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))

    def dump(self, indentation=0):
        """Returns a string representation of the structure."""
        dump = []
        dump.append('[%s]' % self.name)
        # Refer to the __set_format__ method for an explanation
        # of the following construct.
        for keys in self.__keys__:
            for key in keys:
                val = getattr(self, key)
                if isinstance(val, int) or isinstance(val, long):
                    val_str = '0x%-8X' % (val)
                    if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                        try:
                            val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
                        except exceptions.ValueError, e:
                            val_str += ' [INVALID TIME]'
                else:
                    # Non-numeric values: drop embedded NULs for display.
                    val_str = ''.join(filter(lambda c:c != '\0', str(val)))
                dump.append('0x%-8X 0x%-3X %-30s %s' % (
                    self.__field_offsets__[key] + self.__file_offset__,
                    self.__field_offsets__[key], key+':', val_str))
        return dump
class SectionStructure(Structure):
"""Convenience section handling class."""
    def __init__(self, *argl, **argd):
        # Pop the owning PE instance (needed for alignment lookups in the
        # other methods) before delegating to the generic Structure ctor.
        if 'pe' in argd:
            self.pe = argd['pe']
            del argd['pe']
        Structure.__init__(self, *argl, **argd)
    def get_data(self, start=None, length=None):
        """Get data chunk from a section.

        Allows to query data from the section by passing the
        addresses where the PE file would be loaded by default.
        It is then possible to retrieve code and data by its real
        addresses as it would be if loaded.
        """
        PointerToRawData_adj = adjust_FileAlignment( self.PointerToRawData,
            self.pe.OPTIONAL_HEADER.FileAlignment )
        VirtualAddress_adj = adjust_SectionAlignment( self.VirtualAddress,
            self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
        if start is None:
            offset = PointerToRawData_adj
        else:
            # Translate the requested RVA into a file offset inside this
            # section.
            offset = ( start - VirtualAddress_adj ) + PointerToRawData_adj
        if length is not None:
            end = offset + length
        else:
            end = offset + self.SizeOfRawData
        # PointerToRawData is not adjusted here as we might want to read any possible extra bytes
        # that might get cut off by aligning the start (and hence cutting something off the end)
        #
        if end > self.PointerToRawData + self.SizeOfRawData:
            end = self.PointerToRawData + self.SizeOfRawData
        return self.pe.__data__[offset:end]
    def __setattr__(self, name, val):
        # Keep the IMAGE_SCN_* convenience booleans and the raw
        # Characteristics bitfield in sync, whichever side is assigned.
        if name == 'Characteristics':
            section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
            # Set the section's flags according the the Characteristics member
            set_flags(self, val, section_flags)
        elif 'IMAGE_SCN_' in name and hasattr(self, name):
            if val:
                self.__dict__['Characteristics'] |= SECTION_CHARACTERISTICS[name]
            else:
                # NOTE(review): '^=' *toggles* the bit rather than clearing
                # it — assigning False to a flag that is already clear would
                # set it. Verify whether '&= ~mask' was intended.
                self.__dict__['Characteristics'] ^= SECTION_CHARACTERISTICS[name]
        self.__dict__[name] = val
    def get_rva_from_offset(self, offset):
        # Map a file offset to its RVA using the aligned raw-data pointer
        # and the aligned virtual address of this section.
        return offset - adjust_FileAlignment( self.PointerToRawData,
            self.pe.OPTIONAL_HEADER.FileAlignment ) + adjust_SectionAlignment( self.VirtualAddress,
            self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
    def get_offset_from_rva(self, rva):
        # Inverse of get_rva_from_offset(): map an RVA back to its file
        # offset within this section.
        return (rva -
            adjust_SectionAlignment(
                self.VirtualAddress,
                self.pe.OPTIONAL_HEADER.SectionAlignment,
                self.pe.OPTIONAL_HEADER.FileAlignment )
            ) + adjust_FileAlignment(
                self.PointerToRawData,
                self.pe.OPTIONAL_HEADER.FileAlignment )
def contains_offset(self, offset):
    """Check whether the section contains the file offset provided."""
    if self.PointerToRawData is None:
        # .bss and other sections holding only uninitialized data have
        # no raw pointer and occupy no space in the file.
        return False

    raw_start = adjust_FileAlignment( self.PointerToRawData,
        self.pe.OPTIONAL_HEADER.FileAlignment )
    return raw_start <= offset < raw_start + self.SizeOfRawData
def contains_rva(self, rva):
    """Check whether the section contains the address provided."""
    # If SizeOfRawData claims more bytes than actually remain in the
    # file past this section's start, the section is either truncated
    # or the field holds a misleading value; in that case trust the
    # VirtualSize instead.
    raw_start = adjust_FileAlignment( self.PointerToRawData,
        self.pe.OPTIONAL_HEADER.FileAlignment )
    if len(self.pe.__data__) - raw_start < self.SizeOfRawData:
        # PECOFF documentation v8: VirtualSize is the total size of the
        # section when loaded into memory; if greater than SizeOfRawData
        # the section is zero-padded.
        size = self.Misc_VirtualSize
    else:
        size = max(self.SizeOfRawData, self.Misc_VirtualSize)

    va_start = adjust_SectionAlignment( self.VirtualAddress,
        self.pe.OPTIONAL_HEADER.SectionAlignment,
        self.pe.OPTIONAL_HEADER.FileAlignment )
    return va_start <= rva < va_start + size
def contains(self, rva):
    """Deprecated alias; use contains_rva() instead."""
    return self.contains_rva(rva)
#def set_data(self, data):
# """Set the data belonging to the section."""
#
# self.data = data
def get_entropy(self):
    """Calculate and return the entropy for the section."""
    section_data = self.get_data()
    return self.entropy_H(section_data)
def get_hash_sha1(self):
    """Get the SHA-1 hex-digest of the section's data."""
    if sha1 is None:
        # Hashing support is unavailable on this interpreter.
        return None
    return sha1( self.get_data() ).hexdigest()
def get_hash_sha256(self):
    """Get the SHA-256 hex-digest of the section's data."""
    if sha256 is None:
        # Hashing support is unavailable on this interpreter.
        return None
    return sha256( self.get_data() ).hexdigest()
def get_hash_sha512(self):
    """Get the SHA-512 hex-digest of the section's data."""
    if sha512 is None:
        # Hashing support is unavailable on this interpreter.
        return None
    return sha512( self.get_data() ).hexdigest()
def get_hash_md5(self):
    """Get the MD5 hex-digest of the section's data."""
    if md5 is None:
        # Hashing support is unavailable on this interpreter.
        return None
    return md5( self.get_data() ).hexdigest()
def entropy_H(self, data):
    """Calculate the Shannon entropy (bits per byte) of a chunk of data.

    Accepts either a byte string (iteration yields 1-character
    strings) or a bytes/bytearray object (iteration yields ints),
    and returns a float in the range [0.0, 8.0]. Empty input has
    entropy 0.0 by definition.
    """
    if len(data) == 0:
        return 0.0

    occurrences = array.array('L', [0]*256)

    for item in data:
        # Normalize each element to its integer byte value so both
        # str and bytes/bytearray inputs are supported.
        occurrences[item if isinstance(item, int) else ord(item)] += 1

    entropy = 0
    total = len(data)
    for count in occurrences:
        if count:
            p_x = float(count) / total
            entropy -= p_x*math.log(p_x, 2)

    return entropy
class DataContainer:
    """Generic bag of named attributes.

    Every keyword argument passed to the constructor becomes an
    attribute of the instance.
    """

    def __init__(self, **attributes):
        for attr_name, attr_value in attributes.items():
            setattr(self, attr_name, attr_value)
class ImportDescData(DataContainer):
    """Holds import descriptor information.

    dll:     name of the imported DLL
    imports: list of imported symbols (ImportData instances)
    struct:  IMAGE_IMPORT_DESCRIPTOR structure
    """
class ImportData(DataContainer):
    """Holds imported symbol's information.

    ordinal: Ordinal of the symbol
    name:    Name of the symbol
    bound:   If the symbol is bound, this contains the address.
    """

    def __setattr__(self, name, val):
        # Assignments to 'ordinal', 'bound', 'address' and 'name' are
        # mirrored into the underlying thunk structures / file data so
        # that PE.write() emits the modified values.
        #
        # If the instance doesn't yet have an ordinal attribute
        # it's not fully initialized so can't do any of the
        # following
        #
        if hasattr(self, 'ordinal') and hasattr(self, 'bound') and hasattr(self, 'name'):

            if name == 'ordinal':
                if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
                    ordinal_flag = IMAGE_ORDINAL_FLAG
                elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                    ordinal_flag = IMAGE_ORDINAL_FLAG64
                # NOTE(review): if PE_TYPE is neither magic value,
                # ordinal_flag is unbound here and the next line raises
                # NameError -- confirm PE_TYPE is always one of the two.

                # Set the ordinal and flag the entry as importing by ordinal
                self.struct_table.Ordinal = ordinal_flag | (val & 0xffff)
                self.struct_table.AddressOfData = self.struct_table.Ordinal
                self.struct_table.Function = self.struct_table.Ordinal
                self.struct_table.ForwarderString = self.struct_table.Ordinal
            elif name == 'bound':
                if self.struct_iat is not None:
                    self.struct_iat.AddressOfData = val
                    # NOTE(review): the next line is a self-assignment
                    # (no-op); possibly the Ordinal field was meant to be
                    # updated here -- verify against the thunk layout.
                    self.struct_iat.AddressOfData = self.struct_iat.AddressOfData
                    self.struct_iat.Function = self.struct_iat.AddressOfData
                    self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
            elif name == 'address':
                self.struct_table.AddressOfData = val
                self.struct_table.Ordinal = self.struct_table.AddressOfData
                self.struct_table.Function = self.struct_table.AddressOfData
                self.struct_table.ForwarderString = self.struct_table.AddressOfData
            elif name == 'name':
                # Make sure we reset the entry in case the import had been set to import by ordinal
                if self.name_offset:
                    name_rva = self.pe.get_rva_from_offset( self.name_offset )
                    self.pe.set_dword_at_offset( self.ordinal_offset, (0<<31) | name_rva )

                # Complain if the length of the new name is longer than the existing one
                # NOTE(review): the raise is commented out, so a longer
                # name silently overwrites adjacent bytes.
                if len(val) > len(self.name):
                    #raise Exception('The export name provided is longer than the existing one.')
                    pass
                self.pe.set_bytes_at_offset( self.name_offset, val )

        self.__dict__[name] = val
class ExportDirData(DataContainer):
    """Holds export directory information.

    struct:  IMAGE_EXPORT_DIRECTORY structure
    symbols: list of exported symbols (ExportData instances)
    """
class ExportData(DataContainer):
    """Holds exported symbols' information.

    ordinal:   ordinal of the symbol
    address:   address of the symbol
    name:      name of the symbol (None if the symbol is
               exported by ordinal only)
    forwarder: if the symbol is forwarded it will
               contain the name of the target symbol,
               None otherwise.
    """

    def __setattr__(self, name, val):
        # Mirror attribute assignments into the raw file data so that
        # PE.write() emits the modified values.
        #
        # If the instance doesn't yet have an ordinal attribute
        # it's not fully initialized so can't do any of the
        # following
        #
        if hasattr(self, 'ordinal') and hasattr(self, 'address') and hasattr(self, 'forwarder') and hasattr(self, 'name'):

            if name == 'ordinal':
                self.pe.set_word_at_offset( self.ordinal_offset, val )
            elif name == 'address':
                self.pe.set_dword_at_offset( self.address_offset, val )
            elif name == 'name':
                # Complain if the length of the new name is longer than the existing one
                # NOTE(review): the raise is commented out, so a longer
                # name silently overwrites adjacent bytes.
                if len(val) > len(self.name):
                    #raise Exception('The export name provided is longer than the existing one.')
                    pass
                self.pe.set_bytes_at_offset( self.name_offset, val )
            elif name == 'forwarder':
                # Complain if the length of the new name is longer than the existing one
                if len(val) > len(self.forwarder):
                    #raise Exception('The forwarder name provided is longer than the existing one.')
                    pass
                self.pe.set_bytes_at_offset( self.forwarder_offset, val )

        self.__dict__[name] = val
class ResourceDirData(DataContainer):
    """Holds resource directory information.

    struct:  IMAGE_RESOURCE_DIRECTORY structure
    entries: list of entries (ResourceDirEntryData instances)
    """
class ResourceDirEntryData(DataContainer):
    """Holds resource directory entry data.

    struct:     IMAGE_RESOURCE_DIRECTORY_ENTRY structure
    name:       If the resource is identified by name this
                attribute will contain the name string. None
                otherwise. If identified by id, the id is
                available at 'struct.Id'
    id:         the id, also in struct.Id
    directory:  If this entry has a lower level directory
                this attribute will point to the
                ResourceDirData instance representing it.
    data:       If this entry has no further lower directories
                and points to the actual resource data, this
                attribute will reference the corresponding
                ResourceDataEntryData instance.

    (Either of the 'directory' or 'data' attribute will exist,
    but not both.)
    """
class ResourceDataEntryData(DataContainer):
    """Holds resource data entry information.

    struct:  IMAGE_RESOURCE_DATA_ENTRY structure
    lang:    Primary language ID
    sublang: Sublanguage ID
    """
class DebugData(DataContainer):
    """Holds debug information.

    struct: IMAGE_DEBUG_DIRECTORY structure
    """
class BaseRelocationData(DataContainer):
    """Holds base relocation information.

    struct:  IMAGE_BASE_RELOCATION structure
    entries: list of relocation data (RelocationData instances)
    """
class RelocationData(DataContainer):
    """Holds relocation information.

    type: Type of relocation.
          The type string can be obtained by RELOCATION_TYPE[type]
    rva:  RVA of the relocation
    """

    def __setattr__(self, name, val):
        # 'type' and 'rva' are packed together in one 16-bit word of the
        # underlying relocation entry: the type in the top 4 bits and the
        # offset from the block's base RVA in the low 12 bits. Keep that
        # word in sync on assignment.
        #
        # If the instance doesn't yet have a struct attribute
        # it's not fully initialized so can't do any of the
        # following
        #
        if hasattr(self, 'struct'):
            # Get the word containing the type and data
            #
            word = self.struct.Data

            if name == 'type':
                word = (val << 12) | (word & 0xfff)
            elif name == 'rva':
                offset = val-self.base_rva
                # Negative offsets cannot be represented in 12 bits;
                # clamp to zero.
                if offset < 0:
                    offset = 0
                word = ( word & 0xf000) | ( offset & 0xfff)

            # Store the modified data
            #
            self.struct.Data = word

        self.__dict__[name] = val
class TlsData(DataContainer):
    """Holds TLS information.

    struct: IMAGE_TLS_DIRECTORY structure
    """
class BoundImportDescData(DataContainer):
    """Holds bound import descriptor data.

    This directory entry will provide information on the
    DLLs this PE file has been bound to (if bound at all).
    The structure will contain the name and timestamp of the
    DLL at the time of binding so that the loader can know
    whether it differs from the one currently present in the
    system and must, therefore, re-bind the PE's imports.

    struct:  IMAGE_BOUND_IMPORT_DESCRIPTOR structure
    name:    DLL name
    entries: list of entries (BoundImportRefData instances)
             the entries will exist if this DLL has forwarded
             symbols. If so, the destination DLL will have an
             entry in this list.
    """
class LoadConfigData(DataContainer):
    """Holds Load Config data.

    struct: IMAGE_LOAD_CONFIG_DIRECTORY structure
    name:   dll name
    """
class BoundImportRefData(DataContainer):
    """Holds bound import forwarder reference data.

    Contains the same information as the bound descriptor but
    for forwarded DLLs, if any.

    struct: IMAGE_BOUND_FORWARDER_REF structure
    name:   dll name
    """
# Valid FAT32 8.3 short filename characters according to:
# http://en.wikipedia.org/wiki/8.3_filename
# This will help decide whether DLL ASCII names are likely
# to be valid or otherwise corrupted data
#
# The filename length is not checked because the DLL's filename
# can be longer than the 8.3
allowed_filename = string.lowercase + string.uppercase + string.digits + "!#$%&'()-@^_`{}~+,.;=[]" + ''.join( [chr(i) for i in range(128, 256)] )
def is_valid_dos_filename(s):
    """Return True if *s* is a string consisting solely of characters
    allowed in a FAT32 8.3 filename (high-bit bytes included)."""
    if s is None or not isinstance(s, str):
        return False
    return all(ch in allowed_filename for ch in s)
# Check if an imported name uses the valid accepted characters expected in mangled
# function names. If the symbol's characters don't fall within this charset
# we will assume the name is invalid
#
allowed_function_name = string.lowercase + string.uppercase + string.digits + '_?@$()'
def is_valid_function_name(s):
    """Return True if *s* is a string consisting solely of characters
    that occur in (possibly mangled) import/export symbol names."""
    if s is None or not isinstance(s, str):
        return False
    return all(ch in allowed_function_name for ch in s)
class PE:
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directories information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
would load 'module.dll' and process it. If the data would be already
available in a buffer the same could be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
module itself by means,for instance, of a "pefile.fast_load = True".
That will make all the subsequent instances not to load the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'I,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'I,TimeDateStamp', 'I,PointerToSymbolTable',
'I,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('I,VirtualAddress', 'I,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode', 'I,BaseOfData',
'I,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'I,SizeOfStackReserve', 'I,SizeOfStackCommit',
'I,SizeOfHeapReserve', 'I,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode',
'Q,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('I,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'I,VirtualAddress', 'I,SizeOfRawData', 'I,PointerToRawData',
'I,PointerToRelocations', 'I,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'I,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('I,grAttrs', 'I,szName', 'I,phmod', 'I,pIAT', 'I,pINT',
'I,pBoundIAT', 'I,pUnloadIAT', 'I,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('I,OriginalFirstThunk,Characteristics',
'I,TimeDateStamp', 'I,ForwarderChain', 'I,Name', 'I,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Name',
'I,Base', 'I,NumberOfFunctions', 'I,NumberOfNames',
'I,AddressOfFunctions', 'I,AddressOfNames', 'I,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('I,Name',
'I,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('I,OffsetToData', 'I,Size', 'I,CodePage', 'I,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('I,Signature', 'I,StrucVersion', 'I,FileVersionMS', 'I,FileVersionLS',
'I,ProductVersionMS', 'I,ProductVersionLS', 'I,FileFlagsMask', 'I,FileFlags',
'I,FileOS', 'I,FileType', 'I,FileSubtype', 'I,FileDateMS', 'I,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('I,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'I,Type', 'I,SizeOfData', 'I,AddressOfRawData',
'I,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('I,VirtualAddress', 'I,SizeOfBlock') )
__IMAGE_BASE_RELOCATION_ENTRY_format__ = ('IMAGE_BASE_RELOCATION_ENTRY',
('H,Data',) )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('I,StartAddressOfRawData', 'I,EndAddressOfRawData',
'I,AddressOfIndex', 'I,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size', 'I,TimeDateStamp',
'H,MajorVersion', 'H,MinorVersion',
'I,GlobalFlagsClear', 'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'I,DeCommitFreeBlockThreshold',
'I,DeCommitTotalFreeThreshold',
'I,LockPrefixTable',
'I,MaximumAllocationSize',
'I,VirtualMemoryThreshold',
'I,ProcessHeapFlags',
'I,ProcessAffinityMask',
'H,CSDVersion', 'H,Reserved1',
'I,EditList', 'I,SecurityCookie',
'I,SEHandlerTable', 'I,SEHandlerCount' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size', 'I,TimeDateStamp',
'H,MajorVersion', 'H,MinorVersion',
'I,GlobalFlagsClear', 'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'Q,DeCommitFreeBlockThreshold',
'Q,DeCommitTotalFreeThreshold',
'Q,LockPrefixTable',
'Q,MaximumAllocationSize',
'Q,VirtualMemoryThreshold',
'Q,ProcessAffinityMask',
'I,ProcessHeapFlags',
'H,CSDVersion', 'H,Reserved1',
'Q,EditList', 'Q,SecurityCookie',
'Q,SEHandlerTable', 'Q,SEHandlerCount' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None):
    """Initialize the PE instance.

    name:      path of the file to load (mutually exclusive with data)
    data:      raw PE image already read into a buffer
    fast_load: if true, skip parsing the data directories; they can
               be parsed later with full_load(). When not given, the
               module-level 'fast_load' global supplies the default.
    """
    self.sections = []

    self.__warnings = []

    self.PE_TYPE = None

    # Nothing to parse when neither a path nor a buffer was supplied.
    if not name and not data:
        return

    # This list will keep track of all the structures created.
    # That will allow for an easy iteration through the list
    # in order to save the modifications made
    self.__structures__ = []

    if not fast_load:
        # NOTE(review): an explicit fast_load=False is also replaced by
        # the module-level default here -- confirm that is intended.
        fast_load = globals()['fast_load']
    self.__parse__(name, data, fast_load)
def __unpack_data__(self, format, data, file_offset):
    """Apply structure format to raw data.

    Returns an unpacked structure object if successful, None otherwise.
    Successfully unpacked structures are also appended to
    __structures__ so write() can later serialize any modifications.
    """
    structure = Structure(format, file_offset=file_offset)

    try:
        structure.__unpack__(data)
    except PEFormatError, err:
        # Record the problem as a warning and carry on; callers are
        # expected to handle a None result.
        self.__warnings.append(
            'Corrupt header "%s" at file offset %d. Exception: %s' % (
                format[0], file_offset, str(err)) )
        return None

    self.__structures__.append(structure)

    return structure
def __parse__(self, fname, data, fast_load):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname:
fd = file(fname, 'rb')
self.fileno = fd.fileno()
self.__data__ = mmap.mmap( self.fileno, 0, access = mmap.ACCESS_READ )
fd.close()
elif data:
self.__data__ = data
dos_header_data = self.__data__[:64]
if len(dos_header_data) != 64:
raise PEFormatError('Unable to read the DOS Header, possibly a truncated file.')
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
dos_header_data, file_offset=0)
if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE:
raise PEFormatError('Probably a ZM Executable (not a PE file).')
if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:nt_headers_offset+8],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_NE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a NE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LX_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LX file')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:nt_headers_offset+4+32],
file_offset = nt_headers_offset+4)
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# Set the image's flags according the the Characteristics member
set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeroes
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeroes to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:optional_header_offset+0x200],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
# Set the Dll Characteristics flags according the the DllCharacteristics member
set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
# The NumberOfRvaAndSizes is sanitized to stay within
# reasonable limits so can be casted to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100
for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__) - offset == 0:
break
if len(self.__data__) - offset < 8:
data = self.__data__[offset:] + '\0'*8
else:
data = self.__data__[offset:offset+MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
# We assume a normally sized optional header, hence that we do
# a sizeof() instead of reading SizeOfOptionalHeader.
# Then we add a default number of directories times their size,
# if we go beyond that, we assume the number of directories
# is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
# Should this throw an exception in the minimum header offset
# can't be found?
#
rawDataPointers = [
adjust_FileAlignment( s.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
for s in self.sections if s.PointerToRawData>0 ]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset < offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.parse_data_directories()
class RichHeader:
pass
rich_header = self.parse_rich_header()
if rich_header:
self.RICH_HEADER = RichHeader()
self.RICH_HEADER.checksum = rich_header.get('checksum', None)
self.RICH_HEADER.values = rich_header.get('values', None)
else:
self.RICH_HEADER = None
def parse_rich_header(self):
    """Parse the Rich header that precedes the PE header.

    See http://www.ntcore.com/files/richsign.htm for more information.

    Structure:
    00 DanS ^ checksum, checksum, checksum, checksum
    10 Symbol RVA ^ checksum, Symbol size ^ checksum...
    ...
    XX Rich, checksum, 0, 0,...

    Returns a dict with the XOR key ('checksum') and the decoded
    dwords ('values'), or None if no valid Rich header is present.
    """
    # Rich Header constants
    #
    DANS = 0x536E6144 # 'DanS' as dword
    RICH = 0x68636952 # 'Rich' as dword

    # Grab the 0x80 bytes where the Rich header lives and split them
    # into 32 little-endian dwords.
    try:
        dwords = list(struct.unpack("<32I", self.get_data(0x80, 0x80)))
    except:
        # Not enough data to contain a Rich header; abort its parsing.
        return None

    # The XOR key must appear three times right after the encrypted
    # DanS signature.
    key = dwords[1]
    if dwords[0] ^ key != DANS or dwords[2] != key or dwords[3] != key:
        return None

    decoded = []
    result = {"checksum": key, "values": decoded}

    payload = dwords[4:]
    for idx in xrange(0, len(payload), 2):
        # Stop at the plaintext 'Rich' footer signature.
        if payload[idx] == RICH:
            # It should be followed by the key itself.
            if payload[idx + 1] != key:
                self.__warnings.append('Rich Header corrupted')
            break
        # Header values come in pairs, each XOR-ed with the key.
        decoded += [payload[idx] ^ key, payload[idx + 1] ^ key]

    return result
def get_warnings(self):
    """Return the list of warnings.

    Non-critical problems found when parsing the PE file are
    appended to a list of warnings. This method returns the
    full list.
    """
    # Note: the internal list itself is returned, not a copy.
    return self.__warnings
def show_warnings(self):
    """Print the list of warnings.

    Non-critical problems found when parsing the PE file are
    appended to a list of warnings. This method prints the
    full list to standard output.
    """
    for warning in self.__warnings:
        print '>', warning
def full_load(self):
    """Process the data directories.

    This method will load the data directories which might not have
    been loaded if the "fast_load" option was used.
    """
    self.parse_data_directories()
def write(self, filename=None):
    """Write the PE file.

    This function will process all headers and components
    of the PE file and include all changes made (by just
    assigning to attributes in the PE objects) and write
    the changes back to a file whose name is provided as
    an argument. The filename is optional, if not
    provided the data will be returned as a 'str' object.
    """
    # Start from a mutable copy of the original raw data and re-pack
    # every parsed structure over its recorded file offset, so attribute
    # modifications end up in the output image.
    file_data = list(self.__data__)
    for structure in self.__structures__:
        struct_data = list(structure.__pack__())
        offset = structure.get_file_offset()
        file_data[offset:offset+len(struct_data)] = struct_data

    # Version-information strings live outside the packed structures, so
    # modified StringTable entries are re-encoded by hand (two bytes per
    # character, little-endian) and spliced back at the key/value offsets
    # recorded while parsing.
    if hasattr(self, 'VS_VERSIONINFO'):
        if hasattr(self, 'FileInfo'):
            for entry in self.FileInfo:
                if hasattr(entry, 'StringTable'):
                    for st_entry in entry.StringTable:
                        for key, entry in st_entry.entries.items():
                            # offsets/lengths: (key, value) pairs recorded
                            # at parse time.
                            offsets = st_entry.entries_offsets[key]
                            lengths = st_entry.entries_lengths[key]
                            if len( entry ) > lengths[1]:
                                # New value is longer than the original slot.
                                # NOTE(review): 'l' ends up len(entry)*2 items
                                # long but the target slice is lengths[1]*2, so
                                # this assignment grows the list and shifts the
                                # rest of the file -- looks unintended, confirm.
                                l = list()
                                for idx, c in enumerate(entry):
                                    if ord(c) > 256:
                                        # Wide char: emit low then high byte.
                                        # NOTE(review): boundary looks like it
                                        # should be '>= 256' (chr(256) raises).
                                        l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] )
                                    else:
                                        l.extend( [chr( ord(c) ), '\0'] )
                                file_data[
                                    offsets[1] : offsets[1] + lengths[1]*2 ] = l
                            else:
                                l = list()
                                for idx, c in enumerate(entry):
                                    if ord(c) > 256:
                                        l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] )
                                    else:
                                        l.extend( [chr( ord(c) ), '\0'] )
                                file_data[
                                    offsets[1] : offsets[1] + len(entry)*2 ] = l
                                # Pad what remains of the original slot with
                                # NUL characters.
                                remainder = lengths[1] - len(entry)
                                file_data[
                                    offsets[1] + len(entry)*2 :
                                    offsets[1] + lengths[1]*2 ] = [
                                        u'\0' ] * remainder*2

    # Re-assemble the byte string (chr(ord(c)) collapses any unicode
    # NULs inserted above back to plain bytes).
    new_file_data = ''.join( [ chr(ord(c)) for c in file_data] )
    if filename:
        f = file(filename, 'wb+')
        f.write(new_file_data)
        f.close()
    else:
        return new_file_data
def parse_sections(self, offset):
    """Fetch the PE file sections.

    The sections will be readily available in the "sections" attribute.
    Its attributes will contain all the section information plus "data"
    a buffer containing the section's data.

    The "Characteristics" member will be processed and attributes
    representing the section characteristics (with the 'IMAGE_SCN_'
    string trimmed from the constant's names) will be added to the
    section instance.

    Refer to the SectionStructure class for additional info.
    """
    self.sections = []

    for i in xrange(self.FILE_HEADER.NumberOfSections):
        section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
        if not section:
            break
        # Section headers are laid out consecutively right after the
        # optional header; 'offset' is the start of the section table.
        section_offset = offset + section.sizeof() * i
        section.set_file_offset(section_offset)
        section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()])
        self.__structures__.append(section)

        # Sanity checks: warn on values that can't be right for a file
        # of this size (common in corrupted or malicious samples).
        if section.SizeOfRawData > len(self.__data__):
            self.__warnings.append(
                ('Error parsing section %d. ' % i) +
                'SizeOfRawData is larger than file.')

        if adjust_FileAlignment( section.PointerToRawData,
            self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
            self.__warnings.append(
                ('Error parsing section %d. ' % i) +
                'PointerToRawData points beyond the end of the file.')

        if section.Misc_VirtualSize > 0x10000000:
            self.__warnings.append(
                ('Suspicious value found parsing section %d. ' % i) +
                'VirtualSize is extremely large > 256MiB.')

        if adjust_SectionAlignment( section.VirtualAddress,
            self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
            self.__warnings.append(
                ('Suspicious value found parsing section %d. ' % i) +
                'VirtualAddress is beyond 0x10000000.')

        #
        # Some packer used a non-aligned PointerToRawData in the sections,
        # which causes several common tools not to load the section data
        # properly as they blindly read from the indicated offset.
        # It seems that Windows will round the offset down to the largest
        # offset multiple of FileAlignment which is smaller than
        # PointerToRawData. The following code will do the same.
        #
        #alignment = self.OPTIONAL_HEADER.FileAlignment
        #self.update_section_data(section)

        if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
            ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
            self.__warnings.append(
                ('Error parsing section %d. ' % i) +
                'Suspicious value for FileAlignment in the Optional Header. ' +
                'Normally the PointerToRawData entry of the sections\' structures ' +
                'is a multiple of FileAlignment, this might imply the file ' +
                'is trying to confuse tools which parse this incorrectly')

        section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')

        # Set the section's flags according to the Characteristics member
        set_flags(section, section.Characteristics, section_flags)

        # Writable + executable sections are a classic packer tell.
        if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False)  and
            section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
            self.__warnings.append(
                ('Suspicious flags set for section %d. ' % i) +
                'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
                'This might indicate a packed executable.')

        self.sections.append(section)

    # Return the offset just past the end of the section table.
    if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
        return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
    else:
        return offset
def parse_data_directories(self, directories=None):
    """Parse and process the PE file's data directories.

    If the optional argument 'directories' is given, only
    the directories at the specified indices will be parsed.
    Such functionality allows parsing of areas of interest
    without the burden of having to parse all others.

    The directories can then be specified as:

    For export/import only:

        directories = [ 0, 1 ]

    or (more verbosely):

        directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
            DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]

    If 'directories' is a list, the ones that are processed will be removed,
    leaving only the ones that are not present in the image.
    """

    # Maps each directory's constant name to its parsing method.
    directory_parsing = (
        ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
        ('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
        ('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
        ('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
        ('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
        ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
        ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )

    if directories is not None:
        if not isinstance(directories, (tuple, list)):
            directories = [directories]

    for entry in directory_parsing:
        # OC Patch:
        #
        try:
            directory_index = DIRECTORY_ENTRY[entry[0]]
            dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
        except IndexError:
            break

        # Only process all the directories if no individual ones have
        # been chosen
        #
        if directories is None or directory_index in directories:
            if dir_entry.VirtualAddress:
                value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
                if value:
                    # e.g. 'IMAGE_DIRECTORY_ENTRY_IMPORT' becomes the
                    # 'DIRECTORY_ENTRY_IMPORT' attribute of the instance.
                    setattr(self, entry[0][6:], value)

        # Remove processed directories from the caller's list.
        # Fixed: the membership test used to compare the directory *name*
        # (entry[0]) against the list of numeric *indices*, which was
        # always False, so requested directories were never removed as
        # the docstring promises.
        if (directories is not None) and isinstance(directories, list) and (directory_index in directories):
            directories.remove(directory_index)
def parse_directory_bound_imports(self, rva, size):
    """Walk and parse the bound import directory.

    Returns a list of BoundImportDescData instances, each carrying the
    module name and its forwarder refs, or None if the directory can't
    be parsed.

    NOTE(review): entries are read straight from the raw file data
    (self.__data__[rva:...]) without RVA-to-offset translation --
    presumably because this directory lives in the headers where the
    two coincide; confirm.
    """
    bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
    bnd_descr_size = bnd_descr.sizeof()
    start = rva

    bound_imports = []
    while True:
        bnd_descr = self.__unpack_data__(
            self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
            self.__data__[rva:rva+bnd_descr_size],
            file_offset = rva)
        if bnd_descr is None:
            # If can't parse directory then silently return.
            # This directory does not necessarily have to be valid to
            # still have a valid PE file
            self.__warnings.append(
                'The Bound Imports directory exists but can\'t be parsed.')
            return

        # An all-zero descriptor terminates the list.
        if bnd_descr.all_zeroes():
            break

        rva += bnd_descr.sizeof()

        forwarder_refs = []
        for idx in xrange(bnd_descr.NumberOfModuleForwarderRefs):
            # Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
            # IMAGE_BOUND_FORWARDER_REF have the same size.
            bnd_frwd_ref = self.__unpack_data__(
                self.__IMAGE_BOUND_FORWARDER_REF_format__,
                self.__data__[rva:rva+bnd_descr_size],
                file_offset = rva)
            # OC Patch:
            if not bnd_frwd_ref:
                raise PEFormatError(
                    "IMAGE_BOUND_FORWARDER_REF cannot be read")
            rva += bnd_frwd_ref.sizeof()

            # Module names are stored at offsets relative to the start
            # of the directory.
            offset = start+bnd_frwd_ref.OffsetModuleName
            name_str =  self.get_string_from_data(
                0, self.__data__[offset : offset + MAX_STRING_LENGTH])
            if not name_str:
                break
            forwarder_refs.append(BoundImportRefData(
                struct = bnd_frwd_ref,
                name = name_str))

        offset = start+bnd_descr.OffsetModuleName
        name_str = self.get_string_from_data(
            0, self.__data__[offset : offset + MAX_STRING_LENGTH])
        if not name_str:
            break

        bound_imports.append(
            BoundImportDescData(
                struct = bnd_descr,
                name = name_str,
                entries = forwarder_refs))

    return bound_imports
def parse_directory_tls(self, rva, size):
    """Parse the TLS directory.

    Returns a TlsData instance wrapping the IMAGE_TLS_DIRECTORY
    structure (32- or 64-bit layout depending on the PE type), or
    None when the data can't be read or the PE type is unknown.
    """
    # Select the structure layout matching the PE type.
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        format = self.__IMAGE_TLS_DIRECTORY_format__
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        format = self.__IMAGE_TLS_DIRECTORY64_format__
    else:
        # Fixed: an unrecognized PE_TYPE used to leave 'format' unbound
        # and raise a NameError below; bail out gracefully instead.
        self.__warnings.append(
            'Invalid TLS information. Unrecognized PE type: %r' %
            self.PE_TYPE)
        return None

    try:
        tls_struct = self.__unpack_data__(
            format,
            self.get_data( rva, Structure(format).sizeof() ),
            file_offset = self.get_offset_from_rva(rva))
    except PEFormatError:
        self.__warnings.append(
            'Invalid TLS information. Can\'t read ' +
            'data at RVA: 0x%x' % rva)
        tls_struct = None

    if not tls_struct:
        return None

    return TlsData( struct = tls_struct )
def parse_directory_load_config(self, rva, size):
    """Parse the LOAD_CONFIG directory.

    Returns a LoadConfigData instance wrapping the
    IMAGE_LOAD_CONFIG_DIRECTORY structure (32- or 64-bit layout
    depending on the PE type), or None when the data can't be read
    or the PE type is unknown.
    """
    # Select the structure layout matching the PE type.
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__
    else:
        # Fixed: an unrecognized PE_TYPE used to leave 'format' unbound
        # and raise a NameError below; bail out gracefully instead.
        self.__warnings.append(
            'Invalid LOAD_CONFIG information. Unrecognized PE type: %r' %
            self.PE_TYPE)
        return None

    try:
        load_config_struct = self.__unpack_data__(
            format,
            self.get_data( rva, Structure(format).sizeof() ),
            file_offset = self.get_offset_from_rva(rva))
    except PEFormatError:
        self.__warnings.append(
            'Invalid LOAD_CONFIG information. Can\'t read ' +
            'data at RVA: 0x%x' % rva)
        load_config_struct = None

    if not load_config_struct:
        return None

    return LoadConfigData( struct = load_config_struct )
def parse_relocations_directory(self, rva, size):
    """Walk and parse the base relocations directory.

    Returns a list of BaseRelocationData instances, one per
    relocation block, each carrying its decoded entries.
    """
    rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof()
    end = rva+size
    relocations = []
    while rva < end:
        # OC Patch:
        # Malware that has bad RVA entries will cause an error.
        # Just continue on after an exception
        #
        try:
            rlc = self.__unpack_data__(
                self.__IMAGE_BASE_RELOCATION_format__,
                self.get_data(rva, rlc_size),
                file_offset = self.get_offset_from_rva(rva) )
        except PEFormatError:
            self.__warnings.append(
                'Invalid relocation information. Can\'t read ' +
                'data at RVA: 0x%x' % rva)
            rlc = None

        if not rlc:
            break

        # The 16-bit entries follow the block header and fill the
        # remainder of SizeOfBlock.
        reloc_entries = self.parse_relocations(
            rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size )

        relocations.append(
            BaseRelocationData(
                struct = rlc,
                entries = reloc_entries))

        # A zero SizeOfBlock would loop forever on the same RVA.
        if not rlc.SizeOfBlock:
            break
        rva += rlc.SizeOfBlock

    return relocations
def parse_relocations(self, data_rva, rva, size):
    """Parse a block of relocation entries.

    Reads 'size' bytes at 'data_rva' and decodes each 16-bit entry
    into a RelocationData: the relocation type lives in the top four
    bits and the offset (added to the block's base RVA 'rva') in the
    low twelve.
    """
    raw = self.get_data(data_rva, size)
    current_offset = self.get_offset_from_rva(data_rva)

    relocations = []
    for index in xrange(len(raw) / 2):
        entry_struct = self.__unpack_data__(
            self.__IMAGE_BASE_RELOCATION_ENTRY_format__,
            raw[index * 2:index * 2 + 2],
            file_offset = current_offset)
        if not entry_struct:
            break

        packed_word = entry_struct.Data
        relocations.append(
            RelocationData(
                struct = entry_struct,
                type = packed_word >> 12,
                base_rva = rva,
                rva = (packed_word & 0x0fff) + rva))
        current_offset += entry_struct.sizeof()

    return relocations
def parse_debug_directory(self, rva, size):
""""""
dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof()
debug = []
for idx in xrange(size/dbg_size):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError, e:
self.__warnings.append(
'Invalid debug information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
debug.append(
DebugData(
struct = dbg))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0, dirs=None):
    """Parse the resources directory.

    Given the RVA of the resources directory, it will process all
    its entries.

    The root will have the corresponding member of its structure,
    IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
    entries in the directory.

    Those entries will have, correspondingly, all the structure's
    members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
    "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
    representing upper layers of the tree. This one will also have
    an 'entries' attribute, pointing to the 3rd, and last, level.
    Another directory with more entries. Those last entries will
    have a new attribute (both 'leaf' or 'data_entry' can be used to
    access it). This structure finally points to the resource data.
    All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
    are available as its attributes.
    """

    # OC Patch:
    # 'dirs' records the RVA of every directory already visited so that
    # self-referencing directory loops can be detected further down.
    if dirs is None:
        dirs = [rva]
    if base_rva is None:
        base_rva = rva

    resources_section = self.get_section_by_rva(rva)

    try:
        # If the RVA is invalid all would blow up. Some EXEs seem to be
        # specially nasty and have an invalid RVA.
        data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof() )
    except PEFormatError, e:
        self.__warnings.append(
            'Invalid resources directory. Can\'t read ' +
            'directory data at RVA: 0x%x' % rva)
        return None

    # Get the resource directory structure, that is, the header
    # of the table preceding the actual entries
    #
    resource_dir = self.__unpack_data__(
        self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
        file_offset = self.get_offset_from_rva(rva) )
    if resource_dir is None:
        # If can't parse resources directory then silently return.
        # This directory does not necessarily have to be valid to
        # still have a valid PE file
        self.__warnings.append(
            'Invalid resources directory. Can\'t parse ' +
            'directory data at RVA: 0x%x' % rva)
        return None

    dir_entries = []

    # Advance the RVA to the position immediately following the directory
    # table header and pointing to the first entry in the table
    #
    rva += resource_dir.sizeof()

    number_of_entries = (
        resource_dir.NumberOfNamedEntries +
        resource_dir.NumberOfIdEntries )

    # Set a hard limit on the maximum reasonable number of entries
    MAX_ALLOWED_ENTRIES = 4096
    if number_of_entries > MAX_ALLOWED_ENTRIES:
        self.__warnings.append(
            'Error parsing the resources directory, '
            'The directory contains %d entries (>%s)' %
            (number_of_entries, MAX_ALLOWED_ENTRIES) )
        return None

    strings_to_postprocess = list()

    for idx in xrange(number_of_entries):
        res = self.parse_resource_entry(rva)
        if res is None:
            self.__warnings.append(
                'Error parsing the resources directory, '
                'Entry %d is invalid, RVA = 0x%x. ' %
                (idx, rva) )
            break

        entry_name = None
        entry_id = None

        # If all named entries have been processed, only Id ones
        # remain
        if idx >= resource_dir.NumberOfNamedEntries:
            entry_id = res.Name
        else:
            ustr_offset = base_rva+res.NameOffset
            try:
                #entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
                # Name strings are resolved lazily in the post-processing
                # loop at the end of this method.
                entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
                strings_to_postprocess.append(entry_name)
            except PEFormatError, excp:
                self.__warnings.append(
                    'Error parsing the resources directory, '
                    'attempting to read entry name. '
                    'Can\'t read unicode string at offset 0x%x' %
                    (ustr_offset) )

        if res.DataIsDirectory:
            # OC Patch:
            #
            # One trick malware can do is to recursively reference
            # the next directory. This causes hilarity to ensue when
            # trying to parse everything correctly.
            # If the original RVA given to this function is equal to
            # the next one to parse, we assume that it's a trick.
            # Instead of raising a PEFormatError this would skip some
            # reasonable data so we just break.
            #
            # 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
            if (base_rva + res.OffsetToDirectory) in dirs:
                break
            else:
                entry_directory = self.parse_resources_directory(
                    base_rva+res.OffsetToDirectory,
                    size-(rva-base_rva), # size
                    base_rva=base_rva, level = level+1,
                    dirs=dirs + [base_rva + res.OffsetToDirectory])

            if not entry_directory:
                break

            # Ange Albertini's code to process resources' strings
            #
            strings = None
            if entry_id == RESOURCE_TYPE['RT_STRING']:
                strings = dict()
                for resource_id in entry_directory.entries:
                    if hasattr(resource_id, 'directory'):
                        for resource_lang in resource_id.directory.entries:
                            resource_strings = dict()
                            string_entry_rva = resource_lang.data.struct.OffsetToData
                            string_entry_size = resource_lang.data.struct.Size
                            string_entry_id = resource_id.id
                            if resource_lang.data.struct.Size is None or resource_id.id is None:
                                continue
                            string_entry_data = self.get_data(string_entry_rva, string_entry_size)
                            # Each RT_STRING block holds 16 strings; the
                            # block id determines the base string id.
                            parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings )
                            strings.update(resource_strings)
                        resource_id.directory.strings = resource_strings

            dir_entries.append(
                ResourceDirEntryData(
                    struct = res,
                    name = entry_name,
                    id = entry_id,
                    directory = entry_directory))
        else:
            struct = self.parse_resource_data_entry(
                base_rva + res.OffsetToDirectory)
            if struct:
                # Leaf node: the entry's Name packs the language id in
                # the low 10 bits and the sublanguage above them.
                entry_data = ResourceDataEntryData(
                    struct = struct,
                    lang = res.Name & 0x3ff,
                    sublang = res.Name >> 10 )
                dir_entries.append(
                    ResourceDirEntryData(
                        struct = res,
                        name = entry_name,
                        id = entry_id,
                        data = entry_data))
            else:
                break

        # Check if this entry contains version information
        #
        if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
            if len(dir_entries)>0:
                last_entry = dir_entries[-1]

            rt_version_struct = None
            try:
                rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
            except:
                # Maybe a malformed directory structure...?
                # Lets ignore it
                pass

            if rt_version_struct is not None:
                self.parse_version_information(rt_version_struct)

        rva += res.sizeof()

    # Resolve the lazily-collected name strings.
    # NOTE(review): string_rvas is computed and sorted but never used
    # afterwards -- looks like leftover code; confirm.
    string_rvas = [s.get_rva() for s in strings_to_postprocess]
    string_rvas.sort()

    for idx, s in enumerate(strings_to_postprocess):
        s.render_pascal_16()

    resource_directory_data = ResourceDirData(
        struct = resource_dir,
        entries = dir_entries)

    return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof() )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing a resource directory data entry, ' +
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError, excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource
def parse_version_information(self, version_struct):
"""Parse version information structure.
The date will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
# The the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
new_stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
# check if the entry is crafted in a way that would lead to an infinite
# loop and break if so
#
if new_stringtable_offset == stringtable_offset:
break
stringtable_offset = new_stringtable_offset
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
if isinstance(word1, (int, long)) and isinstance(word1, (int, long)):
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size):
    """Parse the export directory.

    Given the RVA of the export directory, it will process all
    its entries.

    The exports will be made available through a list "exports"
    containing a tuple with the following elements:

        (ordinal, symbol_address, symbol_name)

    And also through a dictionary "exports_by_ordinal" whose keys
    will be the ordinals and the values tuples of the form:

        (symbol_address, symbol_name)

    The symbol addresses are relative, not absolute.
    """
    try:
        export_dir =  self.__unpack_data__(
            self.__IMAGE_EXPORT_DIRECTORY_format__,
            self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ),
            file_offset = self.get_offset_from_rva(rva) )
    except PEFormatError:
        self.__warnings.append(
            'Error parsing export directory at RVA: 0x%x' % ( rva ) )
        return
    if not export_dir:
        return

    # We keep track of the bytes left in the file and use it to set an upper
    # bound in the number of items that can be read from the different
    # arrays
    #
    def length_until_eof(rva):
        return len(self.__data__) - self.get_offset_from_rva(rva)

    try:
        address_of_names = self.get_data(
            export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames*4))
        address_of_name_ordinals = self.get_data(
            export_dir.AddressOfNameOrdinals, min( length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames*4) )
        address_of_functions = self.get_data(
            export_dir.AddressOfFunctions, min( length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions*4) )
    except PEFormatError:
        self.__warnings.append(
            'Error parsing export directory at RVA: 0x%x' % ( rva ) )
        return

    exports = []

    # First pass: named exports. Each name entry has a parallel ordinal
    # entry that indexes the address-of-functions array.
    max_failed_entries_before_giving_up = 10
    for i in xrange( min( export_dir.NumberOfNames, length_until_eof(export_dir.AddressOfNames)/4) ):
        symbol_name_address = self.get_dword_from_data(address_of_names, i)
        symbol_name = self.get_string_at_rva( symbol_name_address )
        try:
            symbol_name_offset = self.get_offset_from_rva( symbol_name_address )
        except PEFormatError:
            # Tolerate a handful of bad entries before giving up entirely.
            max_failed_entries_before_giving_up -= 1
            if max_failed_entries_before_giving_up <= 0:
                break
            continue

        symbol_ordinal = self.get_word_from_data(
            address_of_name_ordinals, i)
        if symbol_ordinal*4 < len(address_of_functions):
            symbol_address = self.get_dword_from_data(
                address_of_functions, symbol_ordinal)
        else:
            # Corrupt? a bad pointer... we assume it's all
            # useless, no exports
            return None
        if symbol_address is None or symbol_address == 0:
            continue

        # If the function's RVA points within the export directory
        # it will point to a string with the forwarded symbol's string
        # instead of pointing to the function start address.
        if symbol_address >= rva and symbol_address < rva+size:
            forwarder_str = self.get_string_at_rva(symbol_address)
            try:
                forwarder_offset = self.get_offset_from_rva( symbol_address )
            except PEFormatError:
                continue
        else:
            forwarder_str = None
            forwarder_offset = None

        exports.append(
            ExportData(
                pe = self,
                ordinal = export_dir.Base+symbol_ordinal,
                ordinal_offset = self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2*i ),
                address = symbol_address,
                address_offset = self.get_offset_from_rva( export_dir.AddressOfFunctions + 4*symbol_ordinal ),
                name = symbol_name,
                name_offset = symbol_name_offset,
                forwarder = forwarder_str,
                forwarder_offset = forwarder_offset ))

    # Second pass: exports by ordinal only (no corresponding name entry).
    ordinals = [exp.ordinal for exp in exports]

    max_failed_entries_before_giving_up = 10
    for idx in xrange( min(export_dir.NumberOfFunctions, length_until_eof(export_dir.AddressOfFunctions)/4) ):
        if not idx+export_dir.Base in ordinals:
            try:
                symbol_address = self.get_dword_from_data(
                    address_of_functions, idx)
            except PEFormatError:
                symbol_address = None
            if symbol_address is None:
                max_failed_entries_before_giving_up -= 1
                if max_failed_entries_before_giving_up <= 0:
                    break

            if symbol_address == 0:
                continue

            #
            # Checking for forwarder again.
            #
            if symbol_address >= rva and symbol_address < rva+size:
                forwarder_str = self.get_string_at_rva(symbol_address)
            else:
                forwarder_str = None

            exports.append(
                ExportData(
                    ordinal = export_dir.Base+idx,
                    address = symbol_address,
                    name = None,
                    forwarder = forwarder_str))

    return ExportDirData(
            struct = export_dir,
            symbols = exports)
def dword_align(self, offset, base):
    """Round *offset* (relative to *base*) up to the next DWORD (4-byte) boundary.

    Returns the aligned offset, expressed relative to the dword-aligned base.
    """
    # 0xfffffffc clears the two low bits (align down to a multiple of 4);
    # adding 3 first turns the combination into an align-up.
    # The Python 2-only 'L' long suffix was dropped: plain int literals
    # auto-promote on Python 2 and the suffix is a syntax error on Python 3.
    return ((offset + base + 3) & 0xfffffffc) - (base & 0xfffffffc)
def parse_delay_import_directory(self, rva, size):
    """Walk and parse the delay import directory.

    Iterates IMAGE_DELAY_IMPORT_DESCRIPTOR entries starting at *rva* until
    an all-zero terminator (or unparseable data) is found. Returns a list
    of ImportDescData instances; parse problems are appended to the
    warnings list instead of being raised.
    """
    import_descs = []
    while True:
        try:
            # If the RVA is invalid all would blow up. Some PEs seem to be
            # specially nasty and have an invalid RVA.
            data = self.get_data( rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof() )
        except PEFormatError, e:
            self.__warnings.append(
                'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
            break

        import_desc = self.__unpack_data__(
            self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
            data, file_offset = self.get_offset_from_rva(rva) )

        # If the structure is all zeroes, we reached the end of the list
        if not import_desc or import_desc.all_zeroes():
            break

        # Advance to the next descriptor in the array.
        rva += import_desc.sizeof()

        try:
            # pINT/pIAT are the delay-load equivalents of
            # OriginalFirstThunk/FirstThunk; there is no forwarder chain.
            import_data = self.parse_imports(
                import_desc.pINT,
                import_desc.pIAT,
                None)
        except PEFormatError, e:
            self.__warnings.append(
                'Error parsing the Delay import directory. ' +
                'Invalid import data at RVA: 0x%x' % ( rva ) )
            break

        # Descriptors with no resolvable imports are skipped silently.
        if not import_data:
            continue

        dll = self.get_string_at_rva(import_desc.szName)
        if not is_valid_dos_filename(dll):
            dll = '*invalid*'

        if dll:
            import_descs.append(
                ImportDescData(
                    struct = import_desc,
                    imports = import_data,
                    dll = dll))

    return import_descs
def parse_import_directory(self, rva, size):
    """Walk and parse the import directory.

    Iterates IMAGE_IMPORT_DESCRIPTOR entries starting at *rva* until an
    all-zero terminator (or bad data) is found and returns a list of
    ImportDescData instances. Also emits a heuristic warning when the
    import table looks like that of a packed executable.
    """
    import_descs = []
    while True:
        try:
            # If the RVA is invalid all would blow up. Some EXEs seem to be
            # specially nasty and have an invalid RVA.
            data = self.get_data(rva, Structure(self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof() )
        except PEFormatError, e:
            self.__warnings.append(
                'Error parsing the import directory at RVA: 0x%x' % ( rva ) )
            break

        import_desc = self.__unpack_data__(
            self.__IMAGE_IMPORT_DESCRIPTOR_format__,
            data, file_offset = self.get_offset_from_rva(rva) )

        # If the structure is all zeroes, we reached the end of the list
        if not import_desc or import_desc.all_zeroes():
            break

        # Advance to the next descriptor in the array.
        rva += import_desc.sizeof()

        try:
            import_data = self.parse_imports(
                import_desc.OriginalFirstThunk,
                import_desc.FirstThunk,
                import_desc.ForwarderChain)
        except PEFormatError, excp:
            self.__warnings.append(
                'Error parsing the import directory. ' +
                'Invalid Import data at RVA: 0x%x (%s)' % ( rva, str(excp) ) )
            break
            #raise excp

        # Descriptors with no resolvable imports are skipped silently.
        if not import_data:
            continue

        dll = self.get_string_at_rva(import_desc.Name)
        if not is_valid_dos_filename(dll):
            dll = '*invalid*'

        if dll:
            import_descs.append(
                ImportDescData(
                    struct = import_desc,
                    imports = import_data,
                    dll = dll))

    # Heuristic: a tiny import table consisting (at least) of
    # LoadLibrary* and GetProcAddress* is typical of a packer stub.
    suspicious_imports = set([ 'LoadLibrary', 'GetProcAddress' ])
    suspicious_imports_count = 0
    total_symbols = 0
    for imp_dll in import_descs:
        for symbol in imp_dll.imports:
            for suspicious_symbol in suspicious_imports:
                if symbol and symbol.name and symbol.name.startswith( suspicious_symbol ):
                    suspicious_imports_count += 1
                    break
            total_symbols += 1
    if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
        self.__warnings.append(
            'Imported symbols contain entries typical of packed executables.' )

    return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
    """Parse the imported symbols.

    It will fill a list, which will be available as the dictionary
    attribute "imports". Its keys will be the DLL names and the values
    all the symbols imported from that object.

    Reads the Import Lookup Table (ILT) and Import Address Table (IAT)
    and returns a list of ImportData instances, or None when no table
    could be read. Raises PEFormatError when both tables are broken or
    when an entry carries neither an ordinal nor a name.
    """
    imported_symbols = []

    # The following has been commented as a PE does not
    # need to have the import data necessarily witin
    # a section, it can keep it in gaps between sections
    # or overlapping other data.
    #
    #imports_section = self.get_section_by_rva(first_thunk)
    #if not imports_section:
    #    raise PEFormatError, 'Invalid/corrupt imports.'

    # Import Lookup Table. Contains ordinals or pointers to strings.
    ilt = self.get_import_table(original_first_thunk)
    # Import Address Table. May have identical content to ILT if
    # PE file is not bounded, Will contain the address of the
    # imported symbols once the binary is loaded or if it is already
    # bound.
    iat = self.get_import_table(first_thunk)

    # OC Patch:
    # Would crash if IAT or ILT had None type
    if (not iat or len(iat)==0) and (not ilt or len(ilt)==0):
        raise PEFormatError(
            'Invalid Import Table information. ' +
            'Both ILT and IAT appear to be broken.')

    # Prefer the ILT; fall back to the IAT when the ILT is empty.
    table = None
    if ilt:
        table = ilt
    elif iat:
        table = iat
    else:
        return None

    # NOTE(review): ordinal_flag is only assigned for the two known
    # PE_TYPE magics; an unexpected PE_TYPE would leave it unbound and
    # raise NameError below — confirm PE_TYPE is validated upstream.
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        ordinal_flag = IMAGE_ORDINAL_FLAG
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        ordinal_flag = IMAGE_ORDINAL_FLAG64

    for idx in xrange(len(table)):

        imp_ord = None
        imp_hint = None
        imp_name = None
        name_offset = None
        hint_name_table_rva = None

        if table[idx].AddressOfData:
            # If imported by ordinal, we will append the ordinal number
            #
            if table[idx].AddressOfData & ordinal_flag:
                import_by_ordinal = True
                imp_ord = table[idx].AddressOfData & 0xffff
                imp_name = None
                name_offset = None
            else:
                import_by_ordinal = False
                try:
                    # Mask off the ordinal flag bit to get the RVA of the
                    # hint/name table entry: a 2-byte hint then the name.
                    hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
                    data = self.get_data(hint_name_table_rva, 2)
                    # Get the Hint
                    imp_hint = self.get_word_from_data(data, 0)
                    imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
                    if not is_valid_function_name(imp_name):
                        imp_name = '*invalid*'
                    name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
                except PEFormatError, e:
                    pass

            # by nriva: we want the ThunkRVA and ThunkOffset
            thunk_offset = table[idx].get_file_offset()
            thunk_rva = self.get_rva_from_offset(thunk_offset)

        # NOTE(review): the IAT stride is hard-coded to 4 bytes even for
        # PE32+ images whose thunks are 8 bytes wide — confirm intended.
        imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * 4

        struct_iat = None
        try:
            # When the ILT and IAT entries differ the binary is bound and
            # the IAT holds the pre-resolved address.
            if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
                imp_bound = iat[idx].AddressOfData
                struct_iat = iat[idx]
            else:
                imp_bound = None
        except IndexError:
            imp_bound = None

        # The file with hashes:
        #
        # MD5: bfe97192e8107d52dd7b4010d12b2924
        # SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
        #
        # has an invalid table built in a way that it's parseable but contains invalid
        # entries that lead pefile to take extremely long amounts of time to
        # parse. It also leads to extreme memory consumption.
        # To prevent similar cases, if invalid entries are found in the middle of a
        # table the parsing will be aborted
        #
        if imp_ord == None and imp_name == None:
            raise PEFormatError( 'Invalid entries in the Import Table. Aborting parsing.' )

        if imp_name != '' and (imp_ord or imp_name):
            imported_symbols.append(
                ImportData(
                    pe = self,
                    struct_table = table[idx],
                    struct_iat = struct_iat, # for bound imports if any
                    import_by_ordinal = import_by_ordinal,
                    ordinal = imp_ord,
                    ordinal_offset = table[idx].get_file_offset(),
                    hint = imp_hint,
                    name = imp_name,
                    name_offset = name_offset,
                    bound = imp_bound,
                    address = imp_address,
                    hint_name_table_rva = hint_name_table_rva,
                    thunk_offset = thunk_offset,
                    thunk_rva = thunk_rva ))

    return imported_symbols
def get_import_table(self, rva):
    """Read a thunk table (ILT or IAT) starting at *rva*.

    Returns a list of unpacked IMAGE_THUNK_DATA structures, an empty
    list when an entry looks corrupt, or None when the data cannot be
    read at all. A zero *rva* yields an empty list.
    """
    table = []

    # We need the ordinal flag for a simple heuristic
    # we're implementing within the loop
    #
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        ordinal_flag = IMAGE_ORDINAL_FLAG
        format = self.__IMAGE_THUNK_DATA_format__
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        ordinal_flag = IMAGE_ORDINAL_FLAG64
        format = self.__IMAGE_THUNK_DATA64_format__

    # Walk entries until the all-zero terminator.
    while True and rva:
        try:
            data = self.get_data( rva, Structure(format).sizeof() )
        except PEFormatError, e:
            self.__warnings.append(
                'Error parsing the import table. ' +
                'Invalid data at RVA: 0x%x' % ( rva ) )
            return None

        thunk_data = self.__unpack_data__(
            format, data, file_offset=self.get_offset_from_rva(rva) )

        if thunk_data and thunk_data.AddressOfData:
            # If the entry looks like could be an ordinal...
            if thunk_data.AddressOfData & ordinal_flag:
                # But its value is beyond 2^16, we will assume it's a
                # corrupted and ignore it altogether
                if thunk_data.AddressOfData & 0x7fffffff > 0xffff:
                    return []

        if not thunk_data or thunk_data.all_zeroes():
            break

        rva += thunk_data.sizeof()
        table.append(thunk_data)

    return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
    """Returns the data corresponding to the memory layout of the PE file.

    The data includes the PE header and the sections loaded at offsets
    corresponding to their relative virtual addresses. (the VirtualAddress
    section header member).
    Any offset in this data corresponds to the absolute memory address
    ImageBase+offset.

    The optional argument 'max_virtual_address' provides with means of limiting
    which section are processed.
    Any section with their VirtualAddress beyond this value will be skipped.
    Normally, sections with values beyond this range are just there to confuse
    tools. It's a common trick to see in packed executables.

    If the 'ImageBase' optional argument is supplied, the file's relocations
    will be applied to the image by calling the 'relocate_image()' method. Beware
    that the relocation information is applied permanently.
    """

    # Rebase if requested
    #
    if ImageBase is not None:

        # Keep a copy of the image's data before modifying it by rebasing it
        #
        original_data = self.__data__

        self.relocate_image(ImageBase)

    # Collect all sections in one code block
    # Start from a copy of the raw file data; the sections are then laid
    # out over it at their (adjusted) virtual addresses.
    #mapped_data = self.header
    mapped_data = ''+ self.__data__[:]
    for section in self.sections:

        # Miscellaneous integrity tests.
        # Some packer will set these to bogus values to
        # make tools go nuts.
        #
        if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
            continue

        if section.SizeOfRawData > len(self.__data__):
            continue

        if adjust_FileAlignment( section.PointerToRawData,
            self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
            continue

        VirtualAddress_adj = adjust_SectionAlignment( section.VirtualAddress,
            self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment )

        if VirtualAddress_adj >= max_virtual_address:
            continue

        # Pad up to (or truncate down to) the section's virtual address
        # before appending its data.
        padding_length = VirtualAddress_adj - len(mapped_data)

        if padding_length>0:
            mapped_data += '\0'*padding_length
        elif padding_length<0:
            mapped_data = mapped_data[:padding_length]

        mapped_data += section.get_data()

    # If the image was rebased, restore it to its original form
    #
    if ImageBase is not None:
        self.__data__ = original_data

    return mapped_data
def get_resources_strings(self):
    """Collect every string stored in the PE's resource directory.

    Walks DIRECTORY_ENTRY_RESOURCE (when present) down to the
    resource-id level and gathers the strings attached there.
    Returns an empty list when there is no resource directory or
    no strings at all.
    """
    found = list()
    if not hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        return found
    for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
        if not hasattr(res_type, 'directory'):
            continue
        for res_id in res_type.directory.entries:
            if not hasattr(res_id, 'directory'):
                continue
            strings = getattr(res_id.directory, 'strings', None)
            if strings:
                found.extend(strings.values())
    return found
def get_data(self, rva=0, length=None):
    """Get data regardless of the section where it lies on.

    Given a RVA and the size of the chunk to retrieve, this method
    will find the section where the data lies and return the data.
    When no section contains the RVA the header and then the raw file
    data are tried. Raises PEFormatError when the RVA is beyond both.
    """
    s = self.get_section_by_rva(rva)
    if length:
        end = rva + length
    else:
        # No length given: return everything from the RVA onwards.
        end = None
    if not s:
        if rva < len(self.header):
            return self.header[rva:end]

        # Before we give up we check whether the file might
        # contain the data anyway. There are cases of PE files
        # without sections that rely on windows loading the first
        # 8291 bytes into memory and assume the data will be
        # there
        # A functional file with these characteristics is:
        # MD5: 0008892cdfbc3bda5ce047c565e52295
        # SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9
        #
        if rva < len(self.__data__):
            return self.__data__[rva:end]

        # Python 2-only "raise E, msg" statement replaced with the call
        # form, which is valid on both Python 2 and Python 3.
        raise PEFormatError('data at RVA can\'t be fetched. Corrupt header?')

    return s.get_data(rva, length)
def get_rva_from_offset(self, offset):
    """Get the RVA corresponding to this file offset. """
    s = self.get_section_by_offset(offset)
    if not s:
        if self.sections:
            # NOTE(review): the list-comprehension variable 's' rebinds
            # the outer 's' under Python 2 scoping, so when execution
            # falls through to the final return below (offset >=
            # lowest_rva), 's' is the *last* section rather than None.
            # This appears to be relied upon; confirm before porting.
            lowest_rva = min( [ adjust_SectionAlignment( s.VirtualAddress,
                self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
            if offset < lowest_rva:
                # We will assume that the offset lies within the headers, or
                # at least points before where the earliest section starts
                # and we will simply return the offset as the RVA
                #
                # The case illustrating this behavior can be found at:
                # http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
                # where the import table is not contained by any section
                # hence the RVA needs to be resolved to a raw offset
                return offset
        else:
            # No sections at all: offsets and RVAs coincide.
            return offset
        #raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
    return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
    """Get the file offset corresponding to this RVA.

    Given a RVA , this method will find the section where the
    data lies and return the offset within the file.
    Raises PEFormatError when the RVA is beyond the raw file data.
    """
    s = self.get_section_by_rva(rva)
    if not s:
        # If not found within a section assume it might
        # point to overlay data or otherwise data present
        # but not contained in any section. In those
        # cases the RVA should equal the offset
        if rva < len(self.__data__):
            return rva

        # Python 2-only "raise E, msg" statement replaced with the call
        # form, which is valid on both Python 2 and Python 3.
        raise PEFormatError('data at RVA can\'t be fetched. Corrupt header?')

    return s.get_offset_from_rva(rva)
def get_string_at_rva(self, rva):
    """Get an ASCII string located at the given address.

    Reads up to MAX_STRING_LENGTH bytes, either from the section that
    contains the RVA or straight from the raw file data when no section
    covers it.
    """
    section = self.get_section_by_rva(rva)
    if not section:
        raw = self.__data__[rva:rva+MAX_STRING_LENGTH]
        return self.get_string_from_data(0, raw)
    data = section.get_data(rva, length=MAX_STRING_LENGTH)
    return self.get_string_from_data(0, data)
def get_string_from_data(self, offset, data):
    """Get an ASCII string from within the data.

    Accumulates characters starting at *offset* until a NUL byte or the
    end of *data*; returns '' when the offset is already out of range.
    """
    # OC Patch
    chars = []
    while True:
        try:
            ch = data[offset]
        except IndexError:
            break
        if not ord(ch):
            break
        chars.append(ch)
        offset += 1
    return ''.join(chars)
def get_string_u_at_rva(self, rva, max_length = 2**16):
    """Get an Unicode string located at the given address.

    Reads UTF-16LE code units until a NUL, a read error, or
    *max_length* characters. Returns None when the starting RVA is
    unreadable. (Uses Python 2-only u'' literals and unichr().)
    """
    try:
        # If the RVA is invalid all would blow up. Some EXEs seem to be
        # specially nasty and have an invalid RVA.
        data = self.get_data(rva, 2)
    except PEFormatError, e:
        return None

    #length = struct.unpack('<H', data)[0]

    s = u''
    for idx in xrange(max_length):
        try:
            # Each code unit is a 2-byte little-endian word.
            uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
        except struct.error:
            break

        if unichr(uchr) == u'\0':
            break
        s += unichr(uchr)

    return s
def get_section_by_offset(self, offset):
    """Get the section containing the given file offset.

    Returns the first matching section, or None when no section
    contains the offset.
    """
    for section in self.sections:
        if section.contains_offset(offset):
            return section
    return None
def get_section_by_rva(self, rva):
    """Get the section containing the given address.

    Returns the first matching section, or None when no section
    contains the RVA.
    """
    for section in self.sections:
        if section.contains_rva(rva):
            return section
    return None
def __str__(self):
    """str() of a PE instance renders the full human-readable dump."""
    return self.dump_info()
def print_info(self):
    """Print all the PE header information in a human readable form.

    Uses the function-call form of print, which behaves identically on
    Python 2 (single argument) and Python 3, replacing the Python 2-only
    print statement.
    """
    print(self.dump_info())
def dump_info(self, dump=None):
    """Dump all the PE header information into human readable string.

    Appends every parsed structure (headers, sections, directories,
    version info, imports/exports, resources, TLS, load config, debug
    and relocation data) to *dump* (a fresh Dump when None) and returns
    the accumulated text.
    """
    if dump is None:
        dump = Dump()

    # Any warnings accumulated while parsing come first.
    warnings = self.get_warnings()
    if warnings:
        dump.add_header('Parsing Warnings')
        for warning in warnings:
            dump.add_line(warning)
        dump.add_newline()

    # Fixed headers.
    dump.add_header('DOS_HEADER')
    dump.add_lines(self.DOS_HEADER.dump())
    dump.add_newline()

    dump.add_header('NT_HEADERS')
    dump.add_lines(self.NT_HEADERS.dump())
    dump.add_newline()

    dump.add_header('FILE_HEADER')
    dump.add_lines(self.FILE_HEADER.dump())

    # Decode the Characteristics bitmask into flag names.
    image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')

    dump.add('Flags: ')
    flags = []
    for flag in image_flags:
        if getattr(self.FILE_HEADER, flag[0]):
            flags.append(flag[0])
    dump.add_line(', '.join(flags))
    dump.add_newline()

    if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
        dump.add_header('OPTIONAL_HEADER')
        dump.add_lines(self.OPTIONAL_HEADER.dump())

    dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')

    dump.add('DllCharacteristics: ')
    flags = []
    for flag in dll_characteristics_flags:
        if getattr(self.OPTIONAL_HEADER, flag[0]):
            flags.append(flag[0])
    dump.add_line(', '.join(flags))
    dump.add_newline()

    # Section table, with per-section flags, entropy and hashes.
    dump.add_header('PE Sections')

    section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')

    for section in self.sections:
        dump.add_lines(section.dump())
        dump.add('Flags: ')
        flags = []
        for flag in section_flags:
            if getattr(section, flag[0]):
                flags.append(flag[0])
        dump.add_line(', '.join(flags))
        dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() )
        # Hash lines are emitted only for the digest modules that
        # imported successfully.
        if md5 is not None:
            dump.add_line('MD5 hash: %s' % section.get_hash_md5() )
        if sha1 is not None:
            dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
        if sha256 is not None:
            dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
        if sha512 is not None:
            dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
        dump.add_newline()

    if (hasattr(self, 'OPTIONAL_HEADER') and
        hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):

        dump.add_header('Directories')
        for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
            directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
            dump.add_lines(directory.dump())
        dump.add_newline()

    # Local helpers to render arbitrary bytes safely in the dump.
    def convert_char(char):
        if char in string.ascii_letters or char in string.digits or char in string.punctuation or char in string.whitespace:
            return char
        else:
            return r'\x%02x' % ord(char)

    def convert_to_printable(s):
        return ''.join([convert_char(c) for c in s])

    # Version information resource, when parsed.
    if hasattr(self, 'VS_VERSIONINFO'):
        dump.add_header('Version Information')
        dump.add_lines(self.VS_VERSIONINFO.dump())
        dump.add_newline()

        if hasattr(self, 'VS_FIXEDFILEINFO'):
            dump.add_lines(self.VS_FIXEDFILEINFO.dump())
            dump.add_newline()

        if hasattr(self, 'FileInfo'):
            for entry in self.FileInfo:
                dump.add_lines(entry.dump())
                dump.add_newline()

                if hasattr(entry, 'StringTable'):
                    for st_entry in entry.StringTable:
                        [dump.add_line(' '+line) for line in st_entry.dump()]
                        dump.add_line(' LangID: '+st_entry.LangID)
                        dump.add_newline()
                        for str_entry in st_entry.entries.items():
                            dump.add_line( ' ' +
                                convert_to_printable(str_entry[0]) + ': ' +
                                convert_to_printable(str_entry[1]) )
                    dump.add_newline()

                elif hasattr(entry, 'Var'):
                    for var_entry in entry.Var:
                        if hasattr(var_entry, 'entry'):
                            [dump.add_line(' '+line) for line in var_entry.dump()]
                            dump.add_line(
                                ' ' +
                                convert_to_printable(var_entry.entry.keys()[0]) +
                                ': ' + var_entry.entry.values()[0])
                    dump.add_newline()

    # Export directory.
    if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
        dump.add_header('Exported symbols')
        dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
        dump.add_newline()
        dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
        for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
            if export.address is not None:
                dump.add('%-10d 0x%08Xh %s' % (
                    export.ordinal, export.address, export.name))
                if export.forwarder:
                    dump.add_line(' forwarder: %s' % export.forwarder)
                else:
                    dump.add_newline()
        dump.add_newline()

    # Import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        dump.add_header('Imported symbols')
        for module in self.DIRECTORY_ENTRY_IMPORT:
            dump.add_lines(module.struct.dump())
            dump.add_newline()
            for symbol in module.imports:
                if symbol.import_by_ordinal is True:
                    dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
                        module.dll, str(symbol.ordinal)))
                else:
                    dump.add('%s.%s Hint[%s]' % (
                        module.dll, symbol.name, str(symbol.hint)))
                if symbol.bound:
                    dump.add_line(' Bound: 0x%08X' % (symbol.bound))
                else:
                    dump.add_newline()
            dump.add_newline()

    # Bound import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
        dump.add_header('Bound imports')
        for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
            dump.add_lines(bound_imp_desc.struct.dump())
            dump.add_line('DLL: %s' % bound_imp_desc.name)
            dump.add_newline()
            for bound_imp_ref in bound_imp_desc.entries:
                dump.add_lines(bound_imp_ref.struct.dump(), 4)
                dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
                dump.add_newline()

    # Delay-load import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
        dump.add_header('Delay Imported symbols')
        for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
            dump.add_lines(module.struct.dump())
            dump.add_newline()
            for symbol in module.imports:
                if symbol.import_by_ordinal is True:
                    dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
                        module.dll, str(symbol.ordinal)))
                else:
                    dump.add('%s.%s Hint[%s]' % (
                        module.dll, symbol.name, str(symbol.hint)))
                if symbol.bound:
                    dump.add_line(' Bound: 0x%08X' % (symbol.bound))
                else:
                    dump.add_newline()
            dump.add_newline()

    # Resource tree: type -> id -> language, with optional string tables.
    if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        dump.add_header('Resource directory')

        dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())

        for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:

            if resource_type.name is not None:
                dump.add_line('Name: [%s]' % resource_type.name, 2)
            else:
                dump.add_line('Id: [0x%X] (%s)' % (
                    resource_type.struct.Id, RESOURCE_TYPE.get(
                        resource_type.struct.Id, '-')),
                    2)

            dump.add_lines(resource_type.struct.dump(), 2)

            if hasattr(resource_type, 'directory'):

                dump.add_lines(resource_type.directory.struct.dump(), 4)

                for resource_id in resource_type.directory.entries:

                    if resource_id.name is not None:
                        dump.add_line('Name: [%s]' % resource_id.name, 6)
                    else:
                        dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)

                    dump.add_lines(resource_id.struct.dump(), 6)

                    if hasattr(resource_id, 'directory'):
                        dump.add_lines(resource_id.directory.struct.dump(), 8)

                        for resource_lang in resource_id.directory.entries:
                            if hasattr(resource_lang, 'data'):
                                dump.add_line('\\--- LANG [%d,%d][%s,%s]' % (
                                    resource_lang.data.lang,
                                    resource_lang.data.sublang,
                                    LANG.get(resource_lang.data.lang, '*unknown*'),
                                    get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang ) ), 8)
                                dump.add_lines(resource_lang.struct.dump(), 10)
                                dump.add_lines(resource_lang.data.struct.dump(), 12)
                        if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
                            dump.add_line( '[STRINGS]' , 10 )
                            for idx, res_string in resource_id.directory.strings.items():
                                dump.add_line( '%6d: %s' % (idx, convert_to_printable(res_string) ), 12 )

            dump.add_newline()

        dump.add_newline()

    # TLS directory.
    if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
         self.DIRECTORY_ENTRY_TLS and
         self.DIRECTORY_ENTRY_TLS.struct ):

        dump.add_header('TLS')
        dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
        dump.add_newline()

    # Load configuration directory.
    if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
         self.DIRECTORY_ENTRY_LOAD_CONFIG and
         self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):

        dump.add_header('LOAD_CONFIG')
        dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
        dump.add_newline()

    # Debug directory entries, with friendly type names when known.
    if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
        dump.add_header('Debug information')
        for dbg in self.DIRECTORY_ENTRY_DEBUG:
            dump.add_lines(dbg.struct.dump())
            try:
                dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
            except KeyError:
                dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
            dump.add_newline()

    # Base relocation blocks and their entries.
    if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
        dump.add_header('Base relocations')
        for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
            dump.add_lines(base_reloc.struct.dump())
            for reloc in base_reloc.entries:
                try:
                    dump.add_line('%08Xh %s' % (
                        reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
                except KeyError:
                    dump.add_line('0x%08X 0x%x(Unknown)' % (
                        reloc.rva, reloc.type), 4)
            dump.add_newline()

    return dump.get_text()
# OC Patch
def get_physical_by_rva(self, rva):
    """Gets the physical address in the PE file from an RVA value.

    Returns None when the RVA cannot be mapped to a file offset.
    """
    try:
        offset = self.get_offset_from_rva(rva)
    except Exception:
        return None
    return offset
##
# Double-Word get/set
##
def get_data_from_dword(self, dword):
    """Return a four byte string representing the double word value. (little endian)."""
    # Truncate to 32 bits before packing so oversized values don't raise.
    return struct.pack('<L', 0xffffffff & dword)
def get_dword_from_data(self, data, offset):
    """Convert four bytes of data to a double word (little endian)

    'offset' indexes a dword array: element N starts at byte N*4.
    Returns None if the data can't be turned into a double word.
    """
    if len(data) < (offset + 1) * 4:
        return None
    start = offset * 4
    return struct.unpack('<I', data[start:start + 4])[0]
def get_dword_at_rva(self, rva):
    """Return the double word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be mapped
    to a file offset.
    """
    try:
        raw = self.get_data(rva)[:4]
    except PEFormatError:
        return None
    return self.get_dword_from_data(raw, 0)
def get_dword_from_offset(self, offset):
    """Return the double word value at the given file offset. (little endian)"""
    end = offset + 4
    if end > len(self.__data__):
        return None
    return self.get_dword_from_data(self.__data__[offset:end], 0)
def set_dword_at_rva(self, rva, dword):
    """Set the double word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_rva(rva, packed)
def set_dword_at_offset(self, offset, dword):
    """Set the double word value at the given file offset."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_offset(offset, packed)
##
# Word get/set
##
def get_data_from_word(self, word):
    """Return a two byte string representing the word value. (little endian)."""
    packed = struct.pack('<H', word)
    return packed
def get_word_from_data(self, data, offset):
    """Convert two bytes of data to a word (little endian)

    'offset' indexes a word array: element N starts at byte N*2.
    Returns None if the data can't be turned into a word.
    """
    if len(data) < (offset + 1) * 2:
        return None
    start = offset * 2
    return struct.unpack('<H', data[start:start + 2])[0]
def get_word_at_rva(self, rva):
    """Return the word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be mapped
    to a file offset.
    """
    try:
        raw = self.get_data(rva)[:2]
    except PEFormatError:
        return None
    return self.get_word_from_data(raw, 0)
def get_word_from_offset(self, offset):
    """Return the word value at the given file offset. (little endian)"""
    end = offset + 2
    if end > len(self.__data__):
        return None
    return self.get_word_from_data(self.__data__[offset:end], 0)
def set_word_at_rva(self, rva, word):
    """Set the word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_rva(rva, packed)
def set_word_at_offset(self, offset, word):
    """Set the word value at the given file offset."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_offset(offset, packed)
##
# Quad-Word get/set
##
def get_data_from_qword(self, word):
    """Return an eight byte string representing the quad-word value. (little endian)."""
    packed = struct.pack('<Q', word)
    return packed
def get_qword_from_data(self, data, offset):
    """Convert eight bytes of data to a quad-word (little endian)

    'offset' indexes a qword array: element N starts at byte N*8.
    Returns None if the data can't be turned into a quad word.
    """
    if len(data) < (offset + 1) * 8:
        return None
    start = offset * 8
    return struct.unpack('<Q', data[start:start + 8])[0]
def get_qword_at_rva(self, rva):
    """Return the quad-word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be mapped
    to a file offset.
    """
    try:
        raw = self.get_data(rva)[:8]
    except PEFormatError:
        return None
    return self.get_qword_from_data(raw, 0)
def get_qword_from_offset(self, offset):
    """Return the quad-word value at the given file offset. (little endian)"""
    end = offset + 8
    if end > len(self.__data__):
        return None
    return self.get_qword_from_data(self.__data__[offset:end], 0)
def set_qword_at_rva(self, rva, qword):
    """Set the quad-word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_qword(qword)
    return self.set_bytes_at_rva(rva, packed)
def set_qword_at_offset(self, offset, qword):
    """Set the quad-word value at the given file offset."""
    packed = self.get_data_from_qword(qword)
    return self.set_bytes_at_offset(offset, packed)
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
    """Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.

    Return True if successful, False otherwise. It can fail if the
    RVA cannot be mapped to a file offset.
    """
    offset = self.get_physical_by_rva(rva)
    # BUG FIX: the original read "if not offset: raise False", which
    # (a) raised a TypeError ("exceptions must derive from BaseException")
    # instead of returning False as documented, and (b) also rejected the
    # perfectly valid file offset 0. get_physical_by_rva returns None on
    # failure, so test for None explicitly.
    if offset is None:
        return False
    return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given string.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries. Raises TypeError for
    non-string input.
    """
    if not isinstance(data, str):
        raise TypeError('data should be of type: str')

    if 0 <= offset < len(self.__data__):
        before = self.__data__[:offset]
        after = self.__data__[offset + len(data):]
        self.__data__ = before + data + after
        return True
    return False
def merge_modified_section_data(self):
    """Update the PE image content with any individual section data that has been modified.

    Splices each section's current data back into the raw file image at
    the section's (alignment-adjusted) file range.
    """
    for section in self.sections:
        section_data_start = adjust_FileAlignment( section.PointerToRawData,
            self.OPTIONAL_HEADER.FileAlignment )
        section_data_end = section_data_start+section.SizeOfRawData
        # NOTE(review): the end-bound test uses '<' rather than '<=', so a
        # section whose raw data ends exactly at EOF is never merged back —
        # confirm whether this is intentional.
        if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
            self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]
def relocate_image(self, new_ImageBase):
    """Apply the relocation information to the image using the provided new image base.

    This method will apply the relocation information to the image. Given the new base,
    all the relocations will be processed and both the raw data and the section's data
    will be fixed accordingly.
    The resulting image can be retrieved as well through the method:

        get_memory_mapped_image()

    In order to get something that would more closely match what could be found in memory
    once the Windows loader finished its work.
    """
    relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase

    for reloc in self.DIRECTORY_ENTRY_BASERELOC:

        virtual_address = reloc.struct.VirtualAddress
        size_of_block = reloc.struct.SizeOfBlock

        # We iterate with an index because if the relocation is of type
        # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
        # at once and skip it for the next iteration
        #
        entry_idx = 0
        while entry_idx<len(reloc.entries):

            entry = reloc.entries[entry_idx]
            entry_idx += 1

            if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
                # Nothing to do for this type of relocation
                pass

            elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
                # Fix the high 16bits of a relocation
                #
                # Add high 16bits of relocation_difference to the
                # 16bit value at RVA=entry.rva
                #
                # NOTE(review): '+' binds tighter than '>>', so this
                # computes ((word + relocation_difference) >> 16) & 0xffff
                # rather than word + (relocation_difference >> 16) as the
                # comment above suggests — confirm intended precedence.
                self.set_word_at_rva(
                    entry.rva,
                    ( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )

            elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
                # Fix the low 16bits of a relocation
                #
                # Add low 16 bits of relocation_difference to the 16bit value
                # at RVA=entry.rva
                self.set_word_at_rva(
                    entry.rva,
                    ( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)

            elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
                # Handle all high and low parts of a 32bit relocation
                #
                # Add relocation_difference to the value at RVA=entry.rva
                self.set_dword_at_rva(
                    entry.rva,
                    self.get_dword_at_rva(entry.rva)+relocation_difference)

            elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
                # Fix the high 16bits of a relocation and adjust
                #
                # Add high 16bits of relocation_difference to the 32bit value
                # composed from the (16bit value at RVA=entry.rva)<<16 plus
                # the 16bit value at the next relocation entry.
                #

                # If the next entry is beyond the array's limits,
                # abort... the table is corrupt
                #
                if entry_idx == len(reloc.entries):
                    break

                next_entry = reloc.entries[entry_idx]
                entry_idx += 1
                self.set_word_at_rva( entry.rva,
                    ((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
                    relocation_difference & 0xffff0000) >> 16 )

            elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
                # Apply the difference to the 64bit value at the offset
                # RVA=entry.rva
                self.set_qword_at_rva(
                    entry.rva,
                    self.get_qword_at_rva(entry.rva) + relocation_difference)
def verify_checksum(self):
    """Return True when the header CheckSum field matches a freshly computed checksum."""
    expected = self.generate_checksum()
    return self.OPTIONAL_HEADER.CheckSum == expected
def generate_checksum(self):
    """Compute the PE image checksum over the current file data.

    Mirrors the Windows CheckSumMappedFile algorithm: a carry-folded
    16-bit sum over every dword of the image (skipping the CheckSum
    field itself) plus the unpadded file length.
    NOTE: Python 2 semantics -- `/` below is integer division and
    self.__data__ is a byte string; a Python 3 port needs `//` and bytes.
    """
    # This will make sure that the data representing the PE image
    # is updated with any changes that might have been made by
    # assigning values to header fields as those are not automatically
    # updated upon assignment.
    #
    self.__data__ = self.write()
    # Get the offset to the CheckSum field in the OptionalHeader
    #
    checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
    checksum = 0
    # Verify the data is dword-aligned. Add padding if needed
    #
    remainder = len(self.__data__) % 4
    data = self.__data__ + ( '\0' * ((4-remainder) * ( remainder != 0 )) )
    for i in range( len( data ) / 4 ):
        # Skip the checksum field
        #
        if i == checksum_offset / 4:
            continue
        dword = struct.unpack('I', data[ i*4 : i*4+4 ])[0]
        # Accumulate, folding any carry above 32 bits back in.
        checksum = (checksum & 0xffffffff) + dword + (checksum>>32)
        if checksum > 2**32:
            checksum = (checksum & 0xffffffff) + (checksum >> 32)
    # Fold the 32-bit sum down to 16 bits.
    checksum = (checksum & 0xffff) + (checksum >> 16)
    checksum = (checksum) + (checksum >> 16)
    checksum = checksum & 0xffff
    # The length is the one of the original data, not the padded one
    #
    return checksum + len(self.__data__)
def is_exe(self):
    """Check whether the file is a standard executable.

    True only when IMAGE_FILE_EXECUTABLE_IMAGE is set in the file header
    characteristics and the image is neither a DLL nor a driver.
    """
    # Anything recognized as a DLL or a driver is not a plain executable.
    if self.is_dll() or self.is_driver():
        return False
    exe_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']
    return (exe_flag & self.FILE_HEADER.Characteristics) == exe_flag
def is_dll(self):
    """Check whether the file is a standard DLL.

    True only when the IMAGE_FILE_DLL characteristic flag is set.
    """
    dll_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']
    return (self.FILE_HEADER.Characteristics & dll_flag) == dll_flag
def is_driver(self):
    """Check whether the file looks like a Windows kernel-mode driver.

    Only reliable indicators are used: importing from core kernel
    components (ntoskrnl.exe, hal.dll, ...).  An ImageBase above
    0x80000000 or non-paged sections are NOT trusted, since malware can
    fake both (e.g. the invalid-ImageBase relocation trick).
    """
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        return False
    kernel_modules = set(
        ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll'))
    imported_names = [entry.dll.lower() for entry in self.DIRECTORY_ENTRY_IMPORT]
    if kernel_modules.intersection(imported_names):
        return True
    return False
def get_overlay_data_start_offset(self):
    """Get the offset of data appended to the file beyond the area
    described by the section headers.

    Returns None when the file ends exactly where the sections do.
    """
    end_of_image = 0
    for section in self.sections:
        section_end = section.PointerToRawData + section.SizeOfRawData
        # A section reaching past the end of the file is either an
        # intentionally misleading header or a truncated file; skip it.
        if section_end > len(self.__data__):
            continue
        end_of_image = max(end_of_image, section_end)
    if len(self.__data__) > end_of_image:
        return end_of_image
    return None
def get_overlay(self):
    """Return the data appended past the area described by the headers,
    or None when there is no overlay."""
    start = self.get_overlay_data_start_offset()
    if start is None:
        return None
    return self.__data__[start:]
def trim(self):
    """Return only the data described by the PE headers, with any
    appended overlay removed."""
    overlay_start = self.get_overlay_data_start_offset()
    if overlay_start is None:
        # No overlay: return a copy of the whole image.
        return self.__data__[:]
    return self.__data__[:overlay_start]
| Python |
#! /usr/bin/env python
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from win32com.shell import shell
import win32api
import pythoncom
import os
import sys
def CreateShortCut(Path, Target,Arguments = "", StartIn = "", Icon = ("",0), Description = ""):
    """Create a Windows shell shortcut (.lnk file).

    Path        -- filename the shortcut is saved to
    Target      -- executable/document the shortcut points at
    Arguments   -- command-line arguments string
    StartIn     -- working directory for the target
    Icon        -- (icon file, icon index) pair
    Description -- shortcut tooltip text
    """
    # Get the shell interface.
    sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
        pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
    # Get an IPersist interface
    persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
    # Set the data
    sh.SetPath(Target)
    sh.SetDescription(Description)
    sh.SetArguments(Arguments)
    sh.SetWorkingDirectory(StartIn)
    sh.SetIconLocation(Icon[0],Icon[1])
    # sh.SetShowCmd( win32con.SW_SHOWMINIMIZED)
    # Save the link itself.
    persist.Save(Path, 1)  # 1 -> remember Path as the persisted filename
    print "Saved to", Path
if __name__ == "__main__":
    # Smoke test: create a shortcut to the running Python interpreter
    # in %TEMP%.
    try:
        TempDir = os.environ["TEMP"]
        # NOTE(review): WinRoot is assigned but never used.
        WinRoot = os.environ["windir"]
        Path = TempDir
        Target = os.path.normpath(sys.executable)
        Arguments = ""
        StartIn = TempDir
        Icon = ("", 0)
        Description = "py made shortcut"
        CreateShortCut(Path,Target,Arguments,StartIn,Icon,Description)
    except Exception, e:
        print "Failed!", e
        import traceback
        traceback.print_exc()
    raw_input("Press any key to continue...")
| Python |
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#import pythoncom
# CLSCTX constants hard-coded so this module does not need pythoncom at
# import time (presumably mirroring pythoncom.CLSCTX_* values -- confirm).
pycomCLSCTX_INPROC = 3
pycomCLSCTX_LOCAL_SERVER = 4
import os
# Module-level ID state; populated from id.cfg by NextID.__init__ and
# rewritten by NextID.getNextID.
d = {}
class NextID:
    """COM server handing out sequential IDs persisted in id.cfg.

    The id.cfg file lives next to this module (or next to the frozen
    executable when running under a PyInstaller build) and holds a dict
    with 'systemID' and 'highID' keys.
    """
    _reg_clsid_ = '{25E06E61-2D18-11D5-945F-00609736B700}'
    _reg_desc_ = 'Text COM server'
    _reg_progid_ = 'MEInc.NextID'
    _reg_clsctx_ = pycomCLSCTX_INPROC | pycomCLSCTX_LOCAL_SERVER
    _public_methods_ = [
        'getNextID'
    ]
    def __init__(self):
        # BUG FIX: sys was referenced below but only imported inside the
        # __main__ guard, so server-mode instantiation raised NameError.
        import sys
        import win32api
        win32api.MessageBox(0, "NextID.__init__ started", "NextID.py")
        global d
        # BUG FIX: a plain interpreter has no sys.frozen attribute at all,
        # so "if sys.frozen:" raised AttributeError; use getattr instead.
        if getattr(sys, 'frozen', False):
            # Frozen sys.path entries look like "archive?entry"; derive
            # the executable's directory from the first such entry.
            for entry in sys.path:
                if entry.find('?') > -1:
                    here = os.path.dirname(entry.split('?')[0])
                    break
            else:
                here = os.getcwd()
        else:
            here = os.path.dirname(__file__)
        self.fnm = os.path.join(here, 'id.cfg')
        try:
            # Close the config file instead of leaking the handle.
            cfg = open(self.fnm, 'rU')
            try:
                d = eval(cfg.read()+'\n')
            finally:
                cfg.close()
        except:
            # Missing/corrupt config: start over with defaults.
            d = {
                'systemID': 0xaaaab,
                'highID': 0
            }
        win32api.MessageBox(0, "NextID.__init__ complete", "NextID.py")
    def getNextID(self):
        """Increment the persistent counter and return the formatted ID."""
        global d
        d['highID'] = d['highID'] + 1
        # Persist the updated state, closing the handle promptly.
        f = open(self.fnm, 'w')
        try:
            f.write(repr(d))
        finally:
            f.close()
        return '%(systemID)-0.5x%(highID)-0.7x' % d
def RegisterNextID():
    """Register NextID as a COM server (honours win32com command-line flags)."""
    from win32com.server import register
    register.UseCommandLine(NextID)
def UnRegisterNextID():
    """Remove the COM registration for NextID."""
    from win32com.server import register
    register.UnregisterServer(NextID._reg_clsid_, NextID._reg_progid_)
if __name__ == '__main__':
    import sys
    # /unreg -> unregister, /register -> register, anything else -> run
    # as a local COM server until the server loop exits.
    if "/unreg" in sys.argv:
        UnRegisterNextID()
    elif "/register" in sys.argv:
        RegisterNextID()
    else:
        print "running as server"
        import win32com.server.localserver
        win32com.server.localserver.main()
        raw_input("Press any key...")
| Python |
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Test MSOffice
#
# Main purpose of test is to ensure that Dynamic COM objects
# work as expected.
# Assumes Word and Excel installed on your machine.
import win32com, sys, string, win32api, traceback
import win32com.client.dynamic
from win32com.test.util import CheckClean
import pythoncom
from win32com.client import gencache
from pywintypes import Unicode
# Python-2-style string exception tag, used as "raise error, msg" below.
error = "MSOffice test error"
# Test a few of the MSOffice components.
def TestWord():
    """Exercise MS Word through dynamic (late-bound) COM dispatch.

    Tries the Word 8 object model first (lazy and non-lazy dispatch),
    falls back to the Word 7 "Word.Basic" API, then repeats with the
    makepy-generated early-bound wrapper.
    """
    # Try and load the object exposed by Word 8
    # Office 97 - _totally_ different object model!
    try:
        # NOTE - using "client.Dispatch" would return an msword8.py instance!
        print "Starting Word 8 for dynamic test"
        word = win32com.client.dynamic.Dispatch("Word.Application")
        TestWord8(word)
        word = None
        # Now we will test Dispatch without the new "lazy" capabilities
        print "Starting Word 8 for non-lazy dynamic test"
        dispatch = win32com.client.dynamic._GetGoodDispatch("Word.Application")
        typeinfo = dispatch.GetTypeInfo()
        attr = typeinfo.GetTypeAttr()
        olerepr = win32com.client.build.DispatchItem(typeinfo, attr, None, 0)
        word = win32com.client.dynamic.CDispatch(dispatch, olerepr)
        dispatch = typeinfo = attr = olerepr = None
        TestWord8(word)
    except pythoncom.com_error:
        # Word 8 unavailable -- fall back to the old Word 7 API.
        print "Starting Word 7 for dynamic test"
        word = win32com.client.Dispatch("Word.Basic")
        TestWord7(word)
    try:
        print "Starting MSWord for generated test"
        # Typelib, lcid, major and minor for the typelib
        try:
            o = gencache.EnsureModule("{00020905-0000-0000-C000-000000000046}", 1033, 8, 0, bForDemand=1)
        except TypeError:
            # Older win32com versions have no bForDemand argument.
            o = gencache.EnsureModule("{00020905-0000-0000-C000-000000000046}", 1033, 8, 0)
        if o is None :
            raise ImportError, "Can not load the Word8 typelibrary."
        word = win32com.client.Dispatch("Word.Application.8")
        TestWord8(word)
    except ImportError, details:
        print "Can not test MSWord8 -", details
def TestWord7(word):
    """Drive the Word 7 single-dispatch "Word.Basic" API."""
    word.FileNew()
    # If not shown, show the app.
    if not word.AppShow(): word._proc_("AppShow")
    for i in xrange(12):
        word.FormatFont(Color=i+1, Points=i+12)
        word.Insert("Hello from Python %d\n" % i)
    # 2 == close without saving changes.
    word.FileClose(2)
def TestWord8(word):
    """Drive the Word 8 (Office 97) object model: add a document, insert
    styled paragraphs, then close without saving and quit Word."""
    word.Visible = 1
    doc = word.Documents.Add()
    wrange = doc.Range()
    for i in range(10):
        wrange.InsertAfter("Hello from Python %d\n" % i)
    paras = doc.Paragraphs
    for i in range(len(paras)):
        paras[i]().Font.ColorIndex = i+1
        paras[i]().Font.Size = 12 + (4 * i)
    # XXX - note that
    # for para in paras:
    #	para().Font...
    # doesnt seem to work - no error, just doesnt work
    # Should check if it works for VB!
    doc.Close(SaveChanges = 0)
    word.Quit()
    win32api.Sleep(1000) # Wait for word to close, else we
    # may get OA error.
def TestWord8OldStyle():
    """Try importing the pre-generated (old-style) Word 8 wrapper module."""
    try:
        import win32com.test.Generated4Test.msword8
    except ImportError:
        print "Can not do old style test"
def TextExcel(xl):
    """Exercise an Excel Application object: Visible property, range
    get/set round-trips, and date formatting.

    NOTE(review): the name is presumably a typo for "TestExcel", but it
    is kept because TestAll() calls it by this name.  The `<>` operator
    and `raise error, msg` form are Python-2-only syntax.
    """
    xl.Visible = 0
    if xl.Visible: raise error, "Visible property is true."
    xl.Visible = 1
    if not xl.Visible: raise error, "Visible property not true."
    # Excel 8+ exposes Workbooks as a property; older versions as a method.
    if int(xl.Version[0])>=8:
        xl.Workbooks.Add()
    else:
        xl.Workbooks().Add()
    xl.Range("A1:C1").Value = (1,2,3)
    xl.Range("A2:C2").Value = ('x','y','z')
    xl.Range("A3:C3").Value = ('3','2','1')
    for i in xrange(20):
        xl.Cells(i+1,i+1).Value = "Hi %d" % i
    if xl.Range("A1").Value <> "Hi 0":
        raise error, "Single cell range failed"
    if xl.Range("A1:B1").Value <> ((Unicode("Hi 0"),2),):
        raise error, "flat-horizontal cell range failed"
    if xl.Range("A1:A2").Value <> ((Unicode("Hi 0"),),(Unicode("x"),)):
        raise error, "flat-vertical cell range failed"
    if xl.Range("A1:C3").Value <> ((Unicode("Hi 0"),2,3),(Unicode("x"),Unicode("Hi 1"),Unicode("z")),(3,2,Unicode("Hi 2"))):
        raise error, "square cell range failed"
    xl.Range("A1:C3").Value =((3,2,1),("x","y","z"),(1,2,3))
    if xl.Range("A1:C3").Value <> ((3,2,1),(Unicode("x"),Unicode("y"),Unicode("z")),(1,2,3)):
        raise error, "Range was not what I set it to!"
    # test dates out with Excel
    xl.Cells(5,1).Value = "Excel time"
    xl.Cells(5,2).Formula = "=Now()"
    import time
    xl.Cells(6,1).Value = "Python time"
    xl.Cells(6,2).Value = pythoncom.MakeTime(time.time())
    xl.Cells(6,2).NumberFormat = "d/mm/yy h:mm"
    xl.Columns("A:B").EntireColumn.AutoFit()
    # Close the workbook without saving, then quit Excel.
    xl.Workbooks(1).Close(0)
    xl.Quit()
def TestAll():
    """Run the Word and Excel tests: dynamic dispatch first, then the
    generated (makepy) wrappers for Excel 97 and Excel 95."""
    try:
        TestWord()
        print "Starting Excel for Dynamic test..."
        xl = win32com.client.dynamic.Dispatch("Excel.Application")
        TextExcel(xl)
        try:
            print "Starting Excel 8 for generated excel8.py test..."
            try:
                mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 0, 1, 2, bForDemand=1)
            except TypeError:
                # Older win32com versions have no bForDemand argument.
                mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 0, 1, 2)
            xl = win32com.client.Dispatch("Excel.Application")
            TextExcel(xl)
        except ImportError:
            print "Could not import the generated Excel 97 wrapper"
        try:
            import xl5en32
            mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 9, 1, 0)
            xl = win32com.client.Dispatch("Excel.Application.5")
            print "Starting Excel 95 for makepy test..."
            TextExcel(xl)
        except ImportError:
            print "Could not import the generated Excel 95 wrapper"
    except KeyboardInterrupt:
        print "*** Interrupted MSOffice test ***"
    except:
        # Report, but do not propagate, any other test failure.
        traceback.print_exc()
if __name__=='__main__':
    TestAll()
    # CheckClean reports any COM objects still alive after the tests.
    CheckClean()
    pythoncom.CoUninitialize()
| Python |
# for older Pythons, we need to set up for the import of cPickle
import string
import copy_reg
import win32com.client.gencache
# Force generation/loading of the makepy wrapper for ADO recordsets and
# print which wrapper class was resolved.
x = win32com.client.gencache.EnsureDispatch('ADOR.Recordset')
print x
# Drop the COM reference before exiting.
x = None
#raw_input("Press any key to continue...")
| Python |
# Animated Towers of Hanoi using Tk with optional bitmap file in
# background.
#
# Usage: tkhanoi [n [bitmapfile]]
#
# n is the number of pieces to animate; default is 4, maximum 15.
#
# The bitmap file can be any X11 bitmap file (look in
# /usr/include/X11/bitmaps for samples); it is displayed as the
# background of the animation. Default is no bitmap.
# This uses Steen Lumholt's Tk interface
from Tkinter import *
# Basic Towers-of-Hanoi algorithm: move n pieces from a to b, using c
# as temporary. For each move, call report()
def hanoi(n, a, b, c, report):
    """Solve Towers of Hanoi: move n pieces from peg a to peg b using
    peg c as the temporary, invoking report(piece, src, dst) per move."""
    if n > 0:
        # Park the n-1 smaller pieces on the spare peg, move the largest,
        # then bring the smaller pieces on top of it.
        hanoi(n - 1, a, c, b, report)
        report(n, a, b)
        hanoi(n - 1, c, b, a, report)
# The graphical interface
class Tkhanoi:
    """Animated Towers of Hanoi on a Tk canvas.

    Three black rectangles are the pegs; the n pieces are red rectangles
    of decreasing width.  report() animates a move by sliding a piece up,
    across, and down one pixel per Tk update.
    NOTE: arithmetic below relies on Python 2 integer division (`/`).
    """
    # Create our objects
    def __init__(self, n, bitmap = None):
        self.n = n
        self.tk = tk = Tk()
        Label(text="Press <ESC> to exit").pack()
        tk.bind("<Escape>", lambda x: tk.destroy()) #quit())
        self.canvas = c = Canvas(tk)
        c.pack()
        width, height = tk.getint(c['width']), tk.getint(c['height'])
        # Add background bitmap
        if bitmap:
            self.bitmap = c.create_bitmap(width/2, height/2,
                                          bitmap=bitmap,
                                          foreground='blue')
        # Generate pegs
        pegwidth = 10
        pegheight = height/2
        pegdist = width/3
        x1, y1 = (pegdist-pegwidth)/2, height*1/3
        x2, y2 = x1+pegwidth, y1+pegheight
        self.pegs = []
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        x1, x2 = x1+pegdist, x2+pegdist
        p = c.create_rectangle(x1, y1, x2, y2, fill='black')
        self.pegs.append(p)
        self.tk.update()
        # Generate pieces
        pieceheight = pegheight/16
        maxpiecewidth = pegdist*2/3
        minpiecewidth = 2*pegwidth
        # pegstate[k] lists the piece numbers currently on peg k,
        # bottom first; pieces maps piece number -> canvas item id.
        self.pegstate = [[], [], []]
        self.pieces = {}
        x1, y1 = (pegdist-maxpiecewidth)/2, y2-pieceheight-2
        x2, y2 = x1+maxpiecewidth, y1+pieceheight
        dx = (maxpiecewidth-minpiecewidth) / (2*max(1, n-1))
        for i in range(n, 0, -1):
            p = c.create_rectangle(x1, y1, x2, y2, fill='red')
            self.pieces[i] = p
            self.pegstate[0].append(i)
            x1, x2 = x1 + dx, x2-dx
            y1, y2 = y1 - pieceheight-2, y2-pieceheight-2
            self.tk.update()
            self.tk.after(25)
    # Run -- never returns - press esc or close window to exit
    def run(self):
        """Cycle through all six source/target peg permutations forever;
        closing the window raises TclError, which ends the loop."""
        try:
            while 1:
                hanoi(self.n, 0, 1, 2, self.report)
                hanoi(self.n, 1, 2, 0, self.report)
                hanoi(self.n, 2, 0, 1, self.report)
                hanoi(self.n, 0, 2, 1, self.report)
                hanoi(self.n, 2, 1, 0, self.report)
                hanoi(self.n, 1, 0, 2, self.report)
        except TclError:
            pass
    # Reporting callback for the actual hanoi function
    def report(self, i, a, b):
        """Animate moving piece i from peg a to peg b and update pegstate."""
        if self.pegstate[a][-1] != i: raise RuntimeError # Assertion
        del self.pegstate[a][-1]
        p = self.pieces[i]
        c = self.canvas
        # Lift the piece above peg a
        ax1, ay1, ax2, ay2 = c.bbox(self.pegs[a])
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 < ay1: break
            c.move(p, 0, -1)
            self.tk.update()
        # Move it towards peg b
        bx1, by1, bx2, by2 = c.bbox(self.pegs[b])
        newcenter = (bx1+bx2)/2
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            center = (x1+x2)/2
            if center == newcenter: break
            if center > newcenter: c.move(p, -1, 0)
            else: c.move(p, 1, 0)
            self.tk.update()
        # Move it down on top of the previous piece
        pieceheight = y2-y1
        newbottom = by2 - pieceheight*len(self.pegstate[b]) - 2
        while 1:
            x1, y1, x2, y2 = c.bbox(p)
            if y2 >= newbottom: break
            c.move(p, 0, 1)
            self.tk.update()
        # Update peg state
        self.pegstate[b].append(i)
# Main program
def main():
    """Parse the command line and run the animation.

    Usage: tkhanoi [n [bitmapfile]] -- n is the piece count (default 4);
    bitmapfile is an X11 bitmap shown behind the animation.
    """
    import sys
    # First argument is number of pegs, default 4
    if sys.argv[1:]:
        # FIX: string.atoi is deprecated; the int() builtin performs the
        # same decimal conversion (and the string import was otherwise
        # unused, so it is dropped).
        n = int(sys.argv[1])
    else:
        n = 4
    # Second argument is bitmap file, default none
    if sys.argv[2:]:
        bitmap = sys.argv[2]
        # Reverse meaning of leading '@' compared to Tk
        if bitmap[0] == '@': bitmap = bitmap[1:]
        else: bitmap = '@' + bitmap
    else:
        bitmap = None
    # Create the graphical objects...
    h = Tkhanoi(n, bitmap)
    # ...and run!
    h.run()

# Call main when run as script
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# Copyright (C) 2011, Hartmut Goebel
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import optparse
import shutil

try:
    import PyInstaller
except ImportError:
    # if importing PyInstaller fails, try to load from parent
    # directory to support running without installation
    import imp
    import os
    # NOTE(review): the fallback is skipped when running as root --
    # presumably to avoid importing from a relative path with elevated
    # privileges; confirm intent.
    if not hasattr(os, "getuid") or os.getuid() != 0:
        imp.load_module('PyInstaller', *imp.find_module('PyInstaller',
            [os.path.abspath(os.path.join(__file__, '..','..','..'))]))

from PyInstaller import is_win, is_linux
from PyInstaller import compat

# PyInstaller entry-point scripts invoked by build_test().
utils_dir = os.path.normpath(os.path.join(__file__, '..', '..', '..', 'utils'))
makespec = os.path.join(utils_dir, 'Makespec.py')
build = os.path.join(utils_dir, 'Build.py')

# Option matrix: --strip is not useful on Windows; --noconsole is
# Windows-only.
if is_win:
    stripopts = ('',)
    consoleopts = ('', '--noconsole')
else:
    stripopts = ('', '--strip')
    consoleopts = ('',)

# Each combination builds into its own working directory t<N> (placed
# under the temp dir on Linux).
out_pattern = 't%d'
if is_linux:
    import tempfile
    out_pattern = os.path.join(tempfile.gettempdir(), 'hanoi', out_pattern)
dist_pattern_dir = os.path.join(out_pattern, 'dist', 'hanoi', 'hanoi')
dist_pattern_file = os.path.join(out_pattern, 'dist', 'hanoi')

script_name = os.path.abspath(os.path.join(__file__, '..', 'hanoi.py'))
def build_test(cnt, bldconfig, *options, **kwopts):
    """Build test case number cnt with the given Makespec options.

    bldconfig is '--onedir' or '--onefile'; empty strings in options are
    ignored.  kwopts['clean'] wipes the working directory first.  On
    Linux a hanoi<N> symlink is (re)created pointing at the result.
    """
    workdir = out_pattern % cnt
    real_opts = [opt for opt in options if opt]
    if kwopts['clean'] and os.path.isdir(workdir):
        # remove/clean the working directory
        shutil.rmtree(workdir)
    compat.exec_python_rc(makespec, script_name,
                          '--out', workdir, bldconfig, *real_opts)
    compat.exec_python_rc(build, os.path.join(workdir, 'hanoi.spec'),
                          '--noconfirm')
    if is_linux:
        # create symlinks
        link = 'hanoi%d' % cnt
        if os.path.islink(link):
            os.remove(link)
        if bldconfig == '--onedir':
            os.symlink(dist_pattern_dir % cnt, link)
        else:
            os.symlink(dist_pattern_file % cnt, link)
parser = optparse.OptionParser('%prog [NUM ...]')
parser.add_option('--clean', action='store_true',
                  help=('Perform clean builds '
                        '(remove target dirs prior to building).'))
opts, args = parser.parse_args()
# Positional arguments select which test numbers to build; none = all.
args = map(int, args)
i = 1
# Enumerate every combination of build mode, console flag, debug flag and
# strip flag; the counter i names the output directory (t%d).
for bldconfig in ('--onedir', '--onefile'):
    for console in consoleopts:
        for dbg in ('--debug', ''):
            for stripopt in stripopts:
                if not args or i in args:
                    build_test(i, bldconfig, console, dbg, stripopt, **opts.__dict__)
                i += 1
| Python |
#!/usr/bin/env python
# Team 4067 64-bit Java Disable Tool - Disable 64-bit Java to enable SmartDashboard to run!
# Copyright (C) 2013 River Hill HS Robotics Team (Albert H.)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import shutil
import traceback
import ctypes
try:
import win32ui
import win32con
except:
print "ERROR: You need to install pywin32 in order to use this program."
print "Get it at http://sourceforge.net/projects/pywin32/ and install it."
print "Press ENTER to exit."
raw_input()
sys.exit(1)
# Functions
# Windows 64-bit check - courtesy of "phobie" from http://stackoverflow.com/a/12578715
def is_windows_64bit():
    """Return True when running on 64-bit Windows.

    Courtesy of "phobie" from http://stackoverflow.com/a/12578715
    """
    env = os.environ
    # A 32-bit process on a 64-bit OS sees the WOW64 variable.
    if 'PROCESSOR_ARCHITEW6432' in env:
        return True
    # A native process reports the machine architecture directly.
    return env['PROCESSOR_ARCHITECTURE'].endswith('64')
print "SmartDashboard 64-bit Launcher"
print "==============================="
print "Copyright (C) 2013 River Hill High School Robotics Team (Albert H.)"
print " *** FRC 4067 The Incredible Hawk *** "
print ""
# Sanity check - is this Windows?
if sys.platform != 'win32':
    print "ERROR: This tool only runs on Windows."
    print "Press ENTER to exit."
    raw_input()
    sys.exit(1)
# Refuse to run on 32-bit Windows: there is no 64-bit Java to disable.
if not is_windows_64bit():
    print "ERROR: This tool only runs on Windows 64-bit. (You are running Windows 32-bit, as detected by this program.)"
    win32ui.MessageBox("This tool only runs on Windows 64-bit. You are running Windows 32-bit, as detected by this program.", "Error - SmartDashboard 64-bit Launcher", win32con.MB_ICONERROR)
    sys.exit(1)
print "Trying to find a 32-bit Java installation..."
# NOTE(review): the Java search below was never implemented; only this
# commented-out exploration remains.
#if os.path.isdir("C:\Program Files (x86)\Java"):
#win32ui.MessageBox("Hello world", "Hello", win32con.MB_ICONERROR)
#!/usr/bin/env python
# Team 4067 Qt UI to Python Batch Converter - convert all .ui files to .py!
# Copyright (C) 2013 River Hill HS Robotics Team (Albert H.)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import subprocess
print "Starting conversions..."
for uifile in os.listdir("."):
if uifile.endswith(".ui"):
print "Converting "+uifile+" to "+uifile.replace(".ui", ".py")+"..."
pyfile = open(uifile.replace(".ui", ".py"), "w")
subprocess.call([r"C:\Python27\Lib\site-packages\PyQt4\pyuic4.bat", uifile], stdout = pyfile)
elif uifile.endswith(".qrc"):
print "Converting "+uifile+" to "+uifile.replace(".qrc", "_rc.py")+"..."
pyfile = open(uifile.replace(".qrc", "_rc.py"), "w")
subprocess.call([r"C:\Python27\Lib\site-packages\PyQt4\pyrcc4.exe", "-py2", uifile], stdout = pyfile)
elif uifile.endswith(".pyc"):
print "Removing precompiled Python file "+uifile+"..."
os.remove(uifile)
print "Conversions complete."
raw_input() | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'InitialNotice.ui'
#
# Created: Sat Jan 19 21:47:32 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: on PyQt4 API v1,
# QString.fromUtf8 exists; on API v2 plain Python strings are used.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# QApplication.translate takes an extra encoding argument on older
# PyQt4 versions; pick whichever signature is available.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_NoticeForm(object):
    """pyuic4-generated layout for the license-notice dialog.

    Do not hand-edit the widget construction: regenerate from
    InitialNotice.ui instead (edits to this file are overwritten).
    """
    def setupUi(self, NoticeForm):
        # Notice text on top, Accept/Decline button row underneath.
        NoticeForm.setObjectName(_fromUtf8("NoticeForm"))
        NoticeForm.resize(405, 282)
        self.verticalLayout = QtGui.QVBoxLayout(NoticeForm)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.noticeLbl = QtGui.QLabel(NoticeForm)
        self.noticeLbl.setWordWrap(True)
        self.noticeLbl.setObjectName(_fromUtf8("noticeLbl"))
        self.verticalLayout.addWidget(self.noticeLbl)
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        self.acceptBtn = QtGui.QPushButton(NoticeForm)
        # Disabled until the countdown (driven elsewhere) elapses.
        self.acceptBtn.setEnabled(False)
        self.acceptBtn.setObjectName(_fromUtf8("acceptBtn"))
        self.buttonLayout.addWidget(self.acceptBtn)
        self.declineExitBtn = QtGui.QPushButton(NoticeForm)
        self.declineExitBtn.setObjectName(_fromUtf8("declineExitBtn"))
        self.buttonLayout.addWidget(self.declineExitBtn)
        self.verticalLayout.addLayout(self.buttonLayout)
        self.verticalLayout.setStretch(0, 1)
        self.retranslateUi(NoticeForm)
        QtCore.QMetaObject.connectSlotsByName(NoticeForm)
    def retranslateUi(self, NoticeForm):
        # All user-visible strings routed through _translate for i18n.
        NoticeForm.setWindowTitle(_translate("NoticeForm", "Team 4067 - 2013 FRC Software Installer - NOTICE", None))
        self.noticeLbl.setText(_translate("NoticeForm", "<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">2013 FRC Software Installer by Team 4067 - SOFTWARE USE NOTICE</span></p><p>You may only use this software IF AND ONLY IF you are an active teacher, mentor, or participant of the River Hill Robotics Team (The Incredible Hawk, FRC Team 4067). If not, you can NOT use this software and must exit this installer now. Even if you are an active participant of FIRST Robotics™, the licensing of the software distributed by this installer permits only one team to download, install, and use. If you wish to use this software for your team, you must recreate this application and the appropriate archive(s) for your team.<br/><br/>If you have left the team and/or have terminated participation in FIRST Robotics via the River Hill Robotics Team, you can NOT use this software and must exit this installer now.</p><p>This installer is bound to the licensing agreements of FIRST Robotics™, National Instruments™, and any other software licensing agreements that are a part of the FRC competiton software.</p></body></html>", None))
        self.acceptBtn.setText(_translate("NoticeForm", "&Accept (10)", None))
        self.declineExitBtn.setText(_translate("NoticeForm", "Decline and E&xit", None))
| Python |
#!/usr/bin/env python
# Team 4067 FRC Tools Installer - easy installer for FRC Tools
# Copyright (C) 2013 River Hill HS Robotics Team (Albert H.)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtNetwork import QHttp
from MainWindow import Ui_MainForm
from InitialNotice import Ui_NoticeForm
from Options import Ui_OptionsForm
from LicenseAgreements import Ui_LicenseAgreementsForm
import sys
FRC2013_ARCHIVE_URL="http://riverhillrobotics.org/Resources/FRC2013/Software/FRC2013.7z"
class DownloadProgress(QtCore.QThread):
    """Background thread intended to drive the archive download.

    FIX: the original run() loop body was the bare name ``bla`` -- an
    undefined placeholder that raised NameError the moment the thread
    started.  Replaced with an idle sleep until real logic is written.
    """
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.exiting = False
        self.doneDownload = False
    def run(self):
        # Poll until asked to exit or the download completes.
        while not self.exiting and not self.doneDownload:
            # TODO: implement the actual download; sleep so this
            # placeholder loop does not spin a CPU core.
            self.msleep(100)
    def __del__(self):
        self.exiting = True
        self.wait()
class VerificationProgress(QtCore.QThread):
    """Background thread intended to verify the downloaded archive.

    FIX: the original run() loop body was the bare name ``bla`` -- an
    undefined placeholder that raised NameError the moment the thread
    started.  Replaced with an idle sleep until real logic is written.
    """
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.exiting = False
        self.doneVerify = False
    def run(self):
        # Poll until asked to exit or the verification completes.
        while not self.exiting and not self.doneVerify:
            # TODO: implement the actual verification; sleep so this
            # placeholder loop does not spin a CPU core.
            self.msleep(100)
    def __del__(self):
        self.exiting = True
        self.wait()
class ExtractProgress(QtCore.QThread):
    """Background thread intended to extract the downloaded archive.

    FIX: the original run() loop body was the bare name ``bla`` -- an
    undefined placeholder that raised NameError the moment the thread
    started.  Replaced with an idle sleep until real logic is written.
    """
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        self.exiting = False
        self.doneExtract = False
    def run(self):
        # Poll until asked to exit or the extraction completes.
        while not self.exiting and not self.doneExtract:
            # TODO: implement the actual extraction; sleep so this
            # placeholder loop does not spin a CPU core.
            self.msleep(100)
    def __del__(self):
        self.exiting = True
        self.wait()
class Main(QMainWindow):
def __init__(self):
QDialog.__init__(self)
# Set up the user interface from Designer.
self.ui = Ui_MainForm()
self.ui.setupUi(self)
# Let's set some attributes!
# First, make the passwords hidden
self.ui.downloadPasswordTxt.setEchoMode(QLineEdit.Password)
self.ui.archivePasswordTxt.setEchoMode(QLineEdit.Password)
# Make the start button default
self.ui.startBtn.setDefault(True)
# Connect up the buttons.
self.ui.startBtn.clicked.connect(self.startDownload)
#self.ui.cancelButton.clicked.connect(self.reject)
# Make the window as small as possible!
self.resize(self.minimumSizeHint())
self.setFixedSize(self.minimumSizeHint())
def startDownload(self):
print "Start download trigger"
pass
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = Main()
    myapp.show()
    #sys.exit(app.exec_())
    try:
        app.exec_()
    except Exception:
        # NOTE(review): this discards the traceback, and the message
        # misspells "occurred"; consider logging the exception instead.
        print "An error occured."
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created: Sat Jan 19 21:47:33 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: on PyQt4 API v1,
# QString.fromUtf8 exists; on API v2 plain Python strings are used.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# QApplication.translate takes an extra encoding argument on older
# PyQt4 versions; pick whichever signature is available.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainForm(object):
    """pyuic4-generated layout class for the installer's main window.

    Generated from MainWindow.ui -- regenerate rather than hand-edit;
    manual changes will be lost (see the file header warning).
    """

    def setupUi(self, MainForm):
        """Build the widget tree and layouts on *MainForm*."""
        MainForm.setObjectName(_fromUtf8("MainForm"))
        MainForm.resize(460, 270)
        MainForm.setAutoFillBackground(False)
        # Central widget with a zero-margin vertical layout.
        self.MainWidget = QtGui.QWidget(MainForm)
        self.MainWidget.setObjectName(_fromUtf8("MainWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.MainWidget)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Fixed-size banner image across the top (from the resource file).
        self.headerIMG = QtGui.QLabel(self.MainWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.headerIMG.sizePolicy().hasHeightForWidth())
        self.headerIMG.setSizePolicy(sizePolicy)
        self.headerIMG.setText(_fromUtf8(""))
        self.headerIMG.setPixmap(QtGui.QPixmap(_fromUtf8(":/Images/FRCToolsInstallerHeader.png")))
        self.headerIMG.setObjectName(_fromUtf8("headerIMG"))
        self.verticalLayout.addWidget(self.headerIMG)
        # Main content area below the banner.
        self.MainLayout = QtGui.QVBoxLayout()
        self.MainLayout.setSpacing(6)
        self.MainLayout.setMargin(9)
        self.MainLayout.setObjectName(_fromUtf8("MainLayout"))
        self.installerMessageLabel = QtGui.QLabel(self.MainWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.installerMessageLabel.sizePolicy().hasHeightForWidth())
        self.installerMessageLabel.setSizePolicy(sizePolicy)
        self.installerMessageLabel.setObjectName(_fromUtf8("installerMessageLabel"))
        self.MainLayout.addWidget(self.installerMessageLabel)
        # Side-by-side password fields plus the advanced-options button.
        self.passwordOptionsLayout = QtGui.QHBoxLayout()
        self.passwordOptionsLayout.setSpacing(6)
        self.passwordOptionsLayout.setMargin(0)
        self.passwordOptionsLayout.setObjectName(_fromUtf8("passwordOptionsLayout"))
        self.downloadPasswordLayout = QtGui.QVBoxLayout()
        self.downloadPasswordLayout.setObjectName(_fromUtf8("downloadPasswordLayout"))
        self.downloadPasswordLabel = QtGui.QLabel(self.MainWidget)
        self.downloadPasswordLabel.setObjectName(_fromUtf8("downloadPasswordLabel"))
        self.downloadPasswordLayout.addWidget(self.downloadPasswordLabel)
        self.downloadPasswordTxt = QtGui.QLineEdit(self.MainWidget)
        self.downloadPasswordTxt.setText(_fromUtf8(""))
        self.downloadPasswordTxt.setObjectName(_fromUtf8("downloadPasswordTxt"))
        self.downloadPasswordLayout.addWidget(self.downloadPasswordTxt)
        self.passwordOptionsLayout.addLayout(self.downloadPasswordLayout)
        self.archivePasswordLayout = QtGui.QVBoxLayout()
        self.archivePasswordLayout.setObjectName(_fromUtf8("archivePasswordLayout"))
        self.archivePasswordLabel = QtGui.QLabel(self.MainWidget)
        self.archivePasswordLabel.setObjectName(_fromUtf8("archivePasswordLabel"))
        self.archivePasswordLayout.addWidget(self.archivePasswordLabel)
        self.archivePasswordTxt = QtGui.QLineEdit(self.MainWidget)
        self.archivePasswordTxt.setObjectName(_fromUtf8("archivePasswordTxt"))
        self.archivePasswordLayout.addWidget(self.archivePasswordTxt)
        self.passwordOptionsLayout.addLayout(self.archivePasswordLayout)
        self.advOptionsBtn = QtGui.QPushButton(self.MainWidget)
        self.advOptionsBtn.setObjectName(_fromUtf8("advOptionsBtn"))
        self.passwordOptionsLayout.addWidget(self.advOptionsBtn)
        self.MainLayout.addLayout(self.passwordOptionsLayout)
        # Overall progress bar; its value is driven from outside this class.
        self.progressBar = QtGui.QProgressBar(self.MainWidget)
        self.progressBar.setProperty("value", 0)
        self.progressBar.setTextVisible(False)
        self.progressBar.setObjectName(_fromUtf8("progressBar"))
        self.MainLayout.addWidget(self.progressBar)
        # Start / About / Exit button row.
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setSpacing(80)
        self.buttonLayout.setContentsMargins(0, 0, -1, -1)
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        self.startBtn = QtGui.QPushButton(self.MainWidget)
        self.startBtn.setObjectName(_fromUtf8("startBtn"))
        self.buttonLayout.addWidget(self.startBtn)
        self.aboutBtn = QtGui.QPushButton(self.MainWidget)
        self.aboutBtn.setObjectName(_fromUtf8("aboutBtn"))
        self.buttonLayout.addWidget(self.aboutBtn)
        self.exitBtn = QtGui.QPushButton(self.MainWidget)
        self.exitBtn.setObjectName(_fromUtf8("exitBtn"))
        self.buttonLayout.addWidget(self.exitBtn)
        self.MainLayout.addLayout(self.buttonLayout)
        self.verticalLayout.addLayout(self.MainLayout)
        self.verticalLayout.setStretch(1, 1)
        MainForm.setCentralWidget(self.MainWidget)
        self.retranslateUi(MainForm)
        QtCore.QMetaObject.connectSlotsByName(MainForm)

    def retranslateUi(self, MainForm):
        """Apply the translatable user-visible strings to the widgets."""
        MainForm.setWindowTitle(_translate("MainForm", "Team 4067 - 2013 FRC Software Installer", None))
        self.installerMessageLabel.setText(_translate("MainForm", "<html><head/><body><p>This installer will download and install the <span style=\" font-weight:600;\">2013 FRC Tools and Driver Station</span>.</p><p>Enter any passwords given to you by the team, and then click <span style=\" font-weight:600;\">Start</span>.</p><p>This tool will also install the updates as well.</p><p><span style=\" font-weight:600;\">Note that you must be connected to the internet for the tool to run.</span></p></body></html>", None))
        self.downloadPasswordLabel.setText(_translate("MainForm", "Download Password:", None))
        self.archivePasswordLabel.setText(_translate("MainForm", "Archive Password:", None))
        self.advOptionsBtn.setText(_translate("MainForm", "Advanced &Options...", None))
        self.startBtn.setText(_translate("MainForm", "&Start", None))
        self.aboutBtn.setText(_translate("MainForm", "&About...", None))
        self.exitBtn.setText(_translate("MainForm", "E&xit", None))
import MainWindows_rc
| Python |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sat Jan 19 21:47:34 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x71\x85\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\xcc\x00\x00\x00\x39\x08\x06\x00\x00\x00\xbb\xf3\x1b\x98\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdd\x01\x0c\x12\x04\x21\x22\x65\x91\x1d\x00\x00\x20\x00\x49\x44\
\x41\x54\x78\xda\xb4\xbd\x77\x7c\x1d\xd5\xb5\xf7\xfd\x5d\x7b\x4e\
\x51\x97\x6c\xc9\xb2\xe4\xde\xc0\x80\x89\x81\x90\x00\xa1\xdb\x74\
\x48\x4f\x48\x72\x09\x2d\x84\xf4\xde\x93\x7b\x43\x28\xb9\xcf\x13\
\x52\x48\x21\x97\xf4\x0b\xa1\xa4\x91\x4e\x48\x48\x68\x36\xbd\x86\
\x66\x83\x7b\x93\xbb\x65\xab\x97\xd3\x66\xef\xf7\x8f\x39\x3a\x9a\
\xd9\xb3\xe7\x48\xe6\x7d\x5f\xf1\x39\x89\xac\x73\x66\xce\x9e\x5d\
\x56\xf9\xad\xdf\x5a\x4b\x38\xe7\x3a\x03\x80\x94\x5f\x86\xe0\x7f\
\xc4\x80\x31\xc1\x1f\xa5\xf2\x46\xf9\x43\xd6\x8f\x29\x7f\x56\x64\
\xfc\xdf\x30\xfe\x6f\xec\xcb\x8d\xf5\x87\xf2\xef\x46\xc6\xdf\x92\
\xf2\x18\x2a\x7f\x97\xf2\x18\xc3\xd7\x86\xae\x37\xe5\xdf\xed\x71\
\x1b\x13\x7a\x26\xc7\x98\xc6\xbe\x2b\x3c\x66\xd7\xf8\xc7\x3f\x5c\
\xfe\x3e\x19\xbf\x76\x6c\x8c\x12\x9a\x9e\xd8\xbd\x65\xfc\xb3\xf6\
\x73\xc7\x2e\x2a\xff\xcd\x38\xe6\xd2\xb8\x2e\x37\x09\xf7\x0a\xcd\
\x69\x78\x8d\xc3\xdf\x63\x3f\x07\x92\xb0\x5e\xc4\xbf\x23\x3c\x25\
\xf6\x7e\xa8\xec\xa1\xf2\x97\x4a\xc2\x3c\x46\xd6\xb0\xfc\x79\x09\
\x8f\x29\xbc\x1e\xb8\x9f\xb3\xf2\x1d\x26\x79\xed\x2a\x7b\x74\x6c\
\x6e\x65\x7c\xee\x24\x3c\xdf\xa1\xfd\x8f\xbd\xfe\xae\xb5\x0a\x2d\
\xba\x31\xd5\xd7\x2b\x3a\x49\x8e\xb5\x8e\xde\x2e\xfe\xf1\xd0\xb8\
\xc2\x7b\x3a\xf9\x02\x6b\x9e\xc7\xfe\x2c\xd1\xf7\x2b\x67\xdf\x58\
\xe3\x95\xf8\xb3\x87\xcf\xba\xbd\x37\xc6\xe6\x74\xec\xfe\x62\xed\
\xaf\xf0\x59\x89\xdc\xbe\xfc\xbd\x22\xd6\xa3\x84\xf6\x04\xd6\x3a\
\x11\xda\xfb\x95\xfd\x26\x8e\x7b\x33\xbe\xee\x22\xd6\x7d\xad\xf3\
\x9e\x28\xb3\x4c\xf4\xf9\xc4\x3e\x3b\x49\xe7\xd2\x3e\x0f\x93\x58\
\x2b\xd7\x99\x27\x3c\xd6\x49\xee\xa7\xa4\x6d\x20\xe1\xbd\x6d\xc6\
\xf7\x43\x6c\xab\x1b\x4b\x26\x54\x93\xff\xa1\x7b\x53\x65\xbd\xc2\
\xb2\x30\x69\x2a\x92\xc6\x14\x5e\xb7\x8a\x2c\x9d\x40\xbe\x54\xd6\
\x5e\xdc\xf2\x2d\xf1\xf1\x4c\xe2\xf3\xa6\x50\x09\x13\x15\xdb\x78\
\x92\x7c\xa0\xc3\x1b\x63\x6c\xd3\xdb\xc2\x54\x5c\x42\x26\x41\xe2\
\x2a\x87\x50\x53\x3a\xfe\x39\x63\x29\x4a\x8c\xf5\x8c\x3a\x2a\x10\
\xed\x4d\x12\x5e\x64\x09\x0b\x4c\x89\x6f\x88\x8a\x32\x0d\x09\x2b\
\x42\x8a\x5c\x25\x6c\xa8\xc8\x81\xb2\x0e\x80\xfd\x19\xb1\xe6\xa5\
\x22\x40\x5c\x4a\xd0\x84\x9e\xd9\xd6\xd4\xa1\xfb\x88\xb1\x94\x47\
\x68\x6e\x63\xb6\x47\x92\x06\x74\x2e\xf6\xf8\x98\x23\xc2\x7c\xec\
\x39\x94\xa5\x98\x4d\x7c\x0e\x6d\xa1\x2b\xca\x3a\x54\x12\x5a\x5f\
\x1c\xc2\x20\xb4\xde\x15\x01\x68\x09\xca\xb0\x52\x14\x15\xb7\x57\
\x4c\x95\x67\x8d\x1c\xa8\xd0\x9c\x8b\x38\x94\x09\x13\x2b\xd4\x09\
\x3f\x57\x4d\x06\x56\x31\x34\x30\x96\x32\x73\xec\x25\x42\x46\x85\
\x58\xf3\x6b\x82\xa3\x32\x6e\x90\x92\xfc\x3d\x22\x51\x41\x1e\xb7\
\xb6\xc7\x3f\x17\xbb\x46\x12\x84\x91\x35\x17\xc6\x52\x7c\x62\xe2\
\x4a\x1e\x6b\xdf\xc4\xb6\x87\x4b\x22\xea\xd0\xbf\x55\x74\x7c\x2e\
\x81\x6c\x1b\x4d\x49\xf6\xb3\x84\xaf\x25\x7e\x16\xa5\xda\x06\x1b\
\x7b\x5e\x35\xbe\x8f\xb1\x8f\xb4\x24\xef\xcd\xf0\xfe\x8c\xc8\xc1\
\x90\xd3\x60\x9f\xa9\x88\x1c\x0c\x2b\x53\x5b\xde\x8b\xc3\x48\x75\
\x18\x79\xf6\x58\x30\x09\x46\x43\x79\xad\x4c\x58\x5f\x38\xee\x17\
\x9b\xc2\x24\x83\xc5\x75\x7f\x89\x2b\x64\x63\x92\xf7\x98\xd8\x8f\
\x66\x2c\x07\x0c\x4b\x61\x6a\x7b\x1e\x5d\x1e\x87\xc3\x3b\x73\x9d\
\x7f\x91\x44\xc3\x36\x62\x05\x82\x65\x89\x6a\x6b\x71\x89\x7a\x18\
\x61\x01\x1f\xf6\xd2\x8c\x4a\xf0\x06\xc4\x2d\x5c\x8d\x65\xe5\x29\
\x7b\xe3\x5b\xde\x8a\x84\x15\x9b\x44\x25\x4b\xc4\x7b\x95\xea\x4a\
\x16\xe2\xde\xa5\x38\x9c\x4d\xed\xf2\x04\x65\xdc\xca\x36\xa1\x71\
\x45\x04\xbe\xbd\xb1\x49\x50\xde\x36\x6a\x90\xb0\xee\xb6\x15\x17\
\xdb\x55\xe2\x5e\xfb\x88\x85\x1c\x7a\x18\x91\xe8\x66\x94\x2a\x42\
\x64\x4c\x31\x61\xdc\x12\xca\x18\xb7\xf0\x15\xb1\xbc\x20\x1d\x7a\
\x46\xe5\x56\x3e\xc6\xb8\x0d\xa9\xd8\xfd\x6d\x45\x41\xf4\xb3\x62\
\xcf\x2d\x6e\xa3\xd0\x54\x31\x1e\x25\x64\xa8\x6a\xa2\x8a\xde\x36\
\x56\x0d\x93\x54\x66\xb6\x70\xb0\x84\x53\x58\x21\x11\x16\x42\x0e\
\x8f\xc2\x58\x9e\x42\xe5\xcc\xa9\xa8\xc7\x46\xc8\x10\x32\xa1\x8d\
\x1d\x31\x6c\x89\x2b\xa2\xb1\xcf\x6b\xaa\x78\xa7\xd6\xda\x8a\xb8\
\x51\x8d\xb0\x40\x13\x97\x80\x2c\xdf\x03\x97\xc7\x8a\xc3\x08\x30\
\xd6\x9a\x87\xbd\x56\x7b\xdf\x38\xce\x5f\xc4\xb0\xb3\x3d\xf8\xb1\
\xb5\xd7\x71\xb4\x8c\x24\x4f\xd5\xf2\x8a\x71\x29\x36\x5b\x5e\x8d\
\x19\xa3\x1a\xb4\x44\x65\x1d\x61\x59\x97\x64\x15\x28\x87\x7c\x19\
\x73\x70\xc2\xeb\x53\xbe\x9f\x32\x91\x73\xdf\x54\x23\x0c\x14\xe0\
\x78\x59\x4f\x5f\x7f\x0f\x43\x43\x83\x0c\x8f\x0c\x47\x6d\xa2\xb2\
\xac\x68\x6e\x69\x22\x93\xcd\xd2\xd2\xd2\xc2\xd3\xa5\xc3\x1c\x48\
\x8d\x4b\xb9\x4a\x74\xbf\x47\xe4\x12\x71\x43\xd2\xb6\xdd\x0c\x0e\
\x79\x12\x37\xf0\x52\x89\x56\x8b\xed\x19\xc5\x2c\x5c\x6b\xa3\x18\
\x07\x9c\x62\x7b\xaa\x06\xd0\xa1\x83\x1d\x3e\x24\x62\x1c\x6e\xf3\
\xd8\xf7\xea\xa8\x70\x8b\x29\xdd\x24\x4b\xdc\xf2\x9e\x22\xde\x10\
\x96\xa5\x80\x05\x73\x39\x94\x8b\xed\x95\x22\xa0\x6c\x08\xd7\x44\
\xbd\xcf\xf0\x06\x36\x26\x3a\x37\x11\x4f\xd0\x12\xfa\x15\x05\xa7\
\x43\xb0\x99\xed\x61\x56\xf1\x58\x8c\x63\x5c\x15\xaf\xae\x3c\xa7\
\xb6\xa7\x14\x81\xb2\x13\x0c\x9f\x6a\xf0\x7c\xd8\xd3\xac\xe6\xc1\
\x0b\xc9\x82\xc5\xe0\x86\xf6\x4d\xdc\x31\x08\xc6\xa6\xe2\x06\x9e\
\x2d\x4c\x2b\x9e\xb6\xed\xbd\xa8\x04\xc8\xce\x86\xa4\x54\x15\x07\
\xd1\x3a\xb9\x49\xcf\x96\xe8\x25\x56\xc1\x6d\x2b\xe8\x45\xc8\xe2\
\x37\xa1\x31\x89\xad\xa4\x63\x56\x4c\xf4\xd9\x93\xe0\xad\x98\x34\
\x27\xc1\x28\x72\x41\x5e\xda\x12\x2e\x96\x45\x6f\xc3\x8a\x46\xdc\
\x67\xd4\xe5\x3c\x46\x60\xdc\x10\x82\xa5\x24\x6e\xa0\x08\xd6\x5a\
\x59\x6b\x12\x51\x9c\x55\x20\xe7\xb8\x14\x75\x2b\xb6\xf0\x39\x08\
\xc3\xef\x61\x14\xc2\x84\xbd\x6d\x19\x37\x24\x25\xc1\x48\x93\x30\
\x6a\x63\x02\x59\x19\x91\x77\xe2\x76\x06\x12\x21\x44\x47\x8c\xc8\
\x36\x56\x63\xa8\x84\xad\x98\xc3\x32\xd1\x65\xe8\xb8\x5c\x4c\xe1\
\x90\xba\x7e\xf2\xdb\xfe\xcd\x81\x03\xbd\xe4\x46\x73\x0c\xfa\xc1\
\x7d\x9f\x22\xd8\x33\x82\xa6\xac\x22\x43\x4f\x11\xfc\xab\xaf\xaf\
\x8f\xf1\x38\xe1\xb3\x88\x80\xa7\x3c\xea\xea\xeb\x98\x36\xad\x1d\
\x35\x73\x09\x1b\x46\x1a\xa3\xe7\x35\x8c\x2c\x48\x82\x41\x66\x4f\
\x55\x4c\x0e\x57\x73\xe3\xc7\x6e\x75\xfe\x75\x26\x51\xe0\x81\xe5\
\xbd\x25\xc5\xb0\xac\x38\x42\x2c\xae\x58\x0d\x8e\x20\x6a\xfd\x9a\
\x6a\xde\x87\x49\x5e\xbc\xaa\xf1\x31\xdc\x90\x66\x64\x33\xbb\xdc\
\xf2\x90\x72\x37\x09\x90\x8f\xc1\x1d\x37\x11\x47\x1c\x24\x0c\xd9\
\xc8\x04\xf1\xcb\xca\x66\xb6\xe1\x9d\x09\xe2\x5e\x2e\x4f\x3e\x09\
\x46\x11\x87\x22\x89\x78\xdb\x26\x6e\xa0\x38\x37\x94\x3d\x0f\x0e\
\x4b\x0f\x92\xe3\xdb\xce\x58\xa3\xed\x11\xd8\x70\x8f\x58\x10\x96\
\x24\x28\x04\xcb\x13\x74\x09\x09\xa7\x43\xeb\x80\xeb\x92\x62\xee\
\x58\x73\x66\x24\x7e\x18\x13\xbd\xcb\x2a\x10\x97\x73\x5d\x4d\xd4\
\x9b\x71\xc6\x32\x5d\x58\x9b\x1d\xc2\x70\x6d\xbd\x09\xf6\x8e\xf3\
\xac\x59\x46\x08\x2e\x2e\x84\x44\xef\x1b\x51\xf2\x26\xce\x45\x08\
\x7b\x71\x98\x04\xaf\xc2\x44\xbd\xd8\x98\x9c\x4a\x78\xe6\xaa\xf1\
\x5d\x99\x64\x4c\x4b\xaa\xc8\x30\x87\xec\xb4\xc7\x68\x5c\x5e\xb4\
\x05\xed\xda\x72\x45\x5c\xf1\xc5\x49\xae\x53\x85\x1b\xc0\x24\xe4\
\xb2\xbd\xf7\x2d\x07\xc0\xd8\x71\xff\xf1\xb3\x74\xe4\xc8\xbf\xd9\
\xba\x6d\x1b\x23\xa3\x79\xb4\xd6\x80\x8f\x32\x06\x05\x78\x08\x35\
\x19\xa1\xae\x56\xc8\x66\x52\xd4\x66\x15\x2d\xcd\x19\x54\xf9\xfe\
\xa6\xfc\x90\xbe\x86\xbe\x81\x3c\xf9\x82\x26\x97\x2b\x90\xcf\x41\
\xae\x18\xf8\x54\x7e\xd9\x5f\xd6\x78\x78\x4a\x51\x57\x57\xc3\x9c\
\x39\x73\x78\xb9\xee\xd8\x04\xdd\x64\xac\x90\x9c\xbd\xd7\x89\x1a\
\x95\x76\x18\xd0\xc1\xad\x11\xce\xb9\xce\x24\x2f\xc8\x24\x02\xbe\
\xb1\x45\xb1\xbd\x04\xe2\xb1\x9f\xa4\xcd\x68\x7b\x45\x95\xc5\xb0\
\x3c\x42\xe3\xb2\x26\x9c\x41\xc1\x28\x5e\xee\x24\xfe\xb8\x48\x39\
\x92\x1c\x83\x72\x06\x8b\x4d\x14\x9a\xc1\xa5\x74\x98\x98\x98\x13\
\x33\x74\x1c\x07\x3b\x1c\x43\xad\x66\xcc\x24\x19\x0c\xb6\x17\x87\
\x2b\x78\x6e\xaa\xdf\x23\xf2\xab\xc4\xbd\x34\x31\xd1\x18\x49\xc4\
\xb3\x4d\x7a\xde\xc9\x90\x16\x1c\x5e\x85\x2d\x7c\xaa\x12\x22\x8c\
\x1b\xde\x37\x0e\x58\x4a\xc4\x0d\xf3\x25\x7a\xc9\x16\x69\x29\x0c\
\xe9\xc7\x08\x1b\x36\x81\x2c\x24\x70\xa5\x1a\xb1\xc2\x16\x64\x0e\
\x21\x6f\x92\xbc\x41\xdb\xd0\x70\x6d\x75\x2b\x0c\x21\x55\xe2\x56\
\xb6\x97\x68\x13\x67\x9c\x22\xc3\x4c\x2c\x4b\x4c\x02\x99\xcd\x26\
\x92\x24\x12\x54\xb0\xa0\x79\x07\x21\x4f\x2c\x25\x62\x1c\x64\x3c\
\xb1\x62\x87\x31\x38\xda\xa1\xe4\x23\x8e\x7d\x82\x51\xe6\x8c\xb3\
\xda\x06\xb2\x63\xff\x60\x41\xff\xf6\x79\x82\x64\x62\x8b\x24\x29\
\x42\xdc\x04\x9d\x24\x08\xd8\x61\x6c\x1d\xcf\x2b\xac\x5a\xb5\x8a\
\x5c\x2e\x87\x31\x06\x41\xf0\xc4\x23\xe3\x41\x4b\x93\x61\xfe\xac\
\x1a\x0e\x99\x57\xcf\xfc\xd9\x8d\xcc\x9e\xe9\x31\x63\x86\xa6\xb9\
\x29\x4b\x7d\x8d\x47\x53\xbd\x87\xc2\x0f\x14\xa6\x28\x0c\x0a\x63\
\x14\xfd\x03\x45\x72\xb9\x12\x7d\xfd\x79\xf6\xee\xf3\xd8\xb1\x4b\
\xb3\x7d\xcf\x08\x9b\xba\x72\x6c\xee\x1a\xa4\xbb\xc7\x67\xa4\x68\
\x82\x2b\xcb\x6b\x57\x57\x57\xc7\xe2\xc5\x8b\x79\xce\x3b\xb2\x0a\
\xf9\x07\x37\xe9\x33\x49\xc4\x39\xc8\x94\x51\x85\x99\x14\xe3\x11\
\x89\x0b\x95\xd8\x06\x49\xb0\x08\x93\x8c\xd3\x18\x4b\xce\x41\x22\
\x09\x0b\x15\x31\x6e\xc8\x0a\x2c\x41\x97\x14\xc9\x4e\x10\x52\x58\
\x96\x9d\xb8\x14\x52\x52\x2c\xd0\xf2\x40\x93\xd8\xc1\x62\x41\x77\
\x32\x19\xe6\x99\x49\x60\x78\x39\xc6\xee\x64\xf3\x25\x59\xb0\x96\
\xe2\xb1\xc9\x5a\x18\x0b\x4a\x9a\xa4\x25\xeb\x62\x98\xc6\x94\x59\
\x92\x42\x48\x88\x59\xba\x84\x9d\x6d\x84\x21\x93\xf0\x5c\x43\xf3\
\xef\x54\x88\x16\xb2\x11\x63\x4e\x4f\xc2\xc8\x99\xc8\x6b\x35\x2e\
\xc4\xc2\x44\xa1\x72\x3b\x7e\x15\x81\x12\xed\xfd\x2e\xf1\xef\xab\
\xca\x25\x32\x93\x30\xd0\xaa\x29\x2b\xeb\x9c\x8b\x05\x35\x86\x43\
\x04\x36\xa3\xdd\x19\x6b\x4f\x22\xac\x11\x0f\x13\x18\xeb\xbb\x62\
\xde\x40\x82\x61\xe4\x24\xf9\x39\x0c\xd9\x88\xb2\x67\x62\x47\xc1\
\x96\x75\x49\x67\xd1\xc5\x76\x4d\x32\x08\x49\x40\xea\x22\x67\xc8\
\xf6\xda\xc5\x11\x0a\x72\x90\x0b\xc3\xdc\x87\x88\xd1\xe8\xf2\xb2\
\x70\x30\x4f\xa3\x7b\xb5\xbd\xce\xa3\x76\xeb\xfd\x6c\xdf\xbd\x1d\
\xed\x1b\xa4\xa4\x48\x0b\xd4\xa6\x35\xf3\xe6\xd4\x71\xec\x92\x0c\
\xc7\x2e\x6d\x63\xf1\xa2\x7a\xe6\xcd\x49\xd3\xda\x52\x22\x9b\x2a\
\xe2\x01\x4a\x0b\x18\x8d\x12\x8d\x98\x31\x9f\xd1\x94\x43\x0c\x60\
\x8c\x2a\xc3\xea\x0a\x23\x06\xa3\x0c\x25\x23\x14\x74\x2d\xfd\x43\
\x59\x76\xec\x2a\xb2\x6e\xc3\x00\x2f\xad\xe9\xe7\xdf\xab\x86\x59\
\xbb\x65\x84\x9e\x41\x1f\xbf\xfc\x1c\x4a\x79\xb4\x4f\x6b\x67\xf7\
\xec\xe5\x13\x93\x89\xb0\x64\xb3\x4c\xec\x1c\x5a\x0a\xd3\x24\xc3\
\x08\x61\x78\xc3\x24\x1c\xe8\x08\x1c\xeb\xa2\x46\x1b\x07\x3b\xd2\
\xe5\xf6\x9b\x78\xcc\x31\xa2\x9c\x24\x99\x65\xe5\x8c\x63\x26\x78\
\x62\x89\x16\x86\x49\x56\x98\x11\xd8\x9a\x50\xbc\xcc\x21\xd8\x5c\
\xb1\x4a\x26\x80\xa1\xc4\xe5\x09\x1a\x07\x1c\xe2\x20\x8e\x4c\x98\
\x6a\x50\xcd\xda\x4f\xf0\xf2\x63\xd0\xab\xf5\xbe\x24\x78\x81\xe1\
\xf7\xaa\x7a\x80\x2e\x4b\xd0\x31\x67\x49\xde\xa3\x73\xad\x1c\x24\
\x14\x3b\xf5\x43\xaa\x78\x42\xb6\x30\x8c\x79\x6d\x96\x07\x23\x8e\
\x10\x45\x22\xed\xdd\xe5\x59\x90\x9c\x5a\x91\x44\xb6\xb5\x91\x8d\
\x89\xbe\xc3\x05\x4f\x1a\x2b\xbe\x1b\x61\x4f\x57\x9b\x23\x13\xf7\
\xde\x92\xb8\x66\xc6\x1a\x4b\x2c\xdd\xc3\x45\x58\x73\xa4\xc1\x18\
\x8b\xb9\x1a\x8e\x9f\x26\xcd\x41\x84\xaf\x80\x23\xad\xc8\x44\x15\
\x9b\xb1\x88\x87\xe0\xd8\x8b\xd5\x42\x22\x26\x4e\xd4\xb3\x19\xfc\
\xb6\x23\x61\x5c\x5e\x6f\xc2\x19\x36\x8e\xf3\xc5\x24\xce\x58\xc4\
\xe8\x70\xec\x33\x53\xc5\x4b\xb6\xce\xd6\xdc\xdd\x0f\xd3\xb5\x63\
\x0b\x06\x4d\x4a\x65\x48\x89\xa1\xb3\x55\x71\xf2\xeb\x9b\x38\xf5\
\xb8\x26\x8e\x3d\xaa\x95\x85\x9d\x3e\x0d\x99\x42\x99\x7f\xa1\xcb\
\xc7\x23\x1e\x6e\xaa\x16\xe8\x11\xa4\x3c\x9c\xf1\xeb\x4c\x79\x8d\
\x0d\x8a\x51\xd2\xec\xea\xcd\xb3\x66\xed\x08\x8f\x3c\x5e\x62\xc5\
\x13\xfd\xac\xdb\x3e\x44\xce\x80\x36\x06\xe3\x2b\x5a\xa7\xb4\x71\
\x60\xc1\x39\xf1\x14\x22\x63\x11\xe4\x62\x7a\x44\x1c\x28\x6b\x38\
\x86\x19\x13\x54\xb6\xe5\x2e\x16\x1b\xac\x8a\xc0\x8b\x79\x7c\x49\
\xda\xdd\x3d\x20\x77\x2e\x5c\x48\xf8\x88\xb8\xe3\x61\xb6\xc5\xe0\
\xa2\x87\x1b\x49\x64\xe4\xc7\x72\x84\xa8\x62\xc9\xba\x72\x45\x5d\
\xf9\x3d\x50\x9d\xe6\x9e\x08\xc5\xb9\xf2\xb2\x12\x04\x83\x0b\x9e\
\x75\x09\x72\xb1\x82\xdd\x49\x31\x0a\xa7\xc2\x35\xf1\x98\xaa\xed\
\x95\xbb\x10\x05\x17\xab\x4f\x26\x82\xf5\x13\xc6\x5d\x35\x56\x6b\
\xe2\xe4\xaa\xb0\xa0\x14\x57\x8e\xa5\x63\xee\x4d\x52\xec\xca\xc4\
\xc9\x61\x4e\x7b\xcc\x44\xa1\xd9\x30\xe1\x64\x32\x71\x34\x23\xc9\
\x0e\x3d\x0e\xef\x7d\xa2\x18\x6e\x62\x8a\x90\xcb\xeb\x22\x6e\xe8\
\x9a\x04\x3e\x50\x2c\xbd\xc6\xb1\xe9\x8d\x4b\xe9\x38\x90\xa0\x88\
\x52\x71\x28\xa2\x44\xc3\xc0\x32\x68\x22\x4a\x27\xf4\x1d\xe1\x94\
\xa0\x18\x92\x62\x7d\xb7\xb1\x62\x73\x36\x44\x8f\x3b\x84\x34\xa3\
\xa6\x9f\xb7\x76\xbc\x8c\xe0\x93\x92\xaa\x50\xda\x04\xa1\x2d\x3b\
\x5e\x9f\x4c\x4c\x05\xf0\x52\x8a\x1e\x7f\x2a\xb7\x6d\x3e\x14\x8d\
\x4c\x22\xcf\x33\x81\x60\x17\x23\xfd\xc4\xcf\xd7\x89\xac\xe3\xe9\
\xe7\x9f\xa5\x54\x54\x28\x7c\x6a\x3c\x9f\xa5\x73\xeb\x38\xf3\xb4\
\x0e\x4e\x3b\xbd\x86\xa3\x8e\xf0\x68\xcc\xf8\x28\x1f\x52\xca\x20\
\x63\x2c\x64\x91\x09\x4d\xf7\x83\xfd\x31\x46\x93\x33\x90\x2b\x16\
\x49\xa7\xd2\x18\xa9\x63\xfd\x0e\xc3\x83\x4f\xe6\xb8\xe7\xc1\x6e\
\x9e\x5d\xd5\xcb\x48\x41\xe3\x6b\x0f\x45\x0d\x87\xcc\x5f\xc4\xba\
\xe6\xd7\x58\x8e\x8d\xc4\x51\x25\x2c\x92\x59\x38\x34\x58\x79\x96\
\xf3\xae\x33\x71\x96\x24\x0e\x12\x4f\x95\x83\x16\xb6\x8e\x2a\x93\
\x6f\x31\x6c\x23\x87\x27\x81\x21\x13\x83\x35\x4d\x72\x12\x2f\x55\
\x14\x32\x16\x23\x2e\x12\x53\x0c\x93\x0a\x5c\x54\xe2\x49\x04\xc5\
\xed\x4d\x85\x75\x40\x5d\xa4\xa8\xc8\x73\x3a\x2c\x18\x53\x0d\x5a\
\x4d\x28\x92\x60\xdf\xbb\x1a\x99\xc0\x58\x71\x1b\x17\xb1\xcb\x69\
\xa9\x26\x14\x53\x48\xb4\xd6\x5c\x50\xc7\x04\xac\x3e\x9b\x51\x1b\
\xb9\x67\x12\x34\x6b\x27\x9f\x4f\xe4\x5d\x9b\x84\x38\xb0\x4d\xfc\
\x70\x9d\x01\x1c\x28\x81\x0b\x29\x70\x78\x15\x61\xc5\x59\x95\x48\
\x94\xc4\xcc\xb3\xa4\x67\xe5\x8c\x4d\xf4\xcc\x16\x5b\x3a\x06\x3b\
\xdb\x86\x83\xeb\xbc\x55\xc1\x79\x8d\x03\x4a\xb6\x8b\x41\x44\x88\
\x26\xd5\x62\xf9\xd5\x82\x4a\x13\xa0\x43\xe2\x20\x78\x54\x8d\xeb\
\xba\xbc\xe4\xb0\x8c\x90\x84\x75\x77\x93\x68\x16\xd6\x77\xf3\xe5\
\x85\x0f\xd3\xdc\xd8\x44\x6d\x53\x5b\xe0\x1b\x85\x0c\xb5\x31\x75\
\x21\x21\xa6\xbd\x94\xef\x6b\xca\xca\x5b\x42\xd0\xaa\x84\xe4\x8e\
\x84\xd8\xb1\x22\x32\xce\x1c\xad\xdc\xcb\xa7\x7f\xef\x66\x9e\xde\
\x57\xcf\x8d\x1b\x5e\x47\xc1\xa4\x27\x96\x57\xb6\xf1\x92\x58\x10\
\x65\x1c\x25\x69\x59\xfd\x57\xfa\x73\x7d\x20\x42\xca\x4f\xb1\xa8\
\x33\xc3\xdb\xcf\x6b\xe7\xed\xe7\xa4\x38\xfc\x90\x26\x52\x52\x40\
\x91\x43\x30\xe0\xa7\x40\x29\x44\xfe\xff\x54\x98\x50\xca\xa7\xe9\
\xe9\x29\xb0\xa3\x2f\xcf\xb0\x51\x4c\x6b\x6f\xa4\xb3\xad\x81\x03\
\x07\xe0\xef\x0f\xf6\xf1\xe7\x7b\xb6\xf3\xf4\x4b\xfd\xe4\x74\x90\
\x52\xd3\x3a\x75\x0a\xdd\x73\xce\x4d\x26\xbd\x45\x1c\x09\x3b\xec\
\x37\xbe\x17\xa2\x90\x6c\x0c\x56\x90\x64\xc8\xcb\x69\x62\x3b\x48\
\x01\xce\x0a\x15\xb6\x07\xe5\x50\x06\xae\x43\x16\x26\x50\x24\xc5\
\xef\x48\x48\xd8\x4e\x62\xb5\x26\x11\x1e\xec\x58\x8c\x13\xf2\xb1\
\xab\xc7\x50\x85\xb1\x37\x01\x0d\xdc\x38\x3c\x95\xb0\x57\x2d\x54\
\x61\x11\xca\x04\x6c\x5b\x97\x72\x4d\x8a\x6b\x54\xf3\x8c\x13\x62\
\x3b\x13\x09\x2f\x99\x08\x56\x75\x5c\xe3\x4a\x29\x33\x0e\xf2\xc5\
\x64\xaa\x51\x19\x17\x14\x2b\x8e\x6a\x52\xc9\x42\x23\x99\x7a\x9e\
\x10\x7b\xb6\x0f\x66\xb5\x79\x35\x92\x9c\xda\xe2\x32\x24\x5d\x21\
\x2b\xfb\x9c\x86\x15\xab\x6b\xef\xd8\x9e\xb1\xb1\xd2\x30\xaa\x29\
\x4e\x57\x3e\xaf\x71\xec\x93\xc4\xf3\x95\xe0\xb1\x4d\x2a\x56\xe8\
\x20\x61\x39\x8d\x9b\x24\xe4\x27\xa9\x5a\xd2\x04\xa4\x46\xe2\x70\
\xfc\xd2\x86\x9d\x7c\x7a\xfe\x13\x4c\x9f\xb3\x98\xc6\xa9\x33\x19\
\x1c\x1c\xac\x28\x44\x53\x2e\x94\x11\x51\x98\xe5\x73\x2a\x91\x1c\
\xcc\x31\x85\x19\xbc\x2f\x32\xfe\x7e\x45\x49\x8a\xaa\x28\x4f\xa9\
\xa4\xb2\x41\x26\x9b\xa5\xbe\x36\xc3\xe6\x57\x9e\x62\xfd\xae\x5e\
\xbe\xb9\xfe\x44\xfa\x4b\x75\xd5\x23\x30\x31\xc3\xc2\x24\xe6\x31\
\x9e\x56\xbb\x99\xc7\x9f\x78\x1c\xdf\x68\x94\x11\x5a\x6a\x84\x37\
\x9d\xd9\xce\x45\x6f\x6e\xe1\x84\xa3\x6a\xa9\x4d\x0d\x96\xc5\x63\
\x0a\x8c\x07\x14\x11\x95\x0b\xe2\x8f\x65\xe5\x2d\x55\x51\xa5\x57\
\xa9\x30\x31\xf8\x52\x40\x8b\x62\x74\xb4\x9e\x2d\xdb\x0c\x5b\xb7\
\x8d\x50\xf4\x7d\x16\xcc\xcb\x72\xe8\xc2\x29\x74\xed\x29\xf1\xfb\
\xbb\xbb\xf9\xd5\x5f\x76\xb1\xad\xbb\x44\x51\x0c\xa7\x9c\x78\x32\
\x8f\x0c\xcf\x4b\x8e\xdb\x33\x31\x22\x16\xf2\x30\x21\x9e\xf8\x69\
\xe2\xc4\x1a\x3b\xd6\x66\xc7\xd7\x9c\x30\x97\xcb\xb2\x36\x16\x84\
\x46\x9c\xbd\x25\x09\x58\x7c\x98\x8d\xe7\x4c\x33\x30\x6e\x16\xa8\
\x91\x09\x4a\x57\x4d\x04\x1d\xba\x98\x96\x96\xe5\x09\x09\x25\xf5\
\x26\x03\xcd\x98\x49\x28\x45\x63\x41\x08\x76\x89\x38\x3b\x76\x64\
\xcd\x8d\x0d\x53\x55\x65\x8e\x39\x3c\x63\x3b\x46\x07\x56\x6e\xa3\
\x21\x5e\xe9\x63\x02\xd2\x90\x71\x09\x6f\x89\x57\x9e\x09\xaf\x7f\
\x38\x0e\x53\xd5\x2b\x9a\x24\xc9\xa4\xda\x73\x4f\x08\xa7\xd9\xc6\
\x58\x02\x3b\x32\xc9\xe6\x70\xe5\xaf\x02\x89\xac\x6e\x12\xd6\xd0\
\x09\x9b\xe2\x86\xf6\x5d\x73\x2a\x13\xa5\xea\x24\x90\x7e\x22\xde\
\x35\xc9\xa5\xe8\xb0\xe2\xfd\x49\x86\xb6\x38\x52\x38\x8c\x23\xe7\
\xd4\x79\x36\xec\x54\x80\x30\x7b\xdb\x65\xe8\x62\x21\x56\x13\xb1\
\x78\x83\x6b\x4f\x6f\xdd\xc8\xa5\xb3\x9e\x65\xe9\x49\xef\xa4\xa5\
\xf3\x50\x76\x6d\x78\x96\xa7\x1f\x7f\x28\x11\x49\x77\xe3\x07\x26\
\x16\x5d\x9a\xec\xb5\xc6\x40\xfb\xf4\x76\x8e\x3f\xf5\x3c\xe6\x1e\
\xb9\x8c\x95\x77\x7e\x9d\xed\xdb\xb7\xf0\x9d\x0d\x27\xb1\x65\xb4\
\x75\x02\xc8\xd7\x24\xef\xd3\xf2\x14\x2f\xee\x7f\x8c\xf5\xeb\xd7\
\xa3\x94\xe0\x89\xe6\xa8\x43\xa6\x72\xc9\xdb\x17\xf2\xb6\x0b\x9a\
\x68\x69\xec\x45\x17\x47\xa8\xf7\x04\x65\xca\x84\x1d\x0c\x18\x85\
\x18\x2f\xc8\x55\x15\x9f\xff\xcf\x5d\xcb\x90\xc2\x2c\x48\x09\x44\
\xf0\x4a\x0a\x10\xf2\x64\x78\x65\x5b\x89\xd5\xab\xf2\xa8\x3c\x1c\
\x73\x4c\x1d\x73\x16\x4e\xe5\x2f\x0f\x74\x73\xed\xf7\x36\xd3\xb5\
\x27\x45\xeb\x94\x06\xba\xe7\x9c\x69\x91\xd2\xb0\xca\x70\x52\x95\
\x00\x14\xad\xf4\xe3\x22\xfb\x88\xe1\xea\x8b\x4e\xe3\x9a\x8b\x4f\
\x9f\xd4\xc3\xec\xe8\xee\x67\xf9\x97\x6e\x65\xc3\xee\x5e\x30\x30\
\x7f\x7a\x0b\x2b\xbe\x75\x19\x73\xa7\xb7\x4c\xea\xfa\xeb\xef\x7c\
\x84\xaf\xdc\xf2\x40\xdc\x61\x32\x70\xf5\x25\xa7\x73\xcd\x25\xcb\
\x0e\x62\x1c\x37\xb3\x61\x57\x2f\xd1\x64\xe6\xa8\x17\x73\xec\x21\
\x9d\x3c\x7b\xd3\x47\x0e\x7a\xd1\x1e\x5d\xbd\x95\xf3\xfe\xeb\x97\
\x0c\xe5\x4a\x11\xa5\x7d\xdd\x65\x67\x70\xd5\xc5\x13\x8f\xf1\x9a\
\xdb\xee\xe7\xda\x3b\x1e\x74\x48\x4f\xe1\xba\xcb\xcf\xe0\xaa\xf7\
\x2e\x9f\xf8\x1e\xb7\xde\xcf\xb5\xb7\x3f\x10\x2a\x49\x07\xf3\xa6\
\x37\xb3\xe2\x3b\x1f\x64\x5e\xc7\x54\xe4\xcc\xaf\x58\x6c\x63\x57\
\x8c\x4f\x62\x06\xf4\x61\xb3\xdb\xb8\xff\xdb\x1f\x60\xe6\xb4\xe6\
\x49\xcf\xc7\xe6\x5d\x07\x58\xf6\xf9\x9f\xd2\xb5\xb7\xdf\x4a\x4e\
\x27\x06\xc7\x9f\x70\xd8\x1c\xfe\xf9\xcd\x2b\x69\x6e\xa8\x3d\xa8\
\x39\xdf\xb8\xa3\x9b\x43\x2e\xbd\x3e\x32\x57\x57\x5f\x7a\x26\xd7\
\x5c\x7e\xce\xa4\xae\xdf\xb6\xa7\x87\x65\x9f\xb9\x89\x2d\xbb\x7b\
\x01\xe1\xb0\x39\xed\xdc\x7f\xc3\x87\x99\x39\xad\xe5\x20\x9e\x73\
\x3f\xcb\x3e\x73\x13\x5d\xfb\xfa\x39\xf6\xd0\x59\x3c\xfb\xd3\xcf\
\x1e\xfc\xde\x79\x69\x13\xe7\x7d\xf1\x27\x0c\xe5\x8a\x11\xa1\x75\
\xdd\x15\xe7\x73\xd5\xa5\xe7\x1c\xd4\xbd\xe4\xb4\x4f\x82\x11\xe6\
\x4d\x9f\xc2\x8a\x1f\x7c\x82\x79\x9d\xad\xc8\xa9\x9f\xb2\x2a\x38\
\x85\xd6\x3a\x16\x2f\x1f\x37\x74\xe6\x75\x4c\x61\xc5\x0f\x3e\x15\
\xdc\xe3\x94\x4f\x58\xeb\x68\x55\x40\xa9\x08\x96\x70\x95\xae\x71\
\x64\xe6\x9a\xf7\x9d\xcf\xd5\x57\x9c\x3f\xb9\x39\xdd\xd9\xcd\xc9\
\x1f\xf9\x2e\xbb\x7b\x06\x40\x84\xf9\x9d\xad\xac\xb8\xf1\xd3\xcc\
\xed\x68\x9d\x9c\xcc\xb8\xfd\x5f\x7c\xe5\xa7\x7f\x89\x1b\x88\xe1\
\x31\xc7\x0a\x20\x84\xf6\xbe\x58\x31\x7a\x27\x47\x63\xfc\x0c\x1d\
\xdb\xbc\x83\xcb\x66\x3e\xcb\xd2\x13\xdf\xcc\xa2\xa3\x97\xb1\x6f\
\xd7\x66\x32\x35\x0d\x0c\xe5\xf5\xa4\x14\x5d\x92\x1e\x3e\x18\xa5\
\x0a\x90\x2f\x05\xff\xaf\x94\x70\xf2\xf9\x1f\xe0\x99\x7f\xfd\x2f\
\x5f\xe1\x21\xbe\xb4\xe6\x5c\x7a\x8b\x75\x13\xcc\x5a\x48\xf0\xeb\
\xd0\x3c\x29\x98\xd9\xf5\x4f\x36\xee\xde\x5b\xae\x48\x6a\x38\xea\
\xb0\x19\xbc\xfd\x2d\x6d\xcc\x9a\x35\xcc\x83\xf7\xee\x27\x37\xe2\
\x71\xc4\x91\xc2\x31\x87\x29\xea\x32\x0a\xa9\x94\xf6\xf3\x41\x8a\
\xe3\xa5\x31\x8d\x50\x61\xc1\x8a\xc6\x20\x18\xd2\x41\xe1\x02\x55\
\x08\xbc\x52\xe3\x01\x3e\x62\xd2\x18\x29\x73\x5d\x75\x2a\xb8\x0f\
\x1e\x46\x15\xcb\x33\x90\x2e\x2b\xe4\xa0\xf8\x43\xc6\xaf\x0b\x6a\
\xde\xa4\xf2\xf8\xa2\xf1\x28\xb1\x74\x5e\x9a\x85\x0b\xeb\x78\x7e\
\x5d\x81\x7f\xbf\x90\x67\xdd\xda\x3d\x1c\xf3\xba\x4e\x3a\xa7\x6d\
\x67\xfb\xce\x34\x7e\xc9\x2a\x85\x17\x49\x51\x14\x07\x6f\xc6\x32\
\xa6\x0d\x28\x3c\x1c\xa5\xd9\xc6\x21\x9d\x83\x51\x96\x00\xb3\xa6\
\x35\xb3\xf2\xdb\x97\x93\x56\xc1\xc8\x56\x7e\x7b\xf2\xca\x12\xe0\
\xcb\xef\x3a\x85\xeb\x2f\x3f\xd3\x72\x93\x35\x57\x5f\x7c\xea\xa4\
\x95\xe5\xf8\x38\xde\x4f\x5a\x79\xe5\x35\x33\xd1\x4a\x1a\xe5\x03\
\xb5\xaf\x6f\x90\xa1\xd1\xfc\x41\x0b\xbd\x93\x8f\x9c\xc7\x6f\xff\
\xf3\x3d\xa0\x75\xc5\xdb\xb9\xf6\xb2\xe5\x93\x52\x96\x00\xd7\x5c\
\x7a\x26\x57\x5f\x7c\x86\x75\x3c\x34\xd7\x5e\xba\x7c\x52\xca\x12\
\xe0\x9a\xcb\xce\xe4\xea\x4b\xce\x88\xdc\x62\xe5\x0d\x1f\x62\x5e\
\xc7\xd4\x90\xa5\x2f\xd1\x4a\x39\x11\x28\x6e\xac\xa2\x88\x8e\xc4\
\x9e\x1f\xfe\xfe\x87\x0f\x4a\x59\x02\x2c\x98\xd1\xca\xc3\xdf\xfd\
\xf0\xf8\x01\x31\x6e\xb6\x75\xe7\xd4\x26\xee\xfd\xf6\x07\x0e\x5a\
\x59\x02\x6c\xde\xdd\xc3\x18\xe5\x1c\x11\xae\xbe\x6c\xf2\xca\x12\
\x60\x6e\xc7\x54\x56\x7c\xef\x63\x95\x79\x78\xf8\xc6\x8f\x1d\x94\
\xb2\x0c\x9e\xb3\x8d\x87\x7f\xf0\x09\x30\x86\x7d\x07\x06\x18\x1a\
\xc9\x1d\xfc\xde\x59\xba\x90\xdf\x5e\x7d\x59\x44\x08\x5f\x73\xf9\
\x39\x07\xad\x2c\xc3\x30\xf7\xca\x1b\x3f\xc9\xbc\xce\x56\x0b\x7d\
\x51\x8c\x97\x0a\x92\x72\xb9\xb2\x90\x10\xd0\x21\x61\xa1\x15\x2b\
\x6f\xfc\xd4\xf8\x3d\x84\xb2\x32\x0c\x0b\x94\xb2\xe1\x29\x3a\xe4\
\x0d\x87\xca\x0e\x96\x91\x86\x6b\xaf\x98\xbc\xb2\x04\x58\x30\x73\
\x1a\xf7\x7e\xff\xe3\x95\x33\xb9\xf2\x20\x94\x25\xc0\x97\x2f\x39\
\x87\xeb\x3f\xfc\xd6\xb8\x57\x2a\x04\xf0\xa5\x5d\x05\x4a\x95\xf7\
\x90\x5d\x7e\x13\x2b\xf4\x13\x41\x4f\xc6\x21\xd4\xc3\x1b\xf6\x32\
\xad\x73\x01\xf3\x17\x1f\x8b\x29\xe5\x30\xa5\x02\x99\xda\x26\x86\
\x73\xba\xf2\x1a\x4a\x78\x0d\xe7\xc7\x5f\x91\xf7\xf2\xe3\xaf\xe8\
\x7d\x4c\xc2\x4b\x93\x2b\x06\xeb\x63\x4a\x79\x14\x3e\xaf\x3b\xf9\
\x7c\x32\x9e\x66\x6e\x6d\x5f\x5c\x1b\xeb\x50\xb5\x34\x51\xc1\x1c\
\xa8\xf8\xa3\xb7\xae\xff\x13\x7b\x77\xed\x41\x8c\x42\x21\x68\x63\
\xe8\xda\x3d\xc8\x4d\xb7\x6c\xe6\xe3\xff\xb5\x96\xbb\x1e\xea\xa7\
\x69\x4a\x8a\xc3\x17\xd6\x53\x9b\x51\x41\xdc\x52\x74\x50\xeb\x5b\
\xc0\x94\x61\x64\x23\x06\x23\x3e\x06\x1d\xe4\x4a\x6a\x85\xd1\x82\
\x31\x1e\x86\x34\x5a\x34\xda\x08\xda\xcf\x62\x50\x68\x53\xaa\x6c\
\x49\x1f\x0f\x2d\x05\xb4\xc9\xa1\xb5\x21\xf8\x4f\x63\xc6\x50\x34\
\xed\x81\x97\x87\xd4\x08\x4a\x0c\x69\xa3\xc8\x68\xc8\x98\x12\x8d\
\xfe\x30\xc7\x2f\xd6\x9c\x73\x6e\x03\xfd\x85\x12\xdf\xfa\xee\x06\
\x36\x6c\x1a\x45\xcb\x30\xcd\x53\x1b\x2b\x46\x41\x3c\xda\xe4\xe0\
\xbd\x10\x32\xf4\x95\x01\x0f\x52\xee\xfa\x90\x65\xeb\xfd\xbd\xa7\
\x71\xcd\xc5\xa7\x1d\xf4\x19\x9e\xd1\xda\x44\x26\xed\x51\xf4\x4b\
\xcc\x69\x6f\x39\xe8\xeb\xbf\xf4\xee\x93\x51\x4a\xf8\xe2\x2f\xee\
\x03\x29\x7b\x96\x17\x2f\x7f\x75\xe3\xf0\x14\xc5\xa2\x8f\xbb\x7c\
\x99\xb0\x7d\xff\x20\x17\x7c\xf5\x36\xfe\xf1\xdf\x97\x52\x5f\x9b\
\x3d\xa8\xfb\xcf\x6d\x6f\xa9\x28\xdf\x6b\x2e\x5e\xce\xd7\x0e\x72\
\x8c\xd7\x5c\x7a\x06\x40\xd9\xd3\x84\x6b\x2e\x39\x83\xaf\x5d\x7c\
\xc6\xc1\xdd\xe3\xb2\x33\x41\xe0\xda\xdb\xef\x07\x0c\x73\xa7\x4f\
\x19\x7f\x53\x39\x2c\xa5\x30\x92\x20\xc6\x09\x78\x4c\x6b\x69\x78\
\x55\x50\xc9\xdc\x8e\xa9\xd1\xd8\x99\xa3\x53\xc6\xd4\x86\x5a\x1a\
\xeb\x6a\x0e\xfa\xde\xcf\xac\xed\xe2\xc2\x6b\x6e\x0b\x0e\xbe\xc0\
\xd5\x97\x9d\x7d\x50\xca\x32\x3a\xc6\xe0\xf7\x69\x2d\x8d\xff\xaf\
\x9e\x73\xfb\xfe\x01\x2e\xf8\xca\x2f\xf8\xc7\xf5\x1f\x38\xf8\xbd\
\x33\x7d\x6a\x45\x52\x5d\x75\xc9\x59\x5c\x7d\xf9\x79\xaf\x0e\x9f\
\x92\xd0\x98\x2a\x7f\x0b\x79\x0f\x92\x90\xc3\x2c\x16\xb4\xaf\x8c\
\xa5\xa4\x74\x42\x4c\xd1\xca\xa5\xd4\xd1\x18\xfb\xd7\xaf\x7c\x23\
\x5f\xbd\xec\xdc\x83\x7e\x8c\x23\x17\xcc\xac\x7c\xe7\x9c\x83\x50\
\x96\x15\x99\x71\xf1\x39\x28\xa5\xf8\xe2\x8f\xfe\x44\xb4\x28\x73\
\x95\x78\xbe\xb2\xd2\xa4\x5c\xa1\x1c\xb1\x4b\x07\x1a\x5a\x33\x23\
\x34\x34\x4e\xc5\x94\x46\x31\xca\xc3\xe8\x22\x35\x75\x8d\x0c\xe5\
\x4c\x04\x36\x7c\xd5\xde\xe6\x24\x3d\xd2\x7c\xd1\x94\xb9\x3f\xf9\
\xf2\xab\x40\x2a\xd3\x40\x6b\x66\xc8\x5a\x6b\x2b\xd6\x6c\x4c\xbc\
\x23\x10\x30\x77\xef\xbf\x50\x2d\xf5\x88\xa7\xe8\x3d\x30\x84\xaf\
\x83\x75\xed\x19\x1a\x64\x66\x6d\x0d\xef\x79\xf7\x42\x2e\x7a\x6b\
\x33\x87\x76\x68\xd2\x45\x41\xb4\x07\x9e\x1f\x0b\x59\x18\x7c\x8c\
\x8c\xa5\x8d\x28\x20\x0b\x3a\x83\xc8\x00\x22\xfb\xd1\x7e\x1b\xc6\
\x4c\x03\xc9\x81\x14\x31\xbe\x46\x67\x0c\x1b\x36\xd5\xd2\x5f\x48\
\x21\x69\xcd\xf4\x96\x66\xda\x5b\x0c\x35\x22\x28\xbf\x84\x48\x1e\
\x24\x0f\x2a\x85\x2f\x59\x04\x0f\xc1\x94\xb7\xb1\x46\xca\x10\x70\
\xca\x17\x3c\xbf\x88\xd7\x08\x4b\x4f\x98\xcb\xcd\x7f\x5d\x45\xef\
\xa0\x06\xcf\xb0\xaf\x7b\x3b\x4c\x39\x3e\x56\x11\x35\x5e\xa5\xcc\
\x86\xe4\xc3\x90\xac\x38\x2e\x28\x7b\x4c\x61\x65\x29\xe7\x5f\x37\
\x71\xdc\xc7\x95\x7f\x17\xfe\xda\xf3\xae\xb1\xc8\x05\xe3\x9b\x70\
\xc6\xd4\x06\x1e\xfa\xf6\x15\x2c\x9a\x11\x1c\x96\x2f\x5c\x78\x12\
\xd7\xde\xbe\x92\xe1\x42\x31\xa2\x2c\xb7\xed\xed\x65\xf9\x97\x6e\
\x61\xf3\x9e\x5e\xab\x44\x58\x42\x3d\x57\x00\xcf\x8a\xd7\x98\xa8\
\xd2\x7c\xf8\xe5\xed\x34\xbc\xf5\xff\x44\x88\x2b\xc7\x2e\xea\xe0\
\xd9\x1f\x7d\x14\x80\x67\xd7\xef\xe0\xf5\x1f\xff\xa9\x3b\xb4\x51\
\x8e\x7b\x5e\x7d\x89\x35\xc6\x2f\xdc\xcc\xe6\xdd\xbd\xd8\xe5\xc1\
\x66\x4f\x6b\xe2\xa1\x1b\x3e\xc8\xfc\xce\xa9\x15\xa5\x79\xed\x6d\
\x0f\x04\x1e\xd3\x25\x67\x44\xef\xf1\xf9\x9f\xb3\x79\x77\x5f\x2c\
\x0e\x33\xbb\xcd\xbe\xc7\x99\x01\x34\x9b\x74\xf4\x22\xdd\x4d\xac\
\x58\x6a\xa4\x8b\x87\xa3\x76\xe2\x99\x5f\xaa\x12\xc7\x72\xc4\xb9\
\x54\x98\x2c\xa6\xc7\xd1\x0a\x1d\x7c\xff\xcb\x5d\xfb\x90\x33\xbe\
\x18\x83\x6a\x17\x76\x4c\x61\xe3\xaf\xfe\x13\x80\xb5\x5d\x7b\x39\
\xfc\x7d\xdf\xc6\x59\x30\xa2\xac\xf0\xc3\xca\x72\xeb\x9e\x1e\x4e\
\xfb\xf4\x8f\xe8\xda\xd7\x97\xec\x88\x3d\xf8\x1d\x6b\x8c\xd6\x73\
\x9e\xfe\xb9\x2a\xa9\x3a\xe2\x8e\x2f\x0b\x3c\xfc\xd2\x66\x1a\xce\
\xff\x72\x24\x86\x77\xec\xe2\x59\x3c\xfb\xb3\xcf\x05\x7b\x67\x6d\
\x17\xaf\xff\xd0\xf7\x42\xf3\x1d\x2f\x15\x77\xdd\xfb\x2f\xa8\x8c\
\xa3\x6b\x6f\x0f\xcb\x3f\x73\x13\x9b\x76\x1d\x48\x8c\x9f\x99\x87\
\x6f\x9c\x58\x8b\xda\xad\xa7\x6c\x74\x21\xdc\x65\xc7\xd5\xb5\xc5\
\x88\x43\x59\xe8\xa8\xf7\xe6\x08\x13\x87\x95\xe5\x68\xae\xc0\x1b\
\x3e\xf4\x1d\x5e\xdc\xbc\xcb\x51\xdc\x24\xb8\xd8\x3c\x72\x53\x48\
\x47\xc7\x8d\x38\x39\xe9\xa3\x09\x71\x55\xc3\x8c\xd6\x66\x1e\xba\
\xe9\xb3\x2c\x9a\xd5\x1e\xc8\x8c\x8b\xce\xe2\xda\x9b\xff\xc6\x70\
\xae\x10\x92\x4f\x12\x2f\xc1\x1a\xae\x61\x1d\x49\x3f\x52\xf1\x39\
\x8a\x15\x43\x11\xda\xb2\x39\x32\x99\x4c\xe0\x5d\x8a\xc7\xc8\x50\
\x2f\xf5\x53\xe6\xd2\xd6\x31\xab\xcc\x72\x95\x10\xe3\x55\x2a\x73\
\x29\xe2\x6a\x45\xe5\x86\x64\x8d\x81\xe1\xd1\x22\x7d\xc3\x05\x46\
\xf3\xa5\xb2\xa7\x15\x5c\x9a\x49\x7b\xd4\xd7\xa4\x68\x98\xda\x41\
\xba\xa6\x81\xbe\xee\x6d\x34\x64\x3d\x8c\x9f\xa7\xb6\xae\x96\xb6\
\xf4\x28\xd1\x50\x9b\x63\xee\x1d\xb1\xef\x6d\xd3\xcf\x61\x76\x6b\
\x96\x8e\x3d\x0f\xb3\xbf\x67\x43\xa0\x80\x04\x5e\xbf\xa4\x99\x0f\
\x5d\x36\x87\x37\x2d\xaf\xa3\x81\x41\xbc\x92\x8f\x51\x3e\xa2\x3c\
\x62\x75\x96\x4d\x20\x70\x05\x09\xd6\x53\x81\x51\x79\x8c\x37\x8c\
\x56\x30\x94\x6f\x64\x67\x57\x86\x8d\x1b\x46\x39\x7c\x49\x8e\x85\
\x73\x46\x50\xc5\x5a\xfa\x87\xeb\xb8\xf9\x37\x7b\xb8\xeb\xa1\x61\
\xa6\xcc\x98\xc2\xf4\x46\xc5\xd1\x8b\xd3\x5c\x70\x76\x3d\xaf\x59\
\xd8\x40\x8d\x4e\xa1\x28\x80\xf2\x31\xaa\x84\xf8\x1e\xa2\xd3\x65\
\xe3\xd0\x60\xca\x72\x47\x2b\x9f\x82\xc9\xf0\xe8\xf3\x9a\x9f\xfd\
\x6e\x1d\xcf\xbd\xdc\x8d\x42\x91\xa5\x81\xd1\x21\x50\xcf\xfd\x81\
\xce\xce\x36\xe4\xb0\xe5\xec\x18\xf0\xad\x73\x22\xf1\x0c\x0a\x4b\
\x66\xa6\xf0\x89\xa7\x86\x38\x09\x2f\x9a\xdf\xff\xe7\x85\xbc\xf3\
\xe4\x25\xce\x23\xba\x7a\xeb\x5e\x5e\xf3\xe1\x1f\x13\x4f\xde\x77\
\x29\x98\xb0\x15\x1b\x0c\x66\x57\xcf\x30\xef\xfa\x3f\xbf\xe7\xb9\
\x9b\x3e\x9c\x6c\x6e\x01\xa7\x7d\xe1\x66\xb6\xed\x1b\x18\x5f\xa8\
\x48\x21\xe5\xf0\xa1\x56\x16\x34\x88\xe3\xa0\x97\xdf\x1f\x3b\xa8\
\xce\x62\xc3\xc4\x0f\x8d\x91\xaa\xb9\x52\xa7\x7d\xf6\x67\xc1\x18\
\x23\x6d\x8d\x82\xcf\x6d\xef\x1e\xe4\xf4\xcf\xfd\x9c\x6d\xbf\x0e\
\x29\x22\x47\x6d\xef\xd3\x3e\xf3\x33\xb6\x75\xf7\x5b\x0a\x28\x98\
\xdb\xed\xfb\x06\xe2\xf7\xa8\xd6\x42\x4a\x5c\x06\x53\x38\x15\x88\
\x44\x12\xa8\xb9\xff\x9b\x93\xb2\xf0\x37\xed\xdc\xcf\x29\x9f\xfe\
\x09\xbb\x7b\x06\xdd\xf5\x19\x14\xc9\x25\x39\xc3\x70\x5f\x92\xc9\
\xed\xac\x91\x1b\xfc\x2c\xff\xec\x8f\xe8\xda\xdb\x1b\x27\x84\x18\
\xe3\x9e\x1a\x13\x37\xdd\xcd\xca\x1b\x26\xff\x9c\x9f\xf8\xe1\xf8\
\x73\xda\x15\x87\x12\xeb\x26\x8b\xa3\x5f\xa3\x89\xef\x9d\x4f\xfe\
\x90\xad\xbb\x7b\xe3\xcb\x9a\xd8\x15\xc7\xb8\x0b\xaf\x8b\x23\x19\
\xbb\xa2\x20\x4d\x54\xa1\x90\xd0\x7b\x30\xac\x2c\xc7\xbe\x5f\x25\
\x11\xd8\xe2\x0f\xff\xb1\xef\xdd\x19\x28\x4b\x67\xdb\x31\xc7\x8f\
\x4a\xda\x84\x38\x48\xe0\xc2\xae\xfd\x03\xbc\xeb\xaa\x5f\xf0\xdc\
\x2d\xff\xe9\x9e\x73\x57\xf5\x24\x47\x97\xab\x48\x03\x87\x49\xd4\
\xb4\x6e\x4e\x8f\x90\x4d\xa7\xd1\xc5\x51\x36\xbc\xfc\x0c\x79\x5d\
\xc3\xfe\xdd\xdb\x38\xfd\xd8\x39\x28\xe5\x21\xca\x43\x79\xa9\xf2\
\xef\xa9\xf1\xdf\x3d\x0f\xa5\xe2\x7f\x57\x5e\xf9\xdf\xca\xa3\x77\
\x28\xcf\xea\x0d\x7b\x58\xbb\x65\x37\xc5\x8c\xe6\xa4\x53\x4f\x66\
\xde\x61\x87\xd1\xd4\xd8\x42\x36\x53\x83\x5f\x2a\xd1\xbb\x7f\x3f\
\xcf\x3f\xf5\x18\x1b\xb7\x6c\xa4\xf0\xd8\x36\x3a\xea\x86\x59\xd8\
\x2e\xcc\x9f\x39\x9d\x96\xfa\x2c\xed\x99\xa1\xd0\x71\x4a\xa8\x1c\
\xa4\xb0\x3a\xe3\x04\x7f\x9e\xd5\xff\x12\x4f\xac\x5e\x8f\xa0\xc9\
\x7a\x3e\x6f\x5c\xde\xce\x27\x2f\xed\xe0\xf5\x47\x36\x91\x36\x3d\
\x28\x86\xd1\x29\x85\xaf\xd2\x88\x16\x47\xcd\x8a\xb2\xa2\x94\x22\
\x5a\x8d\x62\xbc\x14\x3e\xcd\x1c\xe8\xc9\xf0\xf0\xbf\x47\xb8\xe7\
\xf1\x7e\x5e\x7c\x61\x17\xfb\x76\x0c\xf3\xc1\x8b\x67\xf0\xc5\x8f\
\x4e\xa7\x86\x7e\xea\xea\x0b\x1c\x7e\x54\x23\x4f\x6f\xe9\xe4\xb3\
\x5f\xbd\x8a\x9d\x5b\xb7\xf1\x7f\xaf\xfe\x3a\x7f\x7b\x68\x17\x6f\
\x3e\x7b\x16\x17\x9e\x3d\x95\xc5\xb3\x3d\x14\x03\x65\xfb\x49\x63\
\x54\x19\xf6\x15\x0f\x4d\x8a\x92\xa4\xd9\xb4\x13\xfe\x74\xef\x10\
\x77\xde\x75\x80\x75\x5b\x47\x48\x0b\xbc\xe9\xf4\x59\x1c\x79\x78\
\x23\x4f\xbe\x72\x80\xa7\x9e\x1d\xa4\x7b\xcf\x2e\xcc\xde\xdf\xb3\
\xf4\xc8\xc5\x6c\xa8\x5d\xca\x68\x51\x3b\x4a\x9c\x5a\x61\xab\xf2\
\x1c\xa6\xf0\x48\x2e\xe3\x15\x8b\x95\xc8\xc4\x96\xad\x4d\x22\x4a\
\x12\xe0\xd6\xa1\xeb\x9c\x52\xcf\xaf\xbe\xf4\xf6\x38\x6c\x64\xdd\
\x62\xdb\xde\x81\xe8\xfb\x49\x85\x8f\x2b\x89\xb3\x63\x31\x17\x71\
\xb4\x8e\x0a\x27\x97\xdb\xb9\x91\xae\x13\x1b\x6a\x47\x26\x09\xad\
\x78\x80\x6d\xdd\x83\xe3\x82\x3d\x12\x3f\x0d\xfe\xd6\xd5\x3d\x38\
\x31\x41\xa5\x7b\x20\x99\x7a\x2f\x86\xae\xee\xbe\x89\xa5\xbb\x6f\
\xaf\x45\x52\x0b\x9c\xe8\x65\xdd\x7d\x43\x07\x0d\xcb\x2e\x9c\xd9\
\xc6\x7d\xdf\xba\x92\x23\xaf\xbc\xa1\x2c\xf8\x42\x9e\xad\x01\x77\
\x61\x83\xd0\x26\xb5\xe7\x5d\x5b\x70\x9a\x7d\xe0\x43\x3f\x5b\x76\
\xf7\x26\xb0\xa9\xab\x64\x7f\x2b\xe8\xee\x1b\x3c\x68\x58\x76\xe1\
\xcc\x36\xee\xbb\xe1\xc3\x1c\x79\xc5\xb7\xa3\xca\x08\x17\xcb\xd5\
\xda\xfa\x2e\x16\xae\xf5\xdc\x5b\xf7\xf6\x8c\xcf\x9f\xcb\x68\x74\
\xed\x4f\x33\x41\xbf\x4d\x31\xf1\xbe\xb6\xf6\x47\x63\xb7\xf0\x2c\
\xb2\x8f\x7d\x0e\x6c\x04\x23\x3e\xcf\xb7\xfc\xe3\xc9\xca\xf7\x9b\
\x47\x7e\x34\x89\x78\x6c\x32\x82\x13\x6b\x43\x08\x74\xb6\x36\xf1\
\xab\xab\xdf\x97\xf0\xdc\x54\xcf\xdf\xb5\xcf\x7d\xd8\x28\x49\x6c\
\xd5\x1b\xac\x6f\x9d\xca\x91\x4e\x0b\x6b\x5e\x7c\x84\x4d\x1b\xd6\
\xb0\x93\x39\x9c\x30\xaf\x8e\xa3\x4e\x7a\x95\xb0\x3a\x30\x3c\x5a\
\xe0\x91\xe7\x36\xf0\xe2\xfa\x3d\xbc\xf6\xe4\x33\x78\xcf\x05\x17\
\x53\xdf\x3e\x83\x74\x3a\x43\x36\x25\x64\x53\xb5\xd4\xa4\x6a\xc8\
\xa6\x6b\xc8\xa6\x6a\x58\x76\xce\x85\xe4\x0b\x23\xec\xde\xb9\x91\
\x1f\xfc\xf0\xc7\xdc\xff\xf8\x03\x2c\x9d\x51\x62\xc9\xcc\x34\xd3\
\x6b\x47\x1c\x06\xa2\x65\xb8\x6a\x62\xe9\x25\xcb\x32\xdb\x58\xf9\
\xf8\xb3\x28\x81\x3a\x4f\x73\xe1\x79\x73\xf8\xd8\xe5\xd3\x58\xba\
\x70\x00\xaf\x54\x40\x24\x05\x92\xc5\x88\x01\x93\x72\x9e\x2b\x03\
\x18\xe5\xa3\x45\x53\x4a\xd7\xb1\xeb\x40\x96\x07\x1f\xca\x73\xcf\
\xbf\xf6\xf2\xc4\x0b\xfb\xd9\x3b\x52\x42\xe3\xa1\x8c\xf0\xb7\x95\
\x3d\x9c\x7b\x66\x07\xc7\x2e\xd6\x78\x7a\x98\x73\x96\xb7\xf1\xdc\
\x9a\x1e\xba\x36\xae\xe7\xc2\x77\x5f\xce\x63\x8f\xbd\xcc\x1f\xff\
\xf8\x3b\x36\xfc\x64\x13\x8f\x3f\xb9\x9f\xb7\x9f\x3b\x8b\x93\x8e\
\x6f\xa3\xb3\x23\x8f\xa7\x86\x10\x11\x0a\xa5\x7a\xfa\x06\xeb\xd9\
\xb0\xb1\xc8\x93\xcf\xee\x67\xc5\x63\x3d\x3c\xbf\x3e\x4f\xb1\x68\
\x48\x8b\xe2\x2d\xcb\x67\xf0\xf9\x8f\x4c\xe3\xd0\x85\x83\xec\x1d\
\x6e\xe5\xc1\x15\x2d\xdc\xf3\xaf\x5e\x1e\x79\xfa\x00\x6b\x5f\x7a\
\x09\x55\xf3\x0a\x1c\xfe\xce\x78\xc9\x4a\x45\x9c\x30\x07\xe1\xf6\
\x5e\x0e\x0b\x24\x06\xf3\x98\x89\x15\xa6\x72\x15\xf8\x1e\x9f\xcd\
\x2f\xbd\xeb\x44\xae\xbf\xe2\xac\x09\x37\xd0\xf5\xbf\x7b\xa4\x0c\
\xad\xb8\xbe\xc6\xb5\xc1\x1d\x96\x72\xb8\x15\x92\x9d\xc3\x49\x2c\
\x34\x91\x08\x25\xc7\x13\xd6\x71\xe4\x4d\x86\xdf\xd3\x0e\x65\x0c\
\xf1\x06\xd7\x44\x15\x4b\xdc\xa9\x8f\x0a\x3c\x23\xf1\xa2\xd0\xd5\
\x7e\x3c\x70\xb7\x55\x4a\x96\xad\x18\x38\xed\x33\x3f\xe5\xbe\x6f\
\x5d\x79\xd0\xc4\x9f\x25\xf3\x3b\x42\x73\x6d\xd7\x15\xb6\x2a\xc8\
\x84\x9b\x66\xdb\xdd\x2e\x22\x9e\x12\x51\x38\xcf\xe5\x51\xb9\x84\
\xac\x08\x89\x65\x52\xca\x63\x39\xed\x53\x3f\xe2\xbe\xef\x7c\xe8\
\xa0\x89\x3f\x4b\xe6\x77\x5a\xc7\xc6\x4a\xa5\x89\xe1\xbd\x38\x52\
\x22\x12\xe0\x05\x1d\x36\xf6\xc4\xd1\xc2\x48\x12\xee\x3d\x19\x6d\
\x84\xc3\xd0\x33\xee\xeb\x45\x27\x41\x13\x56\x6a\x89\x5d\x6e\xce\
\x56\x80\x93\xcb\x5f\x5d\xb3\x75\x77\xb4\x3a\x50\x68\x42\xbe\x74\
\xf1\xd9\x5c\xff\x91\xb7\x4d\x2c\x33\xee\xf8\x17\xc3\xb9\x3c\x91\
\x32\x7c\xb1\x3c\x5b\x47\xc5\xa6\x98\x5c\x71\x85\x77\x82\x6b\x6b\
\x55\x91\x14\x9a\xed\x5b\x37\xb1\x6f\x5f\x37\xbf\xd8\x73\x26\x17\
\x4d\x7f\x94\xb6\xce\x20\x7c\x65\x8a\x23\x07\xad\x2c\xd7\x77\x1d\
\xe0\xae\x87\xd6\x73\xc4\x09\xcb\x79\xf7\x97\x3f\x87\xd4\x37\xe3\
\x89\xc1\x17\x41\x19\x3f\x20\xca\x98\x32\xf9\xc5\x04\x2f\x11\x21\
\x9b\xa9\x63\xde\xfc\xa5\x7c\xf7\x86\x1f\xf1\xca\xa6\x4d\x7c\xe7\
\xbf\xae\xe4\x95\x4d\x4f\xb1\x74\x5e\xbd\x25\x5b\xc6\xd6\x59\x5b\
\x45\x2a\x88\xb4\x59\x7c\xf4\xf1\x15\x88\xf2\x48\x89\xe6\x6d\xe7\
\xcd\xe6\x8b\x1f\x9d\xcd\xfc\x8e\x03\x08\x06\x9d\x2a\x05\x45\xe9\
\x74\x06\xaf\x24\x78\xf8\xa0\x4c\x10\xab\x2c\xcb\xde\x80\xe8\x63\
\x28\x29\x45\xef\x48\x2b\xf7\xfe\x33\xc7\x1f\xef\xd9\xcd\x23\xcf\
\x1e\x60\x60\xc4\xa0\x0c\x34\xa6\x05\x9d\x81\xa1\x51\xcd\x9a\x4d\
\x3e\xbf\xfa\xd3\x7e\xe6\x7e\xba\x85\xf6\x6c\x81\xf6\xba\x3c\x97\
\xbd\x33\xcb\x5f\xfe\xf1\x77\x1e\xba\xb7\x83\xcb\xdf\x7f\x11\x43\
\x83\xfb\xb9\xef\xbe\x7f\xb2\xf2\xdf\xbd\x3c\xf3\x72\x1f\xf3\x67\
\x37\x30\xb7\x33\x43\x4d\x46\x63\x44\x18\xc9\x1b\x76\x77\x17\xd9\
\xb9\xa7\x40\x6f\x5f\x11\x5f\xa7\xf0\x10\xda\xea\x7c\xde\x76\xde\
\x4c\x3e\xf0\xde\x0e\x8e\x5c\x34\x8a\x2a\x15\x59\x50\xa7\x99\xf7\
\xc6\x5a\xce\x3a\x61\x2e\x7f\xb9\xbf\x99\xdf\xdf\xb5\x87\x17\xd7\
\x0e\x91\x7a\xe1\x4e\x5e\x7f\xdc\xeb\x78\xa2\xb0\x20\xca\xf6\x76\
\x6c\xdb\x73\x24\x43\x85\x00\x00\x20\x00\x49\x44\x41\x54\x54\xd4\
\x0b\xa9\x92\x44\x2c\x86\x0b\xbf\xf1\x07\xf8\xc6\x1f\xe3\x07\x31\
\x56\x71\x45\xdc\x86\xfd\x24\xe5\xfc\xf5\x77\x3e\xc2\x57\x7e\xf9\
\x40\xc0\xe4\xb2\xee\x31\x7f\x7a\x33\x5b\x2a\xb1\xaa\x84\x7e\x68\
\xb1\xbe\x6f\x8e\x9a\x95\x11\x58\xd5\x3a\x39\xce\x32\x63\xb6\xd2\
\x73\x35\x72\x2d\x8f\xb1\x63\x2a\x5b\xf6\xf6\x25\xc4\xc3\x82\xd4\
\x8f\xb8\x85\x2f\xd6\x3d\x9a\x43\x9e\x53\xbc\x98\xfb\x3c\x9b\x4c\
\x15\x6e\x98\xec\x54\x3a\x56\xae\x5c\xa4\x13\x42\x94\x24\xb2\x66\
\xfb\x3e\x66\xfd\xc7\x37\x92\xb0\xd1\xe8\xfc\x19\x30\x2b\xbe\x99\
\x60\x49\x48\xbc\xb7\x9e\xab\xf6\xa6\x6b\xdc\x4a\x8f\x0b\xe1\x30\
\xed\xbd\x9a\x22\x4c\xb2\x04\x8c\x43\xa1\x62\x58\xb3\x6d\x2f\xb3\
\xde\xf5\xf5\xe4\x12\x89\xb6\xfc\x5f\xf1\x5d\x6b\xdf\x27\xc0\x78\
\xe2\x30\x7c\xac\xb4\x8b\x24\x6f\x71\xc1\xcc\xa9\xe5\xd8\x37\xd1\
\x66\xc9\x4a\xdc\xc8\x0f\xe2\xee\x75\x19\x33\xf6\x1c\xbd\x60\xc3\
\x5b\x5a\x27\x78\xad\x89\x0d\x9f\x13\x9b\x0c\x5a\x08\x90\x99\xf0\
\xe0\x6f\xda\xd9\xcd\x99\x9f\xbe\x31\xee\x59\x33\xf9\xa5\xbe\xfe\
\x8e\x7f\xf1\x95\x9f\xfc\xd9\x61\x38\x87\x7b\x82\xba\x4a\xbf\xb9\
\x20\x59\xb1\x4a\xa3\x8d\xcb\x8a\x29\xd9\x40\x21\xee\xee\xee\xe6\
\xfb\x5b\x4f\x65\x77\xbe\x86\x86\xce\x11\x5a\x3b\x17\x81\x2e\x61\
\xfc\xc2\xa4\x15\xa5\x31\xf0\xe8\x8b\xdb\x79\xf2\x95\xfd\x9c\x7f\
\xc5\x67\xa9\x9f\x77\x24\xbe\x31\xa4\x8c\x89\x6c\x75\x6d\x34\xda\
\xf8\xf8\xda\xc7\xd7\x25\x4a\xba\x48\xda\xcb\x84\xb6\xb3\x70\xc4\
\xc2\x05\xfc\xe8\xf6\x7f\x70\xd3\xff\x7c\x9f\x3f\xff\xea\xfb\xc0\
\x28\x64\x6a\xe3\x68\x9f\xb1\x1a\x47\x97\x79\x27\xb5\x2f\xfe\x96\
\xa2\x51\xa4\x8d\xcf\x79\xcb\xda\xf8\xe4\xfb\x9a\x58\xd8\x31\x88\
\xd2\x01\x11\x5a\x89\xc2\xf8\xaa\xfc\x79\x8d\x11\x1f\x5f\x19\x14\
\x05\x94\x5f\x8f\x36\x8a\x92\x37\x44\x49\x9a\x79\xfa\x25\xcd\x6f\
\xfe\xba\x83\xbf\xde\xd7\x4d\xcf\x80\x42\x23\xb4\x36\xc3\x69\xaf\
\x9b\xc6\xf2\x93\xa6\x51\x4c\x15\xf9\xdd\x9d\x3b\x79\xe9\xa5\x14\
\xb7\xff\xb5\x9b\xba\xf6\x14\x9f\x7c\x77\x3b\xd3\x1a\x7b\x38\x66\
\x41\x8e\xcc\x59\x83\xfc\xed\x91\x1f\x93\xd9\x7e\x0a\xef\x7c\xeb\
\x05\xb4\x34\x35\xf0\xd7\xbf\xff\x8d\xa1\xd1\x61\x56\x6d\x1c\xe1\
\x95\xf5\x43\x88\x09\x6a\xca\xfa\x02\x5a\x02\xe5\x2d\x02\x35\xaa\
\xc8\x51\x0b\xeb\x78\xef\x3b\x67\xf1\x8e\xf3\x5b\x68\x6b\xc8\x93\
\xf2\x0d\x46\xd2\xe5\x88\xc4\x08\x73\xdb\xf2\x7c\xf8\x3d\x6d\x1c\
\xbf\xb4\x9d\x3b\xfe\xb0\x8d\xdf\xff\x73\x37\xcf\x3e\xf5\x24\xf3\
\x66\xef\x61\xeb\xb4\x93\xaa\x2a\x2d\x0b\x92\x95\x68\xc9\x33\x97\
\x17\x24\x76\xdc\xcb\x2a\xab\x25\x56\x25\x99\x83\xfc\x09\xf2\x30\
\xef\x4f\x8c\x83\xae\xfc\xce\x15\x2c\xfb\xe2\x2d\x6c\xde\xd3\x97\
\x50\xdf\x76\x82\xaa\x3a\xce\x8a\x2e\x8e\x0e\x12\x55\x74\x45\xac\
\xfc\x96\x35\xcc\x95\x37\x5c\xc9\xb2\x2f\xfc\xef\x38\xe9\x27\xa4\
\x70\x67\xb7\x35\xb1\xe2\xdb\x57\x32\x91\x64\x58\xf9\x9d\x0f\xb0\
\xec\x73\xbf\x60\xf3\x9e\x9e\x71\xf2\x4c\x99\xac\x32\xbb\xad\x89\
\x15\x37\x5c\xe9\x50\x02\x52\x7d\xdc\xf6\x14\x45\x12\xeb\x27\x8a\
\xc1\x11\x83\x85\xdd\xc6\x85\x38\x04\x4f\x92\xf0\x15\xc7\x7b\x93\
\xb1\xae\x84\xea\x30\xc1\x24\x3e\x4e\x15\xb2\x98\x58\x25\x00\x93\
\xac\xbf\x58\xa7\x1e\xed\xde\x10\x4a\x12\x50\x85\xf8\x73\xaf\xfc\
\xfe\x27\x59\xf6\x99\xff\x09\x48\x3f\xae\x2a\x3d\xc6\xa5\x8c\x5d\
\xf6\xdd\x04\x8d\xaa\x15\xee\x42\xde\xb1\x39\x8b\xe5\x9c\xb9\x03\
\xd1\x0e\xef\xf7\xf7\xd7\xbd\x9f\x8f\x7f\xf7\x77\xec\xed\x1d\x44\
\x4e\xf9\xd8\x24\xea\x09\x4f\x00\x65\x57\x53\x96\x3f\xfe\x4b\x1c\
\x7d\x72\x1a\x30\x56\x4e\x69\xc4\x88\x26\xb9\x3d\x9f\x0a\x3e\xa0\
\x0d\xf8\x5a\x71\x53\xd7\x49\xac\x1e\xea\xe4\x94\x29\x9b\x49\xd5\
\x34\xd3\xd8\x32\x9d\xd2\xd0\x4e\xe7\xf8\xb4\xd6\x18\xed\x63\x8c\
\x2e\xff\xae\x31\xc6\xe7\xe1\xe7\x77\xf0\x62\x57\x9e\xb7\x7c\xfa\
\xbf\xa1\xa9\x1d\x5f\x1b\x3c\x35\x3e\xac\xb1\x23\xea\x1b\x83\xaf\
\x7d\x4a\xba\x84\xf2\x83\xaa\x3f\xc6\x18\x3c\x15\xe4\x04\x1a\x63\
\xf0\x75\x09\xdf\xf8\xbc\xff\xc3\x1f\x64\xca\xdc\xc5\xac\xf8\xcc\
\x37\x60\xfa\x61\xe0\xd5\x38\x64\x75\xc8\x70\x32\x70\x68\xef\xe3\
\xac\xcf\x1b\x94\xd4\x70\xd4\xa1\x8a\x8f\x5e\xdc\xce\x51\xf3\x0d\
\x9e\x19\xc0\xa8\x34\x48\x1d\xc5\x52\x89\x94\x98\x20\xce\x23\x01\
\x7c\xa5\xb4\x41\xa1\xc0\x78\x94\x52\x8a\xfd\xc5\x2c\xbf\xff\x53\
\x3f\xb7\xfe\xee\x00\x6b\xb6\x8e\xe2\x53\x62\x4a\x43\x9a\xb3\x4e\
\x9d\xc6\x5b\xce\xa9\xe5\x94\x63\x6b\x69\xa9\x03\x3f\x55\xcf\x21\
\x9d\x0b\xf9\xf9\x1d\xbb\xb9\xf7\xf1\x21\x7e\x7a\xcb\x16\x46\xf6\
\xe5\xb9\xec\x9d\x9d\x1c\x7e\x48\x8a\x23\x16\x0f\x33\x63\x8e\x62\
\xc5\xe3\x7f\x67\xd5\xbf\x1f\xa0\xd0\x33\xc2\x9c\xa6\x12\x03\x2a\
\x43\x3a\x9b\xa2\xae\x41\xb1\x6f\xa0\x44\x77\xef\x48\xd0\xe7\xc4\
\x40\xca\x87\xe9\x53\xd3\x9c\xb7\x7c\x06\xef\x79\x6b\x27\xaf\x3f\
\xba\x48\xd6\x3f\x40\xca\xaf\x0b\xa0\x36\xa5\xd0\x28\x94\x01\x65\
\x7c\xb2\xf4\xf2\xda\x23\x1a\x98\xfb\xf9\xd9\xcc\x5e\x90\xe5\x96\
\xdb\xba\xd8\xd1\xd5\xc5\x1c\x03\x5d\xed\x27\x25\xca\x93\x94\xb3\
\x64\x9d\x38\x48\x18\xc2\xc4\xd5\x6f\xec\xc0\xba\x83\x96\xfe\xcd\
\xdf\x3f\xce\x37\x7f\xff\x18\x18\xc3\xdb\x4e\x3c\x8c\x3f\x7d\xed\
\x3f\x2a\x6f\x7f\xeb\xf7\x8f\xf2\x95\x9b\x1f\x08\xc5\x84\x02\x06\
\xd4\xd7\x7f\xb5\x92\xab\xde\x7b\x3a\x00\x73\xda\x5b\xd8\xf4\xcb\
\xcf\x4c\xfa\xbc\x35\xbc\xe9\xbf\x19\xce\x15\xa3\xf0\x51\x0c\xe6\
\xb2\x0b\x1a\xd8\x82\x49\xe2\x73\x60\x09\xba\xaf\xff\x6a\x05\x57\
\xbd\x77\xd9\xf8\x18\x6f\xfd\xdc\xa4\xc6\x77\xdd\x1d\x0f\x56\x3c\
\xf4\xaf\xdf\xf1\x20\x57\x95\x19\xc1\x73\xa6\x4f\x61\xd3\x1d\x5f\
\x98\xdc\x3d\x6e\xbf\xdf\x6d\xc1\xde\xf7\x8d\x49\x5d\xbf\xa3\xbb\
\x8f\xe5\x9f\xff\x39\x1b\x76\xee\x0f\x8a\x4d\x74\x4c\x65\xc5\x77\
\x3f\x18\x4d\x51\x99\xf4\x8f\x8e\xc2\xc7\x91\x4a\x36\xc1\x24\x9e\
\x74\xc4\x5c\xfe\xf9\xcd\x2b\x69\xa8\x73\xa7\x62\x1c\x36\x67\x3a\
\xe6\x81\xef\xc4\xfe\xbe\x76\xdb\x5e\x0e\x7f\xdf\x77\x12\x21\xd6\
\x64\x41\x2c\x0e\x25\x63\x98\x3f\x7d\x2a\x2b\xbe\xff\xd1\x68\x4a\
\xc6\x41\x3c\x66\xac\x9d\xd5\x44\x9f\x0f\xef\xb7\x10\xe2\xf0\xdf\
\xb7\xdd\xcb\x57\x2f\x3d\x3b\x30\xa8\xa6\x4f\x61\xe3\xaf\xaf\x72\
\xde\xe2\x3b\xbf\x7d\x90\x2f\x8c\x29\x86\x2a\xcf\x37\x31\x8b\xb6\
\xbc\xee\xfb\x7a\x59\xfe\xa9\x1b\xd9\xb0\xa3\x3b\x36\x77\xe6\xe1\
\x1f\x1e\xc4\x3d\xbe\xcf\x86\xed\xc1\xde\xb9\xf6\xe6\x7f\x54\x72\
\x30\xdf\xb9\xec\xb5\xbc\x73\xd9\x6b\x27\xaf\x1b\x4f\xf9\x98\x13\
\xce\xfe\xe6\x6f\xee\xe3\x9b\xbf\xbe\x2f\x90\x19\xa7\x1d\xc5\x9f\
\xfe\xef\x38\x31\xf0\x5b\xbf\xba\x97\xaf\xfc\xe4\xaf\xee\x74\x10\
\xb1\x8d\xfd\x31\xd4\x22\x24\xdb\xb4\x83\xb4\x14\x23\x7e\x8c\xbf\
\xb7\xa7\xd0\xc4\xfb\x5f\x7e\x77\x65\x63\x2f\x69\xdc\xcb\x8c\x79\
\x87\xe1\xeb\x12\x85\x91\xfe\x40\x21\x1a\x1f\xa3\xcb\xca\xd1\xe8\
\xf2\xef\x7e\x59\x51\x06\xff\x7e\x69\xd3\x01\x9e\x5c\x37\xcc\xbb\
\xbe\xf4\x03\x4a\x8d\xd3\x82\xa2\xed\x12\x75\x08\xc3\x8a\xb3\xa4\
\x8b\x81\xb2\x1c\xa3\x27\x28\x8d\x92\x71\xe5\xe9\x1b\x9f\x92\x5f\
\xa4\xe4\x17\x39\xf7\x8c\xd7\xf3\x3f\xd7\x7f\x9a\x8f\x7f\xe5\x87\
\x30\xed\x30\x48\x79\x51\x83\xda\x0a\x21\x6c\xda\xb4\x09\x94\x62\
\x6a\x93\xe1\xca\xf7\x4c\xe7\xb4\xa3\xeb\xf1\x8a\x0a\x3f\x03\x07\
\x06\xf2\xac\x79\x79\x80\xb9\x73\xb3\xcc\xec\x54\x64\xc4\x07\xed\
\x21\x46\x21\xda\x07\x49\x93\x4f\x0b\xab\xb6\x19\x7e\xfe\x9b\x1e\
\xfe\xf0\xd7\x5e\xfa\x47\xd2\xa4\x25\xc5\x69\xaf\x6d\xe2\x92\x77\
\xb4\x72\xee\xe9\x29\xa6\xd4\x17\xf0\xfc\x51\x94\xef\xe1\xfb\x45\
\x4e\x3f\xce\x63\xf6\xfc\x69\x2c\xbd\xbb\x8e\xbb\xff\xb9\x9f\xdb\
\xff\xba\x9d\xa7\x57\x0d\xb0\xfc\xe4\x76\x4e\x7c\xed\x54\x16\xcc\
\x4a\xb3\xfc\xb8\x14\xe7\x9e\x5a\xa4\xa7\x4f\x38\xd0\xb3\x80\xde\
\x91\x1a\xba\xba\x3d\x1e\x7a\x7c\x3f\x2b\x9e\xdc\x87\x01\x3c\x2d\
\xb4\xd6\x0a\x67\xbc\x61\x2a\xef\x78\xf3\x74\x4e\x3a\xa1\x89\xa6\
\x86\x41\x52\xf4\x12\x64\x92\x4a\x00\x65\x57\x1c\xbd\x72\x01\x04\
\x4a\xa4\xbc\x03\x4c\x6b\x82\x8f\x5c\x34\x95\xf6\xa6\x14\xdf\xfd\
\xd9\x66\x36\xee\xd8\xc5\x3c\xf5\x6f\xb6\xb6\x1d\x6b\x21\x49\xe2\
\x8e\x54\x8e\xef\x39\xed\x86\x5e\xb5\x6d\x5c\x06\x49\xcf\x95\x78\
\x9b\x1d\xa7\x1a\x23\x66\xec\xe9\x2d\x2f\x94\xae\x28\xa7\x3f\x3f\
\xb1\x96\x6b\xee\x58\x51\xf9\xcc\x17\x2f\x3c\x99\x77\x9d\x72\x44\
\x34\x31\x1a\x8f\xaf\xdd\xb1\x82\xeb\x7e\xb5\xe2\xa0\x65\xda\xf6\
\x7d\x7d\x14\xb4\x1f\xc4\xf2\x94\xc5\x7a\xd2\xa6\xdc\xc2\x3b\x04\
\x55\x6a\xe5\x68\xa1\x55\x4e\x98\x17\xcb\xd8\x0e\xb7\x1d\xd2\xc2\
\xd7\x6e\xbd\x3f\x50\x7e\x07\xf1\x73\xed\xed\x0f\x70\x75\x39\xa5\
\x04\x25\x7c\xed\xb6\x07\x0e\xfe\x1e\xb7\x3d\xc0\xd5\xb7\x3e\x38\
\x39\x53\x3c\xe1\x67\xd6\xb4\x16\x56\xde\xf0\x41\xd2\x9e\x07\x02\
\x2b\x6e\x78\x75\xca\x72\xd5\xe6\xdd\x0e\x67\x2c\x1a\x10\x98\xd1\
\xda\xc0\x3f\xae\x7f\x7f\xa2\xb2\xac\xf6\xb3\x6d\xdf\x38\xcc\xbd\
\x69\xe7\x7e\x2b\x66\x69\x7d\xf1\xd8\x3a\xe9\x68\x8c\x6f\xf3\xae\
\xfd\x95\xf7\x5f\xad\xb2\x5c\xb5\x79\x17\xd1\x0a\x30\xe5\x43\x61\
\xd4\xb8\xb1\x57\xd9\x3b\x07\x42\x90\x72\xd8\x8b\x19\x8f\xab\x5f\
\x75\xf3\xdf\xf9\xfa\x6d\xff\x9a\x1c\x86\x67\x1c\xf9\x94\xf2\x2a\
\xd7\xbd\x7d\x0a\x2b\x7f\xf8\x29\xd2\xa9\x14\xaf\xb6\x8e\xd9\xac\
\xf6\x29\xac\xbc\xf1\x33\xa4\xbd\x20\x11\xfe\x9a\x5b\xfe\xc1\xd7\
\x7f\x79\xcf\x41\xdf\xa7\x6b\x4f\x0f\x36\x6b\x78\xcb\xae\xfd\x56\
\x1d\x5d\xf8\xf3\x43\x2f\x72\xcd\x2f\xee\x1e\x97\x19\xef\x3d\x9b\
\x77\x2d\x3b\x26\x84\x64\xa8\x71\x63\x3f\x2c\xaf\x54\x39\x61\xbf\
\x52\x90\x41\x5b\xc5\x4c\x88\xc6\x3c\x2b\xde\x98\x4a\x40\xaa\x40\
\x7c\xc3\x51\x8d\xbb\x99\x3a\x6d\x36\xf9\x42\x09\xd3\x30\x0f\x69\
\x9c\x0f\x8d\x0b\x90\xa6\x05\xa8\xe6\x85\xa8\xa6\x45\xa8\xe6\x45\
\x78\x2d\x87\xe0\x4d\x39\x14\x6f\xca\xa1\xf4\xc8\x2c\x1e\x59\xaf\
\xb9\xf8\xbf\x7e\x4a\xb1\x71\x5a\xcc\xa3\x94\x72\x7d\x59\xbb\xa2\
\x5f\xc1\x2f\x50\xf0\x0b\xe4\x4b\xb9\xf1\x57\x71\x94\x7c\x69\x94\
\x42\x29\x47\xc1\xcf\x53\xf0\xf3\x14\xfd\x02\x6f\x3a\xe7\x44\x3e\
\x79\xc5\x9b\xa1\x7f\x9b\x15\xae\xd2\x11\x99\x3e\x7b\xf7\x7d\x68\
\x5d\x22\x6b\x8a\xbc\xfb\xac\x76\x2e\x3c\xab\x11\x65\x0e\x30\xac\
\xe0\x1f\x0f\xa5\xb8\xf1\x27\x23\xec\xef\xc9\xd0\x3e\xbd\x35\x18\
\x91\xd1\xe3\x4e\xaa\xca\x53\x4c\xa7\x79\xf0\x99\x51\xbe\x76\xfd\
\x36\x6e\xfb\xed\x01\xfa\x86\x3d\x5a\x6a\x4b\x5c\x71\x61\x1b\xd7\
\xff\x57\x27\x17\xbd\x09\xa6\xd5\xe6\x49\x15\x1b\x10\xdd\x8a\x31\
\xf5\x78\x26\x4d\x56\x17\x58\xdc\x36\xca\x17\x2f\x9b\xc2\xcf\xbe\
\xb9\x90\xab\xbf\x32\x8f\x25\x47\xd4\xf2\xfc\x8b\x07\xb8\xe3\xb7\
\xdb\xb8\xeb\xee\x9d\x6c\xd9\xbc\x87\x3a\xdd\xc7\x9c\x56\x8f\xce\
\xf6\x1a\xd6\xae\xef\xe5\x7f\x6f\x5d\xc3\x9d\x77\x6d\x65\xcf\x9e\
\x21\x9a\x33\xc2\xb9\x27\xb7\x72\xfd\xd7\xe6\xf1\xad\x6b\xda\x78\
\xcb\x32\x9f\x69\xd9\x03\xd4\xf8\xc3\xa4\x4b\x69\x94\x5f\x5b\xb6\
\x3c\x8a\x20\x85\xe0\x05\x08\x59\xc4\xd4\x92\xd2\x19\x32\x45\x8f\
\x26\x3d\xc2\x7f\xbc\x25\xc5\xa7\x3f\xd6\xc9\xfc\x99\xc2\x8e\x6d\
\x1b\x38\x31\xb3\x35\x1a\x6a\x54\x94\x0b\x17\x44\xe2\xda\xd5\x62\
\x44\x32\x71\xfd\xe9\x84\x52\xa9\x5b\xf7\xf4\xb2\xec\x8b\xbf\x74\
\x16\x2a\xbf\xee\x57\x2b\x39\x6a\x7e\x07\x6f\x3b\xe9\x70\x00\x6e\
\xfe\xec\x5b\x59\xb3\x63\x3f\xab\xb6\xec\x8d\x8c\xe3\xea\x3b\x56\
\x60\x0c\x5c\x3d\xc9\x4a\x3a\xdb\xf7\xf5\xb1\xec\x4b\xb7\x50\xf4\
\xc7\x4a\xd7\xd9\x30\xb1\xc4\x07\x5d\xe9\xea\x11\x82\xc8\x5e\xdc\
\xcc\xe5\xdf\xf9\x93\x05\xe3\x38\x60\x3a\x03\x57\xdf\xf6\x20\xc6\
\x98\x48\x3e\x65\xd2\xcf\x35\xb7\x3d\x50\x29\x58\x10\x3e\x8b\x57\
\xff\xf2\x81\x83\xb8\xc7\xfd\x41\xc1\x02\xe1\xff\x95\xc2\x04\x98\
\xd1\xd6\x4c\x26\xe5\x51\xf4\x35\x73\x3b\x0e\x5e\x59\x6e\xd8\xd1\
\xcd\x59\x5f\xf8\x79\x9c\x59\x66\xe5\x33\x4d\x6b\x6a\xa4\xa9\xfe\
\xe0\x0b\x17\x3c\xf1\xf2\x56\xde\x75\xed\xed\x15\xc5\xb8\xec\x33\
\x3f\x62\xc5\xf7\x3e\xca\xc2\x99\x6d\x56\x28\x20\x44\x48\x51\xd1\
\x35\xde\xb2\xfb\x00\xcb\x3e\xfb\x93\xca\x54\xbd\x1a\x65\xb9\x61\
\x47\x37\x67\x7d\xfe\xc7\x21\x16\x1d\xd1\xfd\x10\xf2\x1c\x57\x3e\
\xbf\x91\xcb\xbf\xf1\xeb\x0a\x9c\x17\xa9\xe9\x6b\xa2\xb1\xdb\xaf\
\xdd\x7c\x0f\x5a\x1b\xae\xbe\xfc\xdc\xea\xe7\x4b\xbd\x3a\x5e\x40\
\xf2\xba\xb7\x94\x8b\x8c\xe8\x57\x7f\x8f\x69\x2d\x64\xb2\x1e\xc5\
\xd1\x62\xf0\x2c\xbf\xf8\x3b\x22\xf0\xd5\xcb\x26\xc7\x1a\xdd\xb2\
\x6b\x3f\xcb\x3e\xf9\xfd\x48\xec\x71\xeb\xee\xfd\x2c\xfb\xf8\xf7\
\xac\xd8\x70\x39\x6f\xf5\x96\x7f\x70\xd4\x21\xb3\x78\xdb\x69\x47\
\x07\x32\xe3\x3f\x2f\x65\xcd\xb6\xdd\xe3\x06\x1b\x16\xe2\x60\x1c\
\x74\x04\x25\x71\x18\xd6\x79\x8c\x92\x7b\x04\x4f\xaf\x19\xa2\x56\
\xe5\x59\xfb\xc2\x0a\x36\xad\x7a\xa4\x5c\xfa\xcd\x44\x88\x83\xc6\
\x44\x43\x1a\xc6\xc0\xca\xd5\x43\x9c\xf9\xa1\x6f\x31\xad\x7d\x3e\
\x8d\xa5\x02\xbd\x85\x21\x44\x74\x54\x6f\x47\x94\xe8\xf8\xfe\x2a\
\xfa\x05\xb4\xf6\x51\xaa\x84\x2a\xe7\x8d\x1a\x0c\x46\x6b\x7c\xe3\
\xa3\xcb\x8e\x8e\x36\x3e\xef\xff\x8f\xb3\xf9\xe5\xaf\xef\x66\x60\
\x64\x10\x6a\x1a\x43\xc8\xcf\xf8\xf3\xee\xdc\xb9\x13\xc4\x70\xe8\
\x9c\x2c\xef\xbe\x60\x1a\x0d\x75\x25\xb6\x76\xd7\x73\xdb\x9f\x77\
\xf3\xf0\xca\x5e\x2e\xbe\x70\x16\xe7\x9e\xdd\x4a\x5a\x86\x03\x86\
\xa8\xf1\xca\x05\x9e\x7c\x46\x54\x1d\x77\xdf\xa7\xb9\xe9\x97\x7b\
\xf8\xf7\xea\xa1\x20\x2d\x65\x5a\x89\x8f\x5d\x3a\x93\x8b\xdf\x34\
\x8b\xf6\xa6\x02\xa9\x62\x0e\x44\x97\x4b\xd8\x05\x05\x08\x3c\xad\
\x51\x7e\x16\xf1\x33\xa4\x54\x81\x63\x66\x15\x59\x3a\xab\x9e\xc2\
\xf9\xcd\x0c\xe5\x7d\x7c\xa3\x69\xcc\xa6\xc8\x78\xe0\xd3\xc4\xa3\
\xcf\x0c\xf1\xcb\x3f\xec\xe2\x9e\x87\xf7\x31\x30\xea\x93\xc9\x7a\
\x9c\x70\x4c\x0b\x6f\x3d\xbf\x8d\xb3\x4e\x6f\x60\x76\xab\x21\xe3\
\x97\x50\x7e\xb9\x88\x82\x5f\x5b\x66\xf1\x0a\x46\x8a\x20\x7e\xd9\
\x2e\x0a\x6a\xdc\x1a\x14\x90\x02\x5d\x0b\x18\x14\x86\xac\x19\xe1\
\x3d\x6f\xac\xa3\x30\x3a\x83\x6f\x7c\x6f\x1b\xcf\x3e\xf5\x14\x1c\
\x3d\x37\x86\xa4\x06\xdd\x4a\x22\x71\x2d\xdc\xbd\x03\xab\x9d\x4c\
\x17\x2b\xd2\x26\x47\x38\x63\x3e\x26\xde\xb6\x2b\xd6\x4d\xa1\x4a\
\xa7\xf0\x4a\xdc\xd5\xd1\x67\x8f\xa4\x42\xef\x38\x3a\x9c\x4b\xf5\
\x76\x62\xae\x38\x99\x31\x71\xc8\x26\xd2\xf8\xda\x2a\xf8\x2e\x2e\
\x46\x5e\xb5\x8e\x25\x24\xf4\x05\x85\xe4\x72\xfb\x55\xd6\x2a\xd6\
\x7c\x56\x26\xd1\x6e\xca\x50\x35\x47\xc2\x8e\xf9\x60\x29\x02\xe3\
\x58\xdf\xa4\xb6\x69\x76\x93\xdb\x8a\xa0\x74\x75\x3a\x31\x13\x17\
\x5c\x8f\xe5\x61\xba\x7a\x3f\x5a\xe4\x96\x58\x0b\x2a\xa1\x7a\x3f\
\x4c\x97\xa2\x26\xda\x09\xc5\xb8\x9a\xb1\x4f\xa2\xa1\x79\x04\x3a\
\x0b\xad\x97\x38\x7a\xcf\x26\x9e\x4f\x43\xd5\x86\xe9\xd5\xd2\x7c\
\x26\xd3\xe2\x2e\x52\xcc\x01\x47\x23\x6d\xaa\x34\x38\x10\x27\x0b\
\x35\x3a\xec\x30\x43\xdc\x65\xd4\x27\x75\x28\x72\x35\x6d\x87\x48\
\xe9\xb3\x58\x1b\xc2\x90\xfc\x48\x7c\xf6\xe8\x01\x9d\x91\xed\xe7\
\x98\xc6\x1d\x4c\xcb\x0c\x73\x7a\xeb\x46\x7a\x0b\x0d\x13\x5a\x33\
\xfb\xfa\x4b\xac\xed\xad\xe7\x86\xdf\x3e\x53\x01\x1e\x7c\xa3\x19\
\x2c\x0e\xa1\x29\x91\x52\x41\xb1\x00\x4f\x09\x29\x05\x9e\x02\x6f\
\xac\xc8\x43\x99\x1d\x5b\x6d\x39\x04\x83\xd1\x25\xf0\x0b\x28\xe3\
\x73\xdb\x1f\x1f\xe7\xaa\x1b\xee\x80\x8e\x25\x31\x87\xe0\x88\xa1\
\xa7\x59\xb3\xe6\x65\x54\x0a\x3e\xf3\x81\x79\xfc\xe7\x47\xe6\xf3\
\xf2\x0b\x03\xdc\x74\xfb\x4e\xee\x7d\x6c\x2f\x9f\x78\xdf\x7c\x3e\
\xff\xfe\x29\xd4\xa8\x41\x94\x78\x48\xa9\x3c\x59\xca\x50\x50\x59\
\x7e\xf5\xcf\x02\xdf\xfd\xf1\x76\xd6\x6d\x1b\xc5\x18\x98\x3d\x55\
\xf8\xf4\x47\x67\xf2\xbe\x77\x36\x53\x6b\x7c\x3c\xa3\x10\xe3\x83\
\x2a\x56\xf2\x25\x31\x0a\x45\x09\xd1\x41\x47\x13\x23\x5e\x50\x43\
\xd6\xf8\x88\x31\x81\xd1\xa1\x3c\x4a\xda\x90\xd7\xb5\xdc\xfa\x97\
\x21\x6e\xbe\x73\x37\xab\x37\x96\x40\x7c\x5e\xb3\x30\xc5\x7b\xce\
\x9f\xc6\x5b\xcf\xa9\x67\x6e\x67\x1a\xcf\xf8\x78\xa6\x88\x28\x29\
\x2f\xad\x41\x19\x85\x31\xa9\x32\x52\x59\x0c\xca\xf7\x99\x72\x42\
\x48\x99\xb0\x64\x90\x72\x97\x15\x85\x56\x1a\xa5\x53\x68\x0d\xfd\
\xc5\x06\xae\xff\xc9\x4e\xfe\xe7\xf6\x9d\xd4\x35\xb5\xd2\xb3\xe0\
\x6c\x2b\x86\x69\x13\x57\x9c\x0d\x9c\x13\x0e\xb6\x93\xe9\x68\x75\
\x42\xb0\x2d\x3d\x4d\xb4\xd5\x8f\x21\xb9\x4f\x65\x58\xe0\xd8\x15\
\x7d\xc6\x60\x16\xdb\x9a\x4c\x6c\xab\x65\xf7\x29\x24\x2a\x88\x2a\
\xdd\xdb\xad\xee\x0b\x22\xce\xbc\x51\x27\x61\x20\xa2\x94\xcb\x9f\
\x55\x86\xe4\x32\x5b\x2e\xf6\xa1\x4b\x59\x3a\xc8\x29\xce\x8e\xf0\
\x8e\xf9\x8f\x54\x09\x12\x12\x3b\xc5\xbb\x04\xbb\x21\x2e\xc8\x5c\
\xa5\xd6\xec\x2e\x23\xe1\x71\x4d\xd4\xf9\x02\x3b\x35\xc1\x56\x3a\
\x8c\x33\x79\x63\xe3\x4f\xea\x5f\x6a\x57\x30\x72\xd9\x01\x12\xdd\
\x87\x3a\x9c\x86\x90\xd0\x10\x3b\x89\x57\x16\x19\x9b\x24\x7a\xd7\
\x95\xd8\x59\x52\x4f\xc8\x58\xbf\xd3\x70\x79\xbb\xb1\x5c\x62\x15\
\x25\xa7\x39\xdb\xe7\x59\x03\x0d\x1b\x23\xca\x65\xe8\x60\x11\xde\
\x92\x48\x54\x2e\x85\xa4\xa2\xa9\x35\x38\xf6\x0c\x0e\xb2\x20\x26\
\x99\x06\x11\xdb\x4f\x8e\x86\xea\x95\xbd\xa1\x2c\x83\xa7\xbc\xee\
\x11\x22\x63\x1c\x39\xaa\x54\x7b\xd2\x26\xd2\xbc\x20\x72\x06\xc2\
\x04\xc8\xca\xbd\xc6\x2b\x70\xec\xca\x37\xb1\x2b\xb7\x84\xc5\xf5\
\x7b\x39\xbd\x75\x23\xa4\x9b\x27\x74\x2e\x36\xed\xef\xe3\xca\xaf\
\x7e\x3f\x12\x3a\xf5\x44\xd1\x92\x69\x22\xe7\x8f\x52\x32\xa3\x21\
\x9b\xc0\xd0\x3f\x30\xc4\xb6\xae\x3d\xf4\xf5\x0c\x52\x2a\x95\xa8\
\xad\xa9\x61\x46\x47\x1b\x73\xe6\x4c\x27\x95\x4e\x85\x8c\xc3\xa0\
\x0a\x2b\x5a\x97\x9b\x37\x07\x5f\xf0\xae\x37\x1e\xc7\x8f\x6e\xfe\
\x23\xbb\x73\xbd\x50\x3b\x25\x54\xcd\xc8\xb0\x75\xcb\x1e\x32\xe2\
\x31\xb3\x3d\xc5\xb2\x53\x3a\x58\xf1\xc4\x7e\x6e\xfc\xf9\x2e\x9e\
\x7c\xae\x97\xb7\x9e\x3b\x8d\x4b\x2f\x6c\xa1\x86\x21\x3c\x53\x2a\
\x2b\xbc\x14\x62\x52\x14\x54\x8a\x7b\x9f\x28\xf0\xe3\x5f\xec\x64\
\xed\xd6\x1c\xbe\x52\x74\x34\xc1\xe7\x3e\x34\x87\x4b\xdf\xd1\x42\
\x2d\x03\xe5\x1a\x14\xc1\x9a\x0b\x41\x91\x75\xd1\x41\x6c\x4c\x44\
\x82\xa2\xea\x92\xc3\x90\xc2\x27\x85\x16\x8f\xb4\x2f\xa0\x7d\x4a\
\x2a\xc5\xce\xe1\x66\x6e\xfd\xed\x2e\x7e\xfc\xeb\x2d\xf4\xf6\x6b\
\x9a\xeb\x3c\xde\x72\x4e\x3b\x97\xbd\xbd\x99\x13\x96\xd4\x90\x22\
\x07\xe4\xca\xa8\x79\x09\x5f\xf9\x68\xf1\x10\x23\x28\xdf\x47\x22\
\xa8\x40\xb9\xad\x5a\x90\x20\x53\xf6\x14\xcb\x61\x3f\xf1\xd0\x65\
\xd9\xec\xe9\x2c\xcd\xe9\x21\x2e\x7a\xc7\x54\x1e\x7b\x7e\x88\xa7\
\x5f\xec\x8f\x89\xd4\x54\xf4\x0f\xb6\xe5\xee\xae\xa7\x17\xaf\xd2\
\x92\xd4\x1e\xaa\xca\x41\x36\x24\xe4\x05\x96\x6b\x3c\x86\xcd\x3f\
\x23\xa1\x6a\x3c\x96\x43\xe5\x6c\x80\x1a\x6e\x43\x55\xa5\x87\xa7\
\xd8\xec\x50\xcb\x2b\x54\x24\xdc\xdb\x2a\x94\x60\x37\x71\xb6\xab\
\x88\xd8\xad\x97\xc4\x4c\xd0\x77\x52\xdc\x0d\xaf\x63\xde\xbf\x5d\
\x42\xc7\x24\xb1\x4c\xac\xb2\x80\x62\x91\x1f\xad\x3e\x91\x61\x56\
\x1d\xae\x3c\x40\x57\xfa\x91\xc4\x3b\x9a\xc7\xd8\x93\x26\x4e\xa2\
\xb2\x99\x96\x62\x29\x6a\x1d\xce\xbd\x0c\x31\x51\x95\xb8\xe1\xb3\
\x98\xb0\xc6\x62\xa3\xda\xb5\x75\x1d\x73\x2b\x76\x5a\x89\x72\x34\
\x37\x87\x58\xd5\x27\xdb\xfb\xad\xca\xd2\x74\xa0\x30\x76\x9f\x56\
\xbb\xd8\x80\x18\x47\xe5\x1f\xcb\xb3\xd5\x58\x0a\xcc\xb1\x5e\x31\
\xcf\x98\x28\x4b\x1e\x93\xe0\xb0\x8a\xa3\xed\x1c\xf1\xf0\x44\x2c\
\x1f\xda\x6a\xff\x67\x94\x7b\x3d\x70\xa0\x20\x94\xf9\x06\x15\x16\
\x8c\xdd\x52\x50\x27\xc1\x63\x96\xc7\x2a\x38\xfb\x93\x86\x8d\x6e\
\x63\x1c\x60\x92\xc4\x53\xd4\xc2\xf3\x1a\x8a\x1f\x37\xd7\x67\xaa\
\x42\xe5\x43\x39\x9f\xa1\x9c\x61\xfa\xbc\x25\x94\x8c\x26\x65\x19\
\x74\x35\x5e\x2d\x25\x93\xa2\x68\x86\xd1\xda\xe7\x95\x35\xeb\xd9\
\xb4\x79\x07\xc6\x27\xc8\x75\xd4\xd0\x67\x86\xd8\xd3\x75\x80\x75\
\x2f\x6f\xe3\xd8\xe3\x8e\xa0\xbd\xa3\x25\x6a\x3c\x59\xa8\x4d\x4d\
\x5a\x78\xdb\x1b\x4f\xe7\x47\xbf\x5d\x01\xb5\x53\x23\x22\xbb\x90\
\x2f\x50\x23\xc2\x89\xaf\x9d\xc9\x48\x5f\x86\xff\xf9\xd9\x2b\x3c\
\xb2\x6a\x88\x59\xed\x69\xde\xfb\xc6\x69\xcc\x6c\x2e\x94\xe3\x96\
\xd9\x72\xec\xd2\x43\x9b\x34\x2f\xae\x53\xfc\xe0\xd6\x3d\xbc\xbc\
\x71\x10\xa3\x52\xd4\xa4\x7d\x2e\xbd\x70\x2e\xef\x7d\xdb\x14\xea\
\x65\x00\x65\x52\x95\x79\xa9\xf4\xf6\xac\x18\x96\xba\x3c\x84\x74\
\x64\xfb\x88\xd1\x28\x1d\x78\x7e\xbd\x43\xf5\xdc\xf8\xf3\x9d\xfc\
\xf2\x37\x5d\x8c\x14\x61\xc1\xcc\x0c\x1f\xba\x68\x36\x17\xbd\xb9\
\x91\xf6\xa6\x11\xa4\x98\x0f\x88\x3b\xaa\xdc\xae\xc5\x78\x28\x9d\
\x2e\x93\x7c\xca\x5e\x36\x29\x40\x05\x64\x1f\x29\x31\x56\xc5\x25\
\x78\x9e\x74\x64\x6f\xa6\x50\x41\x9c\xd3\xd3\x78\xa6\xc4\x61\xb3\
\x5a\x78\xdb\x99\x9d\xbc\xb4\x7a\x0d\x0d\xdb\xef\x67\xef\xec\x33\
\x09\x49\x02\x57\x90\x44\x39\xa0\x3e\x53\x1d\x99\xd1\xe5\x8d\xed\
\xaa\x96\x61\xbf\xb0\x1a\xf7\xea\xd0\x68\x24\x74\xbf\x70\xdd\x57\
\x4f\x47\xe1\x92\xa4\x8a\x1c\xc6\xb2\x84\xc7\x02\xff\xda\x4e\xac\
\x0e\x0b\x6a\xeb\xb9\xc5\x12\x04\xe1\x7f\x8f\x95\x17\x93\x90\xc5\
\x4e\x92\x52\xb3\x43\xa5\xa1\x31\x39\xdb\xcb\x28\xa2\x2d\x04\xd4\
\xf8\x77\x54\x94\x67\xc8\x8a\x8e\xf4\x33\x34\x4e\xbd\x1b\x59\xca\
\x80\x43\x15\x6c\x5a\xa3\x43\x02\x4f\x39\x06\xee\x82\xde\xf4\x78\
\x77\x96\xf0\xfd\x25\xe4\x79\x56\xac\x70\xa2\x7d\xf7\xc6\xd8\xb3\
\x92\xe4\xb1\xe8\x90\x27\x1e\x6e\x0e\x5d\xa6\xa6\xc9\xd8\xdc\x78\
\xd6\x73\x3b\x3c\xc3\xa4\x3d\x4a\x15\xe4\x3a\x4c\xc6\x19\x6b\x4d\
\x64\x2c\x66\xa1\x62\x5c\x51\x4b\x68\x2f\x54\xf6\xa6\x89\x92\x46\
\x3c\x97\xd1\x23\xd6\x1a\x87\xf6\xb2\x32\xb1\x06\x23\x31\x01\xac\
\x8d\xc3\x1b\x0b\x19\x71\x6a\x8c\xd0\x12\x5e\xd7\xd0\xbc\x55\xc8\
\x6e\x65\x62\xdf\xd8\x7e\x52\x84\x48\x79\x2e\x23\xce\x8c\x17\x2c\
\xaf\x10\xf7\x2c\x34\xc7\xc4\xcb\x38\xc6\xce\x75\x04\x46\x76\x30\
\x89\x8d\x58\xb5\x8d\xc5\x32\x8a\x43\xe7\x22\x32\x97\x63\x5e\x38\
\xa1\xf7\x4d\xdc\x23\x25\xe4\x8d\x89\x76\x1b\xec\xb1\x76\x65\x65\
\xd9\xa6\xb5\x45\x86\x8c\xe6\x7c\x36\x37\x64\xc6\x5f\xf5\xe1\x57\
\x96\xe6\xfa\x2c\xc3\x39\xc3\xb1\xa7\x9e\xc3\xe0\x30\x74\xed\xec\
\xa5\x6b\x6f\x0f\xfb\x07\x86\x28\x8d\xc5\xde\x80\x94\xa4\xc9\x4a\
\x23\xab\x57\x6f\x60\xc3\xc6\xed\x81\xb2\xd4\x81\x8c\x14\x23\x15\
\xc5\x39\x32\x90\xe7\xa9\x95\xab\xd9\xbf\xb7\xbf\x32\x46\xa9\x18\
\xc5\xe5\xdf\xcb\x3f\xa7\x1e\x7f\x24\x32\xda\x0f\xbe\x0f\x7e\xf0\
\x2c\xa7\xd7\x6f\xc0\x30\x8c\xe7\x19\x1a\x9a\xda\xb8\xf5\xd7\xdb\
\x79\xfa\xc5\x11\x3c\xad\x39\xef\xb4\x26\x4e\x3d\x2e\x8d\x12\x1f\
\xad\xb2\x18\xd2\x88\x9f\x01\xed\x31\x30\x92\xe5\x8e\x3b\x7b\x78\
\xe4\xe9\x7e\xf2\x08\x29\x8a\x9c\x7f\x72\x1b\x57\xbe\xa7\x85\xc6\
\x74\x3e\x98\x57\x55\xaa\xf8\x74\xee\x5c\xf9\x71\x8f\x4f\x90\xca\
\x73\x69\x55\x64\xd8\x34\x71\xeb\x1f\x7a\xf8\xf5\x9d\x3b\x28\x16\
\x6a\x38\xf6\xd0\x56\xbe\xfa\x99\x45\x5c\x71\xa9\x47\x6b\x53\x2f\
\xca\xcf\x06\x4b\xe4\xf9\xe5\xe8\x63\x1a\x74\x2d\xa2\x6b\x11\xbf\
\x36\x78\x7e\x35\x0c\x6a\x08\xa3\x72\x18\x29\xa1\x45\xd0\xe2\xc5\
\xbf\x77\xec\x3f\xad\x40\x4a\x18\xe5\x83\xc9\x90\xd6\x23\x9c\xfd\
\x86\x34\x8b\x66\xa7\x39\x70\xa0\x3b\x72\x0e\x82\x3c\xcc\x70\x8c\
\x4b\x2c\xb8\xb2\x1a\x9b\x27\x46\x0a\x4a\x88\x21\xba\x52\x52\x8c\
\x15\xe1\x8e\xe5\x42\xc9\xf8\x61\x1b\x83\xcc\xc2\x02\xd5\xe5\x49\
\x44\x2c\xc7\xd0\xc5\x61\x2b\x34\x02\xaf\x18\x2b\xd6\x18\x86\x8e\
\x4c\x5c\x08\xc7\xf2\x55\xad\x18\x98\x38\x4a\xe5\x45\x52\xd6\xec\
\x4a\x29\x49\xde\x91\x03\x36\x8e\x41\xdd\xd1\xdc\x4c\x27\x34\x69\
\x6c\x92\x83\xa3\x8e\x69\xcc\x88\x70\x79\x40\x21\xc3\x42\xac\x38\
\x2e\x3a\xa1\x1e\xa9\xc4\x8b\x15\x18\xd7\xc1\xb1\xe3\x98\xd6\x5e\
\x11\x49\x80\x6f\x93\x62\x09\x92\x4c\x98\x09\x43\xc9\xc6\x15\x00\
\x32\x0e\x54\xa4\x4a\x4c\xac\x62\xd0\x29\x6b\xff\x99\x64\x98\x9b\
\x04\x4f\xc5\x15\x37\x0e\x5f\x1f\xdb\x1f\x8e\x06\xc9\x76\xcc\x1c\
\x07\xe4\x1d\xcb\xbd\x0d\x1b\x2b\xe1\xb0\x87\x2b\xce\x6d\x1c\xe1\
\x08\x13\x27\x3f\x45\xa0\xce\x58\xd5\x73\xeb\x1a\x7b\xab\x85\x63\
\xd9\xca\x1a\xa7\x8b\x85\x1f\xfe\x1e\x15\x65\x82\x8a\x38\x0a\x7f\
\x58\x90\x7e\x18\xed\xc1\xd5\x60\xde\x9a\xbf\x30\x03\xd7\x3a\xc3\
\xcd\x0d\x19\x6b\xcb\x45\xf7\xe2\x48\x6e\x80\x63\x4e\x3c\x8f\x62\
\x29\x50\x90\xa5\x82\x61\xa0\x90\x67\xa0\x3f\x8f\x97\x82\xba\xba\
\x34\x4d\x75\xb5\xec\x3f\xb0\x8f\x3d\xbb\xf7\x91\xf1\x32\x94\x0a\
\xa5\x72\xbf\x49\xf0\xc4\xc3\xf8\xa6\x2c\xd2\x84\x52\xbe\xc4\xaa\
\xa7\xd6\x71\xea\x79\x47\xe3\xa9\x84\x66\xf4\x18\x8e\x5d\x32\x9b\
\x9a\xb4\x62\xb4\x94\x87\x74\x1d\x88\x61\xcf\xae\x7d\x68\x63\x28\
\x18\xe1\xd1\x27\xb7\xb3\x71\xcb\x7e\x8a\xa4\xe8\x68\x32\x5c\x70\
\xca\x54\x1a\x54\x60\x58\xf8\x2a\x87\x96\x34\xca\x18\xb4\xf2\xf8\
\xdb\x03\xfb\xf9\xe3\x3d\x3b\x28\xe8\x2c\x62\x4a\xcc\x6a\x4b\x73\
\xe9\x9b\xa7\x33\x7f\xda\x30\x5e\xc9\x43\x4b\x06\x23\xa5\x83\x4a\
\xb5\x85\x62\x90\x3a\xe3\xd5\x73\xcf\x23\x05\xfe\xf7\xb7\xdb\x19\
\xcc\x19\x96\x1c\xea\xf1\xe5\x8f\xb7\x73\xd6\x69\x19\xd2\x7e\x1e\
\xaf\xd4\x82\x31\x69\xf0\x06\xf0\x53\x3e\x9a\x2c\x18\x1f\x45\x11\
\xc1\x07\xf1\x51\x7e\x1a\xfc\xe6\x32\xec\x6a\x10\xd1\x98\x31\x56\
\xb4\xa8\x04\x7e\x87\x00\x63\x31\xd6\x34\x8a\x21\xe6\xcd\x2a\x71\
\xd8\xa1\x35\xac\xee\x1a\x20\xda\xad\xc4\x4f\xb0\x54\x13\x05\x85\
\xb8\xcf\xbc\x72\x19\xa6\x16\x61\x23\x06\xfd\x58\x82\x32\x9c\x44\
\x6d\xd7\x52\x8f\xe4\xf6\x55\x71\x17\x84\x64\x4f\xc3\x84\x62\x14\
\x91\xae\x25\x61\x61\xa9\xdc\x90\x94\xb3\x7c\x9e\xa5\x09\x8c\x4c\
\x10\xc2\x30\x09\x16\xb6\x3d\x9e\xb0\xc7\x60\x09\xc7\x31\x38\x32\
\xdc\xd3\x50\x24\x6e\x00\x54\x8d\x87\x8a\xbb\xb2\x4d\x62\xf3\x5c\
\x49\x88\x5f\x27\xf1\x81\x5c\x04\x1a\x0e\x3e\x61\x3d\xa6\xcb\x2d\
\x94\x40\xac\x0e\x1a\x49\xac\x6d\x57\xf9\x37\x97\xf2\x14\xbb\x3b\
\x85\xcb\x4d\x27\x5a\x44\xc0\x86\xe6\x95\x05\x5d\xda\x39\x7d\x4e\
\x7e\x00\x16\x5c\xee\x20\x38\xb9\xd6\xc6\x3e\x47\x4e\x68\xc3\xd5\
\x08\x37\xf4\xfd\x15\x44\x3f\xa4\x08\xc4\xc4\x8b\x73\x40\x9c\x38\
\x17\xab\x22\xa3\xe3\xd0\xba\x49\xb2\x67\x12\x88\x78\x61\xa5\x2e\
\x76\xfc\xdf\x2a\xf1\xe6\xaa\x31\x5c\x2d\x8c\x11\xf3\xcc\x89\x17\
\xed\x70\x92\xc9\x1c\x16\x96\x44\x1b\xd1\x03\x4c\x19\x83\x64\x13\
\xf6\x79\xbe\x64\x98\xd1\x39\x17\x9d\xad\xa1\xe4\x97\x28\x96\x8a\
\x94\x5b\x26\xe3\x97\x60\x70\xa0\xc8\xc0\x40\x81\xcd\x9b\xd7\xe1\
\x17\x7c\x3c\x95\xc2\x4b\x7b\xd4\xb7\x34\xe2\xe5\x0f\x30\x7c\x60\
\x17\xed\x33\x0f\x65\xef\x9e\x1c\xa5\x7c\x50\xaa\x6e\xb0\x77\x88\
\x7d\x3b\x7a\xe8\x9c\x33\x35\x21\x34\x03\xb5\x59\x61\xe9\x11\x0b\
\x79\x6a\xfb\x28\xa4\x03\x86\x7a\x6f\xcf\x30\xc6\x78\x8c\x16\x35\
\xaf\x6c\xda\x83\x56\x06\x45\x91\xc3\xe6\x36\x72\xcc\xe2\x26\x54\
\x29\x05\xa9\x12\x86\x21\x50\x3e\x5a\xb2\x6c\xda\x5d\xe4\xb6\xbb\
\xf6\xb2\x77\x58\x83\xf2\xf1\x44\x73\xce\x29\xf3\x39\xfd\xf8\x3a\
\x3c\x3f\x57\xde\x4e\x99\x83\x66\xec\x8b\x14\xd1\x46\xd8\xd7\x3f\
\x85\xdf\xde\xb5\x85\x2d\xbb\x4b\xb4\x35\x6b\x3e\x70\x71\x33\xe7\
\x9e\x5a\x20\xe3\x97\x10\xdd\x54\xde\xaf\x05\x0c\xf5\xa8\x92\x2e\
\x57\x61\x55\x88\xa7\x10\x29\x04\xde\xa4\x2a\x95\x15\xb0\x0e\x5e\
\x65\xe3\x22\xe8\x1e\x93\xe4\xeb\xe9\x20\xb6\x6a\x54\xb9\xdc\x9f\
\x22\xaf\xeb\x98\xb3\x60\x0e\xc8\xaa\xc8\x19\x4a\xd9\x35\x96\xdd\
\x6d\x6d\xb4\x15\x53\x74\x79\x56\x16\x4b\x34\xdc\xed\x3d\x51\xc2\
\xba\x92\x8c\x71\x17\x83\x8f\x11\x51\x2c\x26\xa3\xd1\xd1\xc3\x06\
\x71\xc5\x6f\x93\x4b\x8c\xd5\xae\x28\x1c\xfb\xb3\xbd\x4e\x2c\xab\
\x3a\x32\x6e\x07\x44\x68\x33\x03\x71\x08\xfb\xb0\x25\xef\x8a\x93\
\x44\xc8\x28\xc9\x8e\xbd\xb3\x13\x49\xac\x53\x89\x44\x8b\x64\xc7\
\x6a\xbc\x12\x13\x00\xf1\x9e\x96\x3a\x5a\x62\x2c\x4c\xa4\x32\x8e\
\xc2\xe3\xa2\xa3\x7b\xc9\x98\x04\x85\x24\xc9\x5e\x78\xc4\x18\xb2\
\x04\xe2\x44\xe0\x47\xb8\x09\x70\xcc\x53\x25\xda\x6a\x2b\x91\xdd\
\x63\xef\x7f\x71\x14\xb4\x27\x4a\x1e\x72\x11\xa7\xc4\xb1\x1f\x92\
\x3c\x7b\x63\x93\x93\xc4\xaa\x59\xeb\x60\xcc\x86\x3d\x48\x42\x1e\
\x5e\x8c\x45\x3c\x96\x7d\x2d\x71\xc3\x09\x57\xed\xda\x24\x4f\xdc\
\xb1\xf9\x6c\xbe\x9d\x72\x91\xbb\xec\x39\x74\x18\x53\x61\x83\x74\
\x0c\x29\x32\xf6\x79\x71\x19\x47\x56\x0f\xd0\xc8\x7a\x10\xe7\x12\
\x18\xe2\xa9\x66\xb6\x22\x95\x2a\xac\xf4\x88\x7c\x03\x2f\x95\x8e\
\x2b\xd7\xd0\xa5\x23\x79\x43\x5b\x5b\x07\x75\x4d\xd1\xb2\x96\x85\
\x92\x4f\xa1\xe8\x93\x2f\x8c\x32\x34\x32\x48\x6e\x78\x08\xed\x1b\
\x74\xa1\x48\xa6\x26\x4b\x71\xff\x66\x06\x5e\xfc\x35\x0b\x3a\xeb\
\x79\x70\xe5\x1d\x9c\xf2\xae\xeb\xd8\xb5\x6b\x10\x31\x01\x79\xa5\
\x6f\x5f\x1f\x73\xe6\x4d\x0f\xa2\x03\x65\xc5\xa0\x44\x50\xa4\x10\
\x15\x24\xef\x9f\x76\xe2\xb1\x3c\xf5\xeb\x47\x2a\x83\xca\xe7\x0b\
\x95\xb1\x8e\x81\xcc\x9e\x11\x96\x1e\xd6\x4a\x6b\xab\x0f\xe4\x40\
\xd7\xe0\xd1\x8f\x41\x51\x54\x29\xee\x7d\x6c\x88\xa7\x5e\x29\xe0\
\x4b\x16\x4f\xe7\x69\x6d\x11\xce\x5a\xd6\x44\x5d\x76\x18\x63\x14\
\x5a\x81\x32\xa5\x6a\x7d\x87\x12\xfc\x08\x0f\x24\xc5\x8b\x2f\x17\
\x78\xe2\xb9\x1e\x8c\x32\x9c\xbb\x6c\x26\xef\x38\xb3\x85\x8c\xdf\
\x8f\xa2\x1e\x23\x25\x8c\x97\x47\x1b\x03\xa6\x31\x60\xd1\x16\x4b\
\xe4\x0a\x19\x06\x87\x14\x5a\xea\xf0\xc5\x47\x7b\x39\x6a\x6b\x4b\
\xd4\xd7\xa4\xa8\x4f\x43\xba\xcc\x3a\x0e\xea\xdd\x46\x21\xf8\xa0\
\xbc\x9e\xc1\x78\x25\x20\x85\x68\x0f\x43\x81\x3c\x69\xfe\xb6\x22\
\xc7\xc6\xed\x0a\x51\x91\x38\x61\x88\x25\x8b\x03\x8a\x30\xf6\x21\
\xb2\x4b\x4a\xe1\x60\x45\x5a\x8a\xd3\xc5\x6c\x4b\x84\x9f\x42\xf9\
\x73\x11\x02\x86\x45\xa4\x70\x75\x3d\xa9\xc0\x85\xda\xc1\xd8\x55\
\x0e\x2f\x51\xe2\x9e\x93\x38\xe0\x39\x57\xc9\xb8\x58\xf7\x8f\x04\
\x38\x53\xac\xde\x90\x4c\x40\x86\x0a\x0b\x78\x5b\xb1\xb8\x14\x84\
\x90\x50\x90\x5c\x92\x61\x4e\x71\x90\x48\x8c\x4a\xce\x28\x89\x09\
\x4d\x15\x55\x68\x36\x41\x23\xd6\x09\x26\x89\x04\x94\xe4\x19\xe1\
\x80\x1a\x71\x94\x71\x73\x28\x18\x71\x08\x76\xdb\x9b\xb4\x8d\xae\
\x70\x2c\x59\x13\x85\x37\x9d\xa4\x16\x0b\x51\x88\x78\x8c\x26\x6a\
\x28\x8a\x4e\x60\x30\xe3\x86\x55\x95\x4c\x94\x95\x10\x25\x02\x39\
\x3a\x4e\x8c\x23\x26\x56\x00\xdd\x24\xb4\xba\x33\x2e\x83\x42\xdc\
\x4d\x05\x48\x20\xf2\xc4\x48\x82\x76\xdd\xe2\x6a\x96\x8d\x24\x20\
\x39\x49\xad\xf6\x92\x02\xd1\x0e\x23\x2c\x62\x2c\x8e\x31\xe1\x43\
\xc6\x42\xac\x8e\x71\xd2\x7e\x72\x11\x94\x24\x76\x40\xbc\x4c\x0d\
\xc6\x2f\x62\xac\xa2\x2f\x63\x9f\x28\x6a\x43\x2a\x9d\x8e\x3d\x49\
\x26\xe5\x91\x49\x79\x34\xd4\x66\xa8\xaf\xc9\xe2\x19\x45\x2a\x95\
\x05\x05\x8d\x4d\x4d\x3c\x72\xf3\x75\x7c\xe2\x6d\x8b\xf0\x94\x30\
\x30\x52\x22\x65\x46\xc8\xa6\x53\xa4\xc6\x28\x0e\x45\xa1\xc1\xab\
\xad\xaa\x93\x9a\x1a\xea\x82\x18\x66\x79\x34\x32\x16\xc8\x0e\xc9\
\xe4\x1a\xe5\xb1\x70\x41\x16\x95\x1a\xc6\x98\x1c\xa2\xdb\x83\x62\
\x2e\xc0\x81\xe1\x34\xf7\x3d\x32\xc4\x70\x2e\x05\x26\x83\x22\xc7\
\x51\x8b\x1b\x39\xe6\x28\x41\x11\xf4\xc7\x34\xca\x80\xaf\x99\x38\
\x61\xdf\xde\xd2\x1e\x5a\x7b\x3c\xff\x4a\x2f\x3d\x43\x25\xa6\x34\
\xa6\x79\xe3\xb2\x76\xa6\x64\x0b\x88\xc9\x60\x44\x53\x52\x19\x86\
\x4b\xcd\xec\xde\xe7\xb3\x61\xc3\x10\x2f\xae\xcf\xf1\xca\xc6\x51\
\xf6\xef\xcf\xd3\xdd\x53\x24\x57\x12\x7c\x14\x9e\x68\xa6\x34\x78\
\x74\x4c\xab\xe7\xd0\x79\x35\x1c\x79\x58\x2d\x4b\x97\xa4\x99\xdd\
\x09\x2d\xe9\x62\x00\xbd\x1a\x10\x93\x46\x48\x61\x28\xe2\xab\x22\
\xca\xa4\x10\xad\xd0\xe2\x33\x50\x4c\xf3\xd7\xfb\xbb\x78\xe8\x99\
\x11\x7c\x5f\x47\x64\x5a\x2a\xae\x48\x8c\x03\xee\x31\x13\x6c\x6e\
\x17\x1b\x51\x2c\x0f\x41\x12\x20\x30\x13\xb7\xc8\x4d\x82\xb5\x1a\
\x4b\x53\x89\x57\xdc\x8f\xa6\x40\x58\x31\xb7\x30\x44\x16\x89\xd7\
\x18\x47\xbc\xa6\x5a\x1e\x26\xf1\x06\xbd\x91\x18\x9f\x8e\x33\x03\
\x6d\x76\xa5\x2b\x07\xcd\xd5\xa2\x4a\xac\xd8\x92\xb1\x62\x90\xce\
\x6a\x24\x96\xd2\x16\x47\x9c\xda\xa5\x88\x26\x4a\x03\xb1\x95\x59\
\x38\x2e\x1a\xf3\xa6\xad\xb4\x00\xa9\xa6\xdc\x88\xcf\x69\x8c\x78\
\x29\x16\x0b\x34\xbc\x77\xa0\xea\xc6\x34\x2a\x34\x77\x16\xe1\x2a\
\x26\x03\x8d\x55\x33\x39\x21\x7c\x20\x92\x98\xea\x0b\x0e\xc7\xd4\
\xe5\xb1\x47\xe0\xf3\xa4\x90\x47\xf8\x3c\x1a\x37\x4a\x10\x13\xcd\
\xd5\x60\x5a\x87\xb7\x15\xeb\x57\x49\xc2\x3e\x70\x1c\x4e\xed\xa8\
\x45\x2b\x09\x1e\x73\x44\x09\x6a\x8b\x69\x1c\x6d\x92\x1e\x79\x4f\
\x1b\x07\xca\x22\x8e\xbe\x85\x56\x8c\xd4\x86\x4d\x23\xc8\x8f\x54\
\xe7\x55\xd8\xf7\x8e\x7c\xa7\x2b\x96\x0e\xca\xcb\x62\xd0\xf1\x22\
\x69\x63\x70\x9e\xf2\x28\x15\x4b\x64\x6a\xed\xd0\xbd\x29\x6f\x01\
\xc1\xc3\x43\xfb\x86\x62\xbe\x00\x1a\x94\x1a\x66\xd1\xd2\xd3\x78\
\x61\xd3\x2b\xcc\x9b\x5e\xcf\xc6\x5d\x23\x1c\x7f\x54\x86\x52\x61\
\x08\x0d\x78\x02\xc6\x14\x18\xcd\x0d\x91\x4a\xa7\xf1\x54\xba\x52\
\xd0\x20\xfc\xd3\xd3\xd7\x07\x29\x35\x5e\x44\x43\x34\x4a\x81\x28\
\x0f\xbf\x14\xa4\x61\x64\xb2\x30\x6d\x7a\x09\x8f\x34\xe2\xd7\x80\
\x14\x31\xa4\xd0\x28\xd6\x6f\xf5\x78\x61\x4d\x1f\x06\xbf\xcc\x3e\
\x85\xd7\x1c\x32\x85\xd6\x86\x52\xd0\x17\x53\x09\x4a\x8f\xe5\x3b\
\xea\x83\x2b\xa8\x61\x34\xb9\x82\xb0\x6e\xdb\x28\x45\x5f\x38\x74\
\xa6\xc7\xeb\x8e\x2c\x61\x44\x53\x94\x3a\x0e\xf4\x2b\x9e\x5a\x55\
\xe4\xde\xc7\x0e\xf0\xf4\x0b\xfd\x6c\xd9\x36\xcc\x48\x41\xa3\x7d\
\x48\x89\x42\x89\x41\x3c\x43\x3a\xe3\x51\x04\x36\xef\xf6\x61\x6d\
\x9e\x7b\x1e\x82\x9a\xb4\x66\xf6\xec\x14\xc7\x1e\xdd\xcc\xd9\x27\
\x34\x72\xca\x71\xcd\x4c\x6f\x29\xe2\x69\x8d\x2f\x45\xb4\xca\x95\
\x89\x43\x8a\xb1\x3a\xb9\x5d\x3b\x85\x8d\x9b\x47\x19\x1a\x2c\x05\
\xf9\x9d\xa1\xa1\xa6\xe2\x46\xaf\x55\xc9\x1e\x49\x26\xa7\x88\xc3\
\x63\x09\x33\x46\x4d\x92\x44\xa9\x16\x1b\xb3\xba\x29\x44\x50\x61\
\x89\x97\xaf\x32\xae\x18\x4f\xd8\xab\xb1\x49\x42\x12\x4f\xb2\xaf\
\xe4\x4c\x5a\x85\x11\xc4\x25\xfd\x54\xf5\xe7\x49\x0c\xfd\x8a\x45\
\x07\xac\x12\x6b\x8d\x7c\xad\x8a\x0b\x54\x9b\x89\x8c\x45\x50\x8a\
\x31\x48\x89\x42\x9a\x61\xf2\x13\xca\x82\xce\x24\x39\xf6\x69\xa7\
\x24\x44\xbc\x97\x90\x52\x1b\x8b\xbd\x1a\x9b\x05\x6c\x12\x6a\x0c\
\x4b\x3c\x2f\x32\xfc\x7c\x13\x29\x44\xfb\x43\xae\xc2\xe6\x62\x1b\
\x2f\xee\x5e\xa6\xf1\x82\xe2\x76\xf8\x80\x68\xcc\x2c\xd2\x74\x5d\
\x1c\xc5\x21\x5c\xb1\x3b\xe3\x98\x4b\x49\x88\x95\x87\x19\xb2\x24\
\x66\x7a\x55\x15\x50\xae\x3d\xa9\x1d\xce\x12\x12\x3f\x83\xf6\x79\
\x32\xf6\x7e\x54\xee\x98\x71\xcc\xa8\x0c\x9f\x37\x1c\x8d\xea\xc7\
\x52\x48\x74\xfc\x9c\x53\x65\x2e\x23\xfb\xa5\x1a\x9c\x6c\x93\x8e\
\xec\xdc\x4b\xc7\xda\x90\x40\x38\xb2\x91\xfa\x31\x48\x36\x9d\xc1\
\xd7\x05\x6b\xdf\x8d\x0f\xa4\xae\x36\xc5\xe8\x50\x2f\x5e\xa6\x91\
\x52\x49\xa3\xb5\x41\x89\x22\x9d\x12\x32\xe9\xa0\x2c\x65\x4a\x79\
\xb4\xd4\x37\xb3\x7f\xf4\x00\x18\xc8\x0d\xe4\x98\xf3\x9a\xf3\x19\
\xde\xd9\xc9\x8a\x8d\xab\x39\xfa\xb4\x0b\xd9\xdf\x57\xa2\x36\x9b\
\x41\xfb\x45\x74\xa9\x44\xad\x97\xa7\xaf\x6b\x53\x45\x86\x65\xb2\
\x75\xa4\x6a\xeb\x48\xd7\xd4\x90\x4a\x67\x49\xa5\xb2\x6c\xdf\xb9\
\x0f\x24\x53\x09\xf1\xd4\xd7\xd7\x53\x2c\x16\x99\x3d\x7b\x36\x6b\
\xd7\xae\x25\x48\xf9\xd7\x88\x97\x2e\x6f\x6d\x01\xf1\x81\x34\x98\
\x14\x5b\xb6\x8e\xd0\xd7\x57\x2c\xcf\x55\x09\x2f\x65\x98\x3f\x37\
\x4b\x5a\xe5\x10\x5f\xc0\x64\xd0\x12\x28\x5e\x61\x12\xc5\x6e\x22\
\xc7\xdf\xc3\x57\x35\x14\xf3\x8a\x94\x6f\xa8\x4f\xa7\x48\x67\xea\
\xd8\xd3\x0b\x7f\x7b\xa2\x9f\xbf\xdd\xb7\x8b\x17\x5e\xe8\xa7\x7f\
\x20\xd8\xb4\x75\x75\x8a\x43\xe7\xd6\xb1\x60\x4e\x23\x8b\xe6\x79\
\xcc\xe9\x84\xd6\x16\xc5\xd4\x96\x3a\xfa\x47\x34\xdd\x03\x9a\xee\
\xde\x3c\xeb\x37\xe5\xd8\xb4\x35\xcf\xd6\xae\x3c\xbf\xfd\xf3\x20\
\x77\xff\x73\x98\x93\x5f\x3b\xc0\xbb\xdf\x38\x95\x33\x4f\x69\xa2\
\xb1\x36\x87\x67\x3c\xbc\x52\x1a\xa5\x4b\xa0\x06\xd1\xa9\x3a\x1e\
\x7a\x74\x84\x1d\xbb\x7d\x8c\x08\x62\x65\x10\xa4\xdc\xb2\xdb\xae\
\xd8\x82\x03\x5a\x71\x40\x2f\xe0\x20\x65\x58\x6c\x3f\xd1\x71\x25\
\x6b\x5b\x98\x89\xac\x22\x13\x4d\x40\xae\x24\x2e\x3b\x98\xb3\xe2\
\x20\xd7\xd8\x50\x9f\x58\xf1\x22\xfb\x20\xe1\x38\x8c\x91\x24\xf0\
\x04\x84\x28\x9c\xd3\x39\x16\x7f\x09\xc3\x3f\x49\xcc\xc9\x84\xd0\
\x9e\x13\x09\xb2\x73\x67\x23\x04\x26\x89\xb7\xf3\xb2\x6f\xea\x8c\
\x65\xda\x64\x8f\x24\x5c\x56\x26\x08\x1e\x5a\xe9\x05\x22\xd5\x19\
\xd6\xe2\x60\x3a\x1a\xd7\x7a\x56\x23\x61\xe1\x56\xb6\xf6\xb5\x2e\
\x62\xa8\xb3\xd2\x8d\xab\xc3\x8a\x58\xf1\xcf\x90\xc1\x11\xd9\x2f\
\x09\xc4\x9b\x30\x51\xc5\x98\xe4\xb0\xa9\xb8\xd6\x56\x1c\x4d\x9c\
\x2d\xc3\xc8\x6e\x53\x86\xa3\x07\xa7\x09\xa5\xb0\x84\x99\xbc\xb1\
\xa2\x15\x0e\xb4\x45\x92\xf2\x2e\x5d\x68\x40\x15\xc5\x9f\xf8\xcc\
\xa1\xb9\xd1\x26\x9e\x77\x19\xe3\x4c\xd8\xa1\x0b\xe3\xe6\xab\x80\
\x6b\x4f\x00\x00\x20\x00\x49\x44\x41\x54\x5a\xd8\xa4\x44\xbb\x28\
\x7b\xac\x57\xe8\x44\x88\x93\x71\x76\xe3\x51\xa9\x2c\xba\x94\x4b\
\x84\xb2\xdb\x9a\xb3\xac\x7b\x6e\x25\x0b\x5e\x77\x21\x22\x90\x49\
\x7b\x64\xd2\x82\x94\x8b\xa8\x03\x28\xa5\x58\x34\x7f\x01\xfb\xf7\
\x1d\x28\xa7\x5b\xc0\xbe\xbd\xbd\x64\xb3\xf3\x99\xbb\x64\x11\xbb\
\xfb\xf3\x88\x1e\xc1\xf3\x14\xd9\x74\x8a\xba\x86\x3a\xe6\xce\x3f\
\x14\x9d\x1b\xa4\x50\x1a\x06\x6d\x28\x8c\x0e\x53\x18\x1d\x1e\x0f\
\xa2\xa4\x52\xdc\xf7\xd0\xd3\x50\x37\xaf\xa2\xe5\x5b\x5b\x5b\xd9\
\x31\xf3\x6c\xd6\x18\xf0\xd4\x7a\x7c\xed\xa3\x45\x18\xcd\x65\x30\
\x94\xa8\x14\x62\x36\x1e\xda\x64\xd9\xb9\xab\xaf\x0c\x4f\x06\x9e\
\x58\x3a\x05\xad\x53\x7d\x3c\x55\x84\x52\x2d\x86\x34\x46\xca\x6c\
\xd3\xa0\x8e\xce\xc4\x8a\xb2\x7c\x06\x0c\x9a\x54\x4a\x33\x7d\x4a\
\x86\xb4\x81\xde\x91\x34\xb7\xdf\x3d\xc4\x93\x4f\xec\x64\xc5\x73\
\xfd\x0c\x0d\x6b\xea\x52\x70\xc4\xa2\x66\x4e\x3e\xae\x95\x13\x8f\
\xaf\x61\xc9\x21\x19\xa6\x4f\x4d\x53\x5b\x93\x27\x2d\x79\x44\x6b\
\x44\x7b\x88\xa9\xc1\x00\xbe\xaa\x63\xd4\x78\x74\xf7\x7b\xbc\xb2\
\x6e\x94\x87\x1f\xd9\xcf\xca\x47\x07\x79\xf0\xd1\x6e\x9e\x5a\xd5\
\xc3\x05\xcf\xb5\x73\xc5\x3b\x5a\x79\xdd\x21\x82\x98\x1c\x48\x09\
\x3f\xa5\x78\xe6\x85\x34\x7f\xba\x7b\x07\xa3\x45\x85\x31\x06\xa5\
\xfc\xc8\x26\x1d\x4f\x2b\x89\x28\xbf\x24\x76\xa3\x2b\x59\x3d\x1c\
\xbf\x54\x71\xd8\x32\x42\x34\xb4\x2a\xa8\x44\x33\xcb\x2d\x41\x18\
\xb6\xd4\x2d\x93\x58\xec\xe2\x03\xd5\xa8\x96\xb6\x97\x21\x09\x84\
\x42\xe3\x86\xa1\x5c\x71\x31\x27\xdb\x94\x78\x83\x5a\xc2\x15\x5a\
\xa8\x12\x1b\xb1\x5b\x4b\x85\x85\xa4\x55\x46\x4c\xdb\x5e\x7f\x52\
\x8c\xcc\x6e\xa0\x8b\x45\xba\xc1\x9d\x12\x13\xab\xb7\x9b\x04\x47\
\x56\x61\x63\x3a\x97\x40\xac\x64\x75\x57\xe5\xa8\x50\x83\x5b\x3b\
\x5d\xc6\x19\x03\xb5\xd3\x50\x6c\x01\x6b\xc1\xc5\x31\x21\x2d\x6e\
\x58\xdf\x88\xbb\x74\x61\xf8\x77\x17\x24\x3f\x91\xd1\x23\x0e\xa8\
\x3d\x62\x38\x3a\x20\xc9\x18\x9f\x40\x45\x2b\xd7\x48\x15\x02\x98\
\xdd\xa8\x3b\x52\xf1\xc6\x6e\x18\x6f\x26\x88\x9f\xda\x5e\x70\x98\
\xc5\x8d\x95\xa2\x64\x1c\x2d\xe3\xaa\xec\x13\x97\x21\x6b\x97\x9f\
\xb4\x61\xda\xc4\x31\xba\x10\xb0\x30\xdf\xc3\x6a\x03\x12\x66\xc6\
\xda\xa4\xb4\xaa\x65\xe8\x54\x0c\x9a\x55\xe9\x1a\x54\x71\x18\x6d\
\x7c\xa7\x58\x9a\xd9\x56\xcf\xfa\x57\x9e\x61\xc1\xeb\x2e\x0c\x48\
\x39\x6a\xfc\x15\xfe\x99\x3f\x67\x0e\x3b\xb6\xef\x62\xd7\x8e\x5d\
\x28\x0c\x62\x34\xa5\x92\x61\xa4\x54\x2c\x13\x7d\x0c\x94\x4a\xf8\
\xda\x67\xf1\x51\x87\xd2\xd0\xd0\x08\x0d\x8d\x01\x13\x77\x64\x90\
\xdc\x70\x1f\x25\x3f\x17\x24\xed\x03\xfd\x83\x39\x06\x86\xf3\xd0\
\x52\x57\x79\xde\x17\x6b\x8e\xad\xcc\x6f\x36\x9b\x65\x24\x37\x42\
\xbe\xe0\xb3\x77\x8f\x0e\xca\xca\x51\x2a\xbf\x52\x14\x35\xec\xed\
\xce\x53\xf4\xc7\x26\x33\x78\x3e\xa3\xc7\xfa\x0a\xb8\xd9\xb9\x93\
\x53\x96\x00\x3e\x35\xde\x08\x47\x2d\x6e\x20\x9b\xc9\xb0\x6a\x73\
\x89\xf5\x3f\x5e\xcb\xc8\x60\x91\xc6\x8c\x70\xc6\xeb\x5b\xb8\x60\
\x79\x27\xcb\xdf\xd0\xcc\x82\x19\x25\x6a\xd4\x30\x42\x1e\x43\x0e\
\x63\x3c\x7c\x1a\x18\x28\x64\xe8\xed\x13\x0e\x1c\x18\x65\xa0\x7f\
\xb8\x1c\xae\x15\x8c\x12\xa6\x36\xd7\x70\xc9\xc5\x8b\x38\xf3\x1c\
\xcd\x7d\x0f\x75\xf3\xe7\xbb\xb6\xf3\x87\x3b\xf7\xb2\x79\xcd\x10\
\x97\xbd\x77\x2a\xef\x3c\xa7\x09\xcf\xd3\xac\xdb\xd2\xc2\x8f\x6f\
\xdb\xcb\x4b\x1b\x06\xf1\x49\x23\x0a\xea\xea\xea\x19\x8c\x40\xb2\
\xbe\xa5\x48\x12\x03\x32\xe0\xac\xa0\x12\x16\xcc\x61\xd8\xcb\x8e\
\x67\x6a\x0b\xd2\x0a\x33\x47\x23\x25\xd7\x88\x32\xe9\x70\xd4\x7a\
\x8d\xe5\x4d\x25\xc5\xdd\x1c\x30\xa5\x84\xbc\x59\x45\x3c\x0d\xc3\
\x29\x30\x13\xe9\xa8\x6e\x32\x88\x9d\xa3\xe9\xb2\xf4\x23\xa4\x20\
\xdc\xe9\x04\xb6\x11\x12\x8b\xbd\xb9\x60\x42\xdc\xb5\x67\xc5\x01\
\x9f\x26\x16\x2b\x48\x88\x35\xba\xfa\x33\x56\x0c\x22\x99\xe0\xbc\
\x98\x64\x0f\x35\x52\xe6\xac\x6a\xe2\x66\x32\xf2\x40\x15\x8f\xd4\
\xd9\xe8\x59\xe2\xa4\x22\x7b\xed\x44\x1c\x75\x63\x89\x33\xa1\xc3\
\xcc\x61\x11\x47\x28\x59\xe2\x6c\x6f\x57\x63\x63\x61\x12\x34\x68\
\x07\x4c\x9e\x54\x4d\xcb\x85\xb2\x58\x8d\x83\x2b\x86\x69\xa4\x34\
\xa0\x8e\xc7\x14\xec\x3e\xb2\x6a\x02\x43\x55\xaa\xc4\x05\x5d\x36\
\x88\x48\x15\xd4\xc1\x91\xbb\x89\x2b\xdc\x91\x54\x28\xd7\x51\x41\
\x2b\x6c\x20\xda\x86\x23\x49\xe8\x88\x43\x19\x63\x28\x95\x53\x72\
\x44\xd2\x28\x95\x0e\xaa\xc6\x38\xd6\x63\x5e\x67\x23\xf7\xff\xfd\
\x39\xce\xce\xed\x27\xdd\xd0\x8e\x12\x85\x52\x12\xcb\xa1\x54\xca\
\xe3\x0d\xc7\xbf\x9e\x67\x78\x9a\x9d\xdb\x77\x04\x4a\x93\xa0\x18\
\x81\x2a\xff\x7f\x3a\xe5\x71\xf8\xa2\x85\x34\x48\x86\xc2\x70\x8e\
\x4c\xb9\xa1\x41\xb6\xae\x91\x6c\x5d\x23\x68\xcd\xe8\x70\x3f\xf9\
\xd1\x7e\x9e\x5f\xb7\x0d\x6a\x9a\xca\x25\x03\x6d\x39\x6b\xa8\xad\
\xad\x65\x24\x9f\xa7\xa8\x35\x5b\xb7\x0e\x53\x2c\x4e\x21\x83\x06\
\x95\x47\x44\x81\x2a\x51\x28\x69\x8c\xf1\x82\xaa\x39\xc6\x50\x2c\
\x42\x4f\x6f\x1a\x83\x07\xa2\x11\x29\xa0\x8c\x46\x8c\x17\x78\x97\
\x32\x69\xcd\x19\xe4\x3f\xea\x22\x4b\x8f\x4a\x33\x63\x76\x8a\x57\
\xb6\x28\xcc\xb0\xe6\x75\x4b\x9a\xb8\xe8\x8d\x53\x39\x7f\x79\x2b\
\xb3\xda\x8b\xa4\xcd\x1e\x94\x0f\x98\x2c\x7e\x0a\x06\x4b\x29\xd6\
\x6d\xf0\x78\xea\x85\x41\x9e\x7e\xb1\x8f\xf5\x1b\x47\xd8\xd3\x9d\
\x63\x28\xe7\x83\x56\x08\x0a\x85\xa1\xb6\x56\x68\x6b\xab\x63\xd1\
\x21\x0d\x1c\x7b\x4c\x2b\x97\x5c\xb8\x90\xc7\x1e\xeb\xe3\xa9\x97\
\xf6\xb2\xe5\xa6\x12\xfb\x07\xea\x59\xb2\x64\x1a\xbf\xbc\xa3\x8b\
\xbb\x1e\xd8\x4e\xc1\x78\x80\xc6\xe0\xb3\x68\xd1\x91\x3c\x1f\x51\
\x98\x76\x03\x69\x85\x05\x7b\x5a\x9e\x87\x38\x0e\x69\xe4\x30\x58\
\x42\x2f\x42\x32\x09\x33\xfb\xf4\xb8\xc2\x55\x49\x71\x0f\x57\x42\
\xbc\x0b\xd2\x11\x37\xcb\x22\x46\x45\xb7\xa0\x66\x1d\x62\x51\x46\
\x58\xb3\xba\x8a\x30\xb2\x72\x22\x5d\xbd\x1f\xb5\x05\x33\x19\xcb\
\x8b\x16\x97\x15\x6c\x42\xe3\x09\x41\xb9\xa2\xa3\xe4\x9a\x88\xc7\
\x6c\xc7\x66\x27\x68\xc2\x1c\x23\x47\xe8\x38\xd4\x18\xf9\xb3\x55\
\x1f\x16\xe2\xb9\xf6\x95\xb1\x27\xc4\x55\xc3\x30\xb4\x71\x25\xf0\
\xe3\x56\x42\x36\x44\x96\xb8\x0f\xf4\x24\xe2\x25\x12\x8d\x01\x46\
\x7a\x74\x3a\x62\xab\x76\x22\xbd\x50\x1d\x52\x8e\x84\x28\x70\x34\
\x10\xb0\x8d\x31\xdc\xc5\xd3\x8d\x5d\x8f\x97\x78\xc9\x3d\x57\xa5\
\xab\x08\x7c\x6e\x5f\x28\x55\x62\xb0\x71\xf2\x73\x3c\x1e\x98\x60\
\x80\x68\x2b\xac\x11\x16\xc4\x06\x47\x99\x44\x57\xc1\xf5\x24\xc5\
\xe6\x32\x64\x5d\x76\x5d\x42\x91\x0d\x57\x29\xc1\xc8\x33\xda\x05\
\x1e\x8c\x43\xb7\xda\xcf\xe5\x68\xa6\x50\xfe\xae\xed\xc3\x2d\x14\
\x8c\xc7\xbe\x3d\xdb\x98\x31\xf7\xf0\x4a\xb5\x21\x91\x10\x04\x2f\
\xc2\x9c\x0e\x98\xb3\x5a\xb3\x7b\xeb\x6a\x16\x2c\x3d\x23\x50\x96\
\x22\xce\x82\xea\x99\x4c\x86\x37\x9c\x74\x22\xbb\xb6\xef\xa0\x6b\
\xeb\x56\x86\xfb\xfa\xf0\xfd\x12\x35\xe9\x14\x53\xa6\x34\x33\xbb\
\xb3\x93\x9a\x94\x42\x17\xf2\x0c\xed\xdb\x4f\xb6\x21\x4b\xdd\xb4\
\x69\xa1\x6a\x8e\x8a\xda\xc6\x29\xd4\x34\xb6\xf0\xa3\xdf\xdc\x88\
\xae\x6b\x8b\x7a\xee\x21\x59\x38\x7d\xfa\x74\x0e\xf4\x04\xe9\x23\
\x6b\xd6\x77\xd3\xd3\xdf\x4c\x7d\xcb\x58\x91\xb9\x00\xbe\x6f\x6e\
\x49\xa1\xd4\xf8\x39\x2f\xf9\x86\x4d\xdb\x72\x14\x75\x1b\x19\x35\
\x82\x50\x28\x23\x60\xa9\x71\x51\x6a\xc6\x18\xb9\x26\xb2\xb5\x82\
\x3d\x1e\x40\xc0\xf9\x02\xec\xdb\x9b\x67\xe6\xec\x7a\x16\xcc\x4f\
\x73\xf6\x19\xd3\xd9\x74\x4b\x17\x87\xcd\xad\xe3\x9a\x4f\xbe\x86\
\xd3\x5e\xaf\x49\xa9\x41\x30\x23\xa0\xa0\x48\x96\x82\x6e\xe4\xb1\
\xe7\xf3\xfc\xed\xfe\x6e\x56\x3e\x76\x80\x2d\x3b\x0a\xe4\x8b\x5e\
\x79\xdb\x19\x6a\x6a\x6b\xc8\x64\xb3\xa4\xd2\x19\x86\x47\x86\x19\
\x1c\x2e\xb1\xab\x77\x90\xd5\xeb\x07\xb8\xe7\xde\xdd\xcc\x9e\xd5\
\x44\x7d\x63\x2d\x5e\x7d\x8a\xed\xdb\x73\x7c\xe3\x07\xeb\x98\x3a\
\xb5\x99\xcd\x3b\x7b\xf0\x55\x50\xf9\x47\xb4\x8f\x88\xf0\xbc\x77\
\x44\x64\x8f\xa5\x92\x61\xc9\x84\x18\x43\xc4\x1a\xb7\x2b\x60\x38\
\xd2\x3a\xec\x38\xa8\x72\x30\xda\x74\x48\xa1\x89\x71\xe4\x64\x59\
\xc2\xde\x38\xd2\x05\x5c\x8a\x2d\x62\x31\x87\x59\xb4\xc6\xaa\xd8\
\x63\x33\x65\x13\x84\xa7\xed\x11\x85\xe3\x5e\x61\xa8\x48\x11\x4f\
\x8b\x71\x42\x47\x36\x9b\x35\xa9\x40\xb9\xb6\xa0\x28\x13\xf5\x4e\
\x9d\xc2\x63\x12\x5d\x2c\x94\x43\x10\xd9\x29\x1b\x76\xfc\xd1\xf6\
\x6c\x34\xee\xbc\xb8\xc8\x7c\x25\x29\x4d\x57\x27\x99\x30\x92\x60\
\xa2\x24\x8b\xc9\x3e\x5e\x55\x27\xcd\x55\x54\x3e\x14\x5f\x77\x76\
\xc4\x48\x20\xab\x25\x12\x80\x2c\x22\x96\x8b\x7c\x16\x4b\xc3\x20\
\xa1\xf6\x6c\x15\xcf\x0c\x3b\x1f\x72\xa2\x42\xe9\x8e\x7d\x19\xae\
\x54\xe4\x2c\x5d\x67\xdc\x0c\xd1\x70\xbd\xe7\xa4\xac\x99\x30\x02\
\xa4\xc5\x21\x47\x1c\x6b\x13\xdb\x90\xe5\x6b\xc3\xb5\xa1\x23\x28\
\x87\x8a\x87\x39\x62\x95\x8e\x12\x72\x9c\x8d\x65\x64\x1a\x07\xab\
\xde\xce\xb5\xb5\x3b\xce\x08\x14\x4c\x8a\x5f\x6c\x3b\x9e\x0f\xcb\
\x4a\x36\xaf\x7d\x91\x74\xb6\xc6\xe9\x01\x0b\x86\x29\xa5\x3c\xb7\
\xfd\xf4\xeb\x5c\xf5\xdd\xe3\x50\xd9\xa9\x31\x38\x36\xea\x69\x2a\
\x66\xcd\x9d\xc3\xac\xb9\x73\xf0\xfd\x12\xfe\x68\x0e\xfc\x22\xa6\
\x90\x47\x97\x5f\x46\x07\xe9\x21\xb9\xc1\x7e\x4a\xc5\x3c\x8d\xd3\
\x3b\x50\x5e\xaa\x72\x8f\x97\xd6\x6c\x62\xd5\xd6\xbd\xd0\xbe\x24\
\x28\x8b\x27\x71\x88\xfc\x95\x86\xe3\x81\x35\x68\x51\xac\xee\x1a\
\xe1\xc9\x55\x43\xcc\x38\xb5\x16\x4f\x7b\x20\x69\xd2\x4a\x33\x7b\
\x66\x06\x95\xf2\xa1\xe8\x81\x49\xa1\x4d\x81\x97\x36\xf4\xd1\xdd\
\x3f\x8d\xda\xa6\xa0\x5b\x88\x96\x14\x9e\x52\x41\x61\x73\x54\x99\
\x30\x64\x30\x14\xca\xcb\x18\x34\xb2\x56\x94\x40\xf2\x18\x11\xf2\
\x6a\x1a\x8f\x3e\xbf\x8f\xce\x1d\x45\x4e\x3c\x61\x2a\x17\x9d\x9f\
\xe6\xf9\x17\x06\x79\xf6\x85\x5e\xee\x5a\xb1\x97\x8e\xb6\x56\x0e\
\x5d\xd0\x80\x31\x25\x7c\xea\x58\xbf\x2d\xcd\x5f\xff\x79\x80\xdf\
\xfd\x6d\x3b\xdb\x76\xe7\xd1\x08\x9e\x97\x66\xfe\xa2\xb9\x6c\x6c\
\x79\x03\x88\x61\x18\xc3\xb0\xb1\xe4\x3d\x86\x37\x64\x37\xb3\x7a\
\xf5\x2b\xac\xdf\x3a\x08\xf4\x07\xb1\x56\xa3\xe8\x19\x2c\x71\x60\
\xb0\xb7\xb2\xaf\xa4\x5c\x8a\xb4\xa3\xb3\x93\x5d\xd6\xbe\x4c\xc5\
\x08\x01\x21\xb2\x41\x63\x6d\x96\x0b\x8e\x5b\x14\x99\xdc\xe1\xd1\
\x22\x77\x3f\xb5\x81\xfa\x9a\x34\xc7\x2c\xea\xa4\xe8\xfb\xb4\x36\
\xd6\xf1\xf7\x67\xd6\x57\xe2\x19\x87\xcf\x6a\xe3\xa8\xf9\xd3\xf9\
\xc3\x23\xaf\xb0\x78\x76\x1b\xaf\x99\xd7\x1e\xb1\xea\xd7\xed\xdc\
\xcf\xc6\xdd\x3d\x5c\x70\xdc\xe2\xc8\x81\x1c\xce\xe5\xb9\xfb\xc9\
\xf5\x65\x7c\x3b\xd8\x90\x0d\x35\x69\x8e\x5e\xd4\xc1\xa3\x2f\x6f\
\xaf\x7c\x74\x46\x6b\x23\xed\x2d\xf5\x6c\xdc\xd5\xc3\x31\x0b\x3b\
\x78\x7e\xe3\x9e\xf2\x67\xba\x62\x92\xb2\xa1\x26\xc3\xd1\x0b\x3b\
\x78\xf4\xe5\x6d\xd1\x60\xbf\x25\x0f\x1a\xeb\x32\xe5\xf1\x8c\xbf\
\x37\x9c\x2b\x70\xf7\x53\xeb\x30\x06\x8e\x98\x33\x8d\xa5\x0b\x3a\
\x2a\x82\x71\x28\x57\xe0\xee\xa7\xd7\x73\xc2\x61\xb3\x99\x37\x7d\
\x4a\x8c\xb4\xb4\xb6\xab\x9b\x17\x36\xed\xe6\x88\x39\xed\x2c\x9d\
\xdf\x51\xb9\xe7\xd0\x68\x70\xcf\x86\xda\x0c\x17\x1c\x77\x68\x60\
\x5d\x5a\x4e\xc0\x3f\x9e\x5e\xcf\x51\x0b\x3b\x78\xe4\xe5\x2d\x1c\
\x31\x67\x7a\x70\x7d\x28\xc6\xb5\x76\x7b\x37\x9b\x76\x1e\xe0\xfc\
\xe3\x17\x8f\x33\xd2\xca\x02\x78\xed\xf6\x6e\x5e\xd8\xb4\xcb\x09\
\x7d\x44\x63\x34\x41\x41\xfb\x13\x0e\x9f\xcd\xbc\x8e\x29\x31\xa5\
\xbe\xa6\xab\x9b\x17\x37\xef\x66\xc9\x9c\x76\x5e\x33\xbf\x23\x64\
\x19\x0a\xeb\x76\x74\xf3\xfc\xa6\x9d\x1c\x31\xb7\x9d\xa5\x0b\x66\
\x54\x06\x3f\x34\x52\xe0\xee\xa7\xd6\xd2\x50\x93\xe6\x82\x13\x0e\
\x1b\x6f\xec\x10\xda\x74\x2b\x5f\xdc\xcc\x9e\x9e\xa1\xb8\xc1\x62\
\x34\xcb\x8e\x59\xc8\x13\xaf\x74\x71\xdc\x61\x73\xf8\xf7\xba\x1d\
\x0c\xe7\x0b\x34\x64\xd3\x1c\x7d\xc8\x0c\x1e\x5d\xbd\xad\xa2\xa0\
\x67\xb4\x35\xd3\x3e\xa5\x81\x17\x36\xee\x0c\xe6\x67\x61\x67\x65\
\x4d\x47\x0a\x05\xfe\xf6\xf8\x2b\x18\x28\x8f\xaf\x33\xf2\x5c\x6b\
\xb7\xed\x63\xe3\xce\x6e\x2e\x78\xc3\x11\x31\x62\xc2\xda\xae\xbd\
\xbc\xb0\x71\xa7\x45\x49\x55\x6e\x92\x0b\x70\xc2\xe1\x73\xca\x73\
\x17\x55\x56\x6b\xb6\xee\x61\xd3\xae\x6e\x8e\x3e\x64\x26\x8f\xae\
\xda\x52\x11\x50\x33\xa6\x96\xc7\xbd\x69\x17\x4b\xe6\x4e\xe7\x35\
\x0b\x3a\x23\x3a\x6e\x5d\xd7\x3e\x9e\x5f\xbf\xb3\xf2\x55\xf5\xb5\
\x19\xde\xf8\x86\x25\xa1\x9a\xf7\xe3\xeb\xf7\xcf\xa7\xd7\x70\xce\
\xeb\x0f\x1f\x7f\x06\x81\xa1\x91\x1c\x77\x3f\xbe\x1a\x44\x38\x62\
\x5e\x47\x30\x2f\xa1\xb3\xb0\xb6\x6b\x2f\x9b\x76\x74\x73\xc1\x1b\
\x96\x44\xf7\xf9\x68\x81\xbb\x1f\x5b\x8d\x11\x58\x32\xbf\x83\xd7\
\x54\xd6\xb4\x3c\xae\xed\xfb\xc8\xa6\x53\xcc\xeb\x98\x1a\x31\x18\
\x86\x46\xf3\xdc\xfd\xd8\xcb\x34\xd4\x66\x38\xe6\xd0\x59\x14\x4b\
\x25\xe6\x75\xb6\xc6\x72\x39\xd7\x6e\xdd\xc3\xc6\x9d\xdd\x1c\x7d\
\xc8\x6c\x1e\x7d\x69\x23\xd1\xb4\xb5\x60\x5f\x2c\x99\xd7\xc1\x6b\
\x16\xce\x8c\x9c\x83\x75\xdb\xf7\xf0\xfc\xfa\xed\x15\xc3\xa9\xa1\
\xae\x86\xa3\x0f\x99\xc5\xa3\x2f\x6e\xaa\x9c\xb3\x60\x2f\x34\xf2\
\xc2\x86\xed\xc1\x33\x2f\x9a\x19\x1d\xdf\xe3\xab\x68\xa8\xcd\x72\
\xcc\x21\xb3\x79\xe4\xa5\x8d\x80\x70\xce\xf1\xff\x4f\x65\x67\x1e\
\x1e\x45\x95\xae\xf1\xdf\xa9\xee\xce\x9e\x40\x08\x5b\xd8\x45\x20\
\x08\x2e\x2c\xb2\xa8\xa8\xe0\x82\xa2\x22\x8b\x0c\x3a\x3a\x88\xc3\
\x20\xea\x88\xdb\x9d\x3b\xea\x8c\xcb\x28\xea\xe0\xc2\xb8\xa0\x33\
\x82\xce\xa2\x4e\x04\x05\x51\x03\x5e\xc4\xc1\x95\x25\xec\x86\x5d\
\x20\x01\x25\x40\x20\x80\x40\x12\x92\xf4\x5a\xe7\xfe\x51\x9d\xee\
\xaa\x53\xa7\x3a\x0c\xcf\xd3\x0f\x49\xba\xab\xab\xea\xd4\x39\xe7\
\xdb\xde\xef\x7d\xfb\xb0\x7c\xfd\x4e\x4c\x53\x72\xed\x90\xbe\x7c\
\xb1\x6e\x27\xd9\x99\xe9\xf4\xef\xd5\x99\x95\x9b\xcb\x13\xe3\x59\
\xd4\xa5\x1d\xfd\x7b\x76\xe6\x83\xaf\x36\x2a\x0e\x96\xbb\xbd\x6b\
\x7d\x6d\x37\xf6\xef\x6c\x49\xdf\x9c\x2a\x32\x45\x18\x64\xcc\x42\
\x9e\x4a\x13\x11\xe7\xa0\x95\x98\x60\x66\x50\x79\xf0\x24\x25\x25\
\xf3\xb8\xfd\x8e\xfb\xf0\xf9\xad\x08\x33\xb9\x0c\xf4\x75\x0d\x43\
\x5a\x65\x0f\x33\x6a\x22\x4d\x8b\xd3\xb9\xc9\x58\x36\x05\x04\xd1\
\x60\x90\x9a\xaa\x43\xe4\xb5\x2b\xc4\x97\x96\x86\x69\xc6\x78\x68\
\xe6\x5f\x31\xf3\x3a\xc5\x6b\xdf\x4a\x49\xc0\xe6\xa8\xa4\x05\x02\
\x84\x23\x70\xa2\xd6\xc7\x67\x5f\xd6\x32\x62\x48\x1e\xad\xd2\x4f\
\x63\xc8\x74\x0c\xa0\xe7\x59\x99\xe4\x67\xfb\x39\x7c\xb2\xe9\x11\
\x49\xb6\xfc\x50\xcb\xda\x0d\x0d\x74\x1e\x19\xc3\x88\xa6\x63\x90\
\x03\xbe\xd3\x96\x13\x63\x06\xac\x36\x0d\xa3\x11\x29\xc2\x20\x03\
\x08\x33\x03\x11\xf3\x5b\x44\x05\x3e\x8b\x14\x3d\x2b\x5d\xe0\x4b\
\xcf\xe5\xb9\xb9\xfb\x19\x5b\x05\xd7\x5e\x9d\xcf\xd4\x5b\xdb\x72\
\xb2\xba\x91\x7f\x7f\x54\x4e\x59\xd9\x61\x06\xf7\x6b\x4f\xcb\x16\
\x3e\x4e\xd4\x9c\x60\xc3\x96\xe3\xec\x28\x0f\x12\x8c\x59\x8c\x3c\
\xe7\x9e\x7b\x2e\x5b\x02\xe7\x53\xa1\x96\x52\x5c\xe4\x31\xb0\x3a\
\x74\x36\xf4\x3a\x1b\x04\xe4\xfc\xf0\x09\x0d\x0d\x0d\x16\x71\x41\
\xd3\x7a\x6f\x4a\x90\x98\x31\xfc\x7e\x3f\x55\x85\x57\xb8\x9c\x4e\
\x3f\x31\x35\xb5\x9a\xf4\xe6\x5e\x9a\x7a\x15\x77\x5d\x77\x61\x1c\
\x1d\x65\xfd\xf3\xf9\x0c\x5e\xf9\x64\x2d\x59\xe9\x01\x3a\xb5\xce\
\xe5\xe7\xda\x46\x6e\xbf\xea\x02\x1e\x98\xb3\x8c\xd9\x8b\xd7\xd1\
\x3a\x2f\x93\xd2\x97\xa7\x90\x97\x95\xc1\xb1\x9a\x7a\x16\x3d\x71\
\x33\x39\x19\x4e\x8e\xc5\xc6\x70\x84\x8f\x57\xff\xc0\xed\x57\xf5\
\x73\x7f\xf7\xc7\x6b\xf8\xdd\xdb\x5f\x20\xe3\x77\xfc\xd2\x9d\xd7\
\x50\xd8\x2a\x97\x55\xdb\xe6\x25\x0c\xd2\x80\x1e\x85\x2c\x78\x6c\
\x22\x1f\x7e\xb7\x9d\x36\x2d\xb2\x39\x70\xec\x94\xf5\x99\xed\xfb\
\x51\x69\xb4\x5e\x9a\x3a\x32\x7e\xfc\x7e\x77\xef\x9c\x2d\x65\xf4\
\xd2\xd4\x51\xdc\x75\xc3\x20\xcd\xf5\xac\x66\xc1\x77\xdb\x59\xf6\
\xe7\xc9\xe4\x66\xa6\x25\x02\x50\x9f\xcf\xe0\xe5\x45\xab\x68\xd7\
\x32\x87\x5b\xaf\xb8\x00\xd3\x36\xd1\x85\x80\x50\x24\xc6\xa8\x3f\
\xbe\x43\xc9\x8c\x49\xee\xe3\x3e\x5a\x45\x7a\xc0\xcf\xdd\x37\x0c\
\x4e\xfc\xcd\x34\x4d\xa4\x84\x4d\xe5\x87\x18\xd1\xbf\x3b\x1d\x0b\
\xf2\x08\x17\x47\xf9\xe2\x85\x29\x8e\xe3\x01\x1a\x42\x11\x16\xad\
\xdc\xce\xe4\x91\x03\x1c\xe7\x6d\x7a\xaf\xed\x84\x67\x08\x86\x63\
\x1a\x74\xb3\x52\xcb\xc4\xa4\x7f\x8f\x0e\xcc\xbe\x6f\x34\x02\xe1\
\xb8\x8e\xfa\x60\x98\x1b\x1e\x7b\x87\x25\xcf\xde\x41\x4e\xa6\xfb\
\xf9\x8d\xfe\xe3\xbf\xf8\xf4\x99\xc9\xe4\x66\xa5\x3b\xef\x6d\xc1\
\x0a\xb2\x32\x02\xdc\x35\x7a\x68\xe2\xda\x7c\x3e\x83\x58\xcc\xc4\
\x30\x04\xe5\x07\x8f\x33\xe2\x77\x73\xa9\xfa\xb9\x36\xf1\x7d\x85\
\x05\xb9\x0c\x2a\xea\x44\xc9\xb3\x53\xb8\x7d\xe6\x07\xbc\xfb\xe8\
\xcd\x5c\xfd\xfb\xb9\x6c\xda\x7d\x88\x99\x77\x8e\xa2\xb0\x55\x1e\
\xab\xb6\xbf\x9b\xf8\xfc\x80\x9e\x1d\x58\xf0\xd4\xed\x8c\x7a\xe4\
\x2d\x16\x3f\xf7\x1b\x72\xb3\xd2\x31\x4d\x89\xcf\x67\x19\xb7\xd7\
\x16\xad\xe0\x83\xaf\xcb\x58\xf6\xc2\x34\xc7\xf5\x59\xe3\x13\x66\
\xc1\xb7\x9b\xb9\xe3\x9a\xc1\xae\xac\x7e\x28\x12\xa3\xd5\xe8\xc7\
\x08\x46\x22\x1e\xed\x2b\xce\x7f\xfd\x7b\x76\x62\xf6\xfd\xe3\xb4\
\x63\xb7\xf0\xdb\xcd\xb4\x6e\x91\xcd\xaa\x6d\xff\x48\x18\xbb\x01\
\xbd\x3a\xb2\xe0\xe9\x3b\xb8\xee\x91\xb9\x7c\xfa\xec\x54\x72\x32\
\xd3\x9d\xe3\x1a\x8a\xd0\xf6\xc6\xc7\x69\x0c\x5b\xe7\x7f\x6e\xea\
\xf5\x4c\x1f\x7f\xa9\x6b\x8e\x94\x95\x1f\xe4\xca\x01\xbd\xf8\xcd\
\x0d\x17\x39\x9e\xbf\xcf\x67\xf0\xf2\x87\xdf\xb0\xe0\xeb\x32\xbe\
\xf8\xcb\x3d\xda\x7b\xff\xf8\xbb\x2d\x4c\x1e\x35\xc4\x3d\xcf\x3f\
\xfc\x8a\x85\xdf\x6c\xe6\xf3\x59\xbf\xd5\x5e\xd7\xe7\xeb\x76\x30\
\xfe\xb2\x7e\x8e\xbf\x5b\xe7\xfb\x9a\xac\xf4\x00\x9d\xdb\xe6\x73\
\xea\x74\x03\xb7\x5e\x3d\x48\xb3\x16\xa2\x7c\xf8\xd5\xf7\xe4\xe7\
\x66\xb2\x6a\x5b\x05\xaa\x22\xc0\xd0\x3e\x67\xb1\xec\x2f\xd3\x35\
\xe7\x0d\xd3\xf6\xfa\x47\x68\x8c\x44\xac\xb5\x7c\xef\x38\x0a\x5b\
\xb5\x60\xd5\xe6\xbd\x89\x79\x3d\xa0\xa8\x33\x0b\x9e\x99\xca\xa8\
\xdf\xbd\x41\xc9\xf3\xf7\xb8\xe7\xe3\xfc\x2f\xc9\x4c\x0f\xd0\xa5\
\x5d\x2b\x56\x6e\xa9\xe0\x8a\x01\x45\x2c\x7b\xf9\x7e\x32\xaf\x98\
\x4e\x30\x1c\xe1\xef\x7f\x98\xc4\x93\x6f\x2f\x61\xd0\x39\xdd\xe8\
\xdc\x36\x9f\x95\x5b\xca\x01\xc1\x80\x9e\x9d\xf9\x72\xf6\x83\xb4\
\xc8\xc9\xb4\x19\x4c\x70\x73\x66\x27\xd3\xb7\xd5\x91\x96\x54\xff\
\x9c\x63\x19\x4b\x33\x06\x66\xd4\xf6\x7f\xfc\x67\x69\x42\xa4\x35\
\xfb\xe6\xcc\x63\xe2\xad\x77\x92\x9d\xd5\x02\x61\x08\x5b\x55\x46\
\x62\xc6\x4c\xd7\xba\x96\x66\x2c\xfe\x32\x13\x3f\x63\x9a\x48\x69\
\x3a\x88\x12\xcc\x48\x84\x9a\xaa\x83\x64\xb7\x6e\xc3\xdf\x4b\xbe\
\x62\xc7\xc1\x9f\xa1\x4d\x6f\x37\x06\x45\x61\xa0\xea\x5d\xd4\x9b\
\x6d\xdb\x7e\x20\x46\x1a\xcb\x57\x9d\xe6\xeb\x8d\x8d\x8c\x1d\xee\
\x27\x10\x0d\x02\x50\xd4\x3d\x9b\x3e\xdd\x73\xa8\xde\x54\x83\x69\
\xa9\x4e\x72\xb2\x4e\x50\xb2\xec\x38\x23\x86\xe4\xd3\x3e\xdb\xb4\
\x12\x34\xbe\x88\xa5\x24\x62\x0a\x30\x42\x48\x11\xc2\x14\x10\x8e\
\x65\xe2\x33\xfd\xa4\x11\x06\x11\x40\xca\x74\x84\x69\xe0\xa7\x9e\
\x4b\x06\xe7\x51\xbc\x3c\x97\x27\x5f\xad\x60\xf9\xca\x56\x8c\x1e\
\xd1\x95\x31\xa3\x8b\xf8\xfb\x27\xbb\xd9\x50\x5e\xcf\xf7\xe5\x7b\
\x11\xa6\x25\xbe\x1d\xc3\x47\x8c\x34\x32\x33\xd3\x68\x3c\x67\x0c\
\x5b\x84\x0e\x7d\xed\xc1\x20\x67\x7b\xfb\x74\xd1\x38\x3a\x55\x7f\
\x63\x09\x68\x3b\xf6\x01\xcb\x81\xe9\xd7\xaf\x1f\x1b\x34\xbe\x8b\
\x3f\x55\x0e\x2b\x27\x33\x9d\x92\x35\xbb\x18\xfb\xcc\x87\x89\xbf\
\xbf\x32\x6d\x24\x0f\x8d\x1b\x4a\x49\xe9\x2e\x90\x30\xf9\x2f\x9f\
\xd0\xb6\x65\x36\x8f\xfc\xe2\x12\xe6\x7c\xb6\x81\x07\x6e\x1c\x42\
\x4e\x66\x3a\x3d\xa6\xcc\xe6\x99\xdb\x47\x50\x51\x75\x82\x59\x1f\
\x95\x26\xbe\xb6\xea\x44\x1d\x2b\xb6\x55\x52\xfc\xfb\x71\x94\x94\
\xee\x62\xec\xd3\x1f\x24\x52\x9c\xaf\x4c\xbb\x96\x87\xc6\x5f\xc4\
\x93\xff\xfe\x86\xd3\xc1\x70\x3c\xf2\x4b\xa7\x63\xeb\x5c\x6e\x19\
\x7e\x7e\x62\x92\xf6\x3f\xbb\x90\xcc\xf4\x00\xf9\x39\x99\x89\xcf\
\x24\x0d\x82\x33\x5c\xb3\xde\x23\x59\x27\x75\xd5\x37\xac\xc5\x9b\
\x93\x99\x46\x49\xe9\x0f\x8c\x7d\x7a\x5e\xc2\x6b\x7c\xe5\xae\xeb\
\x78\xe8\xa6\x4b\xe8\xde\xbe\x15\x47\x4e\xd4\xd1\xf5\xfe\xb9\xd4\
\x34\x84\x00\x98\x71\xfb\x15\x3c\x71\xdb\x08\x4a\x4a\x7f\x60\xf1\
\x9a\x1f\x98\xf9\xc1\x77\x9c\xd5\xbe\x15\x48\x49\xff\x1e\x85\xdc\
\x3f\xee\x62\xee\x1b\x7b\xb1\x75\xdc\x7d\x6f\x52\x53\x6f\x71\x2d\
\xce\xb8\xfd\x2a\x9e\xf8\xd5\x15\xe4\xde\xf8\x34\xd3\xff\xba\x04\
\x80\xe0\x67\x4f\x33\xf5\x95\x8f\x29\xfe\xaa\x0c\x10\x14\x3f\x32\
\x11\x04\xdc\x37\xae\xe9\xf8\xbf\x51\x53\x1f\x76\x78\x4b\xc5\x8f\
\xde\xcc\xa6\x3d\x87\xf8\xcb\xc2\x15\x8e\x14\xe2\xae\xca\xa3\x04\
\xc3\xd1\xc4\x64\x19\x7a\x4e\x67\xba\xb5\x4d\x52\x71\xad\xd9\x79\
\x80\xfd\xd5\xa7\x12\xc6\xf3\xcd\xcf\xd6\xf0\xe6\x92\x52\x10\x82\
\xe0\xd2\xe7\x98\xfa\xf2\x22\x8a\xbf\x2c\x03\x09\xc5\x7f\xb8\x99\
\x8a\xaa\xe3\xcc\x5a\xf0\x5d\xe2\xf8\xaa\x9f\x6b\x59\xb1\xed\x27\
\x8a\x1f\xbd\x99\x23\x27\xeb\xe8\x7a\xdb\xf3\xd4\x9c\x0e\x82\x84\
\x19\x77\x5c\xcd\x13\xb7\x5f\x45\xc9\xea\x1d\x2c\x2e\xdd\xc9\xd8\
\x27\xdf\x65\xe8\x39\x9d\x59\xf3\xc6\x7d\x74\xbb\x75\x26\x59\x19\
\x01\xbe\x9f\xf3\x20\x13\x87\x9f\xcf\xab\x8b\x56\x25\x9e\xd3\xcc\
\xa9\xa3\x98\x7c\xcd\x20\x00\xde\xfb\xc3\x2d\x00\x7c\x39\xeb\x6e\
\xee\x79\x79\x11\xb9\x59\x19\xb4\xc8\xc9\x20\xc1\x4a\x12\x9f\x47\
\x99\xe9\x01\x1e\x9a\x70\x39\x47\x4f\x9d\xa6\xeb\x2d\xcf\x32\xfa\
\xe2\xbe\xcc\x79\xe8\x26\x1a\x43\x11\x1e\xb8\xe9\x32\xba\xb5\x6f\
\x45\x8b\x9c\x4c\xe6\x2c\x2e\xe5\xbb\xc4\x06\x6b\x8d\xcf\xe6\x8a\
\xc3\x4c\x7d\x71\x61\xa2\x76\xed\xf3\xc1\x92\x3f\x4f\x65\x44\xbf\
\x1e\x0e\xa0\xd5\xd0\x3e\x5d\xe2\x51\x95\x75\xd2\x35\x3b\xf7\xb3\
\xbf\xfa\x44\x62\x7e\xbc\xf9\x69\x29\x6f\x96\x94\x82\x80\xe0\x17\
\x2f\x32\xf5\xc5\x0f\x29\x5e\xbe\x11\x84\xa4\xf8\xb1\x5f\xd1\xb1\
\x75\x0b\x6e\x19\xd1\x3f\x11\x15\xf6\xef\xd1\x91\xcc\xf4\x34\x1e\
\x9c\x30\x9c\x8a\x43\xc7\x99\xf5\xe1\xd7\xc9\x71\x3d\x56\xcb\x8a\
\x2d\x15\x8e\x2c\xc5\x83\xaf\x7f\xc2\x83\xaf\x7f\x62\x45\x8f\xcb\
\x5e\x60\xfa\xab\x0b\x79\x67\xd9\x7a\x10\x82\xe2\xc7\x26\xb1\x78\
\xd5\x76\xc6\x3e\xf6\x76\xc2\x09\x9a\xf1\xeb\x51\x3c\x71\xc7\xb5\
\x9c\xdd\xa1\x35\x47\x4e\xd4\xd2\x75\xe2\x2c\x6b\xde\xd9\xda\xb6\
\x8a\x1f\xbf\x9d\x92\x95\x5b\xad\xe3\xe2\xa9\xd1\x57\xee\x1b\xc7\
\x43\x37\x5f\x49\xf7\x0e\x6d\xa8\x38\x74\x8c\x59\xf3\xbe\x4a\x6c\
\x34\x55\xc7\x6b\x58\xb1\x65\x2f\x43\xfb\x76\x63\xf6\x47\xdf\xb1\
\x6a\xdb\xde\x44\x84\x35\xe3\x37\x37\xf0\xc4\xaf\x47\x51\xb2\x72\
\x6b\x62\x8c\x16\xaf\xda\xc6\xcc\xe2\x2f\x38\xab\xb0\x35\x48\xe8\
\xdf\xab\x13\xf7\x4f\x18\x9e\x5c\x87\x9a\xda\xf2\xf4\x9b\x2e\xb7\
\xce\x3b\xff\x4b\xdb\x78\x9c\x62\xc5\xe6\x72\x47\xad\x36\x37\x33\
\x83\x8e\x6d\x5b\x72\xcb\xd5\x03\x13\x1b\x60\xff\xa2\xce\x64\xa6\
\xa7\xf1\xd0\xc4\x2b\x39\x72\xa2\x86\xae\x37\x3d\x6f\xdd\x33\x22\
\x7e\x7d\xd7\x53\xb2\x62\x4b\x62\x7b\xc8\xcb\xc9\x48\xe2\x17\xe2\
\x29\xbb\xbc\xec\x0c\x72\xb3\xd2\xe9\xd0\xba\x05\xb7\x5c\x39\x88\
\xac\x8c\x00\xb3\xa6\x4f\x60\x5f\xd5\x71\xae\x7a\xe0\x65\x5b\xaa\
\x5d\x7a\x83\xa8\x12\x08\x5f\x7f\xbc\x09\xde\x48\x2a\xf7\x18\xc2\
\xfa\x5b\x13\xdb\x90\x2f\x40\x03\x26\x63\x27\xdd\xc1\x8a\xc5\x8b\
\x48\xf3\x07\x2c\x44\xa7\x94\x98\x31\x19\x8f\x61\x92\x46\xd3\x32\
\x92\xf1\x57\x2c\x66\xfb\xdd\x19\x61\x26\x8d\xab\xc9\xab\xef\x7d\
\xca\xab\x0b\x96\x62\xb6\xed\x0b\x86\x4f\x5f\x37\xb6\xf5\xbb\x6e\
\xcd\x1e\x48\x5a\x60\x37\x11\xb3\x81\xa3\x35\x30\x7f\xc9\x71\x06\
\x9c\xd7\x89\xae\xf9\x35\x80\xa4\x5d\x9e\xe0\xd2\x81\x2d\x58\xb3\
\xf9\x14\x0d\xa6\x95\x0a\x8f\x21\x59\xbe\xfa\x28\x1f\x7f\x91\xc7\
\x6f\x6e\xce\x24\x10\xab\x8b\x7f\x5f\x24\x5e\x97\x35\x91\x66\x80\
\x98\xcc\x66\xef\xbe\x34\xaa\x0e\x9e\xe4\x8a\x11\x39\x20\x43\x16\
\x99\xbc\x34\x10\x48\x3a\xb6\x0c\x33\xe9\x9a\xb6\x6c\xdd\x7a\x8c\
\xa5\xab\x4f\xb2\x6a\xdd\x29\xba\x74\xce\xa5\xbe\x2e\x4a\x4c\x5a\
\x99\x64\x4c\x11\x4f\x4a\x48\xb2\x32\x05\x0d\x7d\xc6\x28\xad\x55\
\x78\xa0\x9a\xed\xf7\xea\x2c\x7d\x1c\x6c\x3f\x82\x16\x75\x8b\xa9\
\xad\xad\x4d\x18\x4b\x80\x82\x82\x02\x36\x98\xbd\xb4\x48\x69\xbf\
\x37\xfa\x30\xe9\x86\x4c\x1f\x3d\x98\xe1\xe7\x75\x05\xe0\x8a\x0b\
\xce\xa2\xac\xe2\x30\xa1\x68\x0c\xbf\xcf\xa2\x54\x7b\xaa\xf8\x5b\
\xd6\xbe\x3a\x95\xdf\xde\x30\x88\x7b\x47\x0f\xe6\xdd\xe5\x9b\xf9\
\xb1\xfa\x14\x00\x03\x7b\x76\xa0\xf8\xe1\xf1\x89\x33\x2c\xdd\x50\
\xce\x8a\xad\xfb\x13\x93\x6c\xfa\x98\xc1\x0c\x3f\xbf\x9b\xf3\xbb\
\x23\x51\xc7\x35\x0d\xec\xd9\x81\xe2\x47\xc6\x3b\x31\x3e\x5e\x75\
\xb9\x94\x32\x55\x4a\x3d\xce\x74\x17\x89\xa6\x8f\x1e\xcc\xf0\x0b\
\xba\x5b\xd7\xd3\xaf\x3b\x65\x15\x55\x84\x22\x51\x76\x1d\x38\x9e\
\x30\x96\x20\x59\xbf\xfb\x80\xc3\x4b\x9e\x76\xdd\x20\x26\x5f\x3d\
\x20\x71\x7d\x4b\xd7\xef\x21\x18\x8e\x58\xc7\xd5\x87\x13\xa2\xb6\
\xeb\x77\x1d\x72\x83\x44\xb0\x03\xa6\x50\xd2\x84\xc7\xe2\xc6\xd6\
\x56\x6f\x33\xa1\x74\xc7\x7e\x6e\x1e\x7e\x1e\xc5\x71\x03\xd3\x74\
\xde\x60\x38\x4a\xc1\xb8\x67\x08\x86\x23\x20\xe0\x9e\xd1\x43\xb9\
\xed\x8a\x64\x64\x70\xfb\x0b\x0b\x2c\x83\x69\xef\x67\xd3\x42\xfc\
\xad\x0b\x1b\xd8\xab\x13\xc5\x7f\xf8\x65\xf2\xf9\xad\xdf\xc5\x8a\
\x6d\x3f\x26\xaf\xad\x2e\x98\x18\xce\xe4\x98\xe8\xc1\xb1\x7b\x0e\
\x1e\xe7\xd4\xe9\x46\xe5\x7d\xc1\xf2\x8d\xe5\xe4\x64\xa6\x73\xd3\
\x65\xe7\xf3\x7f\x6b\x77\x72\xfd\xd0\x3e\xbc\xb3\x6c\x03\x5b\x7e\
\xac\xc2\x30\x04\x7f\x7d\x70\x3c\x5f\xcd\xba\x87\x93\x75\x0d\x20\
\xa0\x7d\xab\xdc\xc4\xe1\x07\x8e\x9e\xe4\x2f\xf7\x8c\x66\xca\x75\
\x43\xf8\xeb\xa7\xab\x59\xbe\x71\x37\x9f\x3e\x3b\x25\xf1\xfe\xb4\
\x1b\x86\x72\xe7\xf5\x43\x9d\xe3\x33\xe6\x71\x82\x91\x24\x4d\xd8\
\x4b\x77\xdd\xc8\xa8\x21\xe7\x30\xf9\xcf\xf3\xe2\xe3\x66\x3d\xd3\
\x7b\xc6\x5c\xc2\x6d\x57\x0d\x4c\x8e\xdd\xcc\x79\x71\x83\x29\x34\
\x9a\x93\xd8\xe4\xa2\xac\xdf\x07\x16\x75\xa6\xf8\xf1\x49\xda\x79\
\x3b\xb0\xa8\x33\xc5\x8f\x25\xdf\x5b\xba\x6e\x27\x2b\xb6\x55\x34\
\x13\xd4\x0a\x8d\x4c\x5e\xb2\xc4\xb0\x7e\x57\x65\x62\xd0\x77\xed\
\xaf\x4e\x18\x0e\x37\x1f\x32\x4c\x1f\x77\x19\xc3\xfb\xf7\xb4\xe6\
\xf9\xc0\x5e\x94\xed\x39\x40\x28\x12\x65\x60\x51\x17\x8a\x9f\x9c\
\x9c\xbc\xae\xb5\x3b\x58\xb1\x75\x2f\xd3\xc7\x5f\x46\x4e\x66\x3a\
\xab\xb6\xed\x4b\x6c\x3e\xeb\x77\xfd\xa4\x69\x0f\x93\x4c\x1b\x3d\
\x8c\xc9\xa3\x86\x24\xd7\xc2\x9a\x1d\x04\xc3\x11\x5b\x04\xe9\xbe\
\xc1\x81\x45\x5d\x28\x7e\xe2\x0e\xdb\x79\xb7\xb3\x62\x6b\x85\xab\
\x0e\x6d\x5d\xdf\xaf\xdd\x63\x2a\x60\xd7\xfe\x23\xd4\xd4\x87\x12\
\xeb\x68\xfd\x0f\xfb\x9d\xeb\x4c\xa7\xe6\x65\xfb\x43\xff\xa2\x2e\
\xcc\x9f\x31\x35\xf1\xfb\x8c\xe7\xde\xe5\xd4\xe9\x90\xa2\x7a\xa2\
\xa9\x13\xab\x3d\xc7\x86\x61\xbd\x4c\x3f\x18\xb1\x64\xc4\x29\x63\
\x49\x49\xb5\xbc\x42\x76\x1d\x39\xc8\x2d\x77\x3f\xc0\xc2\xbf\xbf\
\x4e\x66\x46\x3a\xa6\x94\xc4\xe2\x0c\x48\x52\xca\x64\x94\x69\x4b\
\xc1\x3a\x22\x4c\x8d\xb1\x04\xf8\xe7\x7f\xd6\xf0\xda\x07\x25\x44\
\x5b\x9f\x03\xbe\x80\xa6\x90\x6f\x27\xb1\x4f\xa6\xcf\x2f\xba\x68\
\x28\xab\x56\xaf\xc0\x14\x3e\x96\xaf\x3c\xc9\xbb\x8b\x5a\xf2\xe0\
\x94\xb6\x64\x1b\xc7\x49\x37\x63\x5c\x31\x2c\x9d\xe2\xa5\x3e\xf6\
\x1e\x26\x4e\x81\x67\x52\x1b\x11\xcc\x99\x7f\x88\xc2\x0e\x9d\xb8\
\x6e\x58\x26\x3e\x29\xf1\x19\x51\x24\x51\x84\x34\x10\xf8\x11\xa4\
\x93\x95\x95\xc1\x97\xa5\x3f\x11\xcd\xca\x65\xf8\xd0\x96\xa4\x99\
\x3f\xc7\x85\xae\xd3\x30\xcc\x10\x63\x2e\x0b\xf0\xc3\xde\xf6\xbc\
\xfc\xce\x71\x4e\x85\x4c\x6a\xca\x6b\xf0\xc9\x34\xab\x0e\x1a\x2f\
\x8f\x08\x2c\x81\xe9\x86\x73\x6e\xd2\xd0\x67\x2a\x8c\x65\x80\x56\
\x15\x47\x79\x84\x35\x3d\x6f\xc4\xf8\xfe\xfd\x44\x9b\x4b\x56\x56\
\x16\xc7\xba\x8e\x74\x97\xed\xe2\x2f\x1b\x4a\x56\x65\xe2\x70\x4e\
\xaa\x76\xf9\x39\x0c\xeb\xdb\x85\xd2\x9d\x07\x18\xfd\xa7\x79\xcc\
\xbe\x67\x14\xa4\x5b\x9e\xd1\xba\x3d\x55\x7c\xb1\xa9\x82\x59\x77\
\x8e\xc4\x94\x92\xe7\xe6\xaf\x04\x29\xa9\x6b\x0c\xb1\x71\x4f\x15\
\x2f\x7c\xb8\xca\x71\xa1\x13\x86\xf5\xc1\xef\xf3\x25\xce\xdb\xae\
\x65\x36\xc3\xce\xed\x4a\xe9\xce\x4a\x46\x3f\x39\x9f\x48\x24\xe6\
\x10\x30\x5e\xbc\x76\x37\x63\x67\xcc\x4b\x78\x07\x37\x0c\x29\x62\
\xc9\x8c\x5b\x1d\x13\xbe\x43\x41\x1e\x13\x2e\xed\x6b\xab\x15\x86\
\x58\xb6\xa9\x1c\x64\xfc\xbd\x4b\xfa\x26\x6b\x3d\x8d\x61\x96\x6d\
\xaa\xd0\x40\xfd\xad\xff\xda\xe5\x67\x33\xec\xdc\x6e\x94\xee\xa8\
\x64\xf4\x9f\x8a\x79\x76\xf2\x95\xdc\x7e\x75\x7f\xa6\x5d\x77\x21\
\x27\x6a\xad\x0d\x7f\xfa\x98\x21\x34\x04\xc3\x44\x63\x92\x2e\x6d\
\xf3\x28\xfe\xb2\x8c\xcf\x37\x58\xe7\x1b\xd8\xb3\x03\x8f\xfe\xf2\
\x72\xfe\xfd\x65\x19\x57\xf5\x3f\xdb\x3a\xee\x74\x23\x48\xc9\xf4\
\x31\x17\xc5\x8f\x8b\xb9\xa1\xfa\xca\xbf\xda\x86\x10\x63\x2f\xee\
\xc3\xb4\xeb\x07\x73\xa2\xae\xc1\xe1\x1d\x15\x7f\xb5\x99\xea\x93\
\xa7\x13\xca\xec\x20\x18\x58\xd4\x91\x47\x6f\x19\x8e\xdf\x2f\x20\
\x6a\x7d\xe1\xe4\x17\x17\x32\xf9\xc5\x85\x1a\xbf\x41\x78\x0b\x1c\
\xc7\x8d\x77\x5d\x63\x88\x8d\xbb\x0f\xf0\xc2\x07\xdf\x3a\x2e\x72\
\xc2\xa5\xe7\xd1\x10\x8c\x30\xf6\x92\x73\x99\x36\x7a\x30\x27\x6a\
\x1b\x00\x12\xb2\x45\x5a\xa0\xae\xf4\x56\x14\xb9\x72\x60\x4f\x6e\
\xbc\xd8\x7a\x3e\x23\x2f\x2c\x02\xe0\xd6\x2b\xfb\xf3\xdd\xd6\x7d\
\xfc\x6d\x49\x29\x7e\xbf\xc1\x65\xe7\x77\x77\x1d\x5a\xd7\x18\x62\
\xcc\xb0\x73\x19\xd1\x1f\x16\x7c\xb3\x99\x6f\xcb\x2a\xf8\x9f\x5f\
\x5c\x4e\x63\x28\xc2\xc9\x3a\xeb\x39\xbd\x59\xb2\x86\x6f\x37\x5b\
\x75\xb3\x81\x45\x9d\x78\xf4\xd6\xab\xf0\x1b\x3e\x90\x16\x3c\x7e\
\xca\x75\x43\x78\x68\xe2\xe5\xbc\xf6\xd1\x0a\xde\x5b\xbe\x01\xbb\
\x6a\xcb\xe4\xe7\xe7\x33\xf9\xf9\x79\xe8\x21\xe1\x34\xd3\x76\x25\
\x58\xbc\x7a\x07\x63\x1f\xff\x47\x22\x22\xbb\xe1\xe2\xbe\x2c\x79\
\xfe\x4e\xea\x1a\x42\x6c\xdc\x55\xc9\x0b\x89\x88\xca\x9a\x07\x13\
\x2e\xed\xc7\xa7\xab\xb7\x12\x35\x63\xf1\xac\x47\x3a\xd7\x0f\x3d\
\x17\x01\xf8\x7d\x06\x43\xfa\x74\xb5\xb2\x07\xd2\xfa\xdd\xc1\x88\
\xa5\xcc\x9b\xf1\x97\x9d\xcf\xb4\xd1\x17\x73\xa2\xa6\xd1\xf1\x6c\
\x7d\x36\x29\xb2\x76\xad\x72\x19\x76\xfe\xd9\x94\x6e\xdb\xc7\xe8\
\x87\xdf\xe4\xb9\xbb\x6e\xb4\xae\xeb\xfd\xe5\x8e\xef\x9b\x70\x79\
\x3f\xfc\x3e\x83\x0e\xad\x5b\x30\xe1\xf2\x7e\x89\xef\x9a\x7e\xd3\
\xe5\xf1\xb9\x6c\x5a\xeb\x19\xe8\xd0\xba\x25\xc5\xff\xd9\xc0\xe7\
\x6b\x77\x58\x6b\xa1\x77\x17\x1e\x9d\x34\x92\x92\x95\x5b\xad\xe3\
\x87\x0f\x48\x18\xfb\xd3\xc1\x20\xcb\xd6\xfd\x10\x1f\x8f\xfd\xf1\
\xf3\x26\xc1\x53\x13\x2e\x1b\xc0\xa7\xab\xb6\x10\x8d\x25\xeb\x4b\
\x8b\x57\x6d\x65\xec\xa3\x73\x13\xf7\x7c\xc3\x45\xe7\xb2\xe4\xa5\
\xdf\x52\x5b\x1f\x64\xfc\xe5\xfd\x98\x76\xe3\x25\xae\xf9\x78\xea\
\x74\x23\xbf\xb8\x62\x20\x77\x8e\xb9\x98\x1e\x1d\xdb\x02\x30\xee\
\xf2\x01\x44\xa2\x16\x63\x4e\xd3\x73\x5d\x5a\xba\x8d\x31\x0f\xff\
\x8d\xec\xcc\x74\x3e\x9b\x35\x9d\xf7\xff\x34\x85\xff\x99\xbd\x90\
\xb7\x97\xac\x24\x25\xa3\x88\xb6\xbf\xbb\x09\x18\xe5\xb3\xe2\x12\
\x19\xd7\xd2\x94\xb1\xc4\xfb\xb2\xa0\x07\xdf\x6c\xdd\xcb\xa0\x6b\
\xc7\xf3\xed\x27\xef\xd3\xa6\x20\xdf\x32\x94\x86\x5a\xd7\x4c\xa6\
\x60\x69\xfa\x3f\xe6\x4e\xc7\x06\xc3\x51\x5e\xf9\x68\x39\xf3\xfe\
\xb3\x92\x68\x41\x4f\x48\xcf\x6e\x06\x39\xe6\x9c\x3b\xdf\x85\xce\
\xa6\x63\xcb\xbd\x54\x9f\x38\x42\x30\x62\xf0\xcf\xf9\xfb\x69\x93\
\x2f\x98\x32\x2e\x8f\x74\x33\xca\x05\xe7\xc6\xb8\xfa\xaa\xf6\xec\
\x2b\xae\x06\xc3\xc4\x27\x0d\xa2\x18\xec\xd8\x1f\x64\xf6\xdb\x87\
\xc9\xf5\x77\xe0\xd2\xa1\x99\xf1\x36\xea\xa6\xfd\x2d\x8a\x30\x4e\
\xd3\xa6\x03\xa4\xb7\xcc\x61\xd6\x9c\x03\x10\xeb\xc6\xc8\xa1\x39\
\x08\x2c\x7a\xbf\x98\x5f\xe0\xf3\x37\x32\xf5\xe6\x76\x54\x57\x1b\
\xfc\xab\xa4\x92\xb0\x00\x64\xc4\xb6\xda\x24\x52\x40\x6e\x6e\x0e\
\xb5\x42\x15\xf5\xd0\x41\xa9\xed\x6c\x5a\x4a\xff\xb0\xc2\x66\x95\
\x9d\x9d\x4d\x7d\x7d\x3d\xb9\xb9\xb9\xd4\xf4\x18\xed\x06\x26\xda\
\x9e\xaf\xdf\xdd\x90\x6d\x6b\x90\x96\x82\xb5\xbb\x0e\xf2\xc6\x92\
\x8d\xcc\xf9\xbf\x4d\x2c\x9d\x71\x2b\xbd\x3b\xb7\x26\x10\xf0\x83\
\x10\x94\xee\x3a\x98\x88\xd4\x9e\x2a\xfe\x8e\x6b\x06\xf6\xe0\x9d\
\x65\x65\xfc\x78\xe4\x14\x08\x78\xf8\x1f\x5f\xf2\xf9\x33\xb7\xb1\
\xf0\x89\x89\x8e\x05\x58\x56\x71\x98\xf2\x43\x3f\xb3\x76\xf7\x01\
\xde\xf8\x6c\x3d\x73\x96\x6e\x62\xe9\x33\xb7\xc5\xbf\xdb\x70\x34\
\x78\x97\xee\xac\xe4\xea\x01\x67\xdb\x0c\xb8\xe5\xd9\x94\x55\x1c\
\x26\x18\x89\xb0\x75\x77\x35\x7d\xba\xb4\x61\x50\x51\x47\x16\x3e\
\x7e\xb3\xe3\x3c\xe3\x67\xcc\x07\x81\xf5\xde\x13\xb7\x38\xdf\x7b\
\x7a\x1e\x9f\x94\xee\x72\x2c\x80\xb5\xbb\x0e\xf0\x46\xc9\x7a\xe6\
\x2c\xde\xc8\xd2\x3f\x4f\xa2\x77\xe7\x36\x04\x7c\x82\x87\xdf\xfe\
\x82\xf3\xce\x6a\xcf\xdc\x07\xc6\x38\x80\x12\xd7\x3f\xf6\x6f\xa6\
\x5e\x37\x90\x41\x45\x9d\x58\xf8\xe4\xad\xca\x3d\x56\x71\xff\x1b\
\x4b\x38\xab\x7d\x3e\x73\x1f\x1a\xeb\x3c\xee\xf1\x77\x09\x46\x92\
\x69\xd3\xd2\x9d\x95\x2e\x67\xb1\x74\x47\x25\xb3\x3f\x5e\xcd\x79\
\xdd\xda\x31\xf7\xc1\x71\xae\xef\xbe\x7a\x60\x0f\xa6\x5d\x3f\xd8\
\x65\x44\xca\x2a\xaa\x08\x86\xa2\x4e\x8e\x4f\x47\xd4\xa2\x38\x46\
\x66\xd2\x33\xae\x6b\x0c\xb1\x75\xdf\xe1\xc4\x54\x78\xf8\xad\xa5\
\x7c\x3e\x73\x0a\x0b\xff\x34\xc9\x79\x8e\xf2\x43\x5c\xf9\xfb\xb7\
\x38\xa7\x6b\x5b\xe6\x3e\x74\x53\xe2\xef\xbf\x7c\x76\x5e\xfc\xda\
\x7f\xe2\xa2\x3e\x5d\x13\x1b\xdf\xae\xca\xa3\x9c\xa8\xb3\x80\x3e\
\xc1\x48\x84\x4d\xbb\x0f\x3a\x5a\x57\xa6\xbc\xf4\x21\xef\xfd\x67\
\x23\x6f\x3e\x78\x13\xf7\xbe\xf6\x31\x73\xff\x67\x02\x03\xef\x7e\
\x85\xda\xfa\x30\x48\xc1\xec\x8f\x57\x33\x7b\xd1\xea\xc4\x1c\xb8\
\xe1\xa2\xde\xcc\x98\x72\x0d\xf7\xbf\xfe\x09\x5d\xdb\xe5\x73\xe9\
\xf9\xdd\x99\x38\xa2\x1f\x13\x47\xf4\xa3\x31\x14\x61\xf4\x1f\xff\
\xc1\xba\x1f\x2a\xe9\xd1\xb1\x80\x7b\xc7\x5d\xc2\xbd\xe3\x2e\x71\
\x5c\x7b\x30\x6c\x65\x30\x86\x9d\x77\x36\x6f\x3e\x34\x81\x6f\xca\
\x2a\xf8\xdf\x37\x17\x6b\x78\x63\xd5\x1a\x30\x1a\xf2\x06\x6b\x60\
\xeb\x1a\x83\x6c\x6d\x02\x5b\x49\x49\xe9\xf6\x1f\xb9\x3a\x6e\xfc\
\xed\xdf\x59\x56\x7e\x90\xfb\x67\x2f\x62\xc9\xcc\x3b\x59\xf8\xf4\
\x14\xe7\xb8\xee\x39\x48\xc9\xea\x6d\x89\x08\xe9\x85\xbb\xc6\xf0\
\xdb\x71\xc3\x12\xef\xdf\x3d\x66\x18\x77\x8f\x19\xc6\xe6\xf2\x83\
\xfc\xeb\xf3\x75\x8c\xe8\xdf\xd3\x45\x50\x5e\xb6\xc7\xfa\xfe\xb3\
\x0a\x5b\x31\xf7\x7f\x7f\xa9\x7c\xff\x01\xf6\x1c\x38\xca\xda\x9d\
\x3f\xf1\xc6\x27\x2b\x99\x53\xb2\x9a\xa5\x2f\xde\x4d\xef\x2e\xed\
\x08\xa4\xa5\xf1\xf0\x9b\x25\x7c\xfe\xd2\x3d\x2c\x7c\xe6\x37\xae\
\xe3\xca\x0f\x1e\x63\xd0\x39\x5d\x1d\xef\xd5\x37\x86\xb8\xfe\xf7\
\x6f\x32\x75\xf4\xc5\x7c\xbf\xc7\x02\xdd\x58\x9f\x99\xea\x3a\x3e\
\x18\x8e\x68\xdf\x1b\xff\xc7\xb9\x3c\xfc\xb7\x4f\xf8\x7c\xd6\xbd\
\xda\xe3\x4a\x56\x6d\x49\x6c\x60\xa5\x5b\xf7\x72\xf5\xe0\x73\x5c\
\x08\xfe\xb2\x3d\x95\xdc\xff\xea\x07\x9c\x55\x58\xc0\xdc\x87\x7f\
\x95\x9c\x8f\x7f\xfa\x07\x00\x8f\xfc\xed\x63\xba\x15\x16\xf0\xd6\
\x23\xc9\x79\x3c\xef\x69\xeb\x3e\xc2\x91\x28\x65\xbb\xf7\x73\x61\
\xef\x2e\xac\xde\xb6\x97\xa8\x29\xa9\xa9\x0f\x32\xea\x77\xaf\xb3\
\x74\xd6\x74\xde\x7a\x74\x12\x6f\x2f\x59\x85\x9b\xaf\x59\x15\x96\
\xc0\x2d\xc9\xe7\x70\x40\x85\x95\x1a\x95\x3e\x47\x90\x67\x16\xf4\
\x66\x5f\x5d\x35\x45\x97\x5c\xcb\xcb\xcf\x3c\xce\x2f\x6f\xbc\xc6\
\xd1\xc7\xe8\x99\x8e\x95\xce\x08\x73\xdd\xee\x4a\x9e\xf9\xe7\x42\
\x7e\x3c\x56\x47\xac\x75\x5f\x4b\xc6\x4b\x6a\x5a\x85\x9a\x8c\x85\
\xa1\x21\x0e\x01\x0e\xf5\x18\x49\x46\xd9\xfb\x04\x23\x51\xaa\x8f\
\x4b\xe6\xbc\x5b\x49\x8b\xcc\x1e\xfc\xe2\xaa\x5c\x32\x8c\x1a\x26\
\x5e\xd9\x81\xd2\xd2\x06\xb6\x54\xd4\x21\xa4\x81\x34\xa3\x20\x60\
\xcd\xe6\x20\x2f\xbe\x75\x84\x3a\xd9\x85\x6b\x2e\x6a\x43\xa6\x79\
\x0a\x29\xc3\x08\x11\xc5\x47\x94\x74\x43\x70\xfd\x35\x6d\x59\xb1\
\xae\x86\xd7\xde\xae\x44\x98\xdd\x18\x79\x51\x36\xc8\x08\x61\xb2\
\x30\x0c\x83\x76\xad\x8e\x73\xdf\xa4\x3c\x1a\xea\x3b\xb2\xe8\xab\
\x43\x04\x0d\x19\x47\xb2\x5a\x3a\x95\xc8\x28\xed\x0b\xdb\x51\xeb\
\xd9\x39\x26\xdc\xbd\xb9\x12\x37\x5b\x96\xc2\xd0\x56\x50\x50\x40\
\x6e\x6e\xae\x05\xf2\x11\x8a\x80\x80\xe2\x54\x08\xae\x99\x21\x5d\
\xad\x5b\x32\x55\x7f\x97\x97\xd3\x22\x9d\x9b\xb1\xbd\x7d\xa3\xd9\
\x46\x56\xa9\xf0\x91\x2a\x19\x54\x17\xaa\x5f\x69\x24\x76\x29\x79\
\x48\x77\x43\xb9\x8b\xe4\x59\x49\xe1\xda\x61\xc8\xc2\x03\x92\xef\
\x99\xfe\x45\xdf\x54\xed\xd5\x43\x26\xce\xa0\x39\xdf\xd5\xdb\x26\
\xf0\xa6\xae\xf3\xb8\x4e\xb5\x9f\x56\xc7\x77\xea\x48\x6d\x40\x4a\
\x15\x11\x4f\x1a\x3f\x95\x43\x58\xe9\xe3\x6d\xac\x44\x0c\x00\x00\
\x10\xee\x49\x44\x41\x54\x74\x51\x01\x7a\x45\x66\x0a\x92\x57\x95\
\x88\x13\x1a\xee\x58\x95\x70\x40\x17\xf8\xe9\x86\x47\x7a\xd4\x3e\
\xbc\x7a\xdf\x55\xba\x3e\x95\x46\x4f\xe8\x6a\xe4\x29\xda\x46\x3c\
\x45\xaa\x3d\xa8\x82\x24\x1e\xf4\x85\xcd\xad\x29\x91\x9a\x50\x44\
\x4a\xb4\x5c\xc4\x8e\x31\x95\x29\x68\x31\x35\x92\x5b\xd2\x8b\xa2\
\x50\x7a\xf0\x34\x6b\xd2\xcd\xf6\x92\x81\xa3\xbf\x53\x1d\x7f\x65\
\x2c\x74\x84\x03\x9e\x84\x07\x38\xa9\x0c\x95\xb5\x26\x04\xf8\x7c\
\x3e\x2b\xc2\xd5\x4d\x7c\x99\x42\x29\xc7\xc1\x36\x25\x9d\xe3\x2c\
\x45\x5c\x39\x25\x79\x51\x22\xdc\x80\xef\x54\x25\xf9\x19\x30\xeb\
\xa9\x27\x19\x36\xa8\x1f\x59\x19\x99\x98\xd1\x28\x66\x38\x68\xb5\
\x91\x84\x42\xd6\xcf\xa1\x10\x32\x16\xa2\xb6\xf1\x34\xab\xb6\xed\
\xe1\xbd\xcf\xbf\x65\x5b\xc5\x7e\x62\x79\x1d\x30\x73\xda\x27\xd9\
\xa0\x74\x7b\x89\xda\xee\xa3\xab\x71\x49\x49\xa0\xac\x18\xd3\x34\
\x11\xf8\xe8\xdc\x36\x8d\xfb\xee\x68\xcd\xe4\x09\xd9\xa4\x07\x72\
\x99\x5b\x5c\xc3\xcc\xd7\xcb\x39\xd5\xe8\x27\x4a\x18\x0c\x13\x41\
\x80\x80\x8c\xd1\xa7\x57\x3a\xbf\xbd\xb5\x23\xe3\x46\xe6\x93\x9b\
\x5d\x87\xdf\x8c\x60\xc8\x0c\xa4\x90\x04\x8d\x6c\xde\xff\xac\x9e\
\x99\xb3\xf7\x90\xdf\x2a\x8b\x69\x93\xf2\xf9\xc5\xb5\x01\xb2\x0d\
\x81\xcf\x4c\x47\x60\xf5\x3e\xee\x3d\x92\xc1\x6b\xff\x3e\xce\x7b\
\x1f\x57\x51\x1f\x32\xe2\x54\x7d\x02\xa4\x9f\xc2\x76\x1d\xa8\xea\
\x3c\xec\x0c\xfa\xc9\xa4\x77\x5f\xb4\x54\xfa\x9b\x5d\x76\xca\x7b\
\x6f\x16\x5c\x37\x43\x6a\xdb\x0f\x1c\x04\xd2\xd2\xa3\x99\x57\x81\
\xf1\x3a\x88\xd0\xa5\xb3\xa9\xdf\x61\xc8\x70\x33\xcd\xe8\x6e\x0c\
\xf4\xfc\xda\x52\x6a\xfa\xd0\x34\xf7\xa0\x5b\xc4\xba\xfb\x11\x9a\
\x41\xd3\x12\x1e\x69\x36\x21\xa1\x41\x9f\xe9\x90\xb8\x0e\xaf\x47\
\x6d\xcc\x47\xdf\xa4\xed\x30\x3a\x52\x23\x3b\xa5\xdb\x60\x95\xcd\
\xd6\x21\xff\x25\xf4\x46\x40\xdd\xf4\x54\x3e\x5d\x17\x11\xab\x42\
\x34\xaf\x72\xd2\xea\x8c\x88\x48\xe5\x3c\xa9\x24\xe6\xca\x60\xa6\
\x64\x72\xf2\xd2\x49\x93\x78\x93\x97\xca\xd4\xaa\x23\x9e\x7d\xad\
\x1a\x6e\x58\x87\x7a\x85\xbd\x3f\x56\xe8\x19\x82\x52\x5e\xbf\x32\
\x17\x1c\xce\x8e\x74\x0b\x68\x23\x3d\xc6\x55\x7d\x46\xca\xf3\x17\
\x1a\xa3\x24\x15\x74\xa1\x97\x33\xa8\x1d\x66\x8d\x63\xa5\x0e\x97\
\xe3\x7c\xba\x75\xa6\x38\x48\x0e\x31\xf9\x66\xf6\x07\x97\x00\x81\
\x6c\xde\xc1\x73\xa8\xd1\xd8\x09\x4c\x74\xce\x88\x6c\xce\x9b\x4a\
\x66\x6b\x44\x93\xce\xae\x74\x13\x1f\xd8\xb5\x39\xa5\xae\xd7\x55\
\x42\x63\x2d\xbe\xfa\xa3\x18\xa1\x5a\x7a\x74\xef\x46\x51\xaf\x22\
\xba\x16\x16\x52\x90\x97\x47\x9a\x21\x38\x76\xfc\x67\xaa\x7f\xfe\
\x99\x1d\xe5\xe5\xec\xda\xbb\x97\xa8\x08\x10\xcb\x6a\x85\x99\xdb\
\x1e\x8c\x80\xed\x92\x0c\x0f\x6a\xd2\x14\xf7\x21\x9d\xbc\xbd\x19\
\x5b\xe7\x13\x8e\x44\x31\xf1\xd1\x26\xcb\xc7\x1d\x13\xf2\x99\xf4\
\xab\x6c\x32\x33\xf2\x79\x61\x66\x25\xf3\x3f\xab\xa6\xb1\x29\xc6\
\x35\x2c\x41\x67\x9f\xf4\xd1\xae\x05\x4c\x18\xd3\x86\x5b\x26\xe6\
\xd3\xf7\x2c\x83\x8c\x58\x0c\x5f\x2c\x8c\xe9\x8b\x52\x13\xcd\xe3\
\xd5\x7f\xd6\xf3\xca\x3b\x07\x48\xcf\x0e\x30\x79\x74\x0b\x7e\x3d\
\x2a\x9f\x5e\xdd\x0d\x8b\x04\xdd\x17\x22\x1a\x08\x71\xa4\xb6\x0d\
\xef\xce\x3f\xc9\x3b\x0b\x0f\x51\x79\xac\x9e\xa8\x00\x89\x0f\x1f\
\x7e\x62\x03\x26\xba\x9f\x6d\xb3\x3d\xd9\x52\xff\xcc\x5c\x76\x45\
\xe7\x98\x27\x8f\xf3\xd1\x7d\xc4\x53\x4e\xc8\x6d\x7c\x55\x18\xc2\
\x29\x4b\xa4\x4a\x14\xd9\x89\xa4\x85\xc4\x93\x2d\x45\x35\x50\x3a\
\x3d\x43\x95\xdf\x51\xdb\x70\xae\xfc\x4d\x28\xf5\x38\xbb\x87\x29\
\x35\x6a\xf4\x2e\xd1\x5f\xe1\x06\xbe\x08\x3b\x5d\x99\x32\xc1\xec\
\xc7\x1a\x42\x13\xd1\x78\x2c\x66\xe1\x5d\xbb\x73\x5c\xb7\x97\x28\
\xae\x54\x6e\x58\x15\x0a\xd6\xb5\x8e\xe8\x2e\x44\xa5\x5f\x73\x6c\
\x72\x86\x33\x7d\xeb\x68\xac\xf7\x4c\x3d\xd8\xde\x52\xa8\xe6\x74\
\xa2\xd0\x32\x15\x62\x4b\x15\x48\x96\x9a\xb9\x83\x42\x18\xa1\x4a\
\x34\xe9\x34\x50\x75\xfc\x78\x8a\xe1\xb1\x3b\x1e\x5a\xc3\x2c\x3c\
\xd2\x06\x38\xe9\x20\x21\x01\xec\xd2\xd2\xd2\x09\x85\xe0\x5e\xbb\
\xd2\x35\x3c\xa7\xe0\x26\x5a\x40\x71\xd6\x3c\x89\x23\x34\x91\x9f\
\x50\x1d\x35\xc3\xb9\x56\x54\xda\x46\x52\x08\x98\x0b\x8d\xe3\xec\
\xc5\xe7\xaa\x53\x66\x71\x50\x6a\x2a\x8b\xda\xc0\xc6\x53\xab\xe1\
\xd7\x55\xf7\x00\x81\xf7\xc6\xa1\xcd\x84\x29\xec\x56\xea\x38\x0b\
\x0d\x61\xbe\xfd\xc4\x32\x0e\xc6\x49\x18\x19\xd3\xea\x3b\x24\x5e\
\x0e\x69\xaa\x37\x62\xc6\x3f\xd7\x44\x66\xde\x04\xfe\x91\x56\x1d\
\xdd\x8c\x26\x6b\x9b\x66\x0c\x0c\x3f\x32\x23\x0f\x33\xb3\x80\x9f\
\x6b\x43\x94\xff\x54\xc9\x86\x2d\x5b\xf9\x6e\xe3\x26\xbe\x5e\xbf\
\x89\x75\xdb\x77\xb3\xbd\xf2\x30\x47\x1a\x4c\xa2\xb9\x1d\x88\xe5\
\x16\x22\xd3\x73\x6c\x8e\x60\x9c\xe0\xd5\x94\x36\x4d\xd6\xf8\xf9\
\xed\xa0\x27\xa9\xe1\x06\x46\x38\x82\x81\x68\xbb\xf3\x68\x1d\x3e\
\x40\x63\xe3\x69\x82\xc1\x4c\xb6\x6c\x6d\xa4\x7c\x6f\x03\xd9\xb9\
\x59\x9c\xd7\xaf\x90\xca\x23\x0d\x54\x55\x05\x91\x64\x59\xfd\x8c\
\x86\x89\x24\xc0\xe9\xa0\x8f\xef\xb7\xd5\xb2\x7d\x7b\x98\x68\x2c\
\x83\xb6\x85\x99\xe4\xe4\x80\x21\x7d\x64\xc8\x18\x03\x8a\x5a\x83\
\xc8\xa1\x74\x53\x1d\x6b\x37\xd5\xb1\x6d\x7b\x88\x88\x99\x45\xeb\
\x4e\x7e\xb2\x73\x23\xf8\x22\x69\xe4\xf8\x7d\x0c\x3e\x2f\x83\xee\
\x5d\x73\x68\xac\x8b\x51\x55\xd5\x48\x44\x4a\x4c\x21\x29\x08\x56\
\x13\x6a\xd5\x3d\xd1\x7a\xe8\xde\xaf\xf1\x76\x9e\x5d\x94\xa5\xa0\
\xe5\x0f\x96\x3a\xd5\x1a\xf0\xd1\x63\xc4\x53\x8e\x49\x6d\xd7\xfe\
\x11\xaa\x64\x92\x86\xb2\x4b\x28\x11\xa2\xd7\x86\xef\x00\x13\x69\
\x22\x09\x89\x22\x3e\x6d\x4f\x65\xe1\x34\x64\x76\x23\x22\x74\x9b\
\x87\xd0\x44\x47\x38\x99\x64\x1c\x05\x7b\xfb\xa0\xd9\xbf\x57\xa1\
\xcf\x52\x75\x28\x0d\x3c\x08\xa0\x41\xab\x24\x6f\x37\x30\x5a\xee\
\x59\xd5\x13\x4f\xa5\x6c\xaf\xec\x16\x42\xe8\xa3\x21\x15\x90\xd0\
\x54\xbf\x70\x31\xac\xa8\x51\xad\x92\x6e\x75\x79\x66\xaa\x93\xa2\
\x46\xb0\x3a\xe1\x68\x4d\x84\x29\xa5\xb3\x80\x9f\x20\x5f\xd7\x38\
\x01\x52\xc4\xbd\x74\xa1\x17\x39\x77\x71\xd9\xab\x46\x50\xe7\x7c\
\xd8\x9d\x3c\x85\x04\xdf\x13\xc1\x84\x33\x2a\xb3\x1b\x49\x43\x49\
\x2f\xaa\x22\xe7\x2e\x07\xcc\x38\x03\x37\xd9\x9e\xc1\x51\xb2\x37\
\x9e\x00\x2b\x0d\x17\xb3\x4a\xd1\x27\x15\x6e\x55\xc7\x7c\x92\x29\
\x1c\x3e\x85\xcb\x57\xcd\x9c\x38\xc8\xec\xd5\x31\x56\x9c\x64\x89\
\x9e\x13\xd6\xf1\x39\x0d\xff\x2e\x68\xd4\x65\x3c\xb4\x4b\x55\x70\
\x5b\xe2\x79\x19\x8a\xba\x12\x6e\xa7\x4f\x28\xce\x65\x9c\xd6\xcd\
\x32\x82\x24\xc5\xe1\x9b\x8c\x67\x93\xb1\x4c\xec\xa3\x4d\x40\x9d\
\xa6\x63\xe2\x46\x32\x1a\x82\x68\x38\xfe\x7f\x10\x22\x8d\x10\x09\
\x42\xb4\xe9\xff\xa0\xd5\x96\x21\xfc\x98\xfe\x0c\x4c\x5f\x16\x66\
\x5a\x36\x66\x5a\x16\xa6\x2f\x03\x69\x04\xe2\xe7\x8b\xd9\x22\x5a\
\xe9\xdc\xa7\xec\xe3\x64\x0a\xc5\x69\x44\x91\x68\x93\x6e\x3e\x70\
\x04\x0d\xf9\x45\x0c\x69\x1b\xa3\xfa\xc8\x61\x22\xd2\xcf\x8f\x07\
\x43\x94\x6e\x3c\xca\xc9\x90\x20\x3d\x37\x93\x63\xc7\x1a\x09\x05\
\xe3\x7d\xe2\x46\x14\x21\x4c\x8b\x9e\x5d\x48\x0e\x1d\x0d\xb2\x7e\
\x5d\x0d\x7b\xf6\x08\xa4\xaf\x05\x79\xad\xf2\xc8\x4a\xcb\x26\x2b\
\x70\x82\x0b\xfb\xc5\x48\x0b\xa4\xb1\xb3\x3c\xc8\xf6\xfd\xa7\x59\
\xb5\xe1\x14\x5b\x77\x35\x52\xd7\x98\x49\x6e\x6e\x2e\x79\x79\x26\
\x81\xb4\x53\x9c\xdd\x43\x30\x70\x60\x4b\x5a\xe6\xe5\x70\xf2\x58\
\x8c\x93\x27\x25\xf5\x0d\x51\x5a\x34\x56\x12\x6c\xd5\x43\xbf\x3e\
\xa5\x52\xc2\x70\xd9\x2d\xa1\x59\x27\xba\xbd\x55\xba\x32\xa1\x3e\
\x7a\xc5\x0d\x26\xba\x02\xb1\x70\x7b\xcb\x2e\x9a\x2f\x5c\x3d\x3d\
\x2e\x8f\x51\x4d\x85\xa8\xa9\x1f\x29\x9c\x7c\xb6\x02\xb7\xf1\x32\
\xd0\xa4\xeb\xa4\xfb\x3a\x53\x0a\x19\x0b\xe7\xf5\x1a\x86\xa2\x8e\
\xe0\x95\xe6\xd5\x11\xc5\x7b\xd4\x5f\x1c\xb2\x5f\x52\xa9\x1f\x1a\
\xee\xa8\x55\xa8\x7d\x5c\xe8\xa3\x6d\x55\x7d\x41\x08\x77\xa4\x28\
\x3c\x38\x35\xd1\x78\xf9\x8e\x34\x9b\xe2\xa9\x79\x79\xd5\x78\x5c\
\x8f\x5a\xff\x94\x1e\x06\x42\xa7\x72\x92\x18\x7b\x23\x29\x3b\xa5\
\x12\x2d\xa4\x94\xc0\xd2\x94\x0d\x12\x9f\x31\x14\xe7\x49\x13\x9d\
\x49\xa1\x89\xce\xbd\xee\x5d\x47\x66\x8f\x3e\x13\xa3\x5e\xbf\x50\
\x29\x10\xbd\xb4\xdb\xbc\xb2\x48\x0a\xe5\x9e\x43\x43\x54\x93\xd9\
\x91\x1e\xd2\x5f\x3a\x27\x2e\xe1\x88\xa0\xd1\xa0\x94\x1a\x10\x85\
\x3d\x85\xab\xe1\xf2\x35\x34\x35\x65\xb5\xb5\xa5\x89\xb2\xd2\xa1\
\x50\x22\xdd\x7c\xd5\x6a\x2d\x58\xa8\x0e\x8f\xa1\x37\x6c\xf6\xb1\
\x37\x44\x0a\x96\x1b\x34\xd2\x86\x68\xa2\x0b\x85\x16\xd4\x9e\x0e\
\x4f\x8c\x75\x2c\x89\x86\x6d\xd2\xf6\xb4\x47\x92\xb1\x30\xc4\x22\
\x10\x8d\x58\x86\x32\x16\xb6\x5e\xd1\x90\xf5\x8a\x84\x2c\x63\x19\
\x09\x26\x7f\x8f\xd9\x3e\x17\x8b\x58\x2f\x33\x62\x23\x42\xb0\xbd\
\xcc\x98\xbb\xdc\x64\xda\xa2\x4c\xfb\xbe\x20\xd4\x3a\xb5\xc7\xf3\
\x07\x0e\x89\x42\x0a\x7b\xf7\x23\x3b\x74\x80\xba\x86\x1a\xea\x43\
\x26\x7b\x2a\x4e\x73\xa0\xb2\x8e\xd3\xa7\x83\x98\x32\x1e\x59\x63\
\xa9\x7b\x20\x05\xa6\x19\x43\x0a\x08\x99\xb0\xf7\x60\x3d\xdf\xac\
\x3e\xc6\xe6\x9d\xf5\xd4\xc7\x02\x64\xb4\xca\xa6\xa0\x85\x60\x70\
\xdf\x2c\xda\xb5\xc9\xe6\x50\x75\x94\xfd\xd5\x21\x7e\x3c\x10\x61\
\xc5\x9a\x1a\x56\x6e\x38\x49\xf9\xa1\x18\x35\x91\x4c\x84\x2f\x9b\
\x36\xf9\x59\x5c\x3c\xa8\x35\x17\x9e\x5b\x40\x76\xba\xa0\xae\xf6\
\x34\xc7\x7e\xae\x45\x1c\xd9\xc1\x80\x76\x3e\xaa\x44\x1b\x77\x40\
\x21\x34\x19\x4d\xe9\x91\xa1\xf0\xdc\xaf\x0c\x57\x60\x22\x18\x35\
\x43\xba\x1d\x5c\xa5\x68\xed\x09\x90\x51\xea\x12\x2e\x84\x92\x6e\
\xb1\xa1\x9f\xdc\xba\x92\x91\x54\x37\x79\xdc\xdc\x8f\x9e\x05\x77\
\x0f\xcf\x52\x7a\x64\xea\xb4\x00\x0c\x91\x22\xf7\x2d\xdd\x8a\xee\
\x29\x5a\xb5\xdc\x4e\x46\xaa\xf6\x04\xcd\xdf\x5d\x11\xbc\xea\x59\
\xa7\x00\x25\x78\xf2\xaf\x36\x57\xc3\xf4\x92\x98\x50\xc0\x56\x0a\
\x6b\x48\xd2\x91\xf1\x02\x66\xe8\x86\x58\xa9\x7d\x0a\x0f\x0e\x53\
\xf0\xd0\xaa\xd4\xd5\x66\xa4\xb3\x1f\x0b\x0d\xac\x5c\xe8\x40\x35\
\x5e\x93\x06\x8f\xe8\x45\xba\x81\x4f\xf6\xe8\x57\x0b\xf6\x91\x67\
\xd6\x9e\xa2\x46\x51\x76\x23\xa2\x20\x64\xdd\xcf\x5e\x43\x30\xae\
\xca\x92\xe9\xea\xac\x32\x05\x3e\xc0\xb3\x3e\xea\xd1\x3a\x24\x74\
\x51\x7e\x33\x00\x3a\xad\x10\xb7\x74\x6b\xe6\x7a\x4a\xe6\x69\x6a\
\xc1\xae\x47\xaa\x82\x9d\x50\x00\x46\x9a\xbd\xa5\xa9\x87\x92\x58\
\x3c\xe5\xa9\xac\xa1\xa6\x39\x6a\x36\xa5\x41\x9b\xe6\x89\xca\xf6\
\x13\xb3\x45\x9d\x4d\x91\xa7\xdd\xe8\x99\xc9\x73\x89\x38\xba\x56\
\x18\xc9\x97\xe1\xb7\xfe\x66\x18\x20\x7c\xf1\x9f\xe3\x9f\x31\x02\
\x96\x13\x61\x34\x7d\xce\x50\xca\x45\x4d\x81\x02\x8a\xa3\xaa\x91\
\xc3\x13\x1a\xd4\xa5\x94\x74\xac\xfa\x92\xaa\xc3\x47\xe3\x8f\xd6\
\xd2\xce\x14\x42\x20\xa5\xc1\x05\x17\xf4\xa3\xb6\x4d\x3f\x02\xfb\
\x4b\x29\xaf\xa8\xc0\x6c\x22\x1c\x91\x26\x7e\x20\x33\xdd\xa0\x47\
\xcf\x3c\xae\xba\xa8\x25\x23\x2f\x0e\xd0\xb3\x67\x1b\x76\x1f\x94\
\xbc\xf3\xd1\x5e\xfe\xb3\xec\x04\xb5\xb5\x60\x0a\x13\x0c\x93\xec\
\x2c\x83\x2e\x6d\x33\xe8\xdd\x3d\x87\x5e\xdd\x0c\xba\x76\xca\x20\
\xbf\x75\x7b\xaa\xaa\xb3\x58\x59\x56\xc7\xf2\x95\x3b\x38\x5d\xd7\
\x88\xdf\x17\xa0\x73\xe7\x2e\xec\x6d\x75\x91\xc7\xbc\xf2\xc0\x37\
\x48\x71\x66\xf5\x6f\x54\x94\xac\xf0\xa8\xff\x49\xa9\x01\x98\x78\
\x19\x0e\x45\x0a\xc9\x21\x69\xa4\x01\xde\x08\x0d\xda\x51\x6a\x22\
\x29\x15\x1d\x28\x14\x3d\x4d\x81\x06\x08\xe3\xb1\xd1\x49\x8f\x6b\
\x50\xeb\x61\x5a\xe4\xa0\x0e\x03\xe2\xa1\x23\xa9\x6e\x9a\xba\x4d\
\x55\xd5\x1c\xf5\x3a\x87\x43\xf5\xe3\x0c\xd1\x79\x9e\x06\xd9\x0b\
\x3d\x26\xf5\xa9\x33\x57\x9d\xf0\x0c\x10\x68\x6a\x3a\x56\x78\x79\
\xec\xa9\xd2\x7c\xca\x06\x29\xd5\x1a\xa7\x7d\xa2\x9f\x01\x1b\xbb\
\x3d\xed\x6b\x8f\x66\xd4\xeb\xf7\x44\x9a\x6a\x6a\xe2\xd2\xcb\xa9\
\x93\xe8\xb5\x48\x35\x19\x01\xa9\x51\xf3\xd1\xa6\x91\xd5\xef\x47\
\x1f\xfd\xe9\x2e\xce\x1e\x89\xaa\x5a\xa6\x3a\x3d\x57\x97\x73\x27\
\x3c\x36\x1a\xa9\xaf\x4b\xaa\x86\xde\xde\x0f\xe7\xe5\xe8\x48\x45\
\xd7\xd4\xae\xa6\x91\x2a\xd2\xf5\x44\x8a\x7b\x00\xa9\x48\x61\x38\
\xd1\x94\x0c\x5c\xf3\xd9\x56\x27\xb4\x47\xd8\xa6\x4c\xaa\x30\x35\
\xcd\x19\xd3\xce\x94\x10\x4f\x9d\xc6\x62\xce\x7a\xa5\x69\xab\x2f\
\x4a\x7b\xad\x53\x05\x04\xd9\x52\xdf\x09\xa3\x69\x24\x5b\x55\x84\
\x01\x3e\x03\xf0\x59\x46\xd2\xe7\x4b\xd6\xd4\x13\x73\xd1\x0e\x02\
\x8a\x8b\x9e\x37\xc9\xe9\x19\x46\xea\xe0\x43\xb3\xae\xb2\x03\xd0\
\xf9\xf8\x5a\xf6\xed\xdb\x4b\x38\x1c\xc6\x30\xac\xb4\x5a\x76\x76\
\x0e\x75\x7d\xc6\x25\xc6\xfc\x82\x70\x19\xbb\x77\xef\x26\x14\x0e\
\x81\xb4\x4c\x8d\x29\x0d\xfc\x7e\x41\x87\x02\x1f\x17\x14\xe5\x30\
\xe4\xc2\x76\x74\x3b\x3b\x9f\x1f\x2b\x23\x7c\xbe\x7c\x3f\x9b\xb7\
\x9d\xa0\x31\x8a\xc5\x37\x1b\x5f\x0f\x7e\x01\xe9\x7e\xc8\x4a\xf3\
\x91\x9e\x96\x46\x20\x37\x93\x03\x47\x4e\x10\x0d\x4b\xa4\x69\x29\
\xc3\xf8\xfd\x06\x85\x85\xed\xa8\x6c\x37\x5c\x0f\xa2\xd3\x2e\x13\
\x2f\xe4\xb9\xfe\xbe\x93\x11\xa6\x6e\xa3\x70\xa4\x73\xa4\x26\xed\
\x84\x66\x61\x7a\x41\x75\xd5\xb4\xaf\x74\xd6\x03\x85\x07\x6a\x49\
\x34\x83\x4c\xf3\x44\xca\xda\x17\x83\x92\x26\xf6\x6a\x61\x41\x17\
\x65\x48\xef\xf6\x07\x4f\x63\x9d\x22\xea\x72\x2d\x04\xaf\xb6\x12\
\x55\xf9\x5d\x01\x6b\x78\xb6\xea\xc8\x33\x33\x1e\x5a\x87\xc8\x2b\
\xea\x55\x11\x90\x3a\xa7\x23\x05\xbc\x3f\x65\xc4\xe2\x15\xe9\xa0\
\xd4\xb5\x95\xfa\xaa\x54\xd3\x86\x67\xf2\xfd\xca\x33\x46\x79\x76\
\x67\xe4\x84\xe0\x81\xb0\xf5\x92\x21\x3b\x43\x59\xb0\xe6\x5a\xb7\
\x5c\x0e\xc3\x19\xb4\x97\xa4\x8a\xca\x84\x57\x54\x96\x0a\x8d\xad\
\x3a\x76\x52\xaf\x18\xe4\x42\x4e\xa7\x5a\x0b\xcd\x38\x51\x52\x01\
\xe8\xa4\x12\x80\x4f\xd5\x3b\xa2\x73\x5a\x54\x66\x18\x5d\x9b\x94\
\x36\x01\x64\x33\x72\x89\xf7\xcd\x64\x0a\xd4\x6e\x58\xed\xcd\xd5\
\xa6\x69\x53\x10\x8c\x1b\x4e\x3b\x28\x48\x92\xfc\x4e\xd4\xe0\xc3\
\x48\xaa\x0b\x61\x24\x23\x4c\xc3\x96\x36\x34\x6c\x11\xa3\xb0\xd5\
\x69\xed\xe9\x6b\x07\x78\xd3\x50\x52\x92\x1a\x47\x9d\x14\xed\x3f\
\xf1\x5f\x07\x67\x1f\xa1\x62\xcb\x3a\x6a\x6a\x6a\x90\x52\x32\x64\
\xc8\x10\xd6\xc4\x8a\x1c\xa2\xe9\x05\x81\x10\x81\xbd\x5f\x73\xe2\
\xc4\x09\xa2\x31\x90\xd2\x8f\x01\xa4\x4b\x93\x34\x9f\x49\x7e\x2b\
\x3f\x67\xf7\x6e\x4d\x46\x5e\x06\xe5\xfb\x4e\x51\x51\x5e\x87\x34\
\x7d\xc8\x78\x34\xef\xf7\xf9\x30\x30\x11\x98\x08\x24\x31\x43\xe2\
\x0b\x04\xc8\xca\xcc\x26\xbf\x65\x6b\x0e\x15\x5c\x48\xa3\xe9\xf3\
\xc8\x12\x6a\x26\x8c\x2e\xb1\x81\x46\xb5\x4a\xa4\x32\x98\x32\x55\
\x1a\x45\x7a\x7b\x6b\xba\xd6\x05\xc1\x19\xf4\xb4\x69\x7a\x36\x55\
\x89\x1d\x75\x33\x94\xc2\x09\xd8\xf1\x6c\x95\x40\x0f\x7a\x71\xa5\
\x84\x85\x07\xd5\x98\x87\xf7\xac\x95\x4d\xb2\x45\x06\xba\x28\xf9\
\xbf\x49\xc1\xe9\x8c\xad\x1a\x61\xd2\xdc\xa6\x91\xe2\x33\x3a\x6d\
\x45\x41\x33\x86\x1e\x8f\x48\x55\x63\x44\x5c\x9c\x9b\x1a\xc7\xc3\
\x11\xd9\xe8\x24\xa8\x34\x6d\x1f\xba\xfb\x48\x65\x28\xa5\x22\x55\
\x27\xd4\x3a\x94\x52\xcf\x15\x9a\xeb\x57\xe7\x81\xc3\x78\x0b\xcd\
\xb9\x54\x03\xae\x64\x28\x74\xa9\xc3\x54\xd9\x00\xa9\x46\xe7\x52\
\x9f\x32\xf5\xec\x5a\xf1\x8a\xd2\x51\x8c\xa3\x1a\x45\xff\x37\x46\
\x5e\xc7\x6e\x2d\x3c\x8c\x1e\xcd\x64\x19\x54\x07\x5c\x2a\x80\x27\
\xf4\x84\xda\xcd\x1a\x4c\xd5\xef\xd3\x44\xec\xda\xe8\xb5\xb9\xa1\
\x90\xc9\xd4\xa9\x69\xd3\xdb\x4d\x80\x7d\xa4\x32\x77\x4d\xf7\x35\
\x4a\x8f\x3d\xc5\x3e\x7f\xed\x08\x6c\xe1\x85\x0b\x33\x9c\x88\x71\
\x17\x0b\x94\x1d\x29\x6f\x38\xb3\x04\x9e\xc0\x28\xaf\x8c\x88\xc7\
\x3a\x97\x70\x59\xc6\x3e\x76\xef\xde\x4d\x75\xd7\x6b\xf5\x7b\x9b\
\x90\x74\x4a\xaf\x27\xf6\xd3\x4a\x4e\x9c\x38\x45\x24\x18\x8e\x6f\
\x6f\x96\x96\x26\x22\x80\x91\x16\x25\x1c\x8e\x82\x19\xc0\x07\x04\
\xfc\x82\x7e\xfd\xfa\xb3\x96\x9e\x4a\x66\x4e\xcd\xd8\xa5\x68\x59\
\x22\x45\x1b\x99\xda\xc2\xe7\x89\xe6\xf7\x34\x98\xa9\x52\x33\xa9\
\xc2\x5a\x7b\x2f\x5d\xaa\xfe\xb0\x33\xa8\xdb\xe8\xa2\x52\x5d\x03\
\xb9\xd0\xd5\xad\x52\xd4\xa0\x5c\xe9\x1f\x15\xd1\x99\x8a\xb8\x40\
\xe9\x17\x14\xd2\xa3\x6e\x26\xdc\x51\xb7\x54\xc1\x39\xa9\xa2\xc0\
\x54\xc6\xe0\x0c\xd3\x9a\xa9\xa2\x20\x91\x22\xad\xe6\x4a\xb3\xab\
\xfa\x8f\x52\xeb\x65\x6a\xd3\xef\x28\x51\x7d\xb3\x75\x31\x7b\xcb\
\x87\xd0\xf7\xc7\x0a\x5d\xbd\xd1\xc3\x73\x14\x3a\x27\x47\x03\x88\
\x72\x08\x08\xeb\x52\x88\x67\x18\x29\xea\x9e\x9b\x67\x74\xe5\xe5\
\x84\x7a\xac\x11\x35\xe2\x4b\x45\x00\xe0\x35\xb6\xaa\xd1\x77\x3d\
\x47\xd5\x51\x48\xb5\xee\x95\x88\xda\xf3\xb4\x9a\xb9\x24\x3c\x6a\
\x49\xaa\x03\xee\x28\x0f\x35\x93\xa5\x48\x59\x0b\xd5\x65\xb1\x34\
\x19\x01\xd5\x69\x16\x34\x8f\xbd\x40\x71\x40\xb4\x48\x71\x5b\x1a\
\xd7\x15\xd5\xeb\xfa\x7b\x51\xe8\xd9\x74\xa0\x44\x2f\x94\xb7\xa1\
\x00\x77\x0c\xf7\xda\xf2\xac\x5b\xd3\xcc\x5e\x7d\x06\x99\x1b\xbb\
\x73\xaf\xed\x29\xc6\x81\x38\xce\x4b\x37\xe8\x97\x59\xcd\xd1\xaa\
\x1f\xa9\xad\xa9\xa1\xb1\x3e\x4a\x28\x64\xc4\x3f\x5a\x8f\x61\x98\
\x08\x01\x79\x2d\x5a\x70\xb0\x70\xe4\x7f\x97\xa1\xf0\xaa\xd1\xa7\
\x9c\xcf\x6a\xe6\x51\x3a\x80\x6e\x49\xe2\x02\x6d\x5b\x83\xc6\xe0\
\x79\xd5\x03\x5c\x1e\xa5\xe6\xc4\x9e\x46\x08\x37\x5c\x5e\x7a\x3d\
\x3c\xdc\x50\xf0\x33\xf1\x82\xbd\x1a\xb8\xb5\x02\xf0\x52\x63\x44\
\x53\x78\xc1\xf6\xda\xaa\xab\x9c\xe2\xd5\xa6\xd0\x4c\xdd\xc5\xb1\
\x78\x48\x1a\x39\xd5\x5b\x3a\x93\x89\xae\x7b\xb6\x42\x78\xc3\xee\
\x49\x05\x54\xc2\x1b\x98\xe4\x19\x5d\x34\xe3\xdc\xa8\xb5\x6f\x17\
\xb1\x32\xee\x34\x95\x2b\x15\xe8\x01\x2e\xc2\x6b\x51\x2b\x06\xc4\
\xb3\x7e\xab\x79\x4e\xf6\xf9\x2a\xa5\x3e\x35\xad\x75\xc2\x74\xd7\
\xa7\xd4\x7b\x3d\xeb\xf0\xd2\xd9\x16\x95\xaa\x04\xe0\xd9\x53\x7a\
\xa6\x35\xf0\x54\x8c\x4c\x29\xb0\x01\x3a\x91\x6e\x1d\x1d\x63\xb3\
\x1b\x9e\xf4\xde\x70\xf1\x5a\x36\xd2\x1b\x64\x88\x72\x0d\x42\x03\
\x5c\x6b\x76\x2f\x49\x91\x19\x90\x1e\x59\x13\xa9\x64\x58\xd4\x39\
\xe0\x62\x82\xd7\x88\x90\xab\xec\x46\x89\xd6\x30\xf4\x7d\xa3\xa9\
\x18\xc3\x74\x51\xbe\x6c\x66\x69\x4b\x25\x4d\xa9\x65\x63\x93\xee\
\xf5\xaa\x63\x28\xd3\x81\x1b\xb5\x58\x0a\xa1\xaf\x35\x0b\xe9\x5d\
\xa3\xd6\x39\x62\xea\x39\xbc\x12\x86\x5a\x62\x1d\xb4\xd7\x6f\xe9\
\x61\x0a\x8f\xf4\x98\x48\x21\x09\x83\x17\x32\x4f\x1d\x38\x92\x7d\
\x40\x2e\xa0\x8d\x0d\x84\xd0\xf4\x40\x4d\x25\x72\xd0\x01\x4f\x52\
\x21\xfa\x5c\x91\x8c\x91\x7a\x72\xaa\xf7\xea\x22\x37\xf0\xf2\x60\
\x95\x74\xac\x3a\x3e\xaa\xa1\xd3\xa2\x3c\x0d\xa7\x17\x4c\x13\x04\
\xdc\x76\x0d\xf6\xbe\x51\x15\x15\xab\xcd\x02\x68\x7e\x56\xfb\xdb\
\xec\x69\x38\x99\xca\x08\x8a\x14\xde\x3c\xce\xe8\xc7\xd5\xdf\xaa\
\x49\x97\x28\xc9\x0d\x27\x94\xdd\x66\x34\x0c\x8f\x68\x51\x2a\x73\
\xcd\xc5\x4a\x74\x26\xd1\xa0\x68\x3e\xd9\x91\x32\x88\x51\xd2\x5c\
\x46\xfc\x1a\x4c\x25\xed\xe5\x8a\x98\x3c\x88\x39\x52\x6e\xd6\xc2\
\x89\xe0\x94\x29\x80\x6e\xba\xd6\x1b\x2f\x88\xa8\x50\x1a\xb4\xa5\
\x9a\x9e\x36\x34\x46\x57\xc3\x72\xa5\x6e\xd6\x9e\xa0\x1e\x8f\xb6\
\x10\xaf\x8d\x1d\xdc\x94\x7e\xea\xf7\x7a\xa6\xe5\xf0\xa6\x6c\x34\
\x70\x22\x88\x51\x0c\x89\x44\x13\x28\x48\x8f\xb4\x97\x6e\x7d\xe1\
\x9e\x50\x2a\x30\x30\xe5\x1c\x15\x4a\x54\x8a\xad\x35\x42\xa1\xef\
\x74\xac\x7b\xd9\x0c\x72\x5c\xea\x01\x7e\x42\x43\x8f\xa9\xee\xfb\
\x2a\xf9\x86\xf0\xc0\x7c\x18\xea\x33\x36\xf4\x75\x43\xc7\x78\x09\
\x77\x8b\x9e\x67\xf6\x4d\xed\xbc\x68\x4a\x71\xab\x9e\x93\xb2\xf7\
\x0a\xcd\xfd\x08\x37\xf2\xd7\x31\xaf\x5c\xd8\x80\xe4\x75\xfc\x3f\
\xfe\x2c\x2f\x6f\x56\x8a\x6a\xa1\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
# Resource name table (appears to be pyrcc-generated — length/hash-prefixed
# UTF-16 path components, here "Images" and "FRCToolsInstallerHeader.png").
# Generated data: do not edit by hand.
qt_resource_name = "\
\x00\x06\
\x05\x03\x7d\xc3\
\x00\x49\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x1b\
\x0b\x95\xbc\xe7\
\x00\x46\
\x00\x52\x00\x43\x00\x54\x00\x6f\x00\x6f\x00\x6c\x00\x73\x00\x49\x00\x6e\x00\x73\x00\x74\x00\x61\x00\x6c\x00\x6c\x00\x65\x00\x72\
\x00\x48\x00\x65\x00\x61\x00\x64\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
# Binary resource-tree table consumed by QtCore.qRegisterResourceData()
# below (node records linking names to data offsets — presumably the
# standard pyrcc layout; generated data, do not edit by hand).
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    """Register the embedded resource tables with Qt's resource system."""
    QtCore.qRegisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )
def qCleanupResources():
    """Unregister the embedded resource tables from Qt's resource system."""
    QtCore.qUnregisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )
qInitResources()
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urllib2.build_opener(keepalive_handler)
>>> urllib2.install_opener(opener)
>>>
>>> fo = urllib2.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
status - the return status (ie 404)
reason - english translation of status (ie 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.17 2006/12/08 00:14:16 mstenner Exp $
import urllib2
import httplib
import socket
import thread
DEBUG = None
import sslfactory
import sys
if sys.version_info < (2, 4): HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
class ConnectionManager:
    """Thread-safe registry of open connections, grouped by host.

    Maintains three mappings, all guarded by a single lock:

      * host       -> list of connections to that host
      * connection -> host
      * connection -> ready flag (true when the connection is idle and
        may be handed out for a new request)
    """
    def __init__(self):
        self._lock = thread.allocate_lock()
        self._hostmap = {}  # map hosts to a list of connections
        self._connmap = {}  # map connections to host
        self._readymap = {} # map connection to ready state

    def add(self, host, connection, ready):
        """Register a new connection for *host* with the given ready state."""
        self._lock.acquire()
        try:
            # setdefault covers both the "first connection for this host"
            # case and the append case (dict.has_key() is deprecated).
            self._hostmap.setdefault(host, []).append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready
        finally:
            self._lock.release()

    def remove(self, connection):
        """Forget *connection* entirely; a no-op if it is not tracked."""
        self._lock.acquire()
        try:
            try:
                host = self._connmap[connection]
            except KeyError:
                pass
            else:
                del self._connmap[connection]
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                # drop the host entry once its last connection is gone
                if not self._hostmap[host]:
                    del self._hostmap[host]
        finally:
            self._lock.release()

    def set_ready(self, connection, ready):
        """Mark *connection* ready (idle) or busy; unknown connections
        are silently ignored."""
        try:
            self._readymap[connection] = ready
        except KeyError:
            pass

    def get_ready_conn(self, host):
        """Return an idle connection to *host*, or None.

        The returned connection is atomically marked busy while the lock
        is held, so two threads cannot claim the same connection.
        """
        conn = None
        self._lock.acquire()
        try:
            if host in self._hostmap:
                for c in self._hostmap[host]:
                    if self._readymap[c]:
                        # claim it while we still hold the lock
                        self._readymap[c] = 0
                        conn = c
                        break
        finally:
            self._lock.release()
        return conn

    def get_all(self, host=None):
        """Return a copy of the connection list for *host*, or a shallow
        copy of the whole host map when *host* is None."""
        if host:
            return list(self._hostmap.get(host, []))
        else:
            return dict(self._hostmap)
class KeepAliveHandler:
    """Shared machinery for the keepalive HTTP(S) handlers: connection
    pooling, transaction execution, and recovery from dead pooled
    connections.  Subclasses must provide _get_connection()."""

    def __init__(self):
        self._cm = ConnectionManager()

    #### Connection Management

    def open_connections(self):
        """Return a list of connected hosts and the number of connections
        to each, e.g. [('foo.com:80', 2), ('bar.org', 1)]."""
        return [(host, len(li)) for (host, li) in self._cm.get_all().items()]

    def close_connection(self, host):
        """Close connection(s) to <host>.

        host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
        No error occurs if there is no connection to that host."""
        for h in self._cm.get_all(host):
            self._cm.remove(h)
            h.close()

    def close_all(self):
        """Close all open connections."""
        for host, conns in self._cm.get_all().items():
            for h in conns:
                self._cm.remove(h)
                h.close()

    def _request_closed(self, request, host, connection):
        """Tells us that this request is now closed and that the
        connection is ready for another request (called from
        HTTPResponse.close())."""
        self._cm.set_ready(connection, 1)

    def _remove_connection(self, host, connection, close=0):
        # Drop (and optionally close) a connection, e.g. on explicit
        # close_connection() from the response object.
        if close: connection.close()
        self._cm.remove(connection)

    #### Transaction Execution

    def do_open(self, req):
        """Fetch *req*, reusing a pooled connection when possible.

        Each idle pooled connection for the host is tried in turn; dead
        ones are closed and discarded.  If none works, a fresh connection
        is created via the subclass's _get_connection().  Returns the
        response object, or (when HANDLE_ERRORS is set) delegates non-200
        statuses to the parent opener's error machinery.

        NOTE(review): self.parent is presumably installed by urllib2's
        OpenerDirector when the handler is added to an opener -- confirm.
        """
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')

        try:
            h = self._cm.get_ready_conn(host)
            while h:
                r = self._reuse_connection(h, req, host)
                # if this response is non-None, then it worked and we're
                # done.  Break out, skipping the else block.
                if r: break
                # connection is bad - possibly closed by server
                # discard it and ask for the next free connection
                h.close()
                self._cm.remove(h)
                h = self._cm.get_ready_conn(host)
            else:
                # no (working) free connections were found.  Create a new one.
                h = self._get_connection(host)
                if DEBUG: DEBUG.info("creating new connection to %s (%d)",
                                     host, id(h))
                # registered busy (ready=0) until the response is closed
                self._cm.add(host, h, 0)
                self._start_transaction(h, req)
                r = h.getresponse()
        except (socket.error, httplib.HTTPException), err:
            raise urllib2.URLError(err)

        if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)

        # if not a persistent connection, don't try to reuse it
        if r.will_close:
            if DEBUG: DEBUG.info('server will close connection, discarding')
            self._cm.remove(h)

        # annotate the response so its close() can notify us, and so it
        # mimics the attributes of a normal urllib2 response object
        r._handler = self
        r._host = host
        r._url = req.get_full_url()
        r._connection = h
        r.code = r.status
        r.headers = r.msg
        r.msg = r.reason

        if r.status == 200 or not HANDLE_ERRORS:
            return r
        else:
            return self.parent.error('http', req, r,
                                     r.status, r.msg, r.headers)

    def _reuse_connection(self, h, req, host):
        """Start the transaction with a re-used connection.

        Return a response object (r) upon success or None on failure.
        This DOES NOT close or remove bad connections in cases where
        it returns.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
        """
        try:
            self._start_transaction(h, req)
            r = h.getresponse()
            # note: just because we got something back doesn't mean it
            # worked.  We'll check the version below, too.
        except (socket.error, httplib.HTTPException):
            r = None
        except:
            # adding this block just in case we've missed
            # something we will still raise the exception, but
            # lets try and close the connection and remove it
            # first.  We previously got into a nasty loop
            # where an exception was uncaught, and so the
            # connection stayed open.  On the next try, the
            # same exception was raised, etc.  The tradeoff is
            # that it's now possible this call will raise
            # a DIFFERENT exception
            if DEBUG: DEBUG.error("unexpected exception - closing " + \
                                  "connection to %s (%d)", host, id(h))
            self._cm.remove(h)
            h.close()
            raise

        if r is None or r.version == 9:
            # httplib falls back to assuming HTTP 0.9 if it gets a
            # bad header back.  This is most likely to happen if
            # the socket has been closed by the server since we
            # last used the connection.
            if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
                                 host, id(h))
            r = None
        else:
            if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))

        return r

    def _start_transaction(self, h, req):
        # Send the request line, headers, and (for POST) the body on
        # connection *h*.  Content-type/Content-length are defaulted only
        # if the caller did not supply them.
        try:
            if req.has_data():
                data = req.get_data()
                h.putrequest('POST', req.get_selector())
                if not req.headers.has_key('Content-type'):
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                if not req.headers.has_key('Content-length'):
                    h.putheader('Content-length', '%d' % len(data))
            else:
                h.putrequest('GET', req.get_selector())
        except (socket.error, httplib.HTTPException), err:
            raise urllib2.URLError(err)

        for args in self.parent.addheaders:
            h.putheader(*args)
        for k, v in req.headers.items():
            h.putheader(k, v)
        h.endheaders()
        if req.has_data():
            h.send(data)

    def _get_connection(self, host):
        # NOTE(review): this *returns* the NotImplementedError class
        # instead of raising it.  Subclasses always override this, so the
        # base implementation is never hit in practice -- but a `raise`
        # would surface misuse more clearly; confirm before changing.
        return NotImplementedError
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
    """urllib2 handler serving http:// requests over keepalive connections."""

    def __init__(self):
        KeepAliveHandler.__init__(self)

    def http_open(self, req):
        """Entry point invoked by urllib2 for http URLs."""
        return self.do_open(req)

    def _get_connection(self, host):
        """Create a fresh keepalive-capable HTTP connection to *host*."""
        return HTTPConnection(host)
class HTTPSHandler(KeepAliveHandler, urllib2.HTTPSHandler):
    """urllib2 handler serving https:// requests over keepalive connections."""

    def __init__(self, ssl_factory=None):
        KeepAliveHandler.__init__(self)
        if not ssl_factory:
            ssl_factory = sslfactory.get_factory()
        self._ssl_factory = ssl_factory

    def https_open(self, req):
        """Entry point invoked by urllib2 for https URLs."""
        return self.do_open(req)

    def _get_connection(self, host):
        """Create a fresh HTTPS connection to *host*.

        Prefers the ssl factory's connection type, falling back to our
        HTTPSConnection for factories without get_https_connection().
        """
        try:
            return self._ssl_factory.get_https_connection(host)
        except AttributeError:
            return HTTPSConnection(host)
class HTTPResponse(httplib.HTTPResponse):
    # we need to subclass HTTPResponse in order to
    # 1) add readline() and readlines() methods
    # 2) add close_connection() methods
    # 3) add info() and geturl() methods
    #
    # in order to add readline(), read must be modified to deal with a
    # buffer.  example: readline must read a buffer and then spit back
    # one line at a time.  The only real alternative is to read one
    # BYTE at a time (ick).  Once something has been read, it can't be
    # put back (ok, maybe it can, but that's even uglier than this),
    # so if you THEN do a normal read, you must first take stuff from
    # the buffer.
    #
    # the read method wraps the original to accommodate buffering,
    # although read() never adds to the buffer.
    #
    # Both readline and readlines have been stolen with almost no
    # modification from socket.py

    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        if method: # the httplib in python 2.3 uses the method arg
            httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        else: # 2.2 doesn't
            httplib.HTTPResponse.__init__(self, sock, debuglevel)
        self.fileno = sock.fileno
        self.code = None
        self._rbuf = ''       # read-ahead buffer consumed by read()/readline()
        self._rbufsize = 8096 # NOTE(review): possibly meant 8192; harmless either way
        self._handler = None # inserted by the handler later
        self._host = None    # (same)
        self._url = None     # (same)
        self._connection = None # (same)

    # the unbuffered read() from httplib; our read() below wraps it to
    # consult _rbuf first
    _raw_read = httplib.HTTPResponse.read

    def close(self):
        # Close the file object and tell the handler the connection is
        # free for reuse.  This does NOT tear down the socket itself.
        if self.fp:
            self.fp.close()
            self.fp = None
        if self._handler:
            self._handler._request_closed(self, self._host,
                                          self._connection)

    def close_connection(self):
        """Close (and drop from the pool) the underlying connection."""
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()

    def info(self):
        """Return the response headers (urllib2 compatibility)."""
        return self.headers

    def geturl(self):
        """Return the url this response came from (urllib2 compatibility)."""
        return self._url

    def read(self, amt=None):
        # the _rbuf test is only in this first if for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                # buffer partially satisfies the request; read the rest raw
                amt -= L
            else:
                # buffer fully satisfies the request
                s = self._rbuf[:amt]
                self._rbuf = self._rbuf[amt:]
                return s

        # drain the buffer (if any) and append a raw read
        s = self._rbuf + self._raw_read(amt)
        self._rbuf = ''
        return s

    def readline(self, limit=-1):
        # Accumulate raw chunks into _rbuf until it contains a newline
        # (or at least `limit` bytes), then split one line off the front.
        data = ""
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new: break
            i = new.find('\n')
            if i >= 0: i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data

    def readlines(self, sizehint = 0):
        # Collect lines until EOF, or until at least sizehint bytes have
        # been accumulated (when sizehint is non-zero).
        total = 0
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
class HTTPConnection(httplib.HTTPConnection):
    """httplib.HTTPConnection that produces our buffered HTTPResponse."""
    response_class = HTTPResponse
class HTTPSConnection(httplib.HTTPSConnection):
    """httplib.HTTPSConnection that produces our buffered HTTPResponse."""
    response_class = HTTPResponse
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
    """Fetch *url* with HANDLE_ERRORS both on and off and print the result.

    Intended to be run against a non-200 url so the difference in error
    handling is visible.  Restores the original HANDLE_ERRORS value and
    closes all pooled connections afterwards.
    """
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            # status/reason only exist on the response when status == 200
            # (see the module docstring)
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print " EXCEPTION: %s" % e
            raise
        else:
            print " status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
def continuity(url):
    """Fetch *url* three ways and print an md5 digest of each body.

    Fetches with the normal handler, with the keepalive handler using
    read(), and with the keepalive handler using readline(); matching
    digests indicate the keepalive path does not corrupt data.
    """
    import md5
    format = '%25s: %s'

    # first fetch the file with the normal http handler
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('normal urllib', m.hexdigest())

    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive read', m.hexdigest())

    # and once more, a line at a time, to exercise readline()
    fo = urllib2.urlopen(url)
    foo = ''
    while 1:
        f = fo.readline()
        if f: foo = foo + f
        else: break
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive readline', m.hexdigest())
def comp(N, url):
    """Time N fetches of *url* with the normal handlers versus the
    keepalive handler and print the improvement factor."""
    print ' making %i connections to:\n %s' % (N, url)

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print ' TIME: %.3f s' % t1

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print ' TIME: %.3f s' % t2
    print ' improvement factor: %.2f' % (t1/t2, )
def fetch(N, url, delay=0):
    """Fetch *url* N times (sleeping *delay* seconds between fetches)
    and return the total elapsed time in seconds.

    Prints a warning if the fetched bodies are not all the same length.
    """
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        # no delay before the very first fetch
        if delay and i > 0: time.sleep(delay)
        fo = urllib2.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    # sanity check: all reads should have returned the same length
    j = 0
    for i in lens[1:]:
        j = j + 1
        if not i == lens[0]:
            print "WARNING: inconsistent length on read %i: %i" % (j, i)

    return diff
def test_timeout(url):
    """Check that a connection dropped by the server is handled cleanly.

    Fetches *url*, waits 20 seconds so the server can time out the idle
    connection, fetches again, and compares the two bodies.  Temporarily
    installs a stdout logger as the module-level DEBUG so the keepalive
    diagnostics are visible.

    NOTE(review): relies on `time` being importable at module scope --
    it is only imported inside the __main__ block below; confirm if
    calling this from elsewhere.
    """
    global DEBUG
    dbbackup = DEBUG
    class FakeLogger:
        # minimal logger object satisfying the DEBUG.info/error interface
        def debug(self, msg, *args): print msg % args
        info = warning = error = debug
    DEBUG = FakeLogger()
    print " fetching the file to establish a connection"
    fo = urllib2.urlopen(url)
    data1 = fo.read()
    fo.close()

    i = 20
    print " waiting %i seconds for the server to close the connection" % i
    while i > 0:
        # countdown display, overwriting the same line
        sys.stdout.write('\r %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    sys.stderr.write('\r')

    print " fetching the file a second time"
    fo = urllib2.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print ' data are identical'
    else:
        print ' ERROR: DATA DIFFER'
    DEBUG = dbbackup
def test(url, N=10):
    """Run the full manual test suite against *url*: error handling,
    data continuity, speed comparison, and dropped-connection recovery."""
    # ("hander" below is a long-standing typo in the runtime string;
    # left untouched here since it is program output, not a comment)
    print "checking error hander (do this on a non-200)"
    try: error_handler(url)
    except IOError, e:
        print "exiting - exception will prevent further tests"
        sys.exit()
    print
    print "performing continuity test (making sure stuff isn't corrupted)"
    continuity(url)
    print
    print "performing speed comparison"
    comp(N, url)
    print
    print "performing dropped-connection check"
    test_timeout(url)
if __name__ == '__main__':
    # usage: keepalive.py <number-of-fetches> <url>
    import time
    import sys
    try:
        N = int(sys.argv[1])
        url = sys.argv[2]
    except:
        print "%s <integer> <url>" % sys.argv[0]
    else:
        test(url, N)
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""Module for downloading files from a pool of mirrors
DESCRIPTION
This module provides support for downloading files from a pool of
mirrors with configurable failover policies. To a large extent, the
failover policy is chosen by using different classes derived from
the main class, MirrorGroup.
Instances of MirrorGroup (and cousins) act very much like URLGrabber
instances in that they have urlread, urlgrab, and urlopen methods.
They can therefore, be used in very similar ways.
from urlgrabber.grabber import URLGrabber
from urlgrabber.mirror import MirrorGroup
gr = URLGrabber()
mg = MirrorGroup(gr, ['http://foo.com/some/directory/',
'http://bar.org/maybe/somewhere/else/',
'ftp://baz.net/some/other/place/entirely/'])
mg.urlgrab('relative/path.zip')
The assumption is that all mirrors are identical AFTER the base urls
specified, so that any mirror can be used to fetch any file.
FAILOVER
The failover mechanism is designed to be customized by subclassing
from MirrorGroup to change the details of the behavior. In general,
the classes maintain a master mirror list and a "current mirror"
index. When a download is initiated, a copy of this list and index
is created for that download only. The specific failover policy
depends on the class used, and so is documented in the class
documentation. Note that ANY behavior of the class can be
overridden, so any failover policy at all is possible (although
you may need to change the interface in extreme cases).
CUSTOMIZATION
Most customization of a MirrorGroup object is done at instantiation
time (or via subclassing). There are four major types of
customization:
1) Pass in a custom urlgrabber - The passed in urlgrabber will be
used (by default... see #2) for the grabs, so options to it
apply for the url-fetching
2) Custom mirror list - Mirror lists can simply be a list of
mirror strings (as shown in the example above) but each can
also be a dict, allowing for more options. For example, the
first mirror in the list above could also have been:
{'mirror': 'http://foo.com/some/directory/',
'grabber': <a custom grabber to be used for this mirror>,
'kwargs': { <a dict of arguments passed to the grabber> }}
All mirrors are converted to this format internally. If
'grabber' is omitted, the default grabber will be used. If
kwargs are omitted, then (duh) they will not be used.
3) Pass keyword arguments when instantiating the mirror group.
See, for example, the failure_callback argument.
4) Finally, any kwargs passed in for the specific file (to the
urlgrab method, for example) will be folded in. The options
passed into the grabber's urlXXX methods will override any
options specified in a custom mirror dict.
"""
# $Id: mirror.py,v 1.14 2006/02/22 18:26:46 mstenner Exp $
import random
import thread # needed for locking to make this threadsafe
from grabber import URLGrabError, CallbackObject, DEBUG
try:
from i18n import _
except ImportError, msg:
def _(st): return st
class GrabRequest:
    """Plain attribute bag holding the state of one download request.

    Each request (for example, a single file) gets its own instance.
    Keeping this state off the MirrorGroup itself buys us two things:

      1) thread-safety is easier, because nothing is shared
      2) parameters can vary per request
    """
    pass
class MirrorGroup:
    """Base Mirror class

    Instances of this class are built with a grabber object and a list
    of mirrors.  Then all calls to urlXXX should be passed relative urls.
    The requested file will be searched for on the first mirror.  If the
    grabber raises an exception (possibly after some retries) then that
    mirror will be removed from the list, and the next will be attempted.
    If all mirrors are exhausted, then an exception will be raised.

    MirrorGroup has the following failover policy:

      * downloads begin with the first mirror

      * by default (see default_action below) a failure (after retries)
        causes it to increment the local AND master indices.  Also,
        the current mirror is removed from the local list (but NOT the
        master list - the mirror can potentially be used for other
        files)

      * if the local list is ever exhausted, a URLGrabError will be
        raised (errno=256, no more mirrors)

    OPTIONS

      In addition to the required arguments "grabber" and "mirrors",
      MirrorGroup also takes the following optional arguments:

      default_action

        A dict that describes the actions to be taken upon failure
        (after retries).  default_action can contain any of the
        following keys (shown here with their default values):

          default_action = {'increment': 1,
                            'increment_master': 1,
                            'remove': 1,
                            'remove_master': 0,
                            'fail': 0}

        In this context, 'increment' means "use the next mirror" and
        'remove' means "never use this mirror again".  The two
        'master' values refer to the instance-level mirror list (used
        for all files), whereas the non-master values refer to the
        current download only.

        The 'fail' option will cause immediate failure by re-raising
        the exception and no further attempts to get the current
        download.

        This dict can be set at instantiation time,
          mg = MirrorGroup(grabber, mirrors, default_action={'fail':1})
        at method-execution time (only applies to current fetch),
          filename = mg.urlgrab(url, default_action={'increment': 0})
        or by returning an action dict from the failure_callback
          return {'fail':0}
        in increasing precedence.

        If all three of these were done, the net result would be:
              {'increment': 0,         # set in method
               'increment_master': 1,  # class default
               'remove': 1,            # class default
               'remove_master': 0,     # class default
               'fail': 0}              # set at instantiation, reset
                                       # from callback

      failure_callback

        this is a callback that will be called when a mirror "fails",
        meaning the grabber raises some URLGrabError.  If this is a
        tuple, it is interpreted to be of the form (cb, args, kwargs)
        where cb is the actual callable object (function, method,
        etc).  Otherwise, it is assumed to be the callable object
        itself.  The callback will be passed a grabber.CallbackObject
        instance along with args and kwargs (if present).  The following
        attributes are defined within the instance:

           obj.exception    = < exception that was raised >
           obj.mirror       = < the mirror that was tried >
           obj.relative_url = < url relative to the mirror >
           obj.url          = < full url that failed >
                              # .url is just the combination of .mirror
                              # and .relative_url

        The failure callback can return an action dict, as described
        above.

        Like default_action, the failure_callback can be set at
        instantiation time or when the urlXXX method is called.  In
        the latter case, it applies only for that fetch.

        The callback can re-raise the exception quite easily.  For
        example, this is a perfectly adequate callback function:

          def callback(obj): raise obj.exception

        WARNING: do not save the exception object (or the
        CallbackObject instance).  As they contain stack frame
        references, they can lead to circular references.

    Notes:
      * The behavior can be customized by deriving and overriding the
        'CONFIGURATION METHODS'
      * The 'grabber' instance is kept as a reference, not copied.
        Therefore, the grabber instance can be modified externally
        and changes will take effect immediately.
    """

    # notes on thread-safety:
    #   A GrabRequest should never be shared by multiple threads because
    #   it's never saved inside the MG object and never returned outside it.
    #   therefore, it should be safe to access/modify grabrequest data
    #   without a lock.  However, accessing the mirrors and _next attributes
    #   of the MG itself must be done when locked to prevent (for example)
    #   removal of the wrong mirror.

    ##############################################################
    #  CONFIGURATION METHODS  -  intended to be overridden to
    #                            customize behavior
    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the MirrorGroup object.

        REQUIRED ARGUMENTS

          grabber  - URLGrabber instance
          mirrors  - a list of mirrors

        OPTIONAL ARGUMENTS

          failure_callback  - callback to be used when a mirror fails
          default_action    - dict of failure actions

        See the module-level and class level documentation for more
        details.
        """
        # OVERRIDE IDEAS:
        #   shuffle the list to randomize order
        self.grabber = grabber
        self.mirrors = self._parse_mirrors(mirrors)
        self._next = 0
        self._lock = thread.allocate_lock()
        self.default_action = None
        self._process_kwargs(kwargs)

    # if these values are found in **kwargs passed to one of the urlXXX
    # methods, they will be stripped before getting passed on to the
    # grabber
    options = ['default_action', 'failure_callback']

    def _process_kwargs(self, kwargs):
        # split MirrorGroup-level options out of the instantiation kwargs
        self.failure_callback = kwargs.get('failure_callback')
        self.default_action = kwargs.get('default_action')

    def _parse_mirrors(self, mirrors):
        # normalize each mirror entry to dict form: {'mirror': url, ...}
        parsed_mirrors = []
        for m in mirrors:
            if type(m) == type(''): m = {'mirror': m}
            parsed_mirrors.append(m)
        return parsed_mirrors

    def _load_gr(self, gr):
        # OVERRIDE IDEAS:
        #   shuffle gr list
        # Copy the master mirror list and index into the per-request
        # object under the lock, giving this download a private view.
        self._lock.acquire()
        gr.mirrors = list(self.mirrors)
        gr._next = self._next
        self._lock.release()

    def _get_mirror(self, gr):
        # OVERRIDE IDEAS:
        #   return a random mirror so that multiple mirrors get used
        #   even without failures.
        if not gr.mirrors:
            raise URLGrabError(256, _('No more mirrors to try.'))
        return gr.mirrors[gr._next]

    def _failure(self, gr, cb_obj):
        # OVERRIDE IDEAS:
        #   inspect the error - remove=1 for 404, remove=2 for connection
        #     refused, etc. (this can also be done via
        #     the callback)
        cb = gr.kw.get('failure_callback') or self.failure_callback
        if cb:
            if type(cb) == type( () ):
                cb, args, kwargs = cb
            else:
                args, kwargs = (), {}
            action = cb(cb_obj, *args, **kwargs) or {}
        else:
            action = {}

        # XXXX - decide - there are two ways to do this
        # the first is action-overriding as a whole - use the entire action
        # or fall back on module level defaults
        #action = action or gr.kw.get('default_action') or self.default_action
        # the other is to fall through for each element in the action dict
        a = dict(self.default_action or {})
        a.update(gr.kw.get('default_action', {}))
        a.update(action)
        action = a
        self.increment_mirror(gr, action)
        # bare raise: re-raise the URLGrabError currently being handled
        # in _mirror_try's except block
        if action and action.get('fail', 0): raise

    def increment_mirror(self, gr, action={}):
        """Tell the mirror object to increment the mirror index.

        This increments the mirror index, which amounts to telling the
        mirror object to use a different mirror (for this and future
        downloads).

        This is a SEMI-public method.  It will be called internally,
        and you may never need to call it.  However, it is provided
        (and is made public) so that the calling program can increment
        the mirror choice for methods like urlopen.  For example, with
        urlopen, there's no good way for the mirror group to know that
        an error occurs mid-download (it's already returned and given
        you the file object).

        remove  ---  can have several values
           0   do not remove the mirror from the list
           1   remove the mirror for this download only
           2   remove the mirror permanently

        beware of remove=0 as it can lead to infinite loops
        """
        # NOTE(review): the mutable default `action={}` is only ever
        # read via .get() here, so the shared-default pitfall does not
        # bite -- confirm before adding any writes to it.
        badmirror = gr.mirrors[gr._next]

        self._lock.acquire()
        try:
            ind = self.mirrors.index(badmirror)
        except ValueError:
            pass
        else:
            if action.get('remove_master', 0):
                del self.mirrors[ind]
            elif self._next == ind and action.get('increment_master', 1):
                self._next += 1
            # wrap the master index around the end of the list
            if self._next >= len(self.mirrors): self._next = 0
        self._lock.release()

        if action.get('remove', 1):
            del gr.mirrors[gr._next]
        elif action.get('increment', 1):
            gr._next += 1
        # wrap the per-request index as well
        if gr._next >= len(gr.mirrors): gr._next = 0

        if DEBUG:
            grm = [m['mirror'] for m in gr.mirrors]
            DEBUG.info('GR   mirrors: [%s] %i', ' '.join(grm), gr._next)
            selfm = [m['mirror'] for m in self.mirrors]
            DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next)

    #####################################################################
    # NON-CONFIGURATION METHODS
    # these methods are designed to be largely workhorse methods that
    # are not intended to be overridden.  That doesn't mean you can't;
    # if you want to, feel free, but most things can be done by
    # by overriding the configuration methods :)

    def _join_url(self, base_url, rel_url):
        # join mirror base and relative url, adding a '/' only if needed
        if base_url.endswith('/') or rel_url.startswith('/'):
            return base_url + rel_url
        else:
            return base_url + '/' + rel_url

    def _mirror_try(self, func, url, kw):
        # Core retry loop: try each mirror in the request's private list
        # until one succeeds or _get_mirror raises (256, no more mirrors).
        gr = GrabRequest()
        gr.func = func
        gr.url = url
        gr.kw = dict(kw)
        self._load_gr(gr)

        # strip MirrorGroup-only options before handing kw to the grabber
        for k in self.options:
            try: del kw[k]
            except KeyError: pass

        while 1:
            mirrorchoice = self._get_mirror(gr)
            fullurl = self._join_url(mirrorchoice['mirror'], gr.url)
            # per-mirror kwargs form the base; call-time kwargs win
            kwargs = dict(mirrorchoice.get('kwargs', {}))
            kwargs.update(kw)
            grabber = mirrorchoice.get('grabber') or self.grabber
            func_ref = getattr(grabber, func)
            if DEBUG: DEBUG.info('MIRROR: trying %s -> %s', url, fullurl)
            try:
                return func_ref( *(fullurl,), **kwargs )
            except URLGrabError, e:
                if DEBUG: DEBUG.info('MIRROR: failed')
                obj = CallbackObject()
                obj.exception = e
                obj.mirror = mirrorchoice['mirror']
                obj.relative_url = gr.url
                obj.url = fullurl
                # may re-raise (action 'fail'), otherwise loop to the
                # next mirror
                self._failure(gr, obj)

    def urlgrab(self, url, filename=None, **kwargs):
        """Grab *url* (relative to the mirrors) into *filename*."""
        kw = dict(kwargs)
        kw['filename'] = filename
        func = 'urlgrab'
        return self._mirror_try(func, url, kw)

    def urlopen(self, url, **kwargs):
        """Open *url* (relative to the mirrors); returns a file-like object."""
        kw = dict(kwargs)
        func = 'urlopen'
        return self._mirror_try(func, url, kw)

    def urlread(self, url, limit=None, **kwargs):
        """Read and return the body of *url* (up to *limit* bytes)."""
        kw = dict(kwargs)
        kw['limit'] = limit
        func = 'urlread'
        return self._mirror_try(func, url, kw)
class MGRandomStart(MirrorGroup):
    """A mirror group that starts at a random mirror in the list.

    Behaves exactly like MirrorGroup except that the initial mirror is
    picked at random rather than always being the first in the list.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object.

        Takes the same arguments as MirrorGroup.__init__.
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # jump to a random starting index into the master mirror list
        self._next = random.randrange(len(mirrors))
class MGRandomOrder(MirrorGroup):
    """A mirror group that uses mirrors in a random order.

    Behaves exactly like MirrorGroup except that the mirror list is
    shuffled once at initialization time and fixed thereafter; it does
    NOT pick a new random mirror after each failure.
    """

    def __init__(self, grabber, mirrors, **kwargs):
        """Initialize the object.

        Takes the same arguments as MirrorGroup.__init__.
        """
        MirrorGroup.__init__(self, grabber, mirrors, **kwargs)
        # fix the (random) order up front
        random.shuffle(self.mirrors)
# No command-line behavior: this module is intended to be imported.
if __name__ == '__main__':
    pass
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
"""A high-level cross-protocol url-grabber.
GENERAL ARGUMENTS (kwargs)
Where possible, the module-level default is indicated, and legal
values are provided.
copy_local = 0 [0|1]
ignored except for file:// urls, in which case it specifies
whether urlgrab should still make a copy of the file, or simply
point to the existing copy. The module level default for this
option is 0.
close_connection = 0 [0|1]
tells URLGrabber to close the connection after a file has been
transferred. This is ignored unless the download happens with the
http keepalive handler (keepalive=1). Otherwise, the connection
is left open for further use. The module level default for this
option is 0 (keepalive connections will not be closed).
keepalive = 1 [0|1]
specifies whether keepalive should be used for HTTP/1.1 servers
that support it. The module level default for this option is 1
(keepalive is enabled).
progress_obj = None
a class instance that supports the following methods:
po.start(filename, url, basename, length, text)
# length will be None if unknown
po.update(read) # read == bytes read so far
po.end()
text = None
specifies alternative text to be passed to the progress meter
object. If not given, the default progress meter will use the
basename of the file.
throttle = 1.0
a number - if it's an int, it's the bytes/second throttle limit.
If it's a float, it is first multiplied by bandwidth. If throttle
== 0, throttling is disabled. If None, the module-level default
(which can be set on default_grabber.throttle) is used. See
BANDWIDTH THROTTLING for more information.
timeout = None
a positive float expressing the number of seconds to wait for socket
operations. If the value is None or 0.0, socket operations will block
forever. Setting this option causes urlgrabber to call the settimeout
method on the Socket object used for the request. See the Python
documentation on settimeout for more information.
http://www.python.org/doc/current/lib/socket-objects.html
bandwidth = 0
the nominal max bandwidth in bytes/second. If throttle is a float
and bandwidth == 0, throttling is disabled. If None, the
module-level default (which can be set on
default_grabber.bandwidth) is used. See BANDWIDTH THROTTLING for
more information.
range = None
a tuple of the form (first_byte, last_byte) describing a byte
range to retrieve. Either or both of the values may set to
None. If first_byte is None, byte offset 0 is assumed. If
last_byte is None, the last byte available is assumed. Note that
the range specification is python-like in that (0,10) will yield
the first 10 bytes of the file.
If set to None, no range will be used.
reget = None [None|'simple'|'check_timestamp']
whether to attempt to reget a partially-downloaded file. Reget
only applies to .urlgrab and (obviously) only if there is a
partially downloaded file. Reget has two modes:
'simple' -- the local file will always be trusted. If there
are 100 bytes in the local file, then the download will always
begin 100 bytes into the requested file.
'check_timestamp' -- the timestamp of the server file will be
compared to the timestamp of the local file. ONLY if the
local file is newer than or the same age as the server file
will reget be used. If the server file is newer, or the
timestamp is not returned, the entire file will be fetched.
NOTE: urlgrabber can do very little to verify that the partial
file on disk is identical to the beginning of the remote file.
You may want to either employ a custom "checkfunc" or simply avoid
using reget in situations where corruption is a concern.
user_agent = 'urlgrabber/VERSION'
a string, usually of the form 'AGENT/VERSION' that is provided to
HTTP servers in the User-agent header. The module level default
for this option is "urlgrabber/VERSION".
http_headers = None
a tuple of 2-tuples, each containing a header and value. These
will be used for http and https requests only. For example, you
can do
http_headers = (('Pragma', 'no-cache'),)
ftp_headers = None
this is just like http_headers, but will be used for ftp requests.
proxies = None
a dictionary that maps protocol schemes to proxy hosts. For
example, to use a proxy server on host "foo" port 3128 for http
and https URLs:
proxies={ 'http' : 'http://foo:3128', 'https' : 'http://foo:3128' }
note that proxy authentication information may be provided using
normal URL constructs:
proxies={ 'http' : 'http://user:host@foo:3128' }
Lastly, if proxies is None, the default environment settings will
be used.
prefix = None
a url prefix that will be prepended to all requested urls. For
example:
g = URLGrabber(prefix='http://foo.com/mirror/')
g.urlgrab('some/file.txt')
## this will fetch 'http://foo.com/mirror/some/file.txt'
This option exists primarily to allow identical behavior to
MirrorGroup (and derived) instances. Note: a '/' will be inserted
if necessary, so you cannot specify a prefix that ends with a
partial file or directory name.
opener = None
Overrides the default urllib2.OpenerDirector provided to urllib2
when making requests. This option exists so that the urllib2
handler chain may be customized. Note that the range, reget,
proxy, and keepalive features require that custom handlers be
provided to urllib2 in order to function properly. If an opener
option is provided, no attempt is made by urlgrabber to ensure
chain integrity. You are responsible for ensuring that any
extension handlers are present if said features are required.
cache_openers = True
controls whether urllib2 openers should be cached and reused, or
whether they should be created each time. There's a modest
overhead in recreating them, but it's slightly safer to do so if
you're modifying the handlers between calls.
data = None
Only relevant for the HTTP family (and ignored for other
protocols), this allows HTTP POSTs. When the data kwarg is
present (and not None), an HTTP request will automatically become
a POST rather than GET. This is done by direct passthrough to
urllib2. If you use this, you may also want to set the
'Content-length' and 'Content-type' headers with the http_headers
option. Note that python 2.2 handles the case of these
badly and if you do not use the proper case (shown here), your
values will be overridden with the defaults.
urlparser = URLParser()
The URLParser class handles pre-processing of URLs, including
auth-handling for user/pass encoded in http urls, file handing
(that is, filenames not sent as a URL), and URL quoting. If you
want to override any of this behavior, you can pass in a
replacement instance. See also the 'quote' option.
quote = None
Whether or not to quote the path portion of a url.
quote = 1 -> quote the URLs (they're not quoted yet)
quote = 0 -> do not quote them (they're already quoted)
quote = None -> guess what to do
This option only affects proper urls like 'file:///etc/passwd'; it
does not affect 'raw' filenames like '/etc/passwd'. The latter
will always be quoted as they are converted to URLs. Also, only
the path part of a url is quoted. If you need more fine-grained
control, you should probably subclass URLParser and pass it in via
the 'urlparser' option.
ssl_ca_cert = None
this option can be used if M2Crypto is available and will be
ignored otherwise. If provided, it will be used to create an SSL
context. If both ssl_ca_cert and ssl_context are provided, then
ssl_context will be ignored and a new context will be created from
ssl_ca_cert.
ssl_context = None
this option can be used if M2Crypto is available and will be
ignored otherwise. If provided, this SSL context will be used.
If both ssl_ca_cert and ssl_context are provided, then ssl_context
will be ignored and a new context will be created from
ssl_ca_cert.
RETRY RELATED ARGUMENTS
retry = None
the number of times to retry the grab before bailing. If this is
zero, it will retry forever. This was intentional... really, it
was :). If this value is not supplied or is supplied but is None
retrying does not occur.
retrycodes = [-1,2,4,5,6,7]
a sequence of errorcodes (values of e.errno) for which it should
retry. See the doc on URLGrabError for more details on this. You
might consider modifying a copy of the default codes rather than
building yours from scratch so that if the list is extended in the
future (or one code is split into two) you can still enjoy the
benefits of the default list. You can do that with something like
this:
retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes
if 12 not in retrycodes:
retrycodes.append(12)
checkfunc = None
a function to do additional checks. This defaults to None, which
means no additional checking. The function should simply return
on a successful check. It should raise URLGrabError on an
unsuccessful check. Raising of any other exception will be
considered immediate failure and no retries will occur.
If it raises URLGrabError, the error code will determine the retry
behavior. Negative error numbers are reserved for use by these
passed in functions, so you can use many negative numbers for
different types of failure. By default, -1 results in a retry,
but this can be customized with retrycodes.
If you simply pass in a function, it will be given exactly one
argument: a CallbackObject instance with the .url attribute
defined and either .filename (for urlgrab) or .data (for urlread).
For urlgrab, .filename is the name of the local file. For
urlread, .data is the actual string data. If you need other
arguments passed to the callback (program state of some sort), you
can do so like this:
checkfunc=(function, ('arg1', 2), {'kwarg': 3})
if the downloaded file has filename /tmp/stuff, then this will
result in this call (for urlgrab):
function(obj, 'arg1', 2, kwarg=3)
# obj.filename = '/tmp/stuff'
# obj.url = 'http://foo.com/stuff'
NOTE: both the "args" tuple and "kwargs" dict must be present if
you use this syntax, but either (or both) can be empty.
failure_callback = None
The callback that gets called during retries when an attempt to
fetch a file fails. The syntax for specifying the callback is
identical to checkfunc, except for the attributes defined in the
CallbackObject instance. The attributes for failure_callback are:
exception = the raised exception
url = the url we're trying to fetch
tries = the number of tries so far (including this one)
retry = the value of the retry option
The callback is present primarily to inform the calling program of
the failure, but if it raises an exception (including the one it's
passed) that exception will NOT be caught and will therefore cause
future retries to be aborted.
The callback is called for EVERY failure, including the last one.
On the last try, the callback can raise an alternate exception,
but it cannot (without severe trickiness) prevent the exception
from being raised.
interrupt_callback = None
This callback is called if KeyboardInterrupt is received at any
point in the transfer. Basically, this callback can have three
impacts on the fetch process based on the way it exits:
1) raise no exception: the current fetch will be aborted, but
any further retries will still take place
2) raise a URLGrabError: if you're using a MirrorGroup, then
this will prompt a failover to the next mirror according to
the behavior of the MirrorGroup subclass. It is recommended
that you raise URLGrabError with code 15, 'user abort'. If
you are NOT using a MirrorGroup subclass, then this is the
same as (3).
3) raise some other exception (such as KeyboardInterrupt), which
will not be caught at either the grabber or mirror levels.
That is, it will be raised up all the way to the caller.
This callback is very similar to failure_callback. They are
passed the same arguments, so you could use the same function for
both.
BANDWIDTH THROTTLING
urlgrabber supports throttling via two values: throttle and
bandwidth. Between the two, you can either specify an absolute
throttle threshold or specify a threshold as a fraction of maximum
available bandwidth.
throttle is a number - if it's an int, it's the bytes/second
throttle limit. If it's a float, it is first multiplied by
bandwidth. If throttle == 0, throttling is disabled. If None, the
module-level default (which can be set with set_throttle) is used.
bandwidth is the nominal max bandwidth in bytes/second. If throttle
is a float and bandwidth == 0, throttling is disabled. If None, the
module-level default (which can be set with set_bandwidth) is used.
THROTTLING EXAMPLES:
Lets say you have a 100 Mbps connection. This is (about) 10^8 bits
per second, or 12,500,000 Bytes per second. You have a number of
throttling options:
*) set_bandwidth(12500000); set_throttle(0.5) # throttle is a float
This will limit urlgrab to use half of your available bandwidth.
*) set_throttle(6250000) # throttle is an int
This will also limit urlgrab to use half of your available
bandwidth, regardless of what bandwidth is set to.
*) set_throttle(6250000); set_throttle(1.0) # float
Use half your bandwidth
*) set_throttle(6250000); set_throttle(2.0) # float
Use up to 12,500,000 Bytes per second (your nominal max bandwidth)
*) set_throttle(6250000); set_throttle(0) # throttle = 0
Disable throttling - this is more efficient than a very large
throttle setting.
*) set_throttle(0); set_throttle(1.0) # throttle is float, bandwidth = 0
Disable throttling - this is the default when the module is loaded.
SUGGESTED AUTHOR IMPLEMENTATION (THROTTLING)
While this is flexible, it's not extremely obvious to the user. I
suggest you implement a float throttle as a percent to make the
distinction between absolute and relative throttling very explicit.
Also, you may want to convert the units to something more convenient
than bytes/second, such as kbps or kB/s, etc.
"""
# $Id: grabber.py,v 1.52 2006/12/12 19:08:46 mstenner Exp $
import os
import os.path
import sys
import urlparse
import rfc822
import time
import string
import urllib
import urllib2
import thread
from stat import * # S_* and ST_*
########################################################################
#                     MODULE INITIALIZATION
########################################################################
# Pull __version__ from the top-level package; fall back to a dummy
# string if the package layout doesn't provide one.
try:
    exec('from ' + (__name__.split('.'))[0] + ' import __version__')
except:
    __version__ = '???'
import sslfactory
# Shared handler for HTTP basic-auth credentials embedded in URLs
# (populated by URLParser.process_http).
auth_handler = urllib2.HTTPBasicAuthHandler( \
    urllib2.HTTPPasswordMgrWithDefaultRealm())
# i18n support is optional; fall back to the identity function.
try:
    from i18n import _
except ImportError, msg:
    def _(st): return st
try:
    from httplib import HTTPException
except ImportError, msg:
    HTTPException = None
try:
    # This is a convenient way to make keepalive optional.
    # Just rename the module so it can't be imported.
    import keepalive
    from keepalive import HTTPHandler, HTTPSHandler
    have_keepalive = True
    keepalive_http_handler = HTTPHandler()
except ImportError, msg:
    have_keepalive = False
    keepalive_http_handler = None
try:
    # add in range support conditionally too
    import byterange
    from byterange import HTTPRangeHandler, HTTPSRangeHandler, \
        FileRangeHandler, FTPRangeHandler, range_tuple_normalize, \
        range_tuple_to_header, RangeError
except ImportError, msg:
    range_handlers = ()
    RangeError = None
    have_range = 0
else:
    range_handlers = (HTTPRangeHandler(), HTTPSRangeHandler(),
                      FileRangeHandler(), FTPRangeHandler())
    have_range = 1
# check whether socket timeout support is available (Python >= 2.3)
import socket
try:
    TimeoutError = socket.timeout
    have_socket_timeout = True
except AttributeError:
    TimeoutError = None
    have_socket_timeout = False
########################################################################
# functions for debugging output. These functions are here because they
# are also part of the module initialization.
# DEBUG is the module-wide logger object; None disables debug output.
DEBUG = None
def set_logger(DBOBJ):
    """Set the DEBUG object. This is called by _init_default_logger when
    the environment variable URLGRABBER_DEBUG is set, but can also be
    called by a calling program. Basically, if the calling program uses
    the logging module and would like to incorporate urlgrabber logging,
    then it can do so this way. It's probably not necessary as most
    internal logging is only for debugging purposes.

    The passed-in object should be a logging.Logger instance. It will
    be pushed into the keepalive and byterange modules if they're
    being used. The mirror module pulls this object in on import, so
    you will need to manually push into it. In fact, you may find it
    tidier to simply push your logging object (or objects) into each
    of these modules independently.
    """
    global DEBUG
    DEBUG = DBOBJ
    # Propagate into the optional submodules, but never clobber a
    # logger someone already installed there.  The have_* guards are
    # required: the module names are unbound if their import failed.
    if have_keepalive and keepalive.DEBUG is None:
        keepalive.DEBUG = DBOBJ
    if have_range and byterange.DEBUG is None:
        byterange.DEBUG = DBOBJ
    if sslfactory.DEBUG is None:
        sslfactory.DEBUG = DBOBJ
def _init_default_logger(logspec=None):
    '''Examines the environment variable URLGRABBER_DEBUG and creates
    a logging object (logging.logger) based on the contents. It takes
    the form
      URLGRABBER_DEBUG=level,filename
    where "level" can be either an integer or a log level from the
    logging module (DEBUG, INFO, etc). If the integer is zero or
    less, logging will be disabled. Filename is the filename where
    logs will be sent. If it is "-", then stdout will be used. If
    the filename is empty or missing, stderr will be used. If the
    variable cannot be processed or the logging module cannot be
    imported (python < 2.3) then logging will be disabled. Here are
    some examples:
      URLGRABBER_DEBUG=1,debug.txt # log everything to debug.txt
      URLGRABBER_DEBUG=WARNING,- # log warning and higher to stdout
      URLGRABBER_DEBUG=INFO # log info and higher to stderr
    This function is called during module initialization. It is not
    intended to be called from outside. The only reason it is a
    function at all is to keep the module-level namespace tidy and to
    collect the code into a nice block.'''
    try:
        if logspec is None:
            logspec = os.environ['URLGRABBER_DEBUG']
        dbinfo = logspec.split(',')
        import logging
        # accept a symbolic level name first, then a bare integer
        # (_levelNames is a private name->level mapping in py2 logging)
        level = logging._levelNames.get(dbinfo[0], None)
        if level is None: level = int(dbinfo[0])
        # a non-positive level means "disabled"; route through the
        # common except-clause below
        if level < 1: raise ValueError()
        formatter = logging.Formatter('%(asctime)s %(message)s')
        if len(dbinfo) > 1: filename = dbinfo[1]
        else: filename = ''
        if filename == '': handler = logging.StreamHandler(sys.stderr)
        elif filename == '-': handler = logging.StreamHandler(sys.stdout)
        else: handler = logging.FileHandler(filename)
        handler.setFormatter(formatter)
        DBOBJ = logging.getLogger('urlgrabber')
        DBOBJ.addHandler(handler)
        DBOBJ.setLevel(level)
    except (KeyError, ImportError, ValueError):
        # unset variable, garbled spec, or missing logging module:
        # debug logging stays disabled
        DBOBJ = None
    set_logger(DBOBJ)
def _log_package_state():
    """Report urlgrabber's version and feature-availability flags.

    No-op unless a debug logger has been installed via set_logger().
    """
    if not DEBUG:
        return
    for message in ('urlgrabber version = %s' % __version__,
                    'have_m2crypto = %s' % sslfactory.have_m2crypto,
                    'trans function "_" = %s' % _,
                    'have_keepalive = %s' % have_keepalive,
                    'have_range = %s' % have_range,
                    'have_socket_timeout = %s' % have_socket_timeout):
        DEBUG.info(message)
# Configure debug logging from URLGRABBER_DEBUG and record the detected
# feature set — both run once, at import time.
_init_default_logger()
_log_package_state()
########################################################################
# END MODULE INITIALIZATION
########################################################################
class URLGrabError(IOError):
    """
    URLGrabError error codes:

      URLGrabber error codes (0 -- 255)
        0    - everything looks good (you should never see this)
        1    - malformed url
        2    - local file doesn't exist
        3    - request for non-file local file (dir, etc)
        4    - IOError on fetch
        5    - OSError on fetch
        6    - no content length header when we expected one
        7    - HTTPException
        8    - Exceeded read limit (for urlread)
        9    - Requested byte range not satisfiable.
        10   - Byte range requested, but range support unavailable
        11   - Illegal reget mode
        12   - Socket timeout
        13   - malformed proxy url
        14   - HTTPError (includes .code and .exception attributes)
        15   - user abort
        16   - error writing to local file

      MirrorGroup error codes (256 -- 511)
        256  - No more mirrors left to try

      Custom (non-builtin) classes derived from MirrorGroup (512 -- 767)
        [ this range reserved for application-specific error codes ]

      Retry codes (< 0)
        -1   - retry the download, unknown reason

    Note: to test which group a code is in, you can simply do integer
    division by 256: e.errno / 256

    Negative codes are reserved for use by functions passed in to
    retrygrab with checkfunc.  The value -1 is built in as a generic
    retry code and is already included in the retrycodes list.
    Therefore, you can create a custom check function that simply
    returns -1 and the fetch will be re-tried.  For more customized
    retries, you can use other negative numbers and include them in
    retrycodes.  This is nice for outputting useful messages about
    what failed.

    You can use these error codes like so:
      try: urlgrab(url)
      except URLGrabError, e:
         if e.errno == 3: ...
         # or
         print e.strerror
         # or simply
         print e  #### print '[Errno %i] %s' % (e.errno, e.strerror)
    """
    pass
class CallbackObject:
    """Container for returned callback data.

    A deliberately featureless bag of attributes: urlgrabber stuffs
    whatever a particular callback needs into an instance, so every
    callback has the same one-argument prototype regardless of the
    data being passed back.  Any function that accepts a callback
    SHOULD document which attributes it sets here.
    """
    def __init__(self, **kwargs):
        # each keyword becomes an instance attribute of the same name
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
def urlgrab(url, filename=None, **kwargs):
    """grab the file at <url> and make a local copy at <filename>

    If filename is None, the basename of the url is used.  The return
    value is the name of the local file, which may differ from the
    passed-in filename if the copy_local kwarg == 0.  Delegates to the
    module-level default_grabber; see module documentation for kwargs.
    """
    return default_grabber.urlgrab(url, filename=filename, **kwargs)
def urlopen(url, **kwargs):
    """open the url and return a file object

    If a progress object or throttle specification exists, a special
    file object supporting them is returned; it can still be treated
    like any other file object.  Delegates to the module-level
    default_grabber; see module documentation for kwargs.
    """
    return default_grabber.urlopen(url, **kwargs)
def urlread(url, limit=None, **kwargs):
    """read the url into a string, up to 'limit' bytes

    Raises URLGrabError(8) if the limit is exceeded.  Note that this is
    NOT a way of saying "give me the first N bytes"; it means "read the
    whole file, but refuse to use too much memory".  Delegates to the
    module-level default_grabber; see module documentation for kwargs.
    """
    return default_grabber.urlread(url, limit=limit, **kwargs)
class URLParser:
"""Process the URLs before passing them to urllib2.
This class does several things:
* add any prefix
* translate a "raw" file to a proper file: url
* handle any http or https auth that's encoded within the url
* quote the url
Only the "parse" method is called directly, and it calls sub-methods.
An instance of this class is held in the options object, which
means that it's easy to change the behavior by sub-classing and
passing the replacement in. It need only have a method like:
url, parts = urlparser.parse(url, opts)
"""
def parse(self, url, opts):
"""parse the url and return the (modified) url and its parts
Note: a raw file WILL be quoted when it's converted to a URL.
However, other urls (ones which come with a proper scheme) may
or may not be quoted according to opts.quote
opts.quote = 1 --> quote it
opts.quote = 0 --> do not quote it
opts.quote = None --> guess
"""
quote = opts.quote
if opts.prefix:
url = self.add_prefix(url, opts.prefix)
parts = urlparse.urlparse(url)
(scheme, host, path, parm, query, frag) = parts
if not scheme or (len(scheme) == 1 and scheme in string.letters):
# if a scheme isn't specified, we guess that it's "file:"
if url[0] not in '/\\': url = os.path.abspath(url)
url = 'file:' + urllib.pathname2url(url)
parts = urlparse.urlparse(url)
quote = 0 # pathname2url quotes, so we won't do it again
if scheme in ['http', 'https']:
parts = self.process_http(parts)
if quote is None:
quote = self.guess_should_quote(parts)
if quote:
parts = self.quote(parts)
url = urlparse.urlunparse(parts)
return url, parts
def add_prefix(self, url, prefix):
if prefix[-1] == '/' or url[0] == '/':
url = prefix + url
else:
url = prefix + '/' + url
return url
def process_http(self, parts):
(scheme, host, path, parm, query, frag) = parts
if '@' in host and auth_handler:
try:
user_pass, host = host.split('@', 1)
if ':' in user_pass:
user, password = user_pass.split(':', 1)
except ValueError, e:
raise URLGrabError(1, _('Bad URL: %s') % url)
if DEBUG: DEBUG.info('adding HTTP auth: %s, %s', user, password)
auth_handler.add_password(None, host, user, password)
return (scheme, host, path, parm, query, frag)
def quote(self, parts):
"""quote the URL
This method quotes ONLY the path part. If you need to quote
other parts, you should override this and pass in your derived
class. The other alternative is to quote other parts before
passing into urlgrabber.
"""
(scheme, host, path, parm, query, frag) = parts
path = urllib.quote(path)
return (scheme, host, path, parm, query, frag)
hexvals = '0123456789ABCDEF'
def guess_should_quote(self, parts):
"""
Guess whether we should quote a path. This amounts to
guessing whether it's already quoted.
find ' ' -> 1
find '%' -> 1
find '%XX' -> 0
else -> 1
"""
(scheme, host, path, parm, query, frag) = parts
if ' ' in path:
return 1
ind = string.find(path, '%')
if ind > -1:
while ind > -1:
if len(path) < ind+3:
return 1
code = path[ind+1:ind+3].upper()
if code[0] not in self.hexvals or \
code[1] not in self.hexvals:
return 1
ind = string.find(path, '%', ind+1)
return 0
return 1
class URLGrabberOptions:
    """Class to ease kwargs handling.

    Options form a chain: an instance created via derive() stores only
    its overridden values and falls back (through __getattr__) to its
    delegate for everything else.  Only the root of the chain carries
    the full set of defaults.
    """
    def __init__(self, delegate=None, **kwargs):
        """Initialize URLGrabberOptions object.
        Set default values for all options and then update options specified
        in kwargs.
        """
        self.delegate = delegate
        # only a root instance (no delegate) gets the full defaults;
        # derived instances hold just their overrides
        if delegate is None:
            self._set_defaults()
        self._set_attributes(**kwargs)
    def __getattr__(self, name):
        # invoked only when normal attribute lookup fails: fall through
        # to the delegate chain, if there is one
        if self.delegate and hasattr(self.delegate, name):
            return getattr(self.delegate, name)
        raise AttributeError, name
    def raw_throttle(self):
        """Calculate raw throttle value from throttle and bandwidth
        values.
        """
        if self.throttle <= 0:
            return 0
        elif type(self.throttle) == type(0):
            # an int throttle is an absolute bytes/second limit
            return float(self.throttle)
        else: # throttle is a float
            # a float throttle is a fraction of the nominal bandwidth
            return self.bandwidth * self.throttle
    def derive(self, **kwargs):
        """Create a derived URLGrabberOptions instance.
        This method creates a new instance and overrides the
        options specified in kwargs.
        """
        return URLGrabberOptions(delegate=self, **kwargs)
    def _set_attributes(self, **kwargs):
        """Update object attributes with those provided in kwargs."""
        self.__dict__.update(kwargs)
        if have_range and kwargs.has_key('range'):
            # normalize the supplied range value
            self.range = range_tuple_normalize(self.range)
        if not self.reget in [None, 'simple', 'check_timestamp']:
            raise URLGrabError(11, _('Illegal reget mode: %s') \
                % (self.reget, ))
    def _set_defaults(self):
        """Set all options to their default values.
        When adding new options, make sure a default is
        provided here.
        """
        self.progress_obj = None
        self.throttle = 1.0
        self.bandwidth = 0
        self.retry = None
        self.retrycodes = [-1,2,4,5,6,7]
        self.checkfunc = None
        self.copy_local = 0
        self.close_connection = 0
        self.range = None
        self.user_agent = 'urlgrabber/%s' % __version__
        self.keepalive = 1
        self.proxies = None
        self.reget = None
        self.failure_callback = None
        self.interrupt_callback = None
        self.prefix = None
        self.opener = None
        self.cache_openers = True
        self.timeout = None
        self.text = None
        self.http_headers = None
        self.ftp_headers = None
        self.data = None
        self.urlparser = URLParser()
        self.quote = None
        self.ssl_ca_cert = None
        self.ssl_context = None
    def __repr__(self):
        return self.format()
    def format(self, indent=' '):
        """Render this option set (and its delegate chain) for debugging."""
        keys = self.__dict__.keys()
        if self.delegate is not None:
            keys.remove('delegate')
        keys.sort()
        s = '{\n'
        for k in keys:
            s = s + indent + '%-15s: %s,\n' % \
                (repr(k), repr(self.__dict__[k]))
        if self.delegate:
            # recurse with a deeper indent for nested display
            df = self.delegate.format(indent + ' ')
            s = s + indent + '%-15s: %s\n' % ("'delegate'", df)
        s = s + indent + '}'
        return s
class URLGrabber:
    """Provides easy opening of URLs with a variety of options.

    All options are specified as kwargs. Options may be specified when
    the class is created and may be overridden on a per request basis.

    New objects inherit default values from default_grabber.
    """
    def __init__(self, **kwargs):
        self.opts = URLGrabberOptions(**kwargs)
    def _retry(self, opts, func, *args):
        """Call func(opts, *args), retrying per opts.retry/opts.retrycodes.

        Retries only happen for URLGrabError whose errno is listed in
        opts.retrycodes, while tries < opts.retry.  failure_callback /
        interrupt_callback (if set) run on every failure; an exception
        raised by a callback propagates and aborts retrying.
        """
        tries = 0
        while 1:
            # there are only two ways out of this loop. The second has
            # several "sub-ways"
            # 1) via the return in the "try" block
            # 2) by some exception being raised
            # a) an exception is raised that we don't "except"
            # b) a callback raises ANY exception
            # c) we're not retry-ing or have run out of retries
            # d) the URLGrabError code is not in retrycodes
            # beware of infinite loops :)
            tries = tries + 1
            exception = None
            retrycode = None
            callback = None
            if DEBUG: DEBUG.info('attempt %i/%s: %s',
                                 tries, opts.retry, args[0])
            try:
                r = apply(func, (opts,) + args, {})
                if DEBUG: DEBUG.info('success')
                return r
            except URLGrabError, e:
                exception = e
                callback = opts.failure_callback
                retrycode = e.errno
            except KeyboardInterrupt, e:
                exception = e
                callback = opts.interrupt_callback
            if DEBUG: DEBUG.info('exception: %s', exception)
            if callback:
                if DEBUG: DEBUG.info('calling callback: %s', callback)
                cb_func, cb_args, cb_kwargs = self._make_callback(callback)
                obj = CallbackObject(exception=exception, url=args[0],
                                     tries=tries, retry=opts.retry)
                cb_func(obj, *cb_args, **cb_kwargs)
            # bare `raise` re-raises the exception caught above
            if (opts.retry is None) or (tries == opts.retry):
                if DEBUG: DEBUG.info('retries exceeded, re-raising')
                raise
            if (retrycode is not None) and (retrycode not in opts.retrycodes):
                if DEBUG: DEBUG.info('retrycode (%i) not in list %s, re-raising',
                                     retrycode, opts.retrycodes)
                raise
    def urlopen(self, url, **kwargs):
        """open the url and return a file object
        If a progress object or throttle value specified when this
        object was created, then a special file object will be
        returned that supports them. The file object can be treated
        like any other file object.
        """
        opts = self.opts.derive(**kwargs)
        if DEBUG: DEBUG.debug('combined options: %s' % repr(opts))
        (url,parts) = opts.urlparser.parse(url, opts)
        def retryfunc(opts, url):
            return URLGrabberFileObject(url, filename=None, opts=opts)
        return self._retry(opts, retryfunc, url)
    def urlgrab(self, url, filename=None, **kwargs):
        """grab the file at <url> and make a local copy at <filename>
        If filename is none, the basename of the url is used.
        urlgrab returns the filename of the local file, which may be
        different from the passed-in filename if copy_local == 0.
        """
        opts = self.opts.derive(**kwargs)
        if DEBUG: DEBUG.debug('combined options: %s' % repr(opts))
        (url,parts) = opts.urlparser.parse(url, opts)
        (scheme, host, path, parm, query, frag) = parts
        if filename is None:
            filename = os.path.basename( urllib.unquote(path) )
        if scheme == 'file' and not opts.copy_local:
            # just return the name of the local file - don't make a
            # copy currently
            path = urllib.url2pathname(path)
            if host:
                # re-attach the host for UNC-style paths
                path = os.path.normpath('//' + host + path)
            if not os.path.exists(path):
                raise URLGrabError(2,
                    _('Local file does not exist: %s') % (path, ))
            elif not os.path.isfile(path):
                raise URLGrabError(3,
                    _('Not a normal file: %s') % (path, ))
            elif not opts.range:
                return path
        def retryfunc(opts, url, filename):
            # one download attempt; opts.checkfunc (if any) validates
            # the result and may raise URLGrabError to trigger a retry
            fo = URLGrabberFileObject(url, filename, opts)
            try:
                fo._do_grab()
                if not opts.checkfunc is None:
                    cb_func, cb_args, cb_kwargs = \
                        self._make_callback(opts.checkfunc)
                    obj = CallbackObject()
                    obj.filename = filename
                    obj.url = url
                    apply(cb_func, (obj, )+cb_args, cb_kwargs)
            finally:
                fo.close()
            return filename
        return self._retry(opts, retryfunc, url, filename)
    def urlread(self, url, limit=None, **kwargs):
        """read the url into a string, up to 'limit' bytes
        If the limit is exceeded, an exception will be thrown. Note
        that urlread is NOT intended to be used as a way of saying
        "I want the first N bytes" but rather 'read the whole file
        into memory, but don't use too much'
        """
        opts = self.opts.derive(**kwargs)
        if DEBUG: DEBUG.debug('combined options: %s' % repr(opts))
        (url,parts) = opts.urlparser.parse(url, opts)
        if limit is not None:
            # read one extra byte so the check below can distinguish
            # "exactly limit" from "more than limit"
            limit = limit + 1
        def retryfunc(opts, url, limit):
            fo = URLGrabberFileObject(url, filename=None, opts=opts)
            s = ''
            try:
                # this is an unfortunate thing. Some file-like objects
                # have a default "limit" of None, while the built-in (real)
                # file objects have -1. They each break the other, so for
                # now, we just force the default if necessary.
                if limit is None: s = fo.read()
                else: s = fo.read(limit)
                if not opts.checkfunc is None:
                    cb_func, cb_args, cb_kwargs = \
                        self._make_callback(opts.checkfunc)
                    obj = CallbackObject()
                    obj.data = s
                    obj.url = url
                    apply(cb_func, (obj, )+cb_args, cb_kwargs)
            finally:
                fo.close()
            return s
        s = self._retry(opts, retryfunc, url, limit)
        # NOTE(review): since fo.read(limit) caps the read at `limit`
        # bytes, this can only fire if read() returns more than asked —
        # confirm against URLGrabberFileObject.read
        if limit and len(s) > limit:
            raise URLGrabError(8,
                _('Exceeded limit (%i): %s') % (limit, url))
        return s
    def _make_callback(self, callback_obj):
        # accept either a bare callable or a (func, args, kwargs) tuple
        if callable(callback_obj):
            return callback_obj, (), {}
        else:
            return callback_obj
# create the default URLGrabber used by urlXXX functions.
# NOTE: actual defaults are set in URLGrabberOptions
# The module-level convenience functions (urlopen/urlgrab/urlread and the
# deprecated set_* shims below) all operate on this shared instance.
default_grabber = URLGrabber()
class URLGrabberFileObject:
"""This is a file-object wrapper that supports progress objects
and throttling.
This exists to solve the following problem: lets say you want to
drop-in replace a normal open with urlopen. You want to use a
progress meter and/or throttling, but how do you do that without
rewriting your code? Answer: urlopen will return a wrapped file
object that does the progress meter and-or throttling internally.
"""
def __init__(self, url, filename, opts):
self.url = url
self.filename = filename
self.opts = opts
self.fo = None
self._rbuf = ''
self._rbufsize = 1024*8
self._ttime = time.time()
self._tsize = 0
self._amount_read = 0
self._opener = None
self._do_open()
def __getattr__(self, name):
"""This effectively allows us to wrap at the instance level.
Any attribute not found in _this_ object will be searched for
in self.fo. This includes methods."""
if hasattr(self.fo, name):
return getattr(self.fo, name)
raise AttributeError, name
def _get_opener(self):
"""Build a urllib2 OpenerDirector based on request options."""
if self.opts.opener:
return self.opts.opener
elif self._opener is None:
handlers = []
need_keepalive_handler = (have_keepalive and self.opts.keepalive)
need_range_handler = (range_handlers and \
(self.opts.range or self.opts.reget))
# if you specify a ProxyHandler when creating the opener
# it _must_ come before all other handlers in the list or urllib2
# chokes.
if self.opts.proxies:
handlers.append( _proxy_handler_cache.get(self.opts.proxies) )
# -------------------------------------------------------
# OK, these next few lines are a serious kludge to get
# around what I think is a bug in python 2.2's
# urllib2. The basic idea is that default handlers
# get applied first. If you override one (like a
# proxy handler), then the default gets pulled, but
# the replacement goes on the end. In the case of
# proxies, this means the normal handler picks it up
# first and the proxy isn't used. Now, this probably
# only happened with ftp or non-keepalive http, so not
# many folks saw it. The simple approach to fixing it
# is just to make sure you override the other
# conflicting defaults as well. I would LOVE to see
# these go way or be dealt with more elegantly. The
# problem isn't there after 2.2. -MDS 2005/02/24
if not need_keepalive_handler:
handlers.append( urllib2.HTTPHandler() )
if not need_range_handler:
handlers.append( urllib2.FTPHandler() )
# -------------------------------------------------------
ssl_factory = _ssl_factory_cache.get( (self.opts.ssl_ca_cert,
self.opts.ssl_context) )
if need_keepalive_handler:
handlers.append(keepalive_http_handler)
handlers.append(_https_handler_cache.get(ssl_factory))
if need_range_handler:
handlers.extend( range_handlers )
handlers.append( auth_handler )
if self.opts.cache_openers:
self._opener = _opener_cache.get([ssl_factory,] + handlers)
else:
self._opener = _opener_cache.create([ssl_factory,] + handlers)
# OK, I don't like to do this, but otherwise, we end up with
# TWO user-agent headers.
self._opener.addheaders = []
return self._opener
def _do_open(self):
opener = self._get_opener()
req = urllib2.Request(self.url, self.opts.data) # build request object
self._add_headers(req) # add misc headers that we need
self._build_range(req) # take care of reget and byterange stuff
fo, hdr = self._make_request(req, opener)
if self.reget_time and self.opts.reget == 'check_timestamp':
# do this if we have a local file with known timestamp AND
# we're in check_timestamp reget mode.
fetch_again = 0
try:
modified_tuple = hdr.getdate_tz('last-modified')
modified_stamp = rfc822.mktime_tz(modified_tuple)
if modified_stamp > self.reget_time: fetch_again = 1
except (TypeError,):
fetch_again = 1
if fetch_again:
# the server version is newer than the (incomplete) local
# version, so we should abandon the version we're getting
# and fetch the whole thing again.
fo.close()
self.opts.reget = None
del req.headers['Range']
self._build_range(req)
fo, hdr = self._make_request(req, opener)
(scheme, host, path, parm, query, frag) = urlparse.urlparse(self.url)
path = urllib.unquote(path)
if not (self.opts.progress_obj or self.opts.raw_throttle() \
or self.opts.timeout):
# if we're not using the progress_obj, throttling, or timeout
# we can get a performance boost by going directly to
# the underlying fileobject for reads.
self.read = fo.read
if hasattr(fo, 'readline'):
self.readline = fo.readline
elif self.opts.progress_obj:
try:
length = int(hdr['Content-Length'])
length = length + self._amount_read # Account for regets
except (KeyError, ValueError, TypeError):
length = None
self.opts.progress_obj.start(str(self.filename),
urllib.unquote(self.url),
os.path.basename(path),
length, text=self.opts.text)
self.opts.progress_obj.update(0)
(self.fo, self.hdr) = (fo, hdr)
def _add_headers(self, req):
if self.opts.user_agent:
req.add_header('User-agent', self.opts.user_agent)
try: req_type = req.get_type()
except ValueError: req_type = None
if self.opts.http_headers and req_type in ('http', 'https'):
for h, v in self.opts.http_headers:
req.add_header(h, v)
if self.opts.ftp_headers and req_type == 'ftp':
for h, v in self.opts.ftp_headers:
req.add_header(h, v)
def _build_range(self, req):
self.reget_time = None
self.append = 0
reget_length = 0
rt = None
if have_range and self.opts.reget and type(self.filename) == type(''):
# we have reget turned on and we're dumping to a file
try:
s = os.stat(self.filename)
except OSError:
pass
else:
self.reget_time = s[ST_MTIME]
reget_length = s[ST_SIZE]
# Set initial length when regetting
self._amount_read = reget_length
rt = reget_length, ''
self.append = 1
if self.opts.range:
if not have_range:
raise URLGrabError(10, _('Byte range requested but range '\
'support unavailable'))
rt = self.opts.range
if rt[0]: rt = (rt[0] + reget_length, rt[1])
if rt:
header = range_tuple_to_header(rt)
if header: req.add_header('Range', header)
def _make_request(self, req, opener):
try:
if have_socket_timeout and self.opts.timeout:
old_to = socket.getdefaulttimeout()
socket.setdefaulttimeout(self.opts.timeout)
try:
fo = opener.open(req)
finally:
socket.setdefaulttimeout(old_to)
else:
fo = opener.open(req)
hdr = fo.info()
except ValueError, e:
raise URLGrabError(1, _('Bad URL: %s') % (e, ))
except RangeError, e:
raise URLGrabError(9, str(e))
except urllib2.HTTPError, e:
new_e = URLGrabError(14, str(e))
new_e.code = e.code
new_e.exception = e
raise new_e
except IOError, e:
if hasattr(e, 'reason') and have_socket_timeout and \
isinstance(e.reason, TimeoutError):
raise URLGrabError(12, _('Timeout: %s') % (e, ))
else:
raise URLGrabError(4, _('IOError: %s') % (e, ))
except OSError, e:
raise URLGrabError(5, _('OSError: %s') % (e, ))
except HTTPException, e:
raise URLGrabError(7, _('HTTP Exception (%s): %s') % \
(e.__class__.__name__, e))
else:
return (fo, hdr)
def _do_grab(self):
"""dump the file to self.filename."""
if self.append: mode = 'ab'
else: mode = 'wb'
if DEBUG: DEBUG.info('opening local file "%s" with mode %s' % \
(self.filename, mode))
try:
new_fo = open(self.filename, mode)
except IOError, e:
raise URLGrabError(16, _(\
'error opening local file, IOError: %s') % (e, ))
try:
# if we have a known range, only try to read that much.
(low, high) = self.opts.range
amount = high - low
except TypeError, ValueError:
amount = None
bs = 1024*8
size = 0
if amount is not None: bs = min(bs, amount - size)
block = self.read(bs)
size = size + len(block)
while block:
try:
new_fo.write(block)
except IOError, e:
raise URLGrabError(16, _(\
'error writing to local file, IOError: %s') % (e, ))
if amount is not None: bs = min(bs, amount - size)
block = self.read(bs)
size = size + len(block)
new_fo.close()
try:
modified_tuple = self.hdr.getdate_tz('last-modified')
modified_stamp = rfc822.mktime_tz(modified_tuple)
os.utime(self.filename, (modified_stamp, modified_stamp))
except (TypeError,), e: pass
return size
def _fill_buffer(self, amt=None):
"""fill the buffer to contain at least 'amt' bytes by reading
from the underlying file object. If amt is None, then it will
read until it gets nothing more. It updates the progress meter
and throttles after every self._rbufsize bytes."""
# the _rbuf test is only in this first 'if' for speed. It's not
# logically necessary
if self._rbuf and not amt is None:
L = len(self._rbuf)
if amt > L:
amt = amt - L
else:
return
# if we've made it here, then we don't have enough in the buffer
# and we need to read more.
buf = [self._rbuf]
bufsize = len(self._rbuf)
while amt is None or amt:
# first, delay if necessary for throttling reasons
if self.opts.raw_throttle():
diff = self._tsize/self.opts.raw_throttle() - \
(time.time() - self._ttime)
if diff > 0: time.sleep(diff)
self._ttime = time.time()
# now read some data, up to self._rbufsize
if amt is None: readamount = self._rbufsize
else: readamount = min(amt, self._rbufsize)
try:
new = self.fo.read(readamount)
except socket.error, e:
raise URLGrabError(4, _('Socket Error: %s') % (e, ))
except TimeoutError, e:
raise URLGrabError(12, _('Timeout: %s') % (e, ))
except IOError, e:
raise URLGrabError(4, _('IOError: %s') %(e,))
newsize = len(new)
if not newsize: break # no more to read
if amt: amt = amt - newsize
buf.append(new)
bufsize = bufsize + newsize
self._tsize = newsize
self._amount_read = self._amount_read + newsize
if self.opts.progress_obj:
self.opts.progress_obj.update(self._amount_read)
self.opts.progress_obj.end(self._amount_read)
self._rbuf = string.join(buf, '')
return
def read(self, amt=None):
self._fill_buffer(amt)
if amt is None:
s, self._rbuf = self._rbuf, ''
else:
s, self._rbuf = self._rbuf[:amt], self._rbuf[amt:]
return s
def readline(self, limit=-1):
i = string.find(self._rbuf, '\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
L = len(self._rbuf)
self._fill_buffer(L + self._rbufsize)
if not len(self._rbuf) > L: break
i = string.find(self._rbuf, '\n', L)
if i < 0: i = len(self._rbuf)
else: i = i+1
if 0 <= limit < len(self._rbuf): i = limit
s, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return s
def close(self):
if self.opts.progress_obj:
self.opts.progress_obj.end(self._amount_read)
self.fo.close()
if self.opts.close_connection:
try: self.fo.close_connection()
except: pass
#####################################################################
class NoDefault: pass
class ObjectCache:
    """A simple key/value cache with optional create/found hooks.

    Entries are kept in an association list (linear scan on lookup), so
    keys need not be hashable -- the opener cache keys on lists.  The
    ts_* variants serialize access with the instance lock.
    """
    def __init__(self, name=None):
        self.name = name or self.__class__.__name__
        self._lock = thread.allocate_lock()
        self._cache = []
    def lock(self):
        self._lock.acquire()
    def unlock(self):
        self._lock.release()
    def get(self, key, create=None, found=None):
        """Return the value for 'key', creating it on a miss.

        On a hit the 'found' hook (argument or subclass method) may
        transform the value; on a miss the 'create' hook builds and
        caches one.  Raises KeyError when missing and no create hook.
        """
        for (k, v) in self._cache:
            if k == key:
                if DEBUG:
                    DEBUG.debug('%s: found key' % self.name)
                    DEBUG.debug('%s: key = %s' % (self.name, key))
                    DEBUG.debug('%s: val = %s' % (self.name, v))
                found = found or getattr(self, 'found', None)
                if found: v = found(key, v)
                return v
        if DEBUG:
            DEBUG.debug('%s: no key found' % self.name)
            DEBUG.debug('%s: key = %s' % (self.name, key))
        create = create or getattr(self, 'create', None)
        if create:
            value = create(key)
            if DEBUG:
                DEBUG.info('%s: new value created' % self.name)
                DEBUG.debug('%s: val = %s' % (self.name, value))
            self._cache.append( (key, value) )
            return value
        else:
            raise KeyError('key not found: %s' % key)
    def set(self, key, value):
        """Append (key, value) without checking for duplicates."""
        if DEBUG:
            DEBUG.info('%s: inserting key' % self.name)
            DEBUG.debug('%s: key = %s' % (self.name, key))
            DEBUG.debug('%s: val = %s' % (self.name, value))
        self._cache.append( (key, value) )
    def ts_get(self, key, create=None, found=None):
        """Thread-safe get().

        BUG FIX: the result of get() was previously discarded, so
        ts_get() always returned None; now it is returned.
        """
        self._lock.acquire()
        try:
            return self.get(key, create, found)
        finally:
            self._lock.release()
    def ts_set(self, key, value):
        """Thread-safe set()."""
        self._lock.acquire()
        try:
            self.set(key, value)
        finally:
            self._lock.release()
class OpenerCache(ObjectCache):
    """Cache of urllib2 openers, keyed on [ssl_factory] + handler list."""
    def found(self, factory_and_handlers, opener):
        # on a cache hit, re-register every handler with the opener
        # before handing it back out
        for h in factory_and_handlers[1:]:
            h.add_parent(opener)
        return opener
    def create(self, factory_and_handlers):
        # first element is the SSL factory, the rest are handlers
        factory = factory_and_handlers[0]
        return factory.create_opener(*factory_and_handlers[1:])
_opener_cache = OpenerCache()
class ProxyHandlerCache(ObjectCache):
    """Cache of urllib2 ProxyHandlers, keyed on the proxies dict."""
    def create(self, proxies):
        # validate every proxy URL before handing the dict to urllib2
        for v in proxies.values():
            utype, rest = urllib.splittype(v)
            host, remainder = urllib.splithost(rest)
            if (utype is None) or (host is None):
                raise URLGrabError(13, _('Bad proxy URL: %s') % v)
        return urllib2.ProxyHandler(proxies)
_proxy_handler_cache = ProxyHandlerCache()
class HTTPSHandlerCache(ObjectCache):
    # one HTTPSHandler per SSL factory instance
    def create(self, ssl_factory):
        return HTTPSHandler(ssl_factory)
_https_handler_cache = HTTPSHandlerCache()
class SSLFactoryCache(ObjectCache):
    # one sslfactory per (ca_cert, ssl_context) pair
    def create(self, cert_and_context):
        return sslfactory.get_factory(*cert_and_context)
_ssl_factory_cache = SSLFactoryCache()
#####################################################################
# DEPRECATED FUNCTIONS
def set_throttle(new_throttle):
    """Deprecated. Use: default_grabber.throttle = new_throttle"""
    setattr(default_grabber, 'throttle', new_throttle)
def set_bandwidth(new_bandwidth):
    """Deprecated. Use: default_grabber.bandwidth = new_bandwidth"""
    setattr(default_grabber, 'bandwidth', new_bandwidth)
def set_progress_obj(new_progress_obj):
    """Deprecated. Use: default_grabber.progress_obj = new_progress_obj"""
    setattr(default_grabber, 'progress_obj', new_progress_obj)
def set_user_agent(new_user_agent):
    """Deprecated. Use: default_grabber.user_agent = new_user_agent"""
    setattr(default_grabber, 'user_agent', new_user_agent)
def retrygrab(url, filename=None, copy_local=0, close_connection=0,
              progress_obj=None, throttle=None, bandwidth=None,
              numtries=3, retrycodes=[-1,2,4,5,6,7], checkfunc=None):
    """Deprecated. Use: urlgrab() with the retry arg instead"""
    # NOTE: the mutable default for retrycodes is shared across calls but
    # is never mutated here, so it is safe; kept for signature compatibility.
    return urlgrab(url, filename,
                   copy_local=copy_local,
                   close_connection=close_connection,
                   progress_obj=progress_obj,
                   throttle=throttle,
                   bandwidth=bandwidth,
                   retry=numtries,
                   retrycodes=retrycodes,
                   checkfunc=checkfunc)
#####################################################################
# TESTING
def _main_test():
    # Manual smoke test: grab <url> to <filename> from the command line
    # with throttling enabled and an optional text progress meter.
    # Python 2 only (print statements, apply()).
    import sys
    try: url, filename = sys.argv[1:3]
    except ValueError:
        print 'usage:', sys.argv[0], \
              '<url> <filename> [copy_local=0|1] [close_connection=0|1]'
        sys.exit()
    kwargs = {}
    # remaining args are key=value pairs; values are coerced to int
    for a in sys.argv[3:]:
        k, v = string.split(a, '=', 1)
        kwargs[k] = int(v)
    set_throttle(1.0)
    set_bandwidth(32 * 1024)
    print "throttle: %s, throttle bandwidth: %s B/s" % (default_grabber.throttle,
                                                        default_grabber.bandwidth)
    # the progress meter is optional -- skip silently when unavailable
    try: from progress import text_progress_meter
    except ImportError, e: pass
    else: kwargs['progress_obj'] = text_progress_meter()
    try: name = apply(urlgrab, (url, filename), kwargs)
    except URLGrabError, e: print e
    else: print 'LOCAL FILE:', name
def _retry_test():
    # Manual test of the retry machinery: a checkfunc randomly forces a
    # retryable failure (negative error code -1), an immediate failure
    # (-2), or success.  Python 2 only (print statements, apply()).
    import sys
    try: url, filename = sys.argv[1:3]
    except ValueError:
        print 'usage:', sys.argv[0], \
              '<url> <filename> [copy_local=0|1] [close_connection=0|1]'
        sys.exit()
    kwargs = {}
    for a in sys.argv[3:]:
        k, v = string.split(a, '=', 1)
        kwargs[k] = int(v)
    try: from progress import text_progress_meter
    except ImportError, e: pass
    else: kwargs['progress_obj'] = text_progress_meter()
    def cfunc(filename, hello, there='foo'):
        print hello, there
        import random
        rnum = random.random()
        if rnum < .5:
            print 'forcing retry'
            raise URLGrabError(-1, 'forcing retry')
        if rnum < .75:
            print 'forcing failure'
            raise URLGrabError(-2, 'forcing immediate failure')
        print 'success'
        return
    # checkfunc spec: (func, extra_args, extra_kwargs)
    kwargs['checkfunc'] = (cfunc, ('hello',), {'there':'there'})
    try: name = apply(retrygrab, (url, filename), kwargs)
    except URLGrabError, e: print e
    else: print 'LOCAL FILE:', name
def _file_object_test(filename=None):
    # Wraps an in-memory copy of 'filename' in URLGrabberFileObject and
    # checks that each read strategy reproduces the input byte-for-byte.
    # NOTE(review): passing 0 as the opts argument looks stale --
    # URLGrabberFileObject.__init__ immediately dereferences opts
    # attributes via _do_open(); verify this test still runs.
    import random, cStringIO, sys
    if filename is None:
        filename = __file__
    print 'using file "%s" for comparisons' % filename
    fo = open(filename)
    s_input = fo.read()
    fo.close()
    for testfunc in [_test_file_object_smallread,
                     _test_file_object_readall,
                     _test_file_object_readline,
                     _test_file_object_readlines]:
        fo_input = cStringIO.StringIO(s_input)
        fo_output = cStringIO.StringIO()
        wrapper = URLGrabberFileObject(fo_input, None, 0)
        print 'testing %-30s ' % testfunc.__name__,
        testfunc(wrapper, fo_output)
        s_output = fo_output.getvalue()
        if s_output == s_input: print 'passed'
        else: print 'FAILED'
def _test_file_object_smallread(wrapper, fo_output):
while 1:
s = wrapper.read(23)
fo_output.write(s)
if not s: return
def _test_file_object_readall(wrapper, fo_output):
s = wrapper.read()
fo_output.write(s)
def _test_file_object_readline(wrapper, fo_output):
while 1:
s = wrapper.readline()
fo_output.write(s)
if not s: return
def _test_file_object_readlines(wrapper, fo_output):
li = wrapper.readlines()
fo_output.write(string.join(li, ''))
if __name__ == '__main__':
    # manual smoke tests; the file-object test expects a file named
    # 'test' to exist in the current directory
    _main_test()
    _retry_test()
    _file_object_test('test')
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
import httplib
import urllib2
try:
from M2Crypto import SSL
from M2Crypto import httpslib
from M2Crypto import m2urllib2
have_m2crypto = True
except ImportError:
have_m2crypto = False
DEBUG = None
if have_m2crypto:
    class M2SSLFactory:
        """Factory for HTTPS connections and openers backed by M2Crypto."""

        def __init__(self, ssl_ca_cert, ssl_context):
            self.ssl_context = self._get_ssl_context(ssl_ca_cert, ssl_context)

        def _get_ssl_context(self, ssl_ca_cert, ssl_context):
            """
            Create an ssl context using the CA cert file or ssl context.

            The CA cert is used first if it was passed as an option. If not,
            then the supplied ssl context is used. If no ssl context was
            supplied, None is returned.
            """
            if not ssl_ca_cert:
                return ssl_context
            ctx = SSL.Context()
            ctx.load_verify_locations(ssl_ca_cert)
            ctx.set_verify(SSL.verify_peer, -1)
            return ctx

        def create_https_connection(self, host, response_class = None):
            """Return an HTTPS connection to host using this factory's context."""
            conn = httplib.HTTPSConnection(host, self.ssl_context)
            if response_class:
                conn.response_class = response_class
            return conn

        def create_opener(self, *handlers):
            """Build an opener wired to this factory's SSL context."""
            return m2urllib2.build_opener(self.ssl_context, *handlers)
class SSLFactory:
    """Fallback factory using the plain Python SSL stack (no M2Crypto)."""

    def create_https_connection(self, host, response_class = None):
        """Return an HTTPS connection to host, optionally overriding the
        response class."""
        conn = httplib.HTTPSConnection(host)
        if response_class:
            conn.response_class = response_class
        return conn

    def create_opener(self, *handlers):
        """Build a urllib2 opener from the given handlers."""
        return urllib2.build_opener(*handlers)
def get_factory(ssl_ca_cert = None, ssl_context = None):
    """ Return an SSLFactory, based on if M2Crypto is available. """
    if have_m2crypto:
        return M2SSLFactory(ssl_ca_cert, ssl_context)
    # M2Crypto is missing: warn if the caller supplied SSL options that
    # the plain-Python fallback will not use.
    if (ssl_ca_cert or ssl_context) and DEBUG:
        DEBUG.warning("SSL arguments supplied, but M2Crypto is not available. "
                      "Using Python SSL.")
    return SSLFactory()
| Python |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2002-2006 Michael D. Stenner, Ryan Tomayko
# $Id: __init__.py,v 1.20 2006/09/22 00:58:55 mstenner Exp $
"""A high-level cross-protocol url-grabber.
Using urlgrabber, data can be fetched in three basic ways:
urlgrab(url) copy the file to the local filesystem
urlopen(url) open the remote file and return a file object
(like urllib2.urlopen)
urlread(url) return the contents of the file as a string
When using these functions (or methods), urlgrabber supports the
following features:
* identical behavior for http://, ftp://, and file:// urls
* http keepalive - faster downloads of many files by using
only a single connection
* byte ranges - fetch only a portion of the file
* reget - for a urlgrab, resume a partial download
* progress meters - the ability to report download progress
automatically, even when using urlopen!
* throttling - restrict bandwidth usage
* retries - automatically retry a download if it fails. The
number of retries and failure types are configurable.
* authenticated server access for http and ftp
* proxy support - support for authenticated http and ftp proxies
* mirror groups - treat a list of mirrors as a single source,
automatically switching mirrors if there is a failure.
"""
__version__ = '3.1.0'
__date__ = '2006/09/21'
__author__ = 'Michael D. Stenner <mstenner@linux.duke.edu>, ' \
'Ryan Tomayko <rtomayko@naeblis.cx>'
__url__ = 'http://linux.duke.edu/projects/urlgrabber/'
from grabber import urlgrab, urlopen, urlread
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# $Id: progress.py,v 1.7 2005/08/19 21:59:07 mstenner Exp $
import sys
import time
import math
import thread
class BaseMeter:
    """Base class for progress meters.

    Subclasses override the _do_* hooks; this class handles rate
    estimation and throttling of updates to at most one every
    update_period seconds.
    """
    def __init__(self):
        self.update_period = 0.3 # seconds
        self.filename   = None
        self.url        = None
        self.basename   = None
        self.text       = None
        self.size       = None      # total size in bytes, or None if unknown
        self.start_time = None
        self.last_amount_read = 0
        self.last_update_time = None
        self.re = RateEstimator()
    def start(self, filename=None, url=None, basename=None,
              size=None, now=None, text=None):
        """Begin a transfer; 'size' may be None when unknown."""
        self.filename = filename
        self.url      = url
        self.basename = basename
        self.text     = text
        self.size = size
        if not size is None: self.fsize = format_number(size) + 'B'
        if now is None: now = time.time()
        self.start_time = now
        self.re.start(size, now)
        self.last_amount_read = 0
        self.last_update_time = now
        self._do_start(now)
    def _do_start(self, now=None):
        pass
    def update(self, amount_read, now=None):
        """Record progress; forwarded to _do_update at most once per
        update_period."""
        # for a real gui, you probably want to override and put a call
        # to your mainloop iteration function here
        if now is None: now = time.time()
        # BUG FIX: test the "never updated" case first.  The original
        # evaluated self.last_update_time + self.update_period before the
        # falsy check, which raises TypeError when last_update_time is None.
        if not self.last_update_time or \
           (now >= self.last_update_time + self.update_period):
            self.re.update(amount_read, now)
            self.last_amount_read = amount_read
            self.last_update_time = now
            self._do_update(amount_read, now)
    def _do_update(self, amount_read, now=None):
        pass
    def end(self, amount_read, now=None):
        """Finish the transfer; always forwards to _do_end."""
        if now is None: now = time.time()
        self.re.update(amount_read, now)
        self.last_amount_read = amount_read
        self.last_update_time = now
        self._do_end(amount_read, now)
    def _do_end(self, amount_read, now=None):
        pass
class TextMeter(BaseMeter):
    """Single-file progress meter rendering a text bar to a stream."""

    def __init__(self, fo=sys.stderr):
        BaseMeter.__init__(self)
        self.fo = fo

    def _do_update(self, amount_read, now=None):
        label = self.text if self.text is not None else self.basename
        fread = format_number(amount_read)
        fetime = format_time(self.re.elapsed_time())
        if self.size is None:
            # unknown total size: no percentage or ETA available
            out = '\r%-60.60s %5sB %s ' % \
                  (label, fread, fetime)
        else:
            frac = self.re.fraction_read()
            bar = '=' * int(25 * frac)
            frtime = format_time(self.re.remaining_time())
            out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ETA ' % \
                  (label, frac*100, bar, fread, frtime)
        self.fo.write(out)
        self.fo.flush()

    def _do_end(self, amount_read, now=None):
        label = self.text if self.text is not None else self.basename
        total_time = format_time(self.re.elapsed_time())
        total_size = format_number(amount_read)
        if self.size is None:
            out = '\r%-60.60s %5sB %s ' % \
                  (label, total_size, total_time)
        else:
            out = '\r%-25.25s %3i%% |%-25.25s| %5sB %8s ' % \
                  (label, 100, '='*25, total_size, total_time)
        self.fo.write(out + '\n')
        self.fo.flush()

text_progress_meter = TextMeter
class MultiFileHelper(BaseMeter):
    # Per-file meter that forwards every event to its master
    # MultiFileMeter, which aggregates across files.
    def __init__(self, master):
        BaseMeter.__init__(self)
        self.master = master
    def _do_start(self, now):
        self.master.start_meter(self, now)
    def _do_update(self, amount_read, now):
        # elapsed time since last update
        self.master.update_meter(self, now)
    def _do_end(self, amount_read, now):
        # precompute formatted totals for the master's summary line
        self.ftotal_time = format_time(now - self.start_time)
        self.ftotal_size = format_number(self.last_amount_read)
        self.master.end_meter(self, now)
    def failure(self, message, now=None):
        self.master.failure_meter(self, message, now)
    def message(self, message):
        self.master.message_meter(self, message)
class MultiFileMeter:
helperclass = MultiFileHelper
def __init__(self):
self.meters = []
self.in_progress_meters = []
self._lock = thread.allocate_lock()
self.update_period = 0.3 # seconds
self.numfiles = None
self.finished_files = 0
self.failed_files = 0
self.open_files = 0
self.total_size = None
self.failed_size = 0
self.start_time = None
self.finished_file_size = 0
self.last_update_time = None
self.re = RateEstimator()
def start(self, numfiles=None, total_size=None, now=None):
if now is None: now = time.time()
self.numfiles = numfiles
self.finished_files = 0
self.failed_files = 0
self.open_files = 0
self.total_size = total_size
self.failed_size = 0
self.start_time = now
self.finished_file_size = 0
self.last_update_time = now
self.re.start(total_size, now)
self._do_start(now)
def _do_start(self, now):
pass
def end(self, now=None):
if now is None: now = time.time()
self._do_end(now)
def _do_end(self, now):
pass
def lock(self): self._lock.acquire()
def unlock(self): self._lock.release()
###########################################################
# child meter creation and destruction
def newMeter(self):
newmeter = self.helperclass(self)
self.meters.append(newmeter)
return newmeter
def removeMeter(self, meter):
self.meters.remove(meter)
###########################################################
# child functions - these should only be called by helpers
def start_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
if not meter in self.in_progress_meters:
self.in_progress_meters.append(meter)
self.open_files += 1
finally:
self._lock.release()
self._do_start_meter(meter, now)
def _do_start_meter(self, meter, now):
pass
def update_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
if (now >= self.last_update_time + self.update_period) or \
not self.last_update_time:
self.re.update(self._amount_read(), now)
self.last_update_time = now
self._do_update_meter(meter, now)
def _do_update_meter(self, meter, now):
pass
def end_meter(self, meter, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
try: self.in_progress_meters.remove(meter)
except ValueError: pass
self.open_files -= 1
self.finished_files += 1
self.finished_file_size += meter.last_amount_read
finally:
self._lock.release()
self._do_end_meter(meter, now)
def _do_end_meter(self, meter, now):
pass
def failure_meter(self, meter, message, now):
if not meter in self.meters:
raise ValueError('attempt to use orphaned meter')
self._lock.acquire()
try:
try: self.in_progress_meters.remove(meter)
except ValueError: pass
self.open_files -= 1
self.failed_files += 1
if meter.size and self.failed_size is not None:
self.failed_size += meter.size
else:
self.failed_size = None
finally:
self._lock.release()
self._do_failure_meter(meter, message, now)
def _do_failure_meter(self, meter, message, now):
pass
def message_meter(self, meter, message):
pass
########################################################
# internal functions
def _amount_read(self):
tot = self.finished_file_size
for m in self.in_progress_meters:
tot += m.last_amount_read
return tot
class TextMultiFileMeter(MultiFileMeter):
    """Text renderer for MultiFileMeter, writing to a stream."""
    def __init__(self, fo=sys.stderr):
        self.fo = fo
        MultiFileMeter.__init__(self)
    # files: ###/### ###%  data: ######/######  ###%  time: ##:##:##/##:##:##
    def _do_update_meter(self, meter, now):
        self._lock.acquire()
        try:
            format = "files: %3i/%-3i %3i%%  data: %6.6s/%-6.6s  %3i%%  " \
                     "time: %8.8s/%8.8s"
            df = self.finished_files
            tf = self.numfiles or 1
            pf = 100 * float(df)/tf + 0.49
            dd = self.re.last_amount_read
            # NOTE(review): td may be None when total_size is unknown;
            # format_number(None) is assumed to cope -- TODO confirm.
            td = self.total_size
            pd = 100 * (self.re.fraction_read() or 0) + 0.49
            dt = self.re.elapsed_time()
            rt = self.re.remaining_time()
            if rt is None: tt = None
            else: tt = dt + rt
            fdd = format_number(dd) + 'B'
            ftd = format_number(td) + 'B'
            fdt = format_time(dt, 1)
            ftt = format_time(tt, 1)
            out = '%-79.79s' % (format % (df, tf, pf, fdd, ftd, pd, fdt, ftt))
            self.fo.write('\r' + out)
            self.fo.flush()
        finally:
            self._lock.release()
    def _do_end_meter(self, meter, now):
        self._lock.acquire()
        try:
            format = "%-30.30s %6.6s    %8.8s    %9.9s"
            fn = meter.basename
            size = meter.last_amount_read
            fsize = format_number(size) + 'B'
            et = meter.re.elapsed_time()
            fet = format_time(et, 1)
            # NOTE(review): et == 0 would raise ZeroDivisionError here
            frate = format_number(size / et) + 'B/s'
            out = '%-79.79s' % (format % (fn, fsize, fet, frate))
            self.fo.write('\r' + out + '\n')
        finally:
            self._lock.release()
        self._do_update_meter(meter, now)
    def _do_failure_meter(self, meter, message, now):
        self._lock.acquire()
        try:
            format = "%-30.30s %6.6s %s"
            fn = meter.basename
            if type(message) in (type(''), type(u'')):
                message = message.splitlines()
            if not message: message = ['']
            out = '%-79s' % (format % (fn, 'FAILED', message[0] or ''))
            self.fo.write('\r' + out + '\n')
            for m in message[1:]: self.fo.write(' ' + m + '\n')
        finally:
            # BUG FIX: the release used to sit inside the try body with
            # _do_update_meter in the finally clause -- an exception while
            # writing left the lock held and _do_update_meter then
            # deadlocked trying to re-acquire it.  Release unconditionally,
            # then update outside the critical section.
            self._lock.release()
        self._do_update_meter(meter, now)
    def message_meter(self, meter, message):
        self._lock.acquire()
        try:
            pass
        finally:
            self._lock.release()
    def _do_end(self, now):
        self._do_update_meter(None, now)
        self._lock.acquire()
        try:
            self.fo.write('\n')
            self.fo.flush()
        finally:
            self._lock.release()
######################################################################
# support classes and functions
class RateEstimator:
    """Estimate transfer rate and remaining time with a time-weighted
    rolling average.

    timescale -- smoothing horizon in seconds; the average takes on an
                 essentially new value after roughly this much time.
    """
    def __init__(self, timescale=5.0):
        self.timescale = timescale
    def start(self, total=None, now=None):
        """Begin (or restart) timing a transfer of *total* bytes
        (None if the size is unknown)."""
        if now is None: now = time.time()
        self.total = total
        self.start_time = now
        self.last_update_time = now
        self.last_amount_read = 0
        self.ave_rate = None
    def update(self, amount_read, now=None):
        """Record that *amount_read* total bytes have been read so far."""
        if now is None: now = time.time()
        if amount_read == 0:
            # if we just started this file, all bets are off
            self.last_update_time = now
            self.last_amount_read = 0
            self.ave_rate = None
            return
        time_diff = now - self.last_update_time
        read_diff = amount_read - self.last_amount_read
        self.last_update_time = now
        self.last_amount_read = amount_read
        self.ave_rate = self._temporal_rolling_ave(
            time_diff, read_diff, self.ave_rate, self.timescale)
    #####################################################################
    # result methods
    def average_rate(self):
        "get the average transfer rate (in bytes/second)"
        return self.ave_rate
    def elapsed_time(self):
        "the time between the start of the transfer and the most recent update"
        return self.last_update_time - self.start_time
    def remaining_time(self):
        "estimated time remaining (None if rate or total size is unknown)"
        if not self.ave_rate or not self.total: return None
        return (self.total - self.last_amount_read) / self.ave_rate
    def fraction_read(self):
        """the fraction of the data that has been read
        (can be None for unknown transfer size)"""
        if self.total is None: return None
        elif self.total == 0: return 1.0
        else: return float(self.last_amount_read)/self.total
    #########################################################################
    # support methods
    def _temporal_rolling_ave(self, time_diff, read_diff, last_ave, timescale):
        """a temporal rolling average performs smooth averaging even when
        updates come at irregular intervals.  This is performed by scaling
        the "epsilon" according to the time since the last update.
        Specifically, epsilon = time_diff / timescale
        As a general rule, the average will take on a completely new value
        after 'timescale' seconds."""
        epsilon = time_diff / timescale
        if epsilon > 1: epsilon = 1.0
        return self._rolling_ave(time_diff, read_diff, last_ave, epsilon)
    def _rolling_ave(self, time_diff, read_diff, last_ave, epsilon):
        """perform a "rolling average" iteration
        a rolling average "folds" new data into an existing average with
        some weight, epsilon.  epsilon must be between 0.0 and 1.0 (inclusive)
        a value of 0.0 means only the old value (initial value) counts,
        and a value of 1.0 means only the newest value is considered."""
        try:
            recent_rate = read_diff / time_diff
        except ZeroDivisionError:
            recent_rate = None
        if last_ave is None: return recent_rate
        elif recent_rate is None: return last_ave
        # at this point, both last_ave and recent_rate are numbers
        return epsilon * recent_rate + (1 - epsilon) * last_ave
    def _round_remaining_time(self, rt, start_time=15.0):
        """round the remaining time, depending on its size
        If rt is between n*start_time and (n+1)*start_time round downward
        to the nearest multiple of n (for any counting number n).
        If rt < start_time, round down to the nearest 1.
        For example (for start_time = 15.0):
         2.7  -> 2.0
         25.2 -> 25.0
         26.4 -> 26.0
         35.3 -> 34.0
         63.6 -> 60.0
        """
        # BUG FIX: rt == 0 used to fall through to math.log(0) and raise
        # ValueError; treat it like a negative value and return 0.0.
        if rt <= 0: return 0.0
        shift = int(math.log(rt/start_time)/math.log(2))
        rt = int(rt)
        if shift <= 0: return rt
        return float(int(rt) >> shift << shift)
def format_time(seconds, use_hours=0):
    """Format a duration in seconds as 'MM:SS' (or 'HH:MM:SS' if use_hours).

    None or a negative value renders as dashes ('--:--' / '--:--:--').
    """
    if seconds is None or seconds < 0:
        if use_hours: return '--:--:--'
        else: return '--:--'
    # BUG FIX (portability): the original used '/' and relied on Python 2
    # integer division; divmod keeps the components integral everywhere.
    minutes, seconds = divmod(int(seconds), 60)
    if use_hours:
        hours, minutes = divmod(minutes, 60)
        return '%02i:%02i:%02i' % (hours, minutes, seconds)
    else:
        return '%02i:%02i' % (minutes, seconds)
def format_number(number, SI=0, space=' '):
    """Turn numbers into human-readable metric-like numbers

    SI    -- if true use powers of 1000 (SI), otherwise powers of 1024
    space -- separator placed between the number and its unit symbol
    """
    symbols = ['', # (none)
               'k', # kilo
               'M', # mega
               'G', # giga
               'T', # tera
               'P', # peta
               'E', # exa
               'Z', # zetta
               'Y'] # yotta
    if SI: step = 1000.0
    else: step = 1024.0
    thresh = 999
    depth = 0
    max_depth = len(symbols) - 1
    # we want numbers between 0 and thresh, but don't exceed the length
    # of our list.  In that event, the formatting will be screwed up,
    # but it'll still show the right number.
    while number > thresh and depth < max_depth:
        depth = depth + 1
        number = number / step
    # BUG FIX (portability): the original tested ``type(number) == type(1L)``,
    # and ``1L`` is a Python 2-only literal (a SyntaxError under Python 3).
    try:
        integer_types = (int, long)
    except NameError:
        integer_types = (int,)      # Python 3: long was merged into int
    if isinstance(number, integer_types):
        # it's an int or a long, which means it didn't get divided,
        # which means it's already short enough
        format = '%i%s%s'
    elif number < 9.95:
        # must use 9.95 for proper sizing.  For example, 9.99 will be
        # rounded to 10.0 with the .1f format string (which is too long)
        format = '%.1f%s%s'
    else:
        format = '%.0f%s%s'
    return(format % (float(number or 0), space, symbols[depth]))
| Python |
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# $Id: byterange.py,v 1.12 2006/07/20 20:15:58 mstenner Exp $
import os
import stat
import urllib
import urllib2
import rfc822
DEBUG = None
try:
from cStringIO import StringIO
except ImportError, msg:
from StringIO import StringIO
class RangeError(IOError):
    """Error raised when an unsatisfiable byte range is requested.

    Subclasses IOError so callers that already catch IOError also
    handle range failures.
    """
    pass
class HTTPRangeHandler(urllib2.BaseHandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is a HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.

    Example:
        import urllib2
        import byterange

        range_handler = byterange.HTTPRangeHandler()
        opener = urllib2.build_opener(range_handler)

        # install it
        urllib2.install_opener(opener)

        # create Request and set Range header
        req = urllib2.Request('http://www.python.org/')
        req.add_header('Range', 'bytes=30-50')
        f = urllib2.urlopen(req)
    """
    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response: repackage it as a normal
        # successful response object instead of treating it as an error.
        r = urllib.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r
    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise RangeError('Requested Range Not Satisfiable')
class HTTPSRangeHandler(HTTPRangeHandler):
    """Range header support for HTTPS.

    urllib2 dispatches ``https_error_*`` for https URLs; both methods
    simply delegate to the HTTP implementations.
    """
    def https_error_206(self, req, fp, code, msg, hdrs):
        return self.http_error_206(req, fp, code, msg, hdrs)
    def https_error_416(self, req, fp, code, msg, hdrs):
        # BUG FIX: this method used to call *itself* (infinite recursion
        # ending in a stack overflow).  It must delegate to the HTTP
        # handler, which raises RangeError.
        return self.http_error_416(req, fp, code, msg, hdrs)
class RangeableFileObject:
    """File object wrapper to enable raw range handling.
    This was implemented primarily for handling range
    specifications for file:// urls. This object effectively makes
    a file object look like it consists only of a range of bytes in
    the stream.

    Examples:
        # expose 10 bytes, starting at byte position 20, from
        # /etc/aliases.
        >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
        # seek seeks within the range (to position 23 in this case)
        >>> fo.seek(3)
        # tell tells where your at _within the range_ (position 3 in
        # this case)
        >>> fo.tell()
        # read EOFs if an attempt is made to read past the last
        # byte in the range. the following will return only 7 bytes.
        >>> fo.read(30)
    """
    def __init__(self, fo, rangetup):
        """Create a RangeableFileObject.
        fo       -- a file like object. only the read() method need be
                    supported but supporting an optimized seek() is
                    preferable.
        rangetup -- a (firstbyte,lastbyte) tuple specifying the range
                    to work over.
        The file object provided is assumed to be at byte offset 0.
        """
        self.fo = fo
        (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
        self.realpos = 0
        self._do_seek(self.firstbyte)
    def __getattr__(self, name):
        """This effectively allows us to wrap at the instance level.
        Any attribute not found in _this_ object will be searched for
        in self.fo.  This includes methods."""
        if hasattr(self.fo, name):
            return getattr(self.fo, name)
        # FIX (portability): was the Python 2-only statement form
        # ``raise AttributeError, name`` (a SyntaxError under Python 3);
        # the call form behaves identically on Python 2.
        raise AttributeError(name)
    def tell(self):
        """Return the position within the range.
        This is different from fo.seek in that position 0 is the
        first byte position of the range tuple. For example, if
        this object was created with a range tuple of (500,899),
        tell() will return 0 when at byte position 500 of the file.
        """
        return (self.realpos - self.firstbyte)
    def seek(self,offset,whence=0):
        """Seek within the byte range.
        Positioning is identical to that described under tell().
        """
        assert whence in (0, 1, 2)
        if whence == 0:   # absolute seek
            realoffset = self.firstbyte + offset
        elif whence == 1: # relative seek
            realoffset = self.realpos + offset
        elif whence == 2: # absolute from end of file
            # XXX: are we raising the right Error here?
            raise IOError('seek from end of file not supported.')
        # do not allow seek past lastbyte in range
        if self.lastbyte and (realoffset >= self.lastbyte):
            realoffset = self.lastbyte
        self._do_seek(realoffset - self.realpos)
    def read(self, size=-1):
        """Read within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.read(size)
        self.realpos += len(rslt)
        return rslt
    def readline(self, size=-1):
        """Read lines within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.readline(size)
        self.realpos += len(rslt)
        return rslt
    def _calc_read_size(self, size):
        """Handles calculating the amount of data to read based on
        the range.
        """
        if self.lastbyte:
            if size > -1:
                if ((self.realpos + size) >= self.lastbyte):
                    size = (self.lastbyte - self.realpos)
            else:
                size = (self.lastbyte - self.realpos)
        return size
    def _do_seek(self,offset):
        """Seek based on whether wrapped object supports seek().
        offset is relative to the current position (self.realpos).
        """
        assert offset >= 0
        if not hasattr(self.fo, 'seek'):
            self._poor_mans_seek(offset)
        else:
            self.fo.seek(self.realpos + offset)
        self.realpos+= offset
    def _poor_mans_seek(self,offset):
        """Seek by calling the wrapped file objects read() method.
        This is used for file like objects that do not have native
        seek support. The wrapped objects read() method is called
        to manually seek to the desired position.
        offset -- read this number of bytes from the wrapped
                  file object.
        raise RangeError if we encounter EOF before reaching the
        specified offset.
        """
        pos = 0
        bufsize = 1024
        while pos < offset:
            if (pos + bufsize) > offset:
                bufsize = offset - pos
            buf = self.fo.read(bufsize)
            if len(buf) != bufsize:
                raise RangeError('Requested Range Not Satisfiable')
            pos+= bufsize
class FileRangeHandler(urllib2.FileHandler):
    """FileHandler subclass that adds Range support.
    This class handles Range headers exactly like an HTTP
    server would.
    """
    def open_local_file(self, req):
        """Open a local file, honoring any Range header on *req*.

        Returns an addinfourl whose Content-Length reflects the selected
        range; raises RangeError when the range cannot be satisfied.
        """
        import mimetypes
        import mimetools
        host = req.get_host()
        file = req.get_selector()
        localfile = urllib.url2pathname(file)
        stats = os.stat(localfile)
        size = stats[stat.ST_SIZE]
        modified = rfc822.formatdate(stats[stat.ST_MTIME])
        mtype = mimetypes.guess_type(file)[0]
        if host:
            host, port = urllib.splitport(host)
            # NOTE(review): 'socket' is imported further down this module;
            # that import executes at module load, before this method can
            # run, so the name is available here at call time.
            if port or socket.gethostbyname(host) not in self.get_names():
                raise urllib2.URLError('file not on local host')
        fo = open(localfile,'rb')
        brange = req.headers.get('Range',None)
        brange = range_header_to_tuple(brange)
        # () means the header was present but malformed
        assert brange != ()
        if brange:
            (fb,lb) = brange
            if lb == '': lb = size
            if fb < 0 or fb > size or lb > size:
                raise RangeError('Requested Range Not Satisfiable')
            # expose only the requested window of the file
            size = (lb - fb)
            fo = RangeableFileObject(fo, (fb,lb))
        headers = mimetools.Message(StringIO(
            'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
            (mtype or 'text/plain', size, modified)))
        return urllib.addinfourl(fo, headers, 'file:'+file)
# FTP Range Support
# Unfortunately, a large amount of base FTP code had to be copied
# from urllib and urllib2 in order to insert the FTP REST command.
# Code modifications for range support have been commented as
# follows:
# -- range support modifications start/end here
from urllib import splitport, splituser, splitpasswd, splitattr, \
unquote, addclosehook, addinfourl
import ftplib
import socket
import sys
import ftplib
import mimetypes
import mimetools
class FTPRangeHandler(urllib2.FTPHandler):
    """FTPHandler subclass that honors Range headers on ftp:// requests.

    Most of this code is copied from urllib/urllib2; the additions are
    bracketed by 'range support modifications' comments and use the FTP
    REST command (the ``rest`` argument) to start mid-file.
    """
    def ftp_open(self, req):
        """Open an FTP URL, applying any Range header on *req*."""
        host = req.get_host()
        if not host:
            raise IOError, ('ftp error', 'no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')
        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise urllib2.URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs)
            # 'I' (binary) for a file transfer, 'D' for a directory listing
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitattr(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            # -- range support modifications start here
            rest = None
            range_tup = range_header_to_tuple(req.headers.get('Range',None))
            assert range_tup != ()
            if range_tup:
                (fb,lb) = range_tup
                if fb > 0: rest = fb
            # -- range support modifications end here
            fp, retrlen = fw.retrfile(file, type, rest)
            # -- range support modifications start here
            if range_tup:
                (fb,lb) = range_tup
                if lb == '':
                    if retrlen is None or retrlen == 0:
                        raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
                    lb = retrlen
                    retrlen = lb - fb
                    if retrlen < 0:
                        # beginning of range is larger than file
                        raise RangeError('Requested Range Not Satisfiable')
                else:
                    retrlen = lb - fb
                    fp = RangeableFileObject(fp, (0,retrlen))
            # -- range support modifications end here
            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            raise IOError, ('ftp error', msg), sys.exc_info()[2]
    def connect_ftp(self, user, passwd, host, port, dirs):
        """Return a REST-capable ftpwrapper connected to host:port."""
        fw = ftpwrapper(user, passwd, host, port, dirs)
        return fw
class ftpwrapper(urllib.ftpwrapper):
    # range support note:
    #   this ftpwrapper code is copied directly from
    #   urllib. The only enhancement is to add the rest
    #   argument and pass it on to ftp.ntransfercmd
    def retrfile(self, file, type, rest=None):
        """Retrieve *file*; *rest*, if given, is the byte offset passed
        to the FTP REST command so the transfer starts mid-file.

        Returns (fp, retrlen): a file-like object and the advertised
        transfer length (may be None).
        """
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # connection dropped; reconnect and retry the mode change
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Use nlst to see if the file exists at all
            try:
                self.ftp.nlst(file)
            except ftplib.error_perm, reason:
                raise IOError, ('ftp error', reason), sys.exc_info()[2]
            # Restore the transfer mode!
            self.ftp.voidcmd(cmd)
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd, rest)
            except ftplib.error_perm, reason:
                if str(reason)[:3] == '501':
                    # workaround for REST not supported error
                    fp, retrlen = self.retrfile(file, type)
                    fp = RangeableFileObject(fp, (rest,''))
                    return (fp, retrlen)
                elif str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing
            if file: cmd = 'LIST ' + file
            else: cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                            self.endtransfer), conn[1])
####################################################################
# Range Tuple Functions
# XXX: These range tuple functions might go better in a class.
_rangere = None
def range_header_to_tuple(range_header):
    """Parse a Range header value into a (firstbyte, lastbyte) tuple.

    Range headers have the form "bytes=<firstbyte>-<lastbyte>".  The
    lastbyte component is optional; when missing it is returned as an
    empty string.  The returned lastbyte is exclusive (one past the
    header's inclusive last byte).

    Return None if range_header is None, and () if range_header does
    not conform to the range spec pattern.
    """
    global _rangere
    if range_header is None:
        return None
    if _rangere is None:
        # compile lazily and cache at module level
        import re
        _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
    match = _rangere.match(range_header)
    if not match:
        return ()
    tup = range_tuple_normalize(match.group(1, 2))
    if tup and tup[1]:
        # convert the inclusive header end to an exclusive offset
        tup = (tup[0], tup[1] + 1)
    return tup
def range_tuple_to_header(range_tup):
    """Convert a range tuple to a Range header value.

    Return a string of the form "bytes=<firstbyte>-<lastbyte>", or None
    when no range is needed (input is None or covers the whole file).
    """
    if range_tup is None:
        return None
    normalized = range_tuple_normalize(range_tup)
    if not normalized:
        return None
    first, last = normalized
    if last:
        # the tuple's end is exclusive; the header's is inclusive
        last = last - 1
    return 'bytes=%s-%s' % (first, last)
def range_tuple_normalize(range_tup):
    """Normalize a (first_byte,last_byte) range tuple.
    Return a tuple whose first element is guaranteed to be an int
    and whose second element will be '' (meaning: the last byte) or
    an int. Finally, return None if the normalized tuple == (0,'')
    as that is equivalent to retrieving the entire file.

    Raises RangeError if last_byte precedes first_byte.
    """
    if range_tup is None: return None
    # handle first byte
    fb = range_tup[0]
    if fb in (None,''): fb = 0
    else: fb = int(fb)
    # handle last byte
    try: lb = range_tup[1]
    except IndexError: lb = ''
    else:
        if lb is None: lb = ''
        elif lb != '': lb = int(lb)
    # check if range is over the entire file
    if (fb,lb) == (0,''): return None
    # check that the range is valid
    # BUG FIX (portability): comparing lb < fb with lb == '' raises
    # TypeError on Python 3 (on Python 2 the mixed comparison was always
    # False, so '' never triggered the error); skip the check for ''.
    if lb != '' and lb < fb:
        raise RangeError('Invalid byte range: %s-%s' % (fb,lb))
    return (fb,lb)
| Python |
#!/usr/bin/env python
# Team 4067 FRC Tools Installer - easy installer for FRC Tools
# Copyright (C) 2013 River Hill HS Robotics Team (Albert H.)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class DownloadProgress(QtCore.QThread):
    """Worker thread that reports download progress via Qt signals."""
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        # BUG FIX: was the bare name ``QThread.__init__`` -- QThread is not
        # visibly imported by itself; qualify it consistently with the
        # ``QtCore.QThread`` base-class reference above.
        QtCore.QThread.__init__(self, parent)
        self.exiting = False        # set True to ask the thread to stop
        self.doneDownload = False   # set True when the download completes
    def run(self):
        while not self.exiting and not self.doneDownload:
            # FIXME: ``bla`` is a leftover placeholder from the original
            # author -- it raises NameError at runtime; the real download
            # work belongs here.
            bla
    def __del__(self):
        # request shutdown and wait for the thread to finish
        self.exiting = True
        self.wait()
class VerificationProgress(QtCore.QThread):
    """Worker thread that reports verification progress via Qt signals."""
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        # BUG FIX: was the bare name ``QThread.__init__`` -- QThread is not
        # visibly imported by itself; qualify it consistently with the
        # ``QtCore.QThread`` base-class reference above.
        QtCore.QThread.__init__(self, parent)
        self.exiting = False      # set True to ask the thread to stop
        self.doneVerify = False   # set True when verification completes
    def run(self):
        while not self.exiting and not self.doneVerify:
            # FIXME: ``bla`` is a leftover placeholder from the original
            # author -- it raises NameError at runtime; the real
            # verification work belongs here.
            bla
    def __del__(self):
        # request shutdown and wait for the thread to finish
        self.exiting = True
        self.wait()
class ExtractProgress(QtCore.QThread):
    """Worker thread that reports extraction progress via Qt signals."""
    signalUpdateProcess = QtCore.pyqtSignal(int)
    signalDone = QtCore.pyqtSignal(bool)
    def __init__(self, parent = None):
        # BUG FIX: was the bare name ``QThread.__init__`` -- QThread is not
        # visibly imported by itself; qualify it consistently with the
        # ``QtCore.QThread`` base-class reference above.
        QtCore.QThread.__init__(self, parent)
        self.exiting = False       # set True to ask the thread to stop
        self.doneExtract = False   # set True when extraction completes
    def run(self):
        while not self.exiting and not self.doneExtract:
            # FIXME: ``bla`` is a leftover placeholder from the original
            # author -- it raises NameError at runtime; the real
            # extraction work belongs here.
            bla
    def __del__(self):
        # request shutdown and wait for the thread to finish
        self.exiting = True
        self.wait()
| Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Options.ui'
#
# Created: Sat Jan 19 21:47:35 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Generated PyQt4 compatibility shims: fall back gracefully when the
# running PyQt4 lacks QString.fromUtf8 or the _encoding translate signature.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString.fromUtf8 unavailable: plain Python strings are used as-is
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # translate with an explicit encoding (older PyQt4 signature)
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # this PyQt4 has no UnicodeUTF8 attribute: translate without encoding
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_OptionsForm(object):
    """Auto-generated (pyuic4) UI scaffolding for the Options dialog.

    Generated from 'Options.ui' -- per the file header, hand edits will
    be lost on regeneration.
    """
    def setupUi(self, OptionsForm):
        """Build the widget tree and layouts for *OptionsForm*."""
        OptionsForm.setObjectName(_fromUtf8("OptionsForm"))
        OptionsForm.resize(612, 226)
        self.horizontalLayout_2 = QtGui.QHBoxLayout(OptionsForm)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        # left-hand logo pane
        self.logoGraphicsView = QtGui.QGraphicsView(OptionsForm)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.logoGraphicsView.sizePolicy().hasHeightForWidth())
        self.logoGraphicsView.setSizePolicy(sizePolicy)
        self.logoGraphicsView.setObjectName(_fromUtf8("logoGraphicsView"))
        self.horizontalLayout_2.addWidget(self.logoGraphicsView)
        self.tabLayout = QtGui.QVBoxLayout()
        self.tabLayout.setObjectName(_fromUtf8("tabLayout"))
        self.optionTabs = QtGui.QTabWidget(OptionsForm)
        self.optionTabs.setObjectName(_fromUtf8("optionTabs"))
        # "Network" tab (placeholder label only)
        self.networkTab = QtGui.QWidget()
        self.networkTab.setObjectName(_fromUtf8("networkTab"))
        self.notImplementedLbl = QtGui.QLabel(self.networkTab)
        self.notImplementedLbl.setGeometry(QtCore.QRect(10, 10, 101, 16))
        self.notImplementedLbl.setObjectName(_fromUtf8("notImplementedLbl"))
        self.optionTabs.addTab(self.networkTab, _fromUtf8(""))
        # "Server" tab: URL entry for an alternate download server
        self.serverTab = QtGui.QWidget()
        self.serverTab.setObjectName(_fromUtf8("serverTab"))
        self.verticalLayoutWidget_2 = QtGui.QWidget(self.serverTab)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(9, 9, 379, 141))
        self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
        self.serverOptionsLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
        self.serverOptionsLayout.setMargin(0)
        self.serverOptionsLayout.setObjectName(_fromUtf8("serverOptionsLayout"))
        self.serverPromptLbl = QtGui.QLabel(self.verticalLayoutWidget_2)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.serverPromptLbl.sizePolicy().hasHeightForWidth())
        self.serverPromptLbl.setSizePolicy(sizePolicy)
        self.serverPromptLbl.setWordWrap(True)
        self.serverPromptLbl.setObjectName(_fromUtf8("serverPromptLbl"))
        self.serverOptionsLayout.addWidget(self.serverPromptLbl)
        self.urlLayout = QtGui.QHBoxLayout()
        self.urlLayout.setObjectName(_fromUtf8("urlLayout"))
        self.urlLbl = QtGui.QLabel(self.verticalLayoutWidget_2)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.urlLbl.sizePolicy().hasHeightForWidth())
        self.urlLbl.setSizePolicy(sizePolicy)
        self.urlLbl.setObjectName(_fromUtf8("urlLbl"))
        self.urlLayout.addWidget(self.urlLbl)
        self.urlTxt = QtGui.QLineEdit(self.verticalLayoutWidget_2)
        self.urlTxt.setObjectName(_fromUtf8("urlTxt"))
        self.urlLayout.addWidget(self.urlTxt)
        self.serverOptionsLayout.addLayout(self.urlLayout)
        self.optionTabs.addTab(self.serverTab, _fromUtf8(""))
        # "Packages" tab: checkboxes selecting which packages to install
        self.packagesTab = QtGui.QWidget()
        self.packagesTab.setObjectName(_fromUtf8("packagesTab"))
        self.verticalLayoutWidget_3 = QtGui.QWidget(self.packagesTab)
        self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(9, 9, 381, 141))
        self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
        self.packagesOptionsLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
        self.packagesOptionsLayout.setMargin(0)
        self.packagesOptionsLayout.setObjectName(_fromUtf8("packagesOptionsLayout"))
        self.packagesPromptLbl = QtGui.QLabel(self.verticalLayoutWidget_3)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.packagesPromptLbl.sizePolicy().hasHeightForWidth())
        self.packagesPromptLbl.setSizePolicy(sizePolicy)
        self.packagesPromptLbl.setObjectName(_fromUtf8("packagesPromptLbl"))
        self.packagesOptionsLayout.addWidget(self.packagesPromptLbl)
        self.FRCToolsDriverStationCheckbox = QtGui.QCheckBox(self.verticalLayoutWidget_3)
        self.FRCToolsDriverStationCheckbox.setObjectName(_fromUtf8("FRCToolsDriverStationCheckbox"))
        self.packagesOptionsLayout.addWidget(self.FRCToolsDriverStationCheckbox)
        self.FRCUtilitiesUpdateCheckbox = QtGui.QCheckBox(self.verticalLayoutWidget_3)
        self.FRCUtilitiesUpdateCheckbox.setObjectName(_fromUtf8("FRCUtilitiesUpdateCheckbox"))
        self.packagesOptionsLayout.addWidget(self.FRCUtilitiesUpdateCheckbox)
        self.FRCDriverStationUpdateCheckbox = QtGui.QCheckBox(self.verticalLayoutWidget_3)
        self.FRCDriverStationUpdateCheckbox.setObjectName(_fromUtf8("FRCDriverStationUpdateCheckbox"))
        self.packagesOptionsLayout.addWidget(self.FRCDriverStationUpdateCheckbox)
        self.optionTabs.addTab(self.packagesTab, _fromUtf8(""))
        self.tabLayout.addWidget(self.optionTabs)
        # OK / Cancel buttons below the tab widget
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        self.btnOK = QtGui.QPushButton(OptionsForm)
        self.btnOK.setObjectName(_fromUtf8("btnOK"))
        self.buttonLayout.addWidget(self.btnOK)
        self.btnCancel = QtGui.QPushButton(OptionsForm)
        self.btnCancel.setObjectName(_fromUtf8("btnCancel"))
        self.buttonLayout.addWidget(self.btnCancel)
        self.tabLayout.addLayout(self.buttonLayout)
        self.horizontalLayout_2.addLayout(self.tabLayout)
        self.horizontalLayout_2.setStretch(1, 1)
        self.retranslateUi(OptionsForm)
        self.optionTabs.setCurrentIndex(2)
        QtCore.QMetaObject.connectSlotsByName(OptionsForm)
    def retranslateUi(self, OptionsForm):
        """Apply (re-)translated display strings to all widgets."""
        OptionsForm.setWindowTitle(_translate("OptionsForm", "Team 4067 - 2013 FRC Software Installer - Options", None))
        self.notImplementedLbl.setText(_translate("OptionsForm", "Not implemented...", None))
        self.optionTabs.setTabText(self.optionTabs.indexOf(self.networkTab), _translate("OptionsForm", "&Network", None))
        self.serverPromptLbl.setText(_translate("OptionsForm", "You may specify a different server URL to download and install packages from.", None))
        self.urlLbl.setText(_translate("OptionsForm", "<html><head/><body><p><span style=\" font-weight:600;\">URL:</span></p></body></html>", None))
        self.optionTabs.setTabText(self.optionTabs.indexOf(self.serverTab), _translate("OptionsForm", "&Server", None))
        self.packagesPromptLbl.setText(_translate("OptionsForm", "You may select which packages you wish to download and install.", None))
        self.FRCToolsDriverStationCheckbox.setText(_translate("OptionsForm", "2013 FRC Tools and Driver Station", None))
        self.FRCUtilitiesUpdateCheckbox.setText(_translate("OptionsForm", "2013 FRC Utilities Update", None))
        self.FRCDriverStationUpdateCheckbox.setText(_translate("OptionsForm", "2013 FRC Driver Station Update", None))
        self.optionTabs.setTabText(self.optionTabs.indexOf(self.packagesTab), _translate("OptionsForm", "&Packages", None))
        self.btnOK.setText(_translate("OptionsForm", "&OK", None))
        self.btnCancel.setText(_translate("OptionsForm", "&Cancel", None))
import MainWindows_rc
| Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'LicenseAgreements.ui'
#
# Created: Sat Jan 19 21:47:33 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Generated PyQt4 compatibility shims: fall back gracefully when the
# running PyQt4 lacks QString.fromUtf8 or the _encoding translate signature.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString.fromUtf8 unavailable: plain Python strings are used as-is
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # translate with an explicit encoding (older PyQt4 signature)
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # this PyQt4 has no UnicodeUTF8 attribute: translate without encoding
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_LicenseAgreementsForm(object):
def setupUi(self, LicenseAgreementsForm):
LicenseAgreementsForm.setObjectName(_fromUtf8("LicenseAgreementsForm"))
LicenseAgreementsForm.resize(427, 312)
self.verticalLayout_2 = QtGui.QVBoxLayout(LicenseAgreementsForm)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.licenseAgreementPromptLbl = QtGui.QLabel(LicenseAgreementsForm)
self.licenseAgreementPromptLbl.setObjectName(_fromUtf8("licenseAgreementPromptLbl"))
self.verticalLayout_2.addWidget(self.licenseAgreementPromptLbl)
self.licenseTabs = QtGui.QTabWidget(LicenseAgreementsForm)
self.licenseTabs.setObjectName(_fromUtf8("licenseTabs"))
self.installerLicenseTab = QtGui.QWidget()
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.installerLicenseTab.sizePolicy().hasHeightForWidth())
self.installerLicenseTab.setSizePolicy(sizePolicy)
self.installerLicenseTab.setObjectName(_fromUtf8("installerLicenseTab"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.installerLicenseTab)
self.horizontalLayout_2.setMargin(2)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.installerLicenseLayout = QtGui.QVBoxLayout()
self.installerLicenseLayout.setObjectName(_fromUtf8("installerLicenseLayout"))
self.installerLicenseTxt = QtGui.QTextEdit(self.installerLicenseTab)
self.installerLicenseTxt.setObjectName(_fromUtf8("installerLicenseTxt"))
self.installerLicenseLayout.addWidget(self.installerLicenseTxt)
self.horizontalLayout_2.addLayout(self.installerLicenseLayout)
self.licenseTabs.addTab(self.installerLicenseTab, _fromUtf8(""))
self.otherLicenseTab = QtGui.QWidget()
self.otherLicenseTab.setObjectName(_fromUtf8("otherLicenseTab"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.otherLicenseTab)
self.verticalLayout_5.setMargin(2)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.otherLicenseLayout = QtGui.QVBoxLayout()
self.otherLicenseLayout.setObjectName(_fromUtf8("otherLicenseLayout"))
self.otherLicenseTxt = QtGui.QTextEdit(self.otherLicenseTab)
self.otherLicenseTxt.setObjectName(_fromUtf8("otherLicenseTxt"))
self.otherLicenseLayout.addWidget(self.otherLicenseTxt)
self.verticalLayout_5.addLayout(self.otherLicenseLayout)
self.licenseTabs.addTab(self.otherLicenseTab, _fromUtf8(""))
self.verticalLayout_2.addWidget(self.licenseTabs)
self.radioBtnLayout = QtGui.QVBoxLayout()
self.radioBtnLayout.setObjectName(_fromUtf8("radioBtnLayout"))
self.acceptRadioBtn = QtGui.QRadioButton(LicenseAgreementsForm)
self.acceptRadioBtn.setEnabled(False)
self.acceptRadioBtn.setObjectName(_fromUtf8("acceptRadioBtn"))
self.radioBtnLayout.addWidget(self.acceptRadioBtn)
self.declineRadioBtn = QtGui.QRadioButton(LicenseAgreementsForm)
self.declineRadioBtn.setChecked(True)
self.declineRadioBtn.setObjectName(_fromUtf8("declineRadioBtn"))
self.radioBtnLayout.addWidget(self.declineRadioBtn)
self.verticalLayout_2.addLayout(self.radioBtnLayout)
self.buttonLayout = QtGui.QHBoxLayout()
self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
self.continueBtn = QtGui.QPushButton(LicenseAgreementsForm)
self.continueBtn.setEnabled(False)
self.continueBtn.setObjectName(_fromUtf8("continueBtn"))
self.buttonLayout.addWidget(self.continueBtn)
self.declineExitBtn = QtGui.QPushButton(LicenseAgreementsForm)
self.declineExitBtn.setObjectName(_fromUtf8("declineExitBtn"))
self.buttonLayout.addWidget(self.declineExitBtn)
self.verticalLayout_2.addLayout(self.buttonLayout)
self.retranslateUi(LicenseAgreementsForm)
self.licenseTabs.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(LicenseAgreementsForm)
    def retranslateUi(self, LicenseAgreementsForm):
        """Apply all user-visible strings to the form's widgets.

        pyuic-generated: edit the .ui file and regenerate rather than
        changing this method by hand, as regeneration overwrites it.
        """
        LicenseAgreementsForm.setWindowTitle(_translate("LicenseAgreementsForm", "Team 4067 - 2013 FRC Software Installer - License Agreements", None))
        self.licenseAgreementPromptLbl.setText(_translate("LicenseAgreementsForm", "<html><head/><body><p><span style=\" font-weight:600;\">License Agreements</span></p></body></html>", None))
        # First tab: installer license body (placeholder "Sample License
        # Agreement" rich text from the designer template).
        self.installerLicenseTxt.setHtml(_translate("LicenseAgreementsForm", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt; font-weight:600;\">Sample License Agreement</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">This is a License Agreement of intellectual property between:</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Toy Company Ltd, an USA Corporation doing business as The Toy Company Ltd, Their Address here, hereafter referred to as (the "COMPANY");</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">And</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">[Inventor],</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">(the “LICENSOR”)</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Grant:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The LICENSOR hereby grants to the COMPANY the exclusive license for the invented product</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">described in Schedule A (the "Licensed Item") and to use the know how to: manufacture, sell, market and distribute the Licensed Item. For consideration of the grant, COMPANY shall pay royalties of 5% of the sales price of the item. Conditions for renewal are timely payment of royalties in excess of $1.00 per quarter.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Payment of Royalties:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Company will pay royalties within 31 days of the end of each quarter covering goods / each Licensed Item shipped during the quarter. Accompanying each royalty payment will be a list of accounts, which purchased the Licensed Item during that quarter and the amount of each sale. In the event of late payment, the COMPANY will pay interest on such delinquent amount at the rate per annum equal to 20%. Royalties will not be due for accounts, which have filed for bankruptcy or are insolvent or have not paid outstanding invoices for over 120 days and adjustments for these amounts may be made on subsequent royalty payments.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Termination:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">In the event that COMPANY does not ship this item before Dec 31, 2999, all rights conveyed from this license to COMPANY are null and void and all rights revert back to OWNER. If the COMPANY should discontinue the manufacturing of the Licensed Item, or should the COMPANY terminate its business or enter into liquidation, then all rights to said Licensed Item shall revert back to the OWNER forthwith.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Warranties:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Company represents and warrants to the Licensor that: (i) this Agreement constitutes the legal, valid and binding obligation of the Company enforceable against the Company in accordance with its terms and (ii) products based upon the Licensed Item will be of good quality in design material and workmanship and will be manufactured, sold and distributed in accordance with applicable laws and regulations.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Inventor/Licensor warrants that the subject of the license is original work and is wholely owned concept by the inventor and indemnifies licensee against claims from competing claims of ownership to the intellectual property, which is the subject of this license.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Indemnity:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Company shall indemnify and hold the Licensor, Inventor and Owner harmless from any claim, action, proceeding or judgment and all costs associated with same.</span></p></body></html>", None))
        self.licenseTabs.setTabText(self.licenseTabs.indexOf(self.installerLicenseTab), _translate("LicenseAgreementsForm", "2013 FRC Software Installer License", None))
        # Second tab: labelled "National Instruments License" but currently
        # carries the same placeholder text ("Sample License Agreement 2").
        self.otherLicenseTxt.setHtml(_translate("LicenseAgreementsForm", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt; font-weight:600;\">Sample License Agreement 2</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">This is a License Agreement of intellectual property between:</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Toy Company Ltd, an USA Corporation doing business as The Toy Company Ltd, Their Address here, hereafter referred to as (the "COMPANY");</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">And</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">[Inventor],</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">(the “LICENSOR”)</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Grant:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The LICENSOR hereby grants to the COMPANY the exclusive license for the invented product</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">described in Schedule A (the "Licensed Item") and to use the know how to: manufacture, sell, market and distribute the Licensed Item. For consideration of the grant, COMPANY shall pay royalties of 5% of the sales price of the item. Conditions for renewal are timely payment of royalties in excess of $1.00 per quarter.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Payment of Royalties:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Company will pay royalties within 31 days of the end of each quarter covering goods / each Licensed Item shipped during the quarter. Accompanying each royalty payment will be a list of accounts, which purchased the Licensed Item during that quarter and the amount of each sale. In the event of late payment, the COMPANY will pay interest on such delinquent amount at the rate per annum equal to 20%. Royalties will not be due for accounts, which have filed for bankruptcy or are insolvent or have not paid outstanding invoices for over 120 days and adjustments for these amounts may be made on subsequent royalty payments.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Termination:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">In the event that COMPANY does not ship this item before Dec 31, 2999, all rights conveyed from this license to COMPANY are null and void and all rights revert back to OWNER. If the COMPANY should discontinue the manufacturing of the Licensed Item, or should the COMPANY terminate its business or enter into liquidation, then all rights to said Licensed Item shall revert back to the OWNER forthwith.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Warranties:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Company represents and warrants to the Licensor that: (i) this Agreement constitutes the legal, valid and binding obligation of the Company enforceable against the Company in accordance with its terms and (ii) products based upon the Licensed Item will be of good quality in design material and workmanship and will be manufactured, sold and distributed in accordance with applicable laws and regulations.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Inventor/Licensor warrants that the subject of the license is original work and is wholely owned concept by the inventor and indemnifies licensee against claims from competing claims of ownership to the intellectual property, which is the subject of this license.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Indemnity:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The Company shall indemnify and hold the Licensor, Inventor and Owner harmless from any claim, action, proceeding or judgment and all costs associated with same.</span></p></body></html>", None))
        self.licenseTabs.setTabText(self.licenseTabs.indexOf(self.otherLicenseTab), _translate("LicenseAgreementsForm", "National Instruments™ License", None))
        # NOTE(review): the "(20)" suffixes and the hard-coded "2" below look
        # like leftover template placeholders -- confirm against the .ui file.
        self.acceptRadioBtn.setText(_translate("LicenseAgreementsForm", "I accept the terms in all 2 license agreements (20)", None))
        self.declineRadioBtn.setText(_translate("LicenseAgreementsForm", "I do not accept the terms in the license agreements", None))
        self.continueBtn.setText(_translate("LicenseAgreementsForm", "&Continue (20)", None))
        self.declineExitBtn.setText(_translate("LicenseAgreementsForm", "Decline and E&xit", None))
| Python |
#!/usr/bin/env python
# Team 4067 FRC Tools Installer - easy installer for FRC Tools
# Copyright (C) 2013 River Hill HS Robotics Team (Albert H.)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PackageManager class - initialized from GUI
# Package dictionary format:
# self.Packages = { "package_name" : {
# "url" : "http://someplaceamazing.com/dir/bla",
# "md5" : "md5_checksum_goes_here",
# "sha512" : "sha512_hash_goes_here",
# "licenses" : {
# "company1" : "http://url/to/license.rtf",
# "company2" : "http://url/to/html_license.html"
# },
# "dependencies" : [ "dep1", "dep2" ],
# "install_steps" : [ "extract file.iso", "run file/bla.exe", "copy file dest", "move file dest", "del file" ]
# }
# Base URL the installer downloads package metadata and archives from,
# and the packages selected when the user makes no explicit choice.
DEFAULT_SERVER_URL = "http://riverhillrobotics.org/Resources/FRC2013/Software/"
DEFAULT_PACKAGE_LIST = [ "frc2013tools", "frc2013utilupdate", "frc2013dsupdate" ]
class PackageManagerException(Exception):
    """Raised for package-manager errors (missing server URL, unknown package).

    The offending value/message is kept in ``parameter`` for existing callers.
    """

    def __init__(self, value):
        # Fix: forward to Exception.__init__ so e.args is populated and the
        # exception reprs/pickles correctly; keep .parameter for compatibility.
        super(PackageManagerException, self).__init__(value)
        self.parameter = value

    def __str__(self):
        # repr() so string messages show up quoted, matching the original.
        return repr(self.parameter)
class PackageManager:
    """Fetches the package list from the server and validates selections.

    Attributes:
        ServerURL: base URL to fetch package metadata from (must be set
            before fetchPackageList is called).
        FetchedPackageList: True once the package list has been retrieved.
        Packages: mapping of package name -> package description dict
            (see the module header for the expected format).
        SelectedPackages: names of the packages the user wants installed.
    """

    def __init__(self):
        # Fix: original signature omitted `self`, so instantiation raised
        # TypeError.
        self.ServerURL = ""
        self.FetchedPackageList = False
        self.Packages = {}
        # Copy the module default so per-instance mutation does not alter
        # DEFAULT_PACKAGE_LIST for every other instance.
        self.SelectedPackages = list(DEFAULT_PACKAGE_LIST)

    def fetchPackageList(self):
        """Fetch the package list from ServerURL.

        Raises:
            PackageManagerException: if no server URL has been configured.
        """
        # Fix: `=` (assignment) -> `==` (comparison) and `throw` -> `raise`;
        # the original was not valid Python.
        if self.ServerURL == "":
            raise PackageManagerException("Tried to fetch the package list without setting a server URL")

    def processPackages(self):
        """Validate that every selected package exists in the package list.

        Fetches the package list first if that has not happened yet.

        Raises:
            PackageManagerException: if a selected package is unknown.
        """
        if not self.FetchedPackageList:
            self.fetchPackageList()
        for package in self.SelectedPackages:
            # Fix: `throw` -> `raise` (the original was not valid Python).
            if package not in self.Packages:
                raise PackageManagerException("Could not find package '"+package+"' in the available package list")
| Python |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import Options
# waf build-script boilerplate: source and build-output directories,
# plus the version waf reports for this module.
srcdir = '.'
blddir = 'build'
VERSION = '0.1'
def set_options(opt):
    """Register waf command-line options (only the C++ compiler set)."""
    opt.tool_options('compiler_cxx')
def configure(conf):
    """Detect the toolchain and record protobuf build flags.

    Requires the PROTOBUF environment variable to point at a protobuf
    installation prefix containing include/ and lib/ subdirectories;
    a KeyError is raised if it is unset.
    """
    conf.check_tool('compiler_cxx')
    conf.check_tool('node_addon')

    # Optimized builds for both C and C++ sources.
    conf.env.append_value('CCFLAGS', ['-O3'])
    conf.env.append_value('CXXFLAGS', ['-O3'])

    if Options.platform == 'darwin':
        # OS X: defer symbol resolution to load time so the addon can use
        # symbols exported by the node binary itself.
        conf.env.append_value('LINKFLAGS', ['-undefined', 'dynamic_lookup'])

    protobuf_prefix = os.environ['PROTOBUF']
    conf.env.append_value("CPPPATH_PROTOBUF", "%s/include" % protobuf_prefix)
    conf.env.append_value("LIBPATH_PROTOBUF", "%s/lib" % protobuf_prefix)
    conf.env.append_value("LIB_PROTOBUF", "protobuf")
def build(bld):
    """Declare the three build targets.

    protobuf_for_node comes as a library to link against for services
    and an addon to use for plain serialization; a third target builds
    the bundled example service.
    """
    # Core shared library that service addons link against.
    lib = bld.new_task_gen('cxx', 'shlib')
    lib.target = 'protobuf_for_node_lib'
    lib.source = 'protobuf_for_node.cc'
    lib.uselib = ['NODE', 'PROTOBUF']

    # Node addon exposing plain serialization.
    addon = bld.new_task_gen('cxx', 'shlib', 'node_addon')
    addon.target = 'protobuf_for_node'
    addon.source = 'addon.cc'
    addon.uselib = ['PROTOBUF']
    addon.uselib_local = 'protobuf_for_node_lib'

    # Example service.  If you build your own add-on that exports a
    # protobuf service, you will need to replace uselib_local with
    # uselib and point CPPPATH, LIBPATH and LIB to where you've
    # installed protobuf_for_node.
    example = bld.new_task_gen('cxx', 'shlib', 'node_addon')
    example.target = 'protoservice'
    example.source = ['example/protoservice.pb.cc', 'example/protoservice.cc']
    example.uselib = ['PROTOBUF']
    example.uselib_local = 'protobuf_for_node_lib'
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Adapted for freedmark by Kevin Read
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Displays a rotating torus using OpenGL.
This example demonstrates:
* Using a 3D projection on a window by overriding the default on_resize
handler
* Enabling multisampling if available
* Drawing a simple 3D primitive using vertex and index arrays
* Using a display list
* Fixed-pipeline lighting
'''
from math import pi, sin, cos
from pyglet.gl import *
from pyglet import clock
from pyglet import window
# Window creation happens at import/exec time: request a multisampled
# fullscreen window with vsync off so the loop below runs as an uncapped
# benchmark; drop the multisampling request if the driver refuses it.
try:
    # Try and create a window with multisampling (antialiasing)
    config = Config(sample_buffers=1, samples=4,
        depth_size=16, double_buffer=True,)
    w = window.Window(resizable=True, config=config)
    w.set_vsync(False)
    w.set_fullscreen (True)
except window.NoSuchConfigException:
    # Fall back to no multisampling for old hardware
    w = window.Window(resizable=True)
@w.event
def on_resize(width, height):
    """Install a 60-degree perspective projection for the new window size."""
    # Override the default on_resize handler to create a 3D projection
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # NOTE(review): a zero height would divide by zero here; pyglet
    # normally never delivers height == 0 -- confirm.
    gluPerspective(60., width / float(height), .1, 1000.)
    glMatrixMode(GL_MODELVIEW)
def setup():
    """One-time GL state: clear colour, depth test and two fixed lights."""
    # One-time GL setup
    glClearColor(1, 1, 1, 1)
    glColor3f(1, 0, 0)
    glEnable(GL_DEPTH_TEST)
    # Uncomment this line for a wireframe view
    #glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
    # Simple light setup.  On Windows GL_LIGHT0 is enabled by default,
    # but this is not the case on Linux or Mac, so remember to always
    # include it.
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHT1)
    # Define a simple function to create ctypes arrays of floats:
    def vec(*args):
        return (GLfloat * len(args))(*args)
    # Light 0: bluish specular; light 1: dim grey diffuse from the side.
    glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
    glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
    glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
    glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
    glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
    glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
    # Reddish material with a bright, fairly tight specular highlight.
    glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
    glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
class Torus(object):
    """Torus mesh compiled into a GL display list.

    Geometry (vertices, normals, triangle indices) is generated on the
    CPU once in __init__ and baked into a display list; draw() replays it.
    """
    def __init__(self, radius, inner_radius, slices, inner_slices):
        """Build the mesh and compile it.

        radius/inner_radius are the ring and tube radii; slices and
        inner_slices are the sample counts around each.
        """
        # Create the vertex and normal arrays.
        vertices = []
        normals = []
        # Angle steps of 2*pi/(n-1) make the last ring/segment coincide
        # with the first, closing the surface.
        u_step = 2 * pi / (slices - 1)
        v_step = 2 * pi / (inner_slices - 1)
        u = 0.
        for i in range(slices):
            cos_u = cos(u)
            sin_u = sin(u)
            v = 0.
            for j in range(inner_slices):
                cos_v = cos(v)
                sin_v = sin(v)
                # d: distance of this tube sample from the torus axis.
                d = (radius + inner_radius * cos_v)
                x = d * cos_u
                y = d * sin_u
                z = inner_radius * sin_v
                nx = cos_u * cos_v
                ny = sin_u * cos_v
                nz = sin_v
                vertices.extend([x, y, z])
                normals.extend([nx, ny, nz])
                v += v_step
            u += u_step
        # Create ctypes arrays of the lists
        vertices = (GLfloat * len(vertices))(*vertices)
        normals = (GLfloat * len(normals))(*normals)
        # Create a list of triangle indices.
        indices = []
        for i in range(slices - 1):
            for j in range(inner_slices - 1):
                # Two triangles per quad of adjacent ring/tube samples.
                p = i * inner_slices + j
                indices.extend([p, p + inner_slices, p + inner_slices + 1])
                indices.extend([p, p + 1, p + inner_slices + 1])
        indices = (GLuint * len(indices))(*indices)
        # Compile a display list
        self.list = glGenLists(1)
        glNewList(self.list, GL_COMPILE)
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_NORMAL_ARRAY)
        glVertexPointer(3, GL_FLOAT, 0, vertices)
        glNormalPointer(GL_FLOAT, 0, normals)
        glDrawElements(GL_TRIANGLES, len(indices), GL_UNSIGNED_INT, indices)
        glPopClientAttrib()
        glEndList()
    def draw(self):
        """Replay the pre-compiled display list."""
        glCallList(self.list)
# Benchmark driver (runs at import/exec time): spin the torus for up to
# 5 seconds or until the window is closed, then report average FPS.
setup()
torus = Torus(1, 0.3, 50, 30)
# Rotation angles (degrees) about the x/y/z axes.
rx = ry = rz = 0
# steps counts rendered frames; tt accumulates elapsed seconds.
steps = 0
tt = 0
while not w.has_exit and tt < 5:
    dt = clock.tick()
    # Per-axis angular velocities: 1, 80 and 30 degrees per second.
    rx += dt * 1
    ry += dt * 80
    rz += dt * 30
    rx %= 360
    ry %= 360
    rz %= 360
    w.dispatch_events()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glTranslatef(0, 0, -4)
    glRotatef(rz, 0, 0, 1)
    glRotatef(ry, 0, 1, 0)
    glRotatef(rx, 1, 0, 0)
    torus.draw()
    tt = tt + dt
    steps = steps + 1
    w.flip()
# Python 2 print statement: average frames per second over the run.
# NOTE(review): raises ZeroDivisionError if the loop never ran (tt == 0),
# e.g. the window was already closed -- confirm that is acceptable.
print float(steps) / tt
| Python |
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
# Mandelbrot shader test for freedmark.
# Created by Jonas Wagner, adapted for Geforce FX shader length restrictions by Kevin Read
# Original header:
###########################################################################
# #
# .--~*teu. .uef^" #
# dF 988Nx .xn!~%x. :d88E #
# d888b `8888\ x888 888. u . `888E #
# ?8888 98888F X8888 8888: us888u. .udR88N 888E .z8k #
# "**" x88888~ 88888 X8888 .@88 "8888" /888'888k 888E~?888L #
# d8888*` 88888 88888 9888 9888 9888 'Y" 888E 888E #
# z8**"` : `8888 :88888X 9888 9888 9888 888E 888E #
# :?..... ..F `"**~ 88888' 9888 9888 . 9888 888E 888E #
# /""888888888~ .xx. 88888 9888 9888 .@8c ?8888u../ 888E 888E #
# 8: "888888* '8888 8888~ "888*""888" '%888" "8888P' m888N= 888/ #
# "" "**"` 888" :88% ^Y" ^Y' ^* "P' `Y" 888 #
# ^"==="" J88" #
# glslmandelbrot.py ,---. ,@% #
# Description: renders the mandelbrot set on the gpu |'o o'| #
# Author: Jonas Wagner B=.| m |.=B #
# License: GNU GPL V3 or later `,-.´ #
# Website: http://29a.ch/ B=´ `=B #
# #
# Usage: #
# You can move arround by dragging with the left mouse button #
# You can zoom in and out with your mouse wheel #
# You can toggle the fullscreen mode with the F key #
# You can toggle the fps display with the F1 key #
# enjoy! #
# #
# Legal Foo #
# #
# Copyright (C) 2008 Jonas Wagner #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
###########################################################################
import ctypes as c
import pyglet
import pyglet.clock
import pyglet.window
from pyglet.window import key
from pyglet import gl
# GLSL sources kept as module-level constants; they are compiled and
# linked in MainWindow.__init__ via the Shader wrapper below.

# Vertex shader: maps the fullscreen quad's vertex coordinates into the
# complex plane (real/imag offset plus w/h extent) and passes the point
# to the fragment shader through the xpos/ypos varyings.
vertex_shader = """
uniform float real;
uniform float w;
uniform float imag;
uniform float h;
varying float xpos;
varying float ypos;
void main(void)
{
xpos = clamp(gl_Vertex.x, 0.0,1.0)*w+real;
ypos = clamp(gl_Vertex.y, 0.0,1.0)*h+imag;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
"""
# Fragment shader: escape-time Mandelbrot iteration -- at most 100 steps
# (iter advances by 0.01 up to 1.0), colouring by the normalized
# iteration count at escape.
# NOTE(review): the `zpos` varying is declared but never used.
fragment_shader = """
varying float xpos;
varying float ypos;
varying float zpos;
void main (void)
{
float iter = 0.0;
float max_square = 3.0;
float square = 0.0;
float r = 0.0;
float i = 0.0;
float rt = 0.0;
float it = 0.0;
while(iter < 1.0 && square < max_square)
{
rt = (r*r) - (i*i) + xpos;
it = (2.0 * r * i) + ypos;
r = rt;
i = it;
square = (r*r)+(i*i);
iter += 0.01;
}
gl_FragColor = vec4 (iter, iter, sin(iter*2.00), 1.0);
}
"""
class ShaderException(Exception):
    """Raised when GLSL compilation or program linking reports errors."""
    pass
class Shader(object):
    """Thin wrapper around an OpenGL 2.0 GLSL program.

    Compiles a vertex and a fragment shader, links them into a program
    object, and raises ShaderException carrying the driver's info log
    whenever compilation or linking reports anything.
    """
    def __init__(self, vertex_source, fragment_source):
        """Compile both sources and link them into self.program.

        Raises:
            ShaderException: if the program info log is non-empty after
                linking.
        """
        self.program = gl.glCreateProgram()
        self.vertex_shader = self.create_shader(vertex_source,
                                                gl.GL_VERTEX_SHADER)
        self.fragment_shader = self.create_shader(fragment_source,
                                                  gl.GL_FRAGMENT_SHADER)
        gl.glAttachShader(self.program, self.vertex_shader)
        gl.glAttachShader(self.program, self.fragment_shader)
        gl.glLinkProgram(self.program)
        message = self.get_program_log(self.program)
        if message:
            raise ShaderException(message)

    def create_shader(self, source, shadertype):
        """Compile *source* as *shadertype* and return the shader id.

        Raises:
            ShaderException: if the shader info log is non-empty.
        """
        # ctypes plumbing: glShaderSource expects a char** array of
        # sources (the NULL length argument means NUL-terminated).
        # get a char[]
        sbuffer = c.create_string_buffer(source)
        # get a char **
        pointer = c.cast(c.pointer(c.pointer(sbuffer)),
                         c.POINTER(c.POINTER(c.c_char)))
        # (removed an unused `nulll` local that the original built and
        # never passed anywhere)
        shader = gl.glCreateShader(shadertype)
        gl.glShaderSource(shader, 1, pointer, None)
        gl.glCompileShader(shader)
        message = self.get_shader_log(shader)
        if message:
            raise ShaderException(message)
        return shader

    def set_uniform_f(self, name, value):
        """Set the float uniform *name* on the (linked) program."""
        location = gl.glGetUniformLocation(self.program, name)
        gl.glUniform1f(location, value)

    def __setitem__(self, name, value):
        """pass a variable to the shader"""
        if isinstance(value, float):
            self.set_uniform_f(name, value)
        else:
            raise TypeError("Only floats are supported so far")

    def use(self):
        """Make this program the active one."""
        gl.glUseProgram(self.program)

    def stop(self):
        """Return to the fixed-function pipeline (program 0)."""
        gl.glUseProgram(0)

    def get_shader_log(self, shader):
        """Return the info log for a shader object."""
        return self.get_log(shader, gl.glGetShaderInfoLog)

    def get_program_log(self, shader):
        """Return the info log for a program object."""
        return self.get_log(shader, gl.glGetProgramInfoLog)

    def get_log(self, obj, func):
        """Fetch an info log via *func* (a glGet*InfoLog entry point).

        Returns the log as a byte string, empty when nothing was written
        into the 4096-byte buffer.
        """
        log_buffer = c.create_string_buffer(4096)
        buffer_pointer = c.cast(c.pointer(log_buffer), c.POINTER(c.c_char))
        written = c.c_int()
        func(obj, 4096, c.pointer(written), buffer_pointer)
        return log_buffer.value
class MainWindow(pyglet.window.Window):
    """Pyglet window that renders the Mandelbrot shader and zooms in.

    run() drives its own event/render loop instead of pyglet.app.run(),
    and prints timing statistics (Python 2 print statements) on exit.
    """
    def __init__(self):
        pyglet.window.Window.__init__(self, width=640, height=480,
            resizable=True)
        self.fps = pyglet.clock.ClockDisplay()
        # Compile/link the GLSL program from the module-level sources.
        self.shader = Shader(vertex_shader, fragment_shader)
        # Viewport into the complex plane: lower-left corner (real, imag)
        # and extent (w, h); starts covering roughly the whole set.
        self.real = -2.0
        self.w = 3.0
        self.imag = -1.0
        self.h = 2.0
        self.show_fps = False
        # Accumulated elapsed time and rendered frame count.
        self.dt = 0.0
        self.frames = 0
    def on_key_press(self, symbol, modifiers):
        """ESC quits, F toggles fullscreen, F1 toggles the FPS display."""
        if symbol == key.ESCAPE:
            self.has_exit = True
        elif symbol == key.F:
            self.set_fullscreen(not self.fullscreen)
        elif symbol == key.F1:
            self.show_fps = not self.show_fps
    def zoom_in (self):
        """Shrink the viewed region by 1% per frame, drifting its origin
        so the zoom stays centred on the same feature."""
        self.real += 0.008 * self.w
        self.w *= 0.99
        self.imag += 0.006 * self.h
        self.h *= 0.99
    def run(self):
        """Render/zoom until exit, a zoom limit, or 20 seconds elapse."""
        while not self.has_exit and self.w > 0.0022 and self.dt < 20.0:
            self.dispatch_events()
            self.zoom_in ()
            gl.glClear(gl.GL_COLOR_BUFFER_BIT)
            gl.glLoadIdentity()
            # Upload the current viewport rectangle as shader uniforms,
            # then draw one window-sized quad; the fragment shader does
            # the actual Mandelbrot iteration per pixel.
            self.shader.use()
            self.shader["real"] = self.real
            self.shader["w"] = self.w
            self.shader["imag"] = self.imag
            self.shader["h"] = self.h
            gl.glBegin(gl.GL_QUADS)
            gl.glVertex3f(0.0, 0.0, 0.0)
            gl.glVertex3f(0.0, self.height, 0.0)
            gl.glVertex3f(self.width, self.height, 0.0)
            gl.glVertex3f(self.width, 0.0, 0.0)
            gl.glEnd()
            self.shader.stop()
            self.dt += pyglet.clock.tick()
            self.frames = self.frames + 1
            if self.show_fps:
                self.fps.draw()
            self.flip()
        # Python 2 print statements: final zoom width, elapsed time, FPS.
        # NOTE(review): divides by self.dt -- zero only if the loop never
        # ran a full iteration; confirm that cannot happen in practice.
        print "W" + str (self.w)
        print "Time: " + str (self.dt)
        print "FPS: " + str (float(self.frames)/self.dt)
def main():
    """Create the window and run the zoom benchmark."""
    MainWindow().run()
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Adapted for freedmark by Kevin Read
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Displays a rotating torus using OpenGL.
This example demonstrates:
* Using a 3D projection on a window by overriding the default on_resize
handler
* Enabling multisampling if available
* Drawing a simple 3D primitive using vertex and index arrays
* Using a display list
* Fixed-pipeline lighting
'''
from math import pi, sin, cos
from pyglet.gl import *
from pyglet import clock
from pyglet import window
# Create the benchmark window; prefer a multisampled (antialiased) fullscreen
# config and fall back to a plain window on hardware without multisampling.
try:
    # Try and create a window with multisampling (antialiasing)
    config = Config(sample_buffers=1, samples=4,
        depth_size=16, double_buffer=True,)
    w = window.Window(resizable=True, config=config)
    w.set_vsync(False)
    w.set_fullscreen (True)
except window.NoSuchConfigException:
    # Fall back to no multisampling for old hardware
    w = window.Window(resizable=True)
@w.event
def on_resize(width, height):
    # Override the default on_resize handler to create a 3D projection
    # (60 degree vertical FOV, near plane 0.1, far plane 1000).
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(60., width / float(height), .1, 1000.)
    glMatrixMode(GL_MODELVIEW)
def setup():
    """One-time GL state: clear color, depth test, two lights, red material."""
    # One-time GL setup
    glClearColor(1, 1, 1, 1)
    glColor3f(1, 0, 0)
    glEnable(GL_DEPTH_TEST)
    # Uncomment this line for a wireframe view
    #glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
    # Simple light setup.  On Windows GL_LIGHT0 is enabled by default,
    # but this is not the case on Linux or Mac, so remember to always
    # include it.
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHT1)
    # Define a simple function to create ctypes arrays of floats:
    def vec(*args):
        return (GLfloat * len(args))(*args)
    glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
    glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
    glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
    glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
    glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
    glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
    glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
    glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
class Torus(object):
    """Torus mesh compiled into a GL display list.

    radius: distance from torus center to tube center; inner_radius: tube
    radius; slices/inner_slices: tessellation counts around the two circles.
    """
    def __init__(self, radius, inner_radius, slices, inner_slices):
        # Create the vertex and normal arrays.
        vertices = []
        normals = []
        # steps chosen so the last slice coincides with the first (u, v in [0, 2pi])
        u_step = 2 * pi / (slices - 1)
        v_step = 2 * pi / (inner_slices - 1)
        u = 0.
        for i in range(slices):
            cos_u = cos(u)
            sin_u = sin(u)
            v = 0.
            for j in range(inner_slices):
                cos_v = cos(v)
                sin_v = sin(v)
                # d: distance of this vertex from the torus axis
                d = (radius + inner_radius * cos_v)
                x = d * cos_u
                y = d * sin_u
                z = inner_radius * sin_v
                nx = cos_u * cos_v
                ny = sin_u * cos_v
                nz = sin_v
                vertices.extend([x, y, z])
                normals.extend([nx, ny, nz])
                v += v_step
            u += u_step
        # Create ctypes arrays of the lists
        vertices = (GLfloat * len(vertices))(*vertices)
        normals = (GLfloat * len(normals))(*normals)
        # Create a list of triangle indices.
        indices = []
        for i in range(slices - 1):
            for j in range(inner_slices - 1):
                p = i * inner_slices + j
                # two triangles per quad of the parameter grid
                indices.extend([p, p + inner_slices, p + inner_slices + 1])
                indices.extend([p, p + 1, p + inner_slices + 1])
        indices = (GLuint * len(indices))(*indices)
        # Compile a display list
        self.list = glGenLists(1)
        glNewList(self.list, GL_COMPILE)
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_NORMAL_ARRAY)
        glVertexPointer(3, GL_FLOAT, 0, vertices)
        glNormalPointer(GL_FLOAT, 0, normals)
        glDrawElements(GL_TRIANGLES, len(indices), GL_UNSIGNED_INT, indices)
        glPopClientAttrib()
        glEndList()
    def draw(self):
        """Draw the precompiled torus."""
        glCallList(self.list)
setup()
torus = Torus(1, 0.3, 50, 30)
rx = ry = rz = 0
steps = 0  # frames rendered
tt = 0  # elapsed wall-clock time
# Benchmark loop: rotate the torus for up to 5 seconds, then report FPS.
while not w.has_exit and tt < 5:
    dt = clock.tick()
    # per-axis angular velocities (degrees/second), wrapped to [0, 360)
    rx += dt * 1
    ry += dt * 80
    rz += dt * 30
    rx %= 360
    ry %= 360
    rz %= 360
    w.dispatch_events()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glTranslatef(0, 0, -4)
    glRotatef(rz, 0, 0, 1)
    glRotatef(ry, 0, 1, 0)
    glRotatef(rx, 1, 0, 0)
    torus.draw()
    tt = tt + dt
    steps = steps + 1
    w.flip()
# average frames per second over the run
print float(steps) / tt
| Python |
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
# Mandelbrot shader test for freedmark.
# Created by Jonas Wagner, adapted for Geforce FX shader length restrictions by Kevin Read
# Original header:
###########################################################################
# #
# .--~*teu. .uef^" #
# dF 988Nx .xn!~%x. :d88E #
# d888b `8888\ x888 888. u . `888E #
# ?8888 98888F X8888 8888: us888u. .udR88N 888E .z8k #
# "**" x88888~ 88888 X8888 .@88 "8888" /888'888k 888E~?888L #
# d8888*` 88888 88888 9888 9888 9888 'Y" 888E 888E #
# z8**"` : `8888 :88888X 9888 9888 9888 888E 888E #
# :?..... ..F `"**~ 88888' 9888 9888 . 9888 888E 888E #
# /""888888888~ .xx. 88888 9888 9888 .@8c ?8888u../ 888E 888E #
# 8: "888888* '8888 8888~ "888*""888" '%888" "8888P' m888N= 888/ #
# "" "**"` 888" :88% ^Y" ^Y' ^* "P' `Y" 888 #
# ^"==="" J88" #
# glslmandelbrot.py ,---. ,@% #
# Description: renders the mandelbrot set on the gpu |'o o'| #
# Author: Jonas Wagner B=.| m |.=B #
# License: GNU GPL V3 or later `,-.´ #
# Website: http://29a.ch/ B=´ `=B #
# #
# Usage: #
# You can move arround by dragging with the left mouse button #
# You can zoom in and out with your mouse wheel #
# You can toggle the fullscreen mode with the F key #
# You can toggle the fps display with the F1 key #
# enjoy! #
# #
# Legal Foo #
# #
# Copyright (C) 2008 Jonas Wagner #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
###########################################################################
import ctypes as c
import pyglet
import pyglet.clock
import pyglet.window
from pyglet.window import key
from pyglet import gl
vertex_shader = """
uniform float real;
uniform float w;
uniform float imag;
uniform float h;
varying float xpos;
varying float ypos;
void main(void)
{
xpos = clamp(gl_Vertex.x, 0.0,1.0)*w+real;
ypos = clamp(gl_Vertex.y, 0.0,1.0)*h+imag;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
"""
fragment_shader = """
varying float xpos;
varying float ypos;
varying float zpos;
void main (void)
{
float iter = 0.0;
float max_square = 3.0;
float square = 0.0;
float r = 0.0;
float i = 0.0;
float rt = 0.0;
float it = 0.0;
while(iter < 1.0 && square < max_square)
{
rt = (r*r) - (i*i) + xpos;
it = (2.0 * r * i) + ypos;
r = rt;
i = it;
square = (r*r)+(i*i);
iter += 0.01;
}
gl_FragColor = vec4 (iter, iter, sin(iter*2.00), 1.0);
}
"""
class ShaderException(Exception):
    """Raised when a shader fails to compile or a program fails to link."""
    pass
class Shader(object):
    """Wrapper to create opengl 2.0 shader programms.

    Compiles a vertex and a fragment shader, links them into one program and
    raises ShaderException (carrying the GL info log) on any compile or link
    problem.
    """
    def __init__(self, vertex_source, fragment_source):
        self.program = gl.glCreateProgram()
        self.vertex_shader = self.create_shader(vertex_source,
            gl.GL_VERTEX_SHADER)
        self.fragment_shader = self.create_shader(fragment_source,
            gl.GL_FRAGMENT_SHADER)
        gl.glAttachShader(self.program, self.vertex_shader)
        gl.glAttachShader(self.program, self.fragment_shader)
        gl.glLinkProgram(self.program)
        # a non-empty program log indicates a link problem
        message = self.get_program_log(self.program)
        if message:
            raise ShaderException(message)
    def create_shader(self, source, shadertype):
        """Compile *source* as a shader of *shadertype*; return the shader id.

        Raises ShaderException when the shader info log is non-empty.
        """
        # glShaderSource wants a char** (array of source strings)
        sbuffer = c.create_string_buffer(source)
        pointer = c.cast(c.pointer(c.pointer(sbuffer)),
                c.POINTER(c.POINTER(c.c_char)))
        shader = gl.glCreateShader(shadertype)
        # length array is None, so the source is treated as NUL-terminated
        # (the previous unused 'nulll' long* placeholder has been removed)
        gl.glShaderSource(shader, 1, pointer, None)
        gl.glCompileShader(shader)
        message = self.get_shader_log(shader)
        if message:
            raise ShaderException(message)
        return shader
    def set_uniform_f(self, name, value):
        """Set the float uniform *name* on the linked program."""
        location = gl.glGetUniformLocation(self.program, name)
        gl.glUniform1f(location, value)
    def __setitem__(self, name, value):
        """pass a variable to the shader"""
        # only float uniforms are implemented so far
        if isinstance(value, float):
            self.set_uniform_f(name, value)
        else:
            raise TypeError("Only floats are supported so far")
    def use(self):
        """Activate the shader program for subsequent draw calls."""
        gl.glUseProgram(self.program)
    def stop(self):
        """Deactivate the program (program id 0 = fixed-function pipeline)."""
        gl.glUseProgram(0)
    def get_shader_log(self, shader):
        """Return the compile/info log of a shader object."""
        return self.get_log(shader, gl.glGetShaderInfoLog)
    def get_program_log(self, shader):
        """Return the link/info log of a program object (*shader* is a program id)."""
        return self.get_log(shader, gl.glGetProgramInfoLog)
    def get_log(self, obj, func):
        """Fetch up to 4096 bytes of GL info log for *obj* via *func*."""
        log_buffer = c.create_string_buffer(4096)
        buffer_pointer = c.cast(c.pointer(log_buffer), c.POINTER(c.c_char))
        written = c.c_int()
        func(obj, 4096, c.pointer(written), buffer_pointer)
        return log_buffer.value
class MainWindow(pyglet.window.Window):
    """640x480 resizable window that renders a zooming Mandelbrot via the shader."""
    def __init__(self):
        pyglet.window.Window.__init__(self, width=640, height=480,
                resizable=True)
        self.fps = pyglet.clock.ClockDisplay()
        self.shader = Shader(vertex_shader, fragment_shader)
        # complex-plane viewport: (real, imag) lower-left corner, (w, h) extent
        self.real = -2.0
        self.w = 3.0
        self.imag = -1.0
        self.h = 2.0
        self.show_fps = False
        self.dt = 0.0  # accumulated wall-clock time
        self.frames = 0  # rendered frame count
    def on_key_press(self, symbol, modifiers):
        # ESC quits, F toggles fullscreen, F1 toggles the FPS display
        if symbol == key.ESCAPE:
            self.has_exit = True
        elif symbol == key.F:
            self.set_fullscreen(not self.fullscreen)
        elif symbol == key.F1:
            self.show_fps = not self.show_fps
    def zoom_in (self):
        # shrink the viewport 1% per frame, drifting toward a fixed point
        self.real += 0.008 * self.w
        self.w *= 0.99
        self.imag += 0.006 * self.h
        self.h *= 0.99
    def run(self):
        # Benchmark loop: stop on exit request, deep zoom, or after 20 seconds.
        while not self.has_exit and self.w > 0.0022 and self.dt < 20.0:
            self.dispatch_events()
            self.zoom_in ()
            gl.glClear(gl.GL_COLOR_BUFFER_BIT)
            gl.glLoadIdentity()
            self.shader.use()
            # feed the current viewport to the vertex-shader uniforms
            self.shader["real"] = self.real
            self.shader["w"] = self.w
            self.shader["imag"] = self.imag
            self.shader["h"] = self.h
            # full-window quad; the fragment shader does the actual work
            gl.glBegin(gl.GL_QUADS)
            gl.glVertex3f(0.0, 0.0, 0.0)
            gl.glVertex3f(0.0, self.height, 0.0)
            gl.glVertex3f(self.width, self.height, 0.0)
            gl.glVertex3f(self.width, 0.0, 0.0)
            gl.glEnd()
            self.shader.stop()
            self.dt += pyglet.clock.tick()
            self.frames = self.frames + 1
            if self.show_fps:
                self.fps.draw()
            self.flip()
        # final benchmark report
        print "W" + str (self.w)
        print "Time: " + str (self.dt)
        print "FPS: " + str (float(self.frames)/self.dt)
def main():
    """Create the window and run the zoom benchmark."""
    MainWindow().run()
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a Google
App Engine project. Remember to fill in the OAuth 2.0 client_id and
client_secret which can be obtained from the Developer Console
<https://code.google.com/apis/console/>
"""
__author__ = 'Wolff Dobson'
import settings
import cgi
import httplib2
import logging
import os
import pickle
import urllib
from apiclient.discovery import build
from oauth2client.appengine import OAuth2Decorator
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
# The client_id and client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>
decorator = OAuth2Decorator(
    client_id=settings.CLIENT_ID,
    client_secret=settings.CLIENT_SECRET,
    scope = 'https://www.googleapis.com/auth/plus.me' )
# httplib2 clients backed by App Engine memcache
http = httplib2.Http(memcache)
httpUnauth = httplib2.Http(memcache)
# Get discovery document
# NOTE(review): discovery_doc is fetched but never used below - confirm the
# fetch is still needed.
ul = urllib.urlopen(settings.DISCOVERY_DOCUMENT)
discovery_doc = ul.read()
ul.close()
service = build("plus", "v1", http=http)
# NOTE(review): built with 'http', not 'httpUnauth'; the handlers pass
# httpUnauth to execute(), which is what actually performs the requests.
serviceUnauth = build("plus", "v1", http=http, developerKey=settings.API_KEY)
class WelcomeHandler(webapp.RequestHandler):
    """Redirects the root URL to the /play page."""
    def get(self):
        # NOTE(review): request.body is empty on a GET, so this redirects to
        # '/play'; prefixing the body looks unintentional - confirm.
        self.redirect(self.request.body + '/play')
class PlayHandler(webapp.RequestHandler):
    """Renders play.html with the signed-in user's profile and public activity."""
    @decorator.oauth_aware
    def get(self):
        # send unauthenticated visitors to the login page
        if (not decorator.has_credentials()):
            self.redirect(self.request.body + '/login')
            return
        http = decorator.http()
        # NOTE(review): this fetch duplicates the 'me' fetch below and is only
        # used for logging - confirm whether it can be dropped.
        people = service.people().get(userId='me').execute(http)
        import pprint
        logging.info(pprint.pformat(people))
        path = os.path.join(os.path.dirname(__file__), 'play.html')
        me = service.people().get(userId='me').execute(decorator.http())
        # Now I have my own id, I can do things unauth'd
        # I could continue using my authenticated service,
        # but for example we'll use a second unauth'd one.
        activities_doc = serviceUnauth.activities().list(userId=me['id'], collection='public').execute(httpUnauth)
        activities = []
        if 'items' in activities_doc:
            activities += activities_doc['items']
        top_activity_content = "No top activity content"
        if len(activities) > 0:
            # refetch the newest activity individually to get its content
            activities_doc = serviceUnauth.activities().get(activityId=activities[0]['id']).execute(httpUnauth)
            top_activity_content = activities_doc['object']['content']
        self.response.out.write(
            template.render(path, {'me': me, 'activities': activities,
                'top_activity_content': top_activity_content}))
class LoginHandler(webapp.RequestHandler):
    """Shows login.html; a POST completes the OAuth flow and goes to /play."""
    @decorator.oauth_aware
    def get(self):
        # already authorized - skip straight to the play page
        if ( decorator.has_credentials() ):
            self.redirect(self.request.body + '/play')
            return
        path = os.path.join(os.path.dirname(__file__), 'login.html')
        self.response.out.write(
            template.render(path, {}))
        return
    @decorator.oauth_required
    def post(self):
        # oauth_required has run the flow by the time we get here
        self.redirect(self.request.body + '/play')
def main():
    """Wire up the URL routes and run the WSGI application."""
    application = webapp.WSGIApplication(
        [
            ('/', WelcomeHandler),
            ('/play', PlayHandler),
            ('/login', LoginHandler),
        ],
        debug=True)
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
import os
# 1. Go to:
# https://code.google.com/apis/console
# 2. select choose your project.
# 3. Choose 'API Access'
# 4. If you have not generated a client id, do so.
# 5. Make your callback:
# http://localhost:8080
# Placeholders - replace with the values from the API Access tab (steps above).
CLIENT_ID = "YOUR_CLIENT_ID"
CLIENT_SECRET = "YOUR_CLIENT_SECRET"
API_KEY = "YOUR_API_KEY"
# Discovery document URL for the Google+ v1 REST API.
DISCOVERY_DOCUMENT = "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest"
| Python |
import os
# 1. Go to:
# https://code.google.com/apis/console
# 2. select choose your project.
# 3. Choose 'API Access'
# 4. If you have not generated a client id, do so.
# 5. Make your callback:
# http://localhost:8080
# Placeholders - fill in from the API Access tab (steps above); code that reads
# these is expected to reject None values.
CLIENT_ID = None
CLIENT_SECRET = None
API_KEY = None
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Wolff Dobson'
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from oauth2client.file import Storage
import apiclient.discovery
import httplib2
import pprint
import os.path
import settings
import time
import urllib
def authorize_self(client_id=None, client_secret=None):
    """Run the OAuth2 installed-app flow and return credentials.

    Credentials are cached in 'plus_auth.dat'; delete that file to redo the
    OAuth2 dance.  Raises Exception when client_id or client_secret is missing.
    """
    # BUG FIX: the defaults used to be the *string* 'None', so the missing-
    # config check below could never fire when called with no arguments.
    if client_id is None or client_secret is None:
        raise Exception('Please register at the API Console at: https://code.google.com/apis/console. See README.txt for details!')
    flow = OAuth2WebServerFlow(
        client_id=client_id,
        client_secret=client_secret,
        scope='https://www.googleapis.com/auth/plus.me',
        user_agent='google-api-client-python-plus-cmdline/1.0',
        xoauth_displayname='Google Plus Client Example App'
    )
    #Remove this file if you want to do the OAuth2 dance again!
    credentials_file = 'plus_auth.dat'
    storage = Storage(credentials_file)
    # reuse cached credentials when present, otherwise run the browser flow
    if os.path.exists(credentials_file):
        credentials = storage.get()
    else:
        credentials = run(flow, storage)
    return credentials
def build_service(credentials, http, api_key=None):
    """Build a Google+ v1 service object.

    When *credentials* is given, *http* is wrapped so requests are authorized;
    otherwise the optional *api_key* is used for unauthenticated access.
    """
    if credentials is not None:
        http = credentials.authorize(http)
    return apiclient.discovery.build('plus', 'v1', http=http,
                                     developerKey=api_key)
def main():
    """Fetch the authorized user's profile, then list public activities unauthenticated."""
    http = httplib2.Http()
    credentials = authorize_self(settings.CLIENT_ID,settings.CLIENT_SECRET)
    service = build_service(credentials,http)
    person = service.people().get(userId='me').execute(http)
    print "Got your ID: " + person['displayName']
    # Now, we can continue on unauthorized
    # I could continue using my authenticated service, of course
    # but for example we'll use a second unauth'd one
    httpUnauth = httplib2.Http()
    serviceUnauth = build_service(None, httpUnauth, settings.API_KEY)
    activities_doc = serviceUnauth.activities().list(userId=person['id'],collection='public').execute(httpUnauth)
    activities = []
    npt = None
    if 'items' in activities_doc:
        activities = activities_doc[ 'items' ]
        print "Retrieved %d activities" % len(activities_doc['items'])
        npt = activities_doc['nextPageToken']
    while ( npt != None ):
        # NOTE(review): this request does not pass the page token, so it
        # refetches the same first page; the unchanged-token check below then
        # breaks out after one redundant round trip - confirm whether a
        # pageToken argument was intended here.
        activities_doc = serviceUnauth.activities().list(userId=person['id'],collection='public').execute(httpUnauth)
        if 'items' in activities_doc:
            activities += activities_doc['items']
            print "Retrieved %d more activities" % len(activities_doc['items'])
        if not 'nextPageToken' in activities_doc or activities_doc['nextPageToken'] == npt:
            # NOTE(review): bare string expression has no effect - presumably a
            # missing 'print' in front of "---Done".
            "---Done"
            break
        npt = activities_doc['nextPageToken']
    print "----------------\nPublic activities count:", len(activities)
    print
    if len(activities) > 0:
        for item in activities:
            print ' activity\t', item['object']['content'][:40], item['id']
        # Now, ask for the first item on the list
        top_activity = serviceUnauth.activities().get(activityId=activities[0]['id']).execute(httpUnauth)
        print '\n\ntop activity: ' + top_activity['id'] + ': ' + top_activity['object']['content']
    print '\n\nSUCCESS: Everything worked'
if __name__=='__main__':
    main()
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000
PIXELS_PER_LINE = 14
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 300
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
# "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
special_colors = [
# ("nautilus_window_size_allocate", (1, 1, 1)),
# ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Return the (r, g, b) of the first special_colors entry whose substring
    occurs in *string*, or None when no entry matches."""
    for substring, rgb in special_colors:
        if substring in string:
            return rgb
    return None
def string_has_substrings (string, substrings):
    """Return True when any element of *substrings* occurs in *string*."""
    return any(needle in string for needle in substrings)
# assumes "strace -ttt -f"
# A MARK is a fake access(2) call whose path encodes a user-defined log string;
# group indices below name the capture groups of each pattern.
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# 3273  1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", "--sh-syntax", "--exit-with-session", "/usr/X11R6/bin/gnome"], [/* 61 vars */]) = 0
# 3275  1141862704.003623 execve("/home/devel/bin/dbus-daemon", ["dbus-daemon", "--fork", "--print-pid", "8", "--print-address", "6", "--session"], [/* 61 vars */]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# 3283  1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", ["/opt/gnome/lib/GConf/2/gconf-san"...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# 3283  1141862704.598704 <... execve resumed> )  = 0
# 3309  1141862707.027481 <... execve resumed> )  = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
# strace prints "= 0" for a successful execve
success_result = "0"
class BaseMark:
    """One timeline mark: a timestamp plus its log text.

    The y coordinates are filled in later by compute_syscall_metrics().
    """
    colors = (0, 0, 0)
    def __init__(self, timestamp, log):
        self.log = log
        self.timestamp = timestamp
        self.log_ypos = 0
        self.timestamp_ypos = 0
class AccessMark(BaseMark):
    """Mark for a MARK: access() line; its color is assigned by the parser."""
    pass
class LastMark(BaseMark):
    """Sentinel mark (log text 'last'): plotting stops here.  Drawn red."""
    colors = 1.0, 0, 0
class FirstMark(BaseMark):
    """Sentinel mark (log text 'first'): collection restarts here.  Drawn red."""
    colors = 1.0, 0, 0
class ExecMark(BaseMark):
    """Mark for an execve() call; labeled 'execve: <basename>' and drawn red."""
    colors = (1.0, 0.0, 0.0)
    def __init__(self, timestamp, log, is_complete, is_resumed):
        # is_complete/is_resumed are accepted from the parser but do not
        # change the label
        label = 'execve: ' + os.path.basename(log)
        BaseMark.__init__(self, timestamp, label)
class Metrics:
    """Plot canvas dimensions in pixels (filled in by compute_syscall_metrics)."""
    def __init__(self):
        self.height = 0
        self.width = 0
# don't use black or red
# Per-program colors; the second half mirrors the first with inverted RGB.
palette = [
    (0.12, 0.29, 0.49),
    (0.36, 0.51, 0.71),
    (0.75, 0.31, 0.30),
    (0.62, 0.73, 0.38),
    (0.50, 0.40, 0.63),
    (0.29, 0.67, 0.78),
    (0.96, 0.62, 0.34),
    (1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
    (1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
    (1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
    (1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
    (1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
    (1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
    (1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
    ]
class SyscallParser:
    """Incremental parser for 'strace -ttt -f' output.

    Feed raw log lines to add_line(); recognized MARK accesses and execve
    events are appended to self.syscalls as BaseMark subclass instances.
    """
    def __init__ (self):
        # execve calls seen as '<unfinished ...>' and not yet resumed:
        # list of (pid, timestamp, command) tuples
        self.pending_execs = []
        self.syscalls = []
    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) for *search_pid*, or (None, None, None)."""
        n = len (self.pending_execs)
        for i in range (n):
            (pid, timestamp, command) = self.pending_execs[i]
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)
    def add_line (self, str):
        """Parse one strace line; lines matching no known pattern are ignored."""
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            # 'last'/'first' are sentinel marks bounding the plotted range
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # stable per-program color chosen from the palette
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        m = complete_exec_regex.search (str)
        if m:
            # single-line execve: only record it when it succeeded
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        m = unfinished_exec_regex.search (str)
        if m:
            # remember it; the matching 'resumed' line carries the result
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            # self.syscalls.append (ExecMark (timestamp, command, False, False))
            return
        m = resumed_exec_regex.search (str)
        if m:
            # pair the resumed line with its pending '<unfinished ...>' entry
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            if index == None:
                print "Didn't find pid %s in pending_execs!" % pid
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an strace log (taken with 'strace -ttt -f') into a list of marks.

    Returns the parser's syscalls list: BaseMark subclass instances in file
    order.
    """
    parser = SyscallParser ()
    # Use open() in a context manager so the handle is closed (the old version
    # used the Py2 file() builtin and leaked it).  The old 'if line == ""'
    # break was dead code: iterating a file never yields an empty string,
    # since every yielded line contains at least its newline.
    with open(filename, "r") as log:
        for line in log:
            parser.add_line (line)
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all marks in place so the first event happens at time zero."""
    origin = syscalls[0].timestamp
    for mark in syscalls:
        mark.timestamp -= origin
def compute_syscall_metrics(syscalls):
    """Size the canvas and assign each mark its two y coordinates.

    timestamp_ypos places the mark on the (seconds-scaled) time axis;
    log_ypos stacks the log texts one line apart.  Returns a Metrics.
    """
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    # tall enough for both the time axis and the stacked log lines
    whole_seconds = int(math.ceil(syscalls[-1].timestamp))
    metrics.height = max(whole_seconds * PIXELS_PER_SECOND,
                         len(syscalls) * PIXELS_PER_LINE)
    for index, mark in enumerate(syscalls):
        mark.timestamp_ypos = mark.timestamp * PIXELS_PER_SECOND
        mark.log_ypos = index * PIXELS_PER_LINE + FONT_SIZE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one gray tick plus an '<n> s' label per second down the left edge."""
    # number of whole seconds, rounded up
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) / PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # the 0.5 offset centers the 1px hairline on the pixel grid
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one mark: a leader line from its time position to its log text."""
    ctx.set_source_rgb(*syscall.colors)
    # Line: horizontal stub at the timestamp, diagonal over to the text column
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.line_to(LOG_TEXT_XPOS, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.stroke()
    # Log text
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render the time scale and all marks onto a new cairo surface.

    Returns the cairo.ImageSurface, sized from *metrics*.
    (Removed the unused 'num_syscalls' local.)
    """
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Background
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    # Time scale
    plot_time_scale(surface, ctx, metrics)
    # Contents
    ctx.set_line_width(1.0)
    for syscall in syscalls:
        plot_syscall(surface, ctx, syscall)
    return surface
def autocrop(file, bgcolor):
    """Crop the image at path *file* to its non-*bgcolor* bounding box, in place.

    Returns None; an all-background image is left untouched on disk.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    # background-colored pixels diff to (0,0,0) and are ignored by getbbox()
    bg = Image.new("RGB", im.size, bgcolor)
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if not bbox:
        return None # no contents
    im = im.crop(bbox)
    im.save(file)
def main(args):
    """Command-line entry point: strace log in, cropped PNG timeline out.

    NOTE(review): the *args* parameter is unused; optparse reads sys.argv itself.
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
        "--output", dest="output",
        metavar="FILE",
        help="Name of output file (output is a PNG file)")
    options, args = option_parser.parse_args()
    if not options.output:
        print 'Please specify an output filename with "-o file.png" or "--output=file.png".'
        return 1
    if len(args) != 1:
        print 'Please specify only one input filename, which is an strace log taken with "strace -ttt -f"'
        return 1
    in_filename = args[0]
    out_filename = options.output
    syscalls = []
    # keep only the marks after the last FirstMark and before the first LastMark
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print 'No marks in %s, add access("MARK: ...", F_OK)' % in_filename
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    # trim uniform background so the image hugs its contents
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000
PIXELS_PER_LINE = 14
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 300
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
# "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
special_colors = [
# ("nautilus_window_size_allocate", (1, 1, 1)),
# ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Return the (r, g, b) color configured for *string*, or None.

    Scans the module-level ``special_colors`` list and returns the color
    of the first entry whose substring occurs in *string*.
    """
    for substring, color in special_colors:
        if substring in string:
            return color
    return None
def string_has_substrings (string, substrings):
    """Return True if any element of *substrings* occurs within *string*."""
    return any (sub in string for sub in substrings)
# assumes "strace -ttt -f"
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# 3273 1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", "--sh-syntax", "--exit-with-session", "/usr/X11R6/bin/gnome"], [/* 61 vars */]) = 0
# 3275 1141862704.003623 execve("/home/devel/bin/dbus-daemon", ["dbus-daemon", "--fork", "--print-pid", "8", "--print-address", "6", "--session"], [/* 61 vars */]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# 3283 1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", ["/opt/gnome/lib/GConf/2/gconf-san"...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# 3283 1141862704.598704 <... execve resumed> ) = 0
# 3309 1141862707.027481 <... execve resumed> ) = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
success_result = "0"
class BaseMark:
    """A single event on the timeline plot.

    Subclasses override the ``colors`` class attribute to control how
    the event is drawn.
    """
    colors = (0, 0, 0)  # default marker color (r, g, b): black

    def __init__(self, timestamp, log):
        self.timestamp = timestamp  # seconds, float (strace -ttt format)
        self.log = log              # text shown next to the marker
        # Vertical positions on the plot; assigned later by
        # compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    """Mark produced by an access("MARK: ...") log line; inherits everything."""
class LastMark(BaseMark):
    """Mark whose text is 'last'; main() stops collecting events here."""
    colors = (1.0, 0, 0)  # red
class FirstMark(BaseMark):
    """Mark whose text is 'first'; main() discards everything before it."""
    colors = (1.0, 0, 0)  # red
class ExecMark(BaseMark):
    """Mark for an execve() call found in the strace log.

    The *is_complete* / *is_resumed* flags distinguish the execve log
    variants the parser recognizes, but currently do not change the
    rendered text.
    """
    colors = (1.0, 0.0, 0.0)  # red, like the first/last marks

    def __init__(self, timestamp, log, is_complete, is_resumed):
        # Only the basename of the executed program is shown.
        label = 'execve: ' + os.path.basename(log)
        BaseMark.__init__(self, timestamp, label)
class Metrics:
    """Pixel dimensions of the plot, computed from the parsed syscalls."""

    def __init__(self):
        self.width = 0   # plot width in pixels
        self.height = 0  # plot height in pixels
# don't use black or red
palette = [
(0.12, 0.29, 0.49),
(0.36, 0.51, 0.71),
(0.75, 0.31, 0.30),
(0.62, 0.73, 0.38),
(0.50, 0.40, 0.63),
(0.29, 0.67, 0.78),
(0.96, 0.62, 0.34),
(1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
(1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
(1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
(1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
(1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
(1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
(1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
]
class SyscallParser:
    """Incremental parser for "strace -ttt -f" logs.

    Feed log lines to add_line(); recognized events accumulate in
    ``self.syscalls`` as BaseMark subclasses.
    """

    def __init__ (self):
        # execve calls seen as "<unfinished ...>" and not yet resumed,
        # stored as (pid, timestamp, command) tuples.
        self.pending_execs = []
        self.syscalls = []

    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) of the pending execve for
        *search_pid*, or (None, None, None) if there is none."""
        for i, (pid, timestamp, command) in enumerate (self.pending_execs):
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)

    def add_line (self, str):
        """Parse one strace log line, appending a mark when it matches.

        NOTE(review): the parameter shadows the str() builtin; the name
        is kept so keyword callers keep working.
        """
        # access("MARK: ...") lines inserted by the traced program.
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # Color each program consistently by hashing its name.
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        # execve fully contained in a single log line.
        m = complete_exec_regex.search (str)
        if m:
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        # execve reported as "<unfinished ...>"; remember it until resumed.
        m = unfinished_exec_regex.search (str)
        if m:
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            return
        # "<... execve resumed>" line closing a pending execve.
        m = resumed_exec_regex.search (str)
        if m:
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            # Fixed "index == None" identity test; print() form also keeps
            # this line valid under both Python 2 and Python 3.
            if index is None:
                print ("Didn't find pid %s in pending_execs!" % pid)
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an "strace -ttt -f" log file and return the list of marks."""
    parser = SyscallParser ()
    # open() replaces the Python-2-only file() builtin; try/finally makes
    # sure the log file is closed even if parsing raises.
    f = open (filename, "r")
    try:
        for line in f:
            # (The old 'if line == "": break' guard was dead code: file
            # iteration never yields an empty string.)
            parser.add_line (line)
    finally:
        f.close ()
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps in place so the first syscall starts at 0."""
    origin = syscalls[0].timestamp
    for mark in syscalls:
        mark.timestamp = mark.timestamp - origin
def compute_syscall_metrics(syscalls):
    """Assign each mark its y positions and return the plot Metrics.

    Each mark gets a timestamp-proportional position for its marker and
    an evenly spaced position for its log text line.
    """
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    # Tall enough for both the full time span and one text line per mark.
    num_seconds = int(math.ceil(syscalls[-1].timestamp))
    metrics.height = max(num_seconds * PIXELS_PER_SECOND,
                         len(syscalls) * PIXELS_PER_LINE)
    for line_no, syscall in enumerate(syscalls):
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = line_no * PIXELS_PER_LINE + FONT_SIZE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one tick line and an "<n> s" label per second down the left edge.

    *surface* is unused but kept for signature symmetry with the other
    plot_* helpers.
    """
    # Ceiling division; // keeps the result an int under Python 3, where
    # plain / would yield a float and break range().
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) // PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centers the 1px-wide line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one mark: a connector line from its time position to its text."""
    ctx.set_source_rgb(*syscall.colors)
    text_y = syscall.log_ypos - FONT_SIZE / 2 + 0.5
    # Connector from the time axis out to the log-text column.
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, text_y)
    ctx.line_to(LOG_TEXT_XPOS, text_y)
    ctx.stroke()
    # Timestamp plus message next to the marker.
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all marks onto a new cairo image surface and return it.

    (Removed the unused ``num_syscalls`` local from the original.)
    """
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Fill the background first, then draw the scale and marks on top.
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    plot_time_scale(surface, ctx, metrics)
    ctx.set_line_width(1.0)
    for mark in syscalls:
        plot_syscall(surface, ctx, mark)
    return surface
def autocrop(file, bgcolor):
    """Crop the image at path *file* in place to its non-background content.

    Returns None (and leaves the file untouched) when the whole image
    equals *bgcolor*.  NOTE: the parameter shadows the file() builtin;
    the name is kept for backward compatibility.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    # Difference against a solid background; its bbox is the content area.
    background = Image.new("RGB", im.size, bgcolor)
    bbox = ImageChops.difference(im, background).getbbox()
    if not bbox:
        return None  # nothing but background
    im.crop(bbox).save(file)
def main(args):
    """Command-line entry point; returns a process exit code.

    *args* is a full argv vector (args[0] is the program name).
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    # Parse the vector we were handed; previously parse_args() read
    # sys.argv implicitly and the *args* parameter was dead.
    options, args = option_parser.parse_args(args[1:])
    if not options.output:
        # print() form stays valid under both Python 2 and Python 3.
        print('Please specify an output filename with "-o file.png" or "--output=file.png".')
        return 1
    if len(args) != 1:
        print('Please specify only one input filename, which is an strace log taken with "strace -ttt -f"')
        return 1
    in_filename = args[0]
    out_filename = options.output
    # Keep only the events between the optional "first" and "last" marks.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print('No marks in %s, add access("MARK: ...", F_OK)' % in_filename)
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
# Run only when executed as a script; the process exit code comes from main().
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000
PIXELS_PER_LINE = 14
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 300
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
# "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
special_colors = [
# ("nautilus_window_size_allocate", (1, 1, 1)),
# ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Return the (r, g, b) color configured for *string*, or None.

    Scans the module-level ``special_colors`` list and returns the color
    of the first entry whose substring occurs in *string*.
    """
    for substring, color in special_colors:
        if substring in string:
            return color
    return None
def string_has_substrings (string, substrings):
    """Return True if any element of *substrings* occurs within *string*."""
    return any (sub in string for sub in substrings)
# assumes "strace -ttt -f"
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# 3273 1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", "--sh-syntax", "--exit-with-session", "/usr/X11R6/bin/gnome"], [/* 61 vars */]) = 0
# 3275 1141862704.003623 execve("/home/devel/bin/dbus-daemon", ["dbus-daemon", "--fork", "--print-pid", "8", "--print-address", "6", "--session"], [/* 61 vars */]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# 3283 1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", ["/opt/gnome/lib/GConf/2/gconf-san"...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# 3283 1141862704.598704 <... execve resumed> ) = 0
# 3309 1141862707.027481 <... execve resumed> ) = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
success_result = "0"
class BaseMark:
    """A single event on the timeline plot.

    Subclasses override the ``colors`` class attribute to control how
    the event is drawn.
    """
    colors = (0, 0, 0)  # default marker color (r, g, b): black

    def __init__(self, timestamp, log):
        self.timestamp = timestamp  # seconds, float (strace -ttt format)
        self.log = log              # text shown next to the marker
        # Vertical positions on the plot; assigned later by
        # compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    """Mark produced by an access("MARK: ...") log line; inherits everything."""
class LastMark(BaseMark):
    """Mark whose text is 'last'; main() stops collecting events here."""
    colors = (1.0, 0, 0)  # red
class FirstMark(BaseMark):
    """Mark whose text is 'first'; main() discards everything before it."""
    colors = (1.0, 0, 0)  # red
class ExecMark(BaseMark):
    """Mark for an execve() call found in the strace log.

    The *is_complete* / *is_resumed* flags distinguish the execve log
    variants the parser recognizes, but currently do not change the
    rendered text.
    """
    colors = (1.0, 0.0, 0.0)  # red, like the first/last marks

    def __init__(self, timestamp, log, is_complete, is_resumed):
        # Only the basename of the executed program is shown.
        label = 'execve: ' + os.path.basename(log)
        BaseMark.__init__(self, timestamp, label)
class Metrics:
    """Pixel dimensions of the plot, computed from the parsed syscalls."""

    def __init__(self):
        self.width = 0   # plot width in pixels
        self.height = 0  # plot height in pixels
# don't use black or red
palette = [
(0.12, 0.29, 0.49),
(0.36, 0.51, 0.71),
(0.75, 0.31, 0.30),
(0.62, 0.73, 0.38),
(0.50, 0.40, 0.63),
(0.29, 0.67, 0.78),
(0.96, 0.62, 0.34),
(1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
(1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
(1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
(1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
(1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
(1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
(1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
]
class SyscallParser:
    """Incremental parser for "strace -ttt -f" logs.

    Feed log lines to add_line(); recognized events accumulate in
    ``self.syscalls`` as BaseMark subclasses.
    """

    def __init__ (self):
        # execve calls seen as "<unfinished ...>" and not yet resumed,
        # stored as (pid, timestamp, command) tuples.
        self.pending_execs = []
        self.syscalls = []

    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) of the pending execve for
        *search_pid*, or (None, None, None) if there is none."""
        for i, (pid, timestamp, command) in enumerate (self.pending_execs):
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)

    def add_line (self, str):
        """Parse one strace log line, appending a mark when it matches.

        NOTE(review): the parameter shadows the str() builtin; the name
        is kept so keyword callers keep working.
        """
        # access("MARK: ...") lines inserted by the traced program.
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # Color each program consistently by hashing its name.
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        # execve fully contained in a single log line.
        m = complete_exec_regex.search (str)
        if m:
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        # execve reported as "<unfinished ...>"; remember it until resumed.
        m = unfinished_exec_regex.search (str)
        if m:
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            return
        # "<... execve resumed>" line closing a pending execve.
        m = resumed_exec_regex.search (str)
        if m:
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            # Fixed "index == None" identity test; print() form also keeps
            # this line valid under both Python 2 and Python 3.
            if index is None:
                print ("Didn't find pid %s in pending_execs!" % pid)
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an "strace -ttt -f" log file and return the list of marks."""
    parser = SyscallParser ()
    # open() replaces the Python-2-only file() builtin; try/finally makes
    # sure the log file is closed even if parsing raises.
    f = open (filename, "r")
    try:
        for line in f:
            # (The old 'if line == "": break' guard was dead code: file
            # iteration never yields an empty string.)
            parser.add_line (line)
    finally:
        f.close ()
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps in place so the first syscall starts at 0."""
    origin = syscalls[0].timestamp
    for mark in syscalls:
        mark.timestamp = mark.timestamp - origin
def compute_syscall_metrics(syscalls):
    """Assign each mark its y positions and return the plot Metrics.

    Each mark gets a timestamp-proportional position for its marker and
    an evenly spaced position for its log text line.
    """
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    # Tall enough for both the full time span and one text line per mark.
    num_seconds = int(math.ceil(syscalls[-1].timestamp))
    metrics.height = max(num_seconds * PIXELS_PER_SECOND,
                         len(syscalls) * PIXELS_PER_LINE)
    for line_no, syscall in enumerate(syscalls):
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = line_no * PIXELS_PER_LINE + FONT_SIZE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one tick line and an "<n> s" label per second down the left edge.

    *surface* is unused but kept for signature symmetry with the other
    plot_* helpers.
    """
    # Ceiling division; // keeps the result an int under Python 3, where
    # plain / would yield a float and break range().
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) // PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centers the 1px-wide line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one mark: a connector line from its time position to its text."""
    ctx.set_source_rgb(*syscall.colors)
    text_y = syscall.log_ypos - FONT_SIZE / 2 + 0.5
    # Connector from the time axis out to the log-text column.
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, text_y)
    ctx.line_to(LOG_TEXT_XPOS, text_y)
    ctx.stroke()
    # Timestamp plus message next to the marker.
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all marks onto a new cairo image surface and return it.

    (Removed the unused ``num_syscalls`` local from the original.)
    """
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Fill the background first, then draw the scale and marks on top.
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    plot_time_scale(surface, ctx, metrics)
    ctx.set_line_width(1.0)
    for mark in syscalls:
        plot_syscall(surface, ctx, mark)
    return surface
def autocrop(file, bgcolor):
    """Crop the image at path *file* in place to its non-background content.

    Returns None (and leaves the file untouched) when the whole image
    equals *bgcolor*.  NOTE: the parameter shadows the file() builtin;
    the name is kept for backward compatibility.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    # Difference against a solid background; its bbox is the content area.
    background = Image.new("RGB", im.size, bgcolor)
    bbox = ImageChops.difference(im, background).getbbox()
    if not bbox:
        return None  # nothing but background
    im.crop(bbox).save(file)
def main(args):
    """Command-line entry point; returns a process exit code.

    *args* is a full argv vector (args[0] is the program name).
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    # Parse the vector we were handed; previously parse_args() read
    # sys.argv implicitly and the *args* parameter was dead.
    options, args = option_parser.parse_args(args[1:])
    if not options.output:
        # print() form stays valid under both Python 2 and Python 3.
        print('Please specify an output filename with "-o file.png" or "--output=file.png".')
        return 1
    if len(args) != 1:
        print('Please specify only one input filename, which is an strace log taken with "strace -ttt -f"')
        return 1
    in_filename = args[0]
    out_filename = options.output
    # Keep only the events between the optional "first" and "last" marks.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print('No marks in %s, add access("MARK: ...", F_OK)' % in_filename)
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
# Run only when executed as a script; the process exit code comes from main().
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000
PIXELS_PER_LINE = 14
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 300
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
# "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
special_colors = [
# ("nautilus_window_size_allocate", (1, 1, 1)),
# ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Return the (r, g, b) color configured for *string*, or None.

    Scans the module-level ``special_colors`` list and returns the color
    of the first entry whose substring occurs in *string*.
    """
    for substring, color in special_colors:
        if substring in string:
            return color
    return None
def string_has_substrings (string, substrings):
    """Return True if any element of *substrings* occurs within *string*."""
    return any (sub in string for sub in substrings)
# assumes "strace -ttt -f"
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# 3273 1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", "--sh-syntax", "--exit-with-session", "/usr/X11R6/bin/gnome"], [/* 61 vars */]) = 0
# 3275 1141862704.003623 execve("/home/devel/bin/dbus-daemon", ["dbus-daemon", "--fork", "--print-pid", "8", "--print-address", "6", "--session"], [/* 61 vars */]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# 3283 1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", ["/opt/gnome/lib/GConf/2/gconf-san"...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# 3283 1141862704.598704 <... execve resumed> ) = 0
# 3309 1141862707.027481 <... execve resumed> ) = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
success_result = "0"
class BaseMark:
    """A single event on the timeline plot.

    Subclasses override the ``colors`` class attribute to control how
    the event is drawn.
    """
    colors = (0, 0, 0)  # default marker color (r, g, b): black

    def __init__(self, timestamp, log):
        self.timestamp = timestamp  # seconds, float (strace -ttt format)
        self.log = log              # text shown next to the marker
        # Vertical positions on the plot; assigned later by
        # compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    """Mark produced by an access("MARK: ...") log line; inherits everything."""
class LastMark(BaseMark):
    """Mark whose text is 'last'; main() stops collecting events here."""
    colors = (1.0, 0, 0)  # red
class FirstMark(BaseMark):
    """Mark whose text is 'first'; main() discards everything before it."""
    colors = (1.0, 0, 0)  # red
class ExecMark(BaseMark):
    """Mark for an execve() call found in the strace log.

    The *is_complete* / *is_resumed* flags distinguish the execve log
    variants the parser recognizes, but currently do not change the
    rendered text.
    """
    colors = (1.0, 0.0, 0.0)  # red, like the first/last marks

    def __init__(self, timestamp, log, is_complete, is_resumed):
        # Only the basename of the executed program is shown.
        label = 'execve: ' + os.path.basename(log)
        BaseMark.__init__(self, timestamp, label)
class Metrics:
    """Pixel dimensions of the plot, computed from the parsed syscalls."""

    def __init__(self):
        self.width = 0   # plot width in pixels
        self.height = 0  # plot height in pixels
# don't use black or red
palette = [
(0.12, 0.29, 0.49),
(0.36, 0.51, 0.71),
(0.75, 0.31, 0.30),
(0.62, 0.73, 0.38),
(0.50, 0.40, 0.63),
(0.29, 0.67, 0.78),
(0.96, 0.62, 0.34),
(1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
(1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
(1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
(1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
(1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
(1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
(1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
]
class SyscallParser:
    """Incremental parser for "strace -ttt -f" logs.

    Feed log lines to add_line(); recognized events accumulate in
    ``self.syscalls`` as BaseMark subclasses.
    """

    def __init__ (self):
        # execve calls seen as "<unfinished ...>" and not yet resumed,
        # stored as (pid, timestamp, command) tuples.
        self.pending_execs = []
        self.syscalls = []

    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) of the pending execve for
        *search_pid*, or (None, None, None) if there is none."""
        for i, (pid, timestamp, command) in enumerate (self.pending_execs):
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)

    def add_line (self, str):
        """Parse one strace log line, appending a mark when it matches.

        NOTE(review): the parameter shadows the str() builtin; the name
        is kept so keyword callers keep working.
        """
        # access("MARK: ...") lines inserted by the traced program.
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # Color each program consistently by hashing its name.
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        # execve fully contained in a single log line.
        m = complete_exec_regex.search (str)
        if m:
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        # execve reported as "<unfinished ...>"; remember it until resumed.
        m = unfinished_exec_regex.search (str)
        if m:
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            return
        # "<... execve resumed>" line closing a pending execve.
        m = resumed_exec_regex.search (str)
        if m:
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            # Fixed "index == None" identity test; print() form also keeps
            # this line valid under both Python 2 and Python 3.
            if index is None:
                print ("Didn't find pid %s in pending_execs!" % pid)
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an "strace -ttt -f" log file and return the list of marks."""
    parser = SyscallParser ()
    # open() replaces the Python-2-only file() builtin; try/finally makes
    # sure the log file is closed even if parsing raises.
    f = open (filename, "r")
    try:
        for line in f:
            # (The old 'if line == "": break' guard was dead code: file
            # iteration never yields an empty string.)
            parser.add_line (line)
    finally:
        f.close ()
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps in place so the first syscall starts at 0."""
    origin = syscalls[0].timestamp
    for mark in syscalls:
        mark.timestamp = mark.timestamp - origin
def compute_syscall_metrics(syscalls):
    """Assign each mark its y positions and return the plot Metrics.

    Each mark gets a timestamp-proportional position for its marker and
    an evenly spaced position for its log text line.
    """
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    # Tall enough for both the full time span and one text line per mark.
    num_seconds = int(math.ceil(syscalls[-1].timestamp))
    metrics.height = max(num_seconds * PIXELS_PER_SECOND,
                         len(syscalls) * PIXELS_PER_LINE)
    for line_no, syscall in enumerate(syscalls):
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = line_no * PIXELS_PER_LINE + FONT_SIZE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one tick line and an "<n> s" label per second down the left edge.

    *surface* is unused but kept for signature symmetry with the other
    plot_* helpers.
    """
    # Ceiling division; // keeps the result an int under Python 3, where
    # plain / would yield a float and break range().
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) // PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centers the 1px-wide line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one mark: a connector line from its time position to its text."""
    ctx.set_source_rgb(*syscall.colors)
    text_y = syscall.log_ypos - FONT_SIZE / 2 + 0.5
    # Connector from the time axis out to the log-text column.
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, text_y)
    ctx.line_to(LOG_TEXT_XPOS, text_y)
    ctx.stroke()
    # Timestamp plus message next to the marker.
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all marks onto a new cairo image surface and return it.

    (Removed the unused ``num_syscalls`` local from the original.)
    """
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Fill the background first, then draw the scale and marks on top.
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    plot_time_scale(surface, ctx, metrics)
    ctx.set_line_width(1.0)
    for mark in syscalls:
        plot_syscall(surface, ctx, mark)
    return surface
def autocrop(file, bgcolor):
    """Crop the image at path *file* in place to its non-background content.

    Returns None (and leaves the file untouched) when the whole image
    equals *bgcolor*.  NOTE: the parameter shadows the file() builtin;
    the name is kept for backward compatibility.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    # Difference against a solid background; its bbox is the content area.
    background = Image.new("RGB", im.size, bgcolor)
    bbox = ImageChops.difference(im, background).getbbox()
    if not bbox:
        return None  # nothing but background
    im.crop(bbox).save(file)
def main(args):
    """Command-line entry point; returns a process exit code.

    *args* is a full argv vector (args[0] is the program name).
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    # Parse the vector we were handed; previously parse_args() read
    # sys.argv implicitly and the *args* parameter was dead.
    options, args = option_parser.parse_args(args[1:])
    if not options.output:
        # print() form stays valid under both Python 2 and Python 3.
        print('Please specify an output filename with "-o file.png" or "--output=file.png".')
        return 1
    if len(args) != 1:
        print('Please specify only one input filename, which is an strace log taken with "strace -ttt -f"')
        return 1
    in_filename = args[0]
    out_filename = options.output
    # Keep only the events between the optional "first" and "last" marks.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print('No marks in %s, add access("MARK: ...", F_OK)' % in_filename)
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
# Run only when executed as a script; the process exit code comes from main().
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000
PIXELS_PER_LINE = 14
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 300
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
# "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
special_colors = [
# ("nautilus_window_size_allocate", (1, 1, 1)),
# ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Return the (r, g, b) color configured for *string*, or None.

    Scans the module-level ``special_colors`` list and returns the color
    of the first entry whose substring occurs in *string*.
    """
    for substring, color in special_colors:
        if substring in string:
            return color
    return None
def string_has_substrings (string, substrings):
    """Return True if any element of *substrings* occurs within *string*."""
    return any (sub in string for sub in substrings)
# assumes "strace -ttt -f"
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# 3273 1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", "--sh-syntax", "--exit-with-session", "/usr/X11R6/bin/gnome"], [/* 61 vars */]) = 0
# 3275 1141862704.003623 execve("/home/devel/bin/dbus-daemon", ["dbus-daemon", "--fork", "--print-pid", "8", "--print-address", "6", "--session"], [/* 61 vars */]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# 3283 1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", ["/opt/gnome/lib/GConf/2/gconf-san"...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# 3283 1141862704.598704 <... execve resumed> ) = 0
# 3309 1141862707.027481 <... execve resumed> ) = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
success_result = "0"
class BaseMark:
    """One event in the timeline.

    `colors` is the (r, g, b) triple used to draw the mark; subclasses
    override it.
    """
    colors = 0, 0, 0
    def __init__(self, timestamp, log):
        self.timestamp = timestamp  # seconds (absolute until normalize_timestamps())
        self.log = log              # text shown next to the mark
        # Vertical pixel positions, filled in by compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    """An ordinary access("MARK: ...") event."""
    pass
class LastMark(BaseMark):
    """Mark that ends the plotted region; drawn in red."""
    colors = 1.0, 0, 0
class FirstMark(BaseMark):
    """Mark that starts the plotted region; drawn in red."""
    colors = 1.0, 0, 0
class ExecMark(BaseMark):
    """Mark for a successful execve() call; drawn in red.

    `is_complete`/`is_resumed` record whether the mark came from a
    single-line execve or from an "<... execve resumed>" pair; they are
    currently unused (see the commented-out label variants below).
    """
    # colors = 0.75, 0.33, 0.33
    colors = (1.0, 0.0, 0.0)
    def __init__(self, timestamp, log, is_complete, is_resumed):
        # if is_complete:
        text = 'execve: '
        # elif is_resumed:
        #     text = 'execve resumed: '
        # else:
        #     text = 'execve started: '
        # Only the binary's basename is shown, to keep the label short.
        text = text + os.path.basename(log)
        BaseMark.__init__(self, timestamp, text)
class Metrics:
    """Plot dimensions in pixels, filled in by compute_syscall_metrics()."""
    def __init__(self):
        self.width = 0   # overall plot width
        self.height = 0  # overall plot height
# don't use black or red
# (black is the background color, red is reserved for first/last/exec marks;
# the second half of the palette are the complements of the first seven)
palette = [
    (0.12, 0.29, 0.49),
    (0.36, 0.51, 0.71),
    (0.75, 0.31, 0.30),
    (0.62, 0.73, 0.38),
    (0.50, 0.40, 0.63),
    (0.29, 0.67, 0.78),
    (0.96, 0.62, 0.34),
    (1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
    (1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
    (1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
    (1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
    (1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
    (1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
    (1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
]
class SyscallParser:
    """Accumulates timeline marks from strace log lines.

    Feed lines through add_line(); the resulting BaseMark objects are
    collected in self.syscalls.  execve() calls that strace splits into
    an "<unfinished ...>" / "<... execve resumed>" pair are matched up
    through pending_execs, keyed by PID.
    """
    def __init__ (self):
        self.pending_execs = []  # [(pid, timestamp, command), ...] awaiting resume
        self.syscalls = []       # parsed BaseMark instances, in log order
    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) of the pending execve for
        `search_pid`, or (None, None, None) when there is none."""
        n = len (self.pending_execs)
        for i in range (n):
            (pid, timestamp, command) = self.pending_execs[i]
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)
    def add_line (self, str):
        """Parse one strace line and append the matching mark, if any.

        NOTE(review): `str` shadows the builtin; kept for interface
        compatibility.  Also, `text` includes the program prefix, so the
        'last'/'first' comparisons only match when that whole string is
        the mark text -- confirm against the MARK convention in use.
        """
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # Stable per-program color: hash the program name
                        # into the palette.
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        m = complete_exec_regex.search (str)
        if m:
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        m = unfinished_exec_regex.search (str)
        if m:
            # Remember the started execve; the mark is emitted when the
            # matching "resumed" line reports success.
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            # self.syscalls.append (ExecMark (timestamp, command, False, False))
            return
        m = resumed_exec_regex.search (str)
        if m:
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            if index == None:
                print "Didn't find pid %s in pending_execs!" % pid
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an strace log and return the list of syscall marks.

    The log must have been captured with "strace -ttt -f" so every line
    carries a PID and an absolute timestamp.
    """
    parser = SyscallParser ()
    # Use open() + a context manager instead of the Python-2-only file()
    # builtin, so the handle is closed deterministically.  (The old
    # 'if line == "": break' guard was dead code: iterating a file never
    # yields an empty string, since each line keeps its newline.)
    with open(filename, "r") as f:
        for line in f:
            parser.add_line (line)
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps so the first event happens at time 0."""
    origin = syscalls[0].timestamp
    for event in syscalls:
        event.timestamp -= origin
def compute_syscall_metrics(syscalls):
    """Compute plot dimensions and the two y-positions of every event.

    Each event gets a time-proportional `timestamp_ypos` (left edge of
    the plot) and a one-line-per-event `log_ypos` (text column);
    plot_syscall() draws the connector between the two.
    """
    num_syscalls = len(syscalls)
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    last_timestamp = syscalls[num_syscalls - 1].timestamp
    num_seconds = int(math.ceil(last_timestamp))
    # Tall enough for both the time scale and one text line per event.
    metrics.height = max(num_seconds * PIXELS_PER_SECOND,
                         num_syscalls * PIXELS_PER_LINE)
    text_ypos = 0
    for syscall in syscalls:
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = text_ypos + FONT_SIZE
        text_ypos += PIXELS_PER_LINE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw a horizontal tick line plus an "<n> s" label for each second.

    Ticks span the TIME_SCALE_WIDTH gutter at the left edge of the plot.
    """
    # Floor division: // yields an int in both Python 2 and 3 (plain /
    # would produce a float under Python 3 and break range() below).
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) // PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centers the 1px line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one event: a connector line plus its timestamped log text."""
    ctx.set_source_rgb(*syscall.colors)
    # Connector from the time-proportional tick on the scale to the
    # per-line text position on the right.
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.line_to(LOG_TEXT_XPOS, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.stroke()
    # Log text
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all events onto a new cairo image surface and return it."""
    num_syscalls = len(syscalls)  # NOTE(review): unused
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Background
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    # Time scale
    plot_time_scale(surface, ctx, metrics)
    # Contents
    ctx.set_line_width(1.0)
    for syscall in syscalls:
        plot_syscall(surface, ctx, syscall)
    return surface
def autocrop(file, bgcolor):
    """Trim the solid `bgcolor` border from the image at path `file`.

    Rewrites the file in place; returns None without touching the file
    when the image is entirely background.
    NOTE(review): the parameter shadows the Python 2 `file` builtin;
    kept for interface compatibility.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    bg = Image.new("RGB", im.size, bgcolor)
    # The bounding box of (image - background) is the non-background area.
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if not bbox:
        return None # no contents
    im = im.crop(bbox)
    im.save(file)
def main(args):
    """Command-line entry point.

    Parses an strace log (taken with "strace -ttt -f"), keeps only the
    events between the "first" and "last" marks, and writes a cropped
    PNG plot.  Returns a shell exit status (0 success, 1 usage error).
    NOTE(review): `args` is unused; parse_args() reads sys.argv itself.
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    options, args = option_parser.parse_args()
    if not options.output:
        print 'Please specify an output filename with "-o file.png" or "--output=file.png".'
        return 1
    if len(args) != 1:
        print 'Please specify only one input filename, which is an strace log taken with "strace -ttt -f"'
        return 1
    in_filename = args[0]
    out_filename = options.output
    # Keep events after the most recent FirstMark, stop at the LastMark.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print 'No marks in %s, add access("MARK: ...", F_OK)' % in_filename
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    # Trim the unused background so the PNG is as small as possible.
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
if __name__ == "__main__":
    # main() returns a shell-style exit status.
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# plot-timeline.py - A simple program to plot timelines based on a Linux strace(1) log.
# Copyright (C) 2007 Federico Mena-Quintero, Johan Dahlin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors: Federico Mena-Quintero <federico@gnu.org>
# Johan Dahlin <johan@gnome.org>
import math
import optparse
import os
import re
import sys
import cairo
import Image, ImageChops
### CUSTOMIZATION BEGINS HERE
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 12
PIXELS_PER_SECOND = 30000    # vertical scale of the time axis
PIXELS_PER_LINE = 14         # vertical spacing of the log-text column
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20        # width of the tick gutter at the left edge
SYSCALL_MARKER_WIDTH = 20    # horizontal stub drawn at an event's timestamp
LOG_TEXT_XPOS = 300          # x position of the log-text column
LOG_MARKER_WIDTH = 20        # horizontal stub drawn before the log text
BACKGROUND_COLOR = (0, 0, 0)
# list of strings to ignore in the plot
ignore_strings = [
    # "nautilus_directory_async_state_changed"
]
# list of pairs ("string", (r, g, b)) to give a special color to some strings
# (floats in 0..1; the first pair whose string occurs in a mark's text wins,
# see get_special_color() below)
special_colors = [
    # ("nautilus_window_size_allocate", (1, 1, 1)),
    # ("STARTING MAIN LOOP", (1, 0, 0)),
]
### CUSTOMIZATION ENDS HERE
def get_special_color (string):
    """Look `string` up in the special_colors table.

    Returns the first matching (r, g, b) triple, or None.
    """
    return next((rgb for text, rgb in special_colors if text in string), None)
def string_has_substrings (string, substrings):
    """Return True if `string` contains any of the given substrings.

    An empty `substrings` list yields False.
    """
    # `in` is the idiomatic (and C-speed) substring test; any() stops at
    # the first hit instead of scanning the rest of the list.
    return any(sub in string for sub in substrings)
# assumes "strace -ttt -f"
# MARK lines look like: NNN TTTT.TTT access("MARK: <program>: <log>", F_OK ...)
# note the <program> group keeps its trailing ": " separator
mark_regex = re.compile (r'^\d+ +(\d+\.\d+) +access\("MARK: ([^:]*: )(.*)", F_OK.*')
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
# single-line execve, e.g.:
# 3273 1141862703.998196 execve("/usr/bin/dbus-launch", ["/usr/bin/dbus-launch", ...], [/* 61 vars */]) = 0
# 3275 1141862704.003623 execve("/home/devel/bin/dbus-daemon", [...]) = -1 ENOENT (No such file or directory)
complete_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*= (0|-1 ENOENT \(No such file or directory\))$')
complete_exec_pid_group = 1
complete_exec_timestamp_group = 2
complete_exec_command_group = 3
complete_exec_result_group = 4
# execve split by strace into an "<unfinished ...>" line, e.g.:
# 3283 1141862704.598008 execve("/opt/gnome/lib/GConf/2/gconf-sanity-check-2", [...], [/* 66 vars */] <unfinished ...>
unfinished_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +execve\("(.*)", \[".*<unfinished \.\.\.>$')
unfinished_exec_pid_group = 1
unfinished_exec_timestamp_group = 2
unfinished_exec_command_group = 3
# ... and the matching "<... execve resumed>" line, e.g.:
# 3283 1141862704.598704 <... execve resumed> ) = 0
# 3309 1141862707.027481 <... execve resumed> ) = -1 ENOENT (No such file or directory)
resumed_exec_regex = re.compile (r'^(\d+) +(\d+\.\d+) +<\.\.\. execve resumed.*= (0|-1 ENOENT \(No such file or directory\))$')
resumed_exec_pid_group = 1
resumed_exec_timestamp_group = 2
resumed_exec_result_group = 3
# execve return value that counts as success (string, compared verbatim)
success_result = "0"
class BaseMark:
    """One event in the timeline.

    `colors` is the (r, g, b) triple used to draw the mark; subclasses
    override it.
    """
    colors = 0, 0, 0
    def __init__(self, timestamp, log):
        self.timestamp = timestamp  # seconds (absolute until normalize_timestamps())
        self.log = log              # text shown next to the mark
        # Vertical pixel positions, filled in by compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    """An ordinary access("MARK: ...") event."""
    pass
class LastMark(BaseMark):
    """Mark that ends the plotted region; drawn in red."""
    colors = 1.0, 0, 0
class FirstMark(BaseMark):
    """Mark that starts the plotted region; drawn in red."""
    colors = 1.0, 0, 0
class ExecMark(BaseMark):
    """Mark for a successful execve() call; drawn in red.

    `is_complete` / `is_resumed` record whether the mark came from a
    single-line execve or from an "<... execve resumed>" pair.  They are
    currently unused but kept so existing call sites stay valid.
    """
    colors = (1.0, 0.0, 0.0)
    def __init__(self, timestamp, log, is_complete, is_resumed):
        # Only the binary's basename is shown, to keep the label short.
        text = 'execve: ' + os.path.basename(log)
        BaseMark.__init__(self, timestamp, text)
class Metrics:
    """Plot dimensions in pixels, filled in by compute_syscall_metrics()."""
    def __init__(self):
        self.width = 0   # overall plot width
        self.height = 0  # overall plot height
# don't use black or red
# (black is the background color, red is reserved for first/last/exec marks;
# the second half of the palette are the complements of the first seven)
palette = [
    (0.12, 0.29, 0.49),
    (0.36, 0.51, 0.71),
    (0.75, 0.31, 0.30),
    (0.62, 0.73, 0.38),
    (0.50, 0.40, 0.63),
    (0.29, 0.67, 0.78),
    (0.96, 0.62, 0.34),
    (1.0 - 0.12, 1.0 - 0.29, 1.0 - 0.49),
    (1.0 - 0.36, 1.0 - 0.51, 1.0 - 0.71),
    (1.0 - 0.75, 1.0 - 0.31, 1.0 - 0.30),
    (1.0 - 0.62, 1.0 - 0.73, 1.0 - 0.38),
    (1.0 - 0.50, 1.0 - 0.40, 1.0 - 0.63),
    (1.0 - 0.29, 1.0 - 0.67, 1.0 - 0.78),
    (1.0 - 0.96, 1.0 - 0.62, 1.0 - 0.34)
]
class SyscallParser:
    """Accumulates timeline marks from strace log lines.

    Feed lines through add_line(); the resulting BaseMark objects are
    collected in self.syscalls.  execve() calls that strace splits into
    an "<unfinished ...>" / "<... execve resumed>" pair are matched up
    through pending_execs, keyed by PID.
    """
    def __init__ (self):
        self.pending_execs = []  # [(pid, timestamp, command), ...] awaiting resume
        self.syscalls = []       # parsed BaseMark instances, in log order
    def search_pending_execs (self, search_pid):
        """Return (index, timestamp, command) of the pending execve for
        `search_pid`, or (None, None, None) when there is none."""
        n = len (self.pending_execs)
        for i in range (n):
            (pid, timestamp, command) = self.pending_execs[i]
            if pid == search_pid:
                return (i, timestamp, command)
        return (None, None, None)
    def add_line (self, str):
        """Parse one strace line and append the matching mark, if any.

        NOTE(review): `text` includes the program prefix, so the
        'last'/'first' comparisons only match when that whole string is
        the mark text -- confirm against the MARK convention in use.
        """
        m = mark_regex.search (str)
        if m:
            timestamp = float (m.group (mark_timestamp_group))
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                if not string_has_substrings (text, ignore_strings):
                    s = AccessMark (timestamp, text)
                    c = get_special_color (text)
                    if c:
                        s.colors = c
                    else:
                        # Stable per-program color: hash the program name
                        # into the palette.
                        program_hash = program.__hash__ ()
                        s.colors = palette[program_hash % len (palette)]
                    self.syscalls.append (s)
            return
        m = complete_exec_regex.search (str)
        if m:
            result = m.group (complete_exec_result_group)
            if result == success_result:
                pid = m.group (complete_exec_pid_group)
                timestamp = float (m.group (complete_exec_timestamp_group))
                command = m.group (complete_exec_command_group)
                self.syscalls.append (ExecMark (timestamp, command, True, False))
            return
        m = unfinished_exec_regex.search (str)
        if m:
            # Remember the started execve; the mark is emitted when the
            # matching "resumed" line reports success.
            pid = m.group (unfinished_exec_pid_group)
            timestamp = float (m.group (unfinished_exec_timestamp_group))
            command = m.group (unfinished_exec_command_group)
            self.pending_execs.append ((pid, timestamp, command))
            return
        m = resumed_exec_regex.search (str)
        if m:
            pid = m.group (resumed_exec_pid_group)
            timestamp = float (m.group (resumed_exec_timestamp_group))
            result = m.group (resumed_exec_result_group)
            (index, old_timestamp, command) = self.search_pending_execs (pid)
            # PEP 8: identity check against None, not equality.
            if index is None:
                # Parenthesized print: same output on Python 2, valid on 3.
                print ("Didn't find pid %s in pending_execs!" % pid)
                sys.exit (1)
            del self.pending_execs[index]
            if result == success_result:
                self.syscalls.append (ExecMark (timestamp, command, False, True))
def parse_strace(filename):
    """Parse an strace log and return the list of syscall marks.

    The log must have been captured with "strace -ttt -f" so every line
    carries a PID and an absolute timestamp.
    """
    parser = SyscallParser ()
    # Use open() + a context manager instead of the Python-2-only file()
    # builtin, so the handle is closed deterministically.  (The old
    # 'if line == "": break' guard was dead code: iterating a file never
    # yields an empty string, since each line keeps its newline.)
    with open(filename, "r") as f:
        for line in f:
            parser.add_line (line)
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift all timestamps so the first event happens at time 0."""
    origin = syscalls[0].timestamp
    for event in syscalls:
        event.timestamp -= origin
def compute_syscall_metrics(syscalls):
    """Compute plot dimensions and the two y-positions of every event.

    Each event gets a time-proportional `timestamp_ypos` (left edge of
    the plot) and a one-line-per-event `log_ypos` (text column);
    plot_syscall() draws the connector between the two.
    """
    num_syscalls = len(syscalls)
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    last_timestamp = syscalls[num_syscalls - 1].timestamp
    num_seconds = int(math.ceil(last_timestamp))
    # Tall enough for both the time scale and one text line per event.
    metrics.height = max(num_seconds * PIXELS_PER_SECOND,
                         num_syscalls * PIXELS_PER_LINE)
    text_ypos = 0
    for syscall in syscalls:
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = text_ypos + FONT_SIZE
        text_ypos += PIXELS_PER_LINE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw a horizontal tick line plus an "<n> s" label for each second.

    Ticks span the TIME_SCALE_WIDTH gutter at the left edge of the plot.
    """
    # Floor division: // yields an int in both Python 2 and 3 (plain /
    # would produce a float under Python 3 and break range() below).
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) // PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centers the 1px line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one event: a connector line plus its timestamped log text."""
    ctx.set_source_rgb(*syscall.colors)
    # Connector from the time-proportional tick on the scale to the
    # per-line text position on the right.
    ctx.move_to(TIME_SCALE_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, syscall.timestamp_ypos)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.line_to(LOG_TEXT_XPOS, syscall.log_ypos - FONT_SIZE / 2 + 0.5)
    ctx.stroke()
    # Log text
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render all events onto a new cairo image surface and return it."""
    num_syscalls = len(syscalls)  # NOTE(review): unused
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Background
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    # Time scale
    plot_time_scale(surface, ctx, metrics)
    # Contents
    ctx.set_line_width(1.0)
    for syscall in syscalls:
        plot_syscall(surface, ctx, syscall)
    return surface
def autocrop(file, bgcolor):
    """Trim the solid `bgcolor` border from the image at path `file`.

    Rewrites the file in place; returns None without touching the file
    when the image is entirely background.
    NOTE(review): the parameter shadows the Python 2 `file` builtin;
    kept for interface compatibility.
    """
    im = Image.open(file)
    if im.mode != "RGB":
        im = im.convert("RGB")
    bg = Image.new("RGB", im.size, bgcolor)
    # The bounding box of (image - background) is the non-background area.
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if not bbox:
        return None # no contents
    im = im.crop(bbox)
    im.save(file)
def main(args):
    """Command-line entry point.

    Parses an strace log (taken with "strace -ttt -f"), keeps only the
    events between the "first" and "last" marks, and writes a cropped
    PNG plot.  Returns a shell exit status (0 success, 1 usage error).
    NOTE(review): `args` is unused; parse_args() reads sys.argv itself.
    """
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <strace.txt>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    options, args = option_parser.parse_args()
    if not options.output:
        print 'Please specify an output filename with "-o file.png" or "--output=file.png".'
        return 1
    if len(args) != 1:
        print 'Please specify only one input filename, which is an strace log taken with "strace -ttt -f"'
        return 1
    in_filename = args[0]
    out_filename = options.output
    # Keep events after the most recent FirstMark, stop at the LastMark.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print 'No marks in %s, add access("MARK: ...", F_OK)' % in_filename
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    # Trim the unused background so the PNG is as small as possible.
    autocrop(out_filename, BACKGROUND_COLOR)
    return 0
if __name__ == "__main__":
    # main() returns a shell-style exit status.
    sys.exit(main(sys.argv))
| Python |
# -*- coding: utf-8 -*-
# PLEASE LEAVE THIS LINE ALONE
from core.herbarium import Herbarium
"""
Fill this with your own code, simplified.
An example structure could be:
h = Herbarium(source="list.fhb", create=["species","family","main"], structure="default", language="en")
h.make()
The options can be found in the user manual.
"""
# Build the herbarium from list.fhb: generate the species, family and main
# pages using the default structure, in English (see the user manual).
h = Herbarium(source="list.fhb", create=["species","family","main"], structure="default", language="en")
h.make()
# -*- coding: utf-8 -*-
"""
This file is part of Free Herbarium.
Free Herbarium is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Free Herbarium is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Free Herbarium. If not, see <http://www.gnu.org/licenses/>.
"""
import re, os, shutil
from lib.FHBparser import FHBparser
class Herbarium:
    """Driver object: parses a .fhb source file to build the herbarium.

    Parameters mirror the user manual: `source` is the .fhb list file,
    `create` the page types to generate, `structure` the layout name,
    and `language` the output language code.
    """
    def __init__(self, source, create, structure="default", language="en"):
        self.source = source
        self.create = create
        self.structure = structure
        self.language = language
    def make(self):
        # Recreate the scratch directory.  Bug fix: guard the rmtree so
        # a fresh run (no tmp/ yet) does not crash with OSError.
        if os.path.isdir("tmp"):
            shutil.rmtree("tmp")
        os.mkdir("tmp")
        parser = FHBparser(self.source)
        dictionary = parser.parse()
        # Parenthesized print: same output on Python 2, valid on 3.
        print(dictionary)
"""ok""" | Python |
# -*- coding: utf-8 -*-
"""
This file is part of Free Herbarium.
Free Herbarium is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Free Herbarium is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Free Herbarium. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib.preprocessor import Preprocessor
class UnknownNameException(Exception):
    """Raised when a species definition lacks its mandatory NAME field.

    Inherits from Exception (the original bare class cannot be raised
    under Python 3 and breaks `except Exception` handlers).
    """
    def __init__(self, value):
        Exception.__init__(self)
        # Keep the explicit attribute for callers that read .value.
        self.value = value
    def __str__(self):
        return repr(self.value)
class FHBparser:
    """Parses a file with all the species and rendering data.

    The .fhb format: lines starting with '#' are preprocessor directives;
    the rest is a stream of "BeginSpecies ... EndSpecies" records made of
    ';'-separated "KEY=value" fields.  parse() returns a mapping
    {species name: {field: value}}.
    """
    def __init__(self, source):
        self.source = source
    def _getname(self, defs):
        """Return the NAME value from the field list `defs`.

        Raises UnknownNameException when no NAME field is present.
        """
        for d in defs:
            if d.split("=")[0] == "NAME":
                return d.split("=")[1]
        raise UnknownNameException("No NAME parameter found")
    def _checkDict(self, defs):
        """Ensure the current species has an entry in self.dictionary.

        When a record's NAME field has not been seen yet, look it up in
        `defs`, create the entry and remember it as the current name.
        """
        if self.curname not in self.dictionary:
            n = self._getname(defs)
            # Bug fix: the entry must be a dict (parse() assigns string
            # keys into it); it was initialized as a list, which made any
            # record whose NAME field is not first crash with TypeError.
            self.dictionary[n] = {}
            self.curname = n
    def parse(self):
        """The main job.  Parses the document and returns the dictionary."""
        with open(self.source) as f:
            text = f.read()
        # Hand the '#' directives to the preprocessor, then strip them.
        preprocessor = re.findall("#(.*?)$", text, re.M)
        preprocess = Preprocessor(preprocessor)
        preprocess.save()
        for x in preprocessor:
            text = text.replace("#" + x + "\n", "")
        text = text.replace("\n", "")
        parsed = text.split("BeginSpecies")[1:]
        self.dictionary = {}
        for sp in parsed:
            defs = sp.split(";")
            self.curname = ""
            for defi in defs:
                stat = defi.split("=")
                sw = stat[0]
                if sw == "EndSpecies": break
                # NOTE(review): a field with no '=' raises IndexError
                # here -- inputs are presumably trusted.
                val = stat[1]
                if sw == "NAME":
                    self.dictionary[val] = {}
                    self.curname = val
                elif sw == "OTHERNAMES":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["othernames"] = re.split(r",\s?", val)
                elif sw == "IMGS":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["imgs"] = re.split(r",\s?", val)
                elif sw == "LAT":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["lat"] = val
                elif sw == "DESC":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["desc"] = val
                elif sw == "ES":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["es"] = val
                elif sw == "FAM":
                    self._checkDict(defs)
                    self.dictionary[self.curname]["fam"] = val
                else:
                    # Parenthesized print: same output on Python 2, valid on 3.
                    print("<Warning> Unrecognized variable \"%s\" with value \"%s\"" % (sw, val))
        return self.dictionary
| Python |
"""ok""" | Python |
# -*- coding: utf-8 -*-
"""
This file is part of Free Herbarium.
Free Herbarium is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Free Herbarium is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Free Herbarium. If not, see <http://www.gnu.org/licenses/>.
"""
class UnrecognisedArgumentError(Exception):
    """Raised when a preprocessor directive gets an unknown argument.

    (The original docstring was copy-pasted from the species-name
    exception; fixed.)  Inherits from Exception so it can be raised
    under Python 3 and caught by `except Exception` handlers.
    """
    def __init__(self, value):
        Exception.__init__(self)
        self.value = value
    def __str__(self):
        return repr(self.value)
class OutofplaceError(Exception):
    """Raised when a directive appears outside its required block.

    (The original docstring was copy-pasted from the species-name
    exception; fixed.)  Inherits from Exception so it can be raised
    under Python 3 and caught by `except Exception` handlers.
    """
    def __init__(self, value):
        Exception.__init__(self)
        self.value = value
    def __str__(self):
        return repr(self.value)
class Preprocessor:
    """Executes '#' preprocessor directives extracted from a .fhb file.

    Recognized directives (space-separated):
        set KEY VALUE         -> appended to tmp/definitions
        begin families        -> opens a families block
        lat LANG LATIN=LOCAL  -> inside a families block, appended to tmp/lat_LANG
        end families          -> closes the block
    """
    def __init__(self, lines):
        self.lines = lines
    def _define(self, key, val):
        """Append a "key=val" line to the shared definitions file."""
        with open("tmp/definitions","a") as f:
            f.write(key+"="+val+"\n")
    def _addLatin(self, lang, equiv):
        """Append a latin-name equivalence ("LATIN=u'LOCAL'") for `lang`."""
        with open("tmp/lat_%s" % lang, "a") as f:
            sp = equiv.split("=")
            f.write(sp[0]+"=u'"+sp[1]+"'\n")
    def save(self):
        """Run every directive in order; unknown actions are ignored.

        Raises UnrecognisedArgumentError / OutofplaceError on malformed
        blocks.  NOTE(review): directives with missing arguments raise
        IndexError here -- inputs are presumably trusted.
        """
        family = False
        for line in self.lines:
            action = line.split(" ")[0]
            args = line.split(" ")[1:]
            if action == "set":
                self._define(args[0],args[1])
            elif action == "begin":
                if args[0] == "families":
                    family = True # begin listening
                else:
                    raise UnrecognisedArgumentError("Argument %s for function 'begin'" % args[0])
            elif action == "end":
                if args[0] == "families":
                    family = False # stop listening
                else:
                    raise UnrecognisedArgumentError("Argument %s for function 'begin'" % args[0])
            elif action == "lat":
                if family == True:
                    self._addLatin(args[0],args[1])
                else:
                    raise OutofplaceError("'lat' function must be placed inside a 'family' block.")
#!/usr/bin/env python
"""
tesshelper.py -- Utility operations to compare, report stats, and copy
public headers for tesseract 3.0x VS2008 Project
$RCSfile: tesshelper.py,v $ $Revision: 7ca575b377aa $ $Date: 2012/03/07 17:26:31 $
"""
r"""
Requires:
python 2.7 or greater: activestate.com
http://www.activestate.com/activepython/downloads
because using the new argparse module and new literal set syntax (s={1, 2}) .
General Notes:
--------------
Format for a .vcproj file entry:
<File
RelativePath="..\src\allheaders.h"
>
</File>
"""
epilogStr = r"""
Examples:
Assume that tesshelper.py is in c:\buildfolder\tesseract-3.02\vs2008,
which is also the current directory. Then,
python tesshelper .. compare
will compare c:\buildfolder\tesseract-3.02 "library" directories to the
libtesseract Project
(c:\buildfolder\tesseract-3.02\vs2008\libtesseract\libtesseract.vcproj).
python tesshelper .. report
will display summary stats for c:\buildfolder\tesseract-3.02 "library"
directories and the libtesseract Project.
python tesshelper .. copy ..\..\include
will copy all "public" libtesseract header files to
c:\buildfolder\include.
python tesshelper .. clean
will clean the vs2008 folder of all build directories, and .user, .suo,
.ncb, and other temp files.
"""
# imports of python standard library modules
# See Python Documentation | Library Reference for details
import collections
import glob
import argparse
import os
import re
import shutil
import sys
# ====================================================================
# Version string: "1.0 " + the RCS checkin date of this file.
VERSION = "1.0 %s" % "$Date: 2012/03/07 17:26:31 $".split()[1]
PROJ_SUBDIR = r"vs2008\libtesseract"
PROJFILE = "libtesseract.vcproj"
# tessCompare() writes ready-to-paste <File> project items here.
NEWHEADERS_FILENAME = "newheaders.txt"
NEWSOURCES_FILENAME = "newsources.txt"
fileNodeTemplate = \
''' <File
RelativePath="..\..\%s"
>
</File>
'''
# ====================================================================
def getProjectfiles(libTessDir, libProjectFile, nTrimChars):
    """Return sets of all, c, h, and resources files in libtesseract Project.

    Returns (projectFilesSet, projectHFiles, projectCFiles, projectRFiles):
    the first set holds lowercased paths relative to the tesseract root
    (nTrimChars strips the absolute prefix) for comparison against
    getTessLibFiles(); the other three keep the raw project-relative paths.
    """
    #extract filenames of header & source files from the .vcproj
    projectCFiles = set()
    projectHFiles = set()
    projectRFiles = set()
    projectFilesSet = set()
    f = open(libProjectFile, "r")
    data = f.read()
    f.close()
    # RelativePath attributes in the .vcproj hold the file entries.
    projectFiles = re.findall(r'(?i)RelativePath="(\.[^"]+)"', data)
    for projectFile in projectFiles:
        root, ext = os.path.splitext(projectFile.lower())
        if ext == ".c" or ext == ".cpp":
            projectCFiles.add(projectFile)
        elif ext == ".h":
            projectHFiles.add(projectFile)
        elif ext == ".rc":
            projectRFiles.add(projectFile)
        else:
            print "unknown file type: %s" % projectFile
        # Normalize: absolute, then root-relative and lowercased.
        relativePath = os.path.join(libTessDir, projectFile)
        relativePath = os.path.abspath(relativePath)
        relativePath = relativePath[nTrimChars:].lower()
        projectFilesSet.add(relativePath)
    return projectFilesSet, projectHFiles, projectCFiles, projectRFiles
def getTessLibFiles(tessDir, nTrimChars):
    """Return set of all libtesseract files in tessDir.

    Paths are lowercased and made relative to the tesseract root
    (nTrimChars strips the absolute prefix) so they compare equal to the
    first set returned by getProjectfiles().
    """
    # The "library" directories that feed libtesseract.
    libDirs = [
        "api",
        "ccmain",
        "ccstruct",
        "ccutil",
        "classify",
        "cube",
        "cutil",
        "dict",
        r"neural_networks\runtime",
        "opencl",
        "textord",
        "viewer",
        "wordrec",
        #"training",
        r"vs2008\port",
        r"vs2008\libtesseract",
        ]
    #create list of all .h, .c, .cpp files in "library" directories
    tessFiles = set()
    for curDir in libDirs:
        baseDir = os.path.join(tessDir, curDir)
        for filetype in ["*.c", "*.cpp", "*.h", "*.rc"]:
            pattern = os.path.join(baseDir, filetype)
            fileList = glob.glob(pattern)
            for curFile in fileList:
                curFile = os.path.abspath(curFile)
                relativePath = curFile[nTrimChars:].lower()
                tessFiles.add(relativePath)
    return tessFiles
# ====================================================================
def tessCompare(tessDir):
    '''Compare libtesseract Project files and actual "sub-library" files.

    Prints files present on disk but missing from the Project (writing
    ready-to-paste <File> items for them to newheaders.txt and
    newsources.txt), then prints "dead" files listed in the Project but
    absent on disk.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 also strips the path separator that follows the root prefix.
    nTrimChars = len(tessAbsDir)+1
    print 'Comparing VS2008 Project "%s" with\n "%s"' % (libProjectFile,
                                                         tessAbsDir)
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    # On disk but not in the project:
    extraFiles = tessFiles - projectFilesSet
    print "%2d Extra files (in %s but not in Project)" % (len(extraFiles),
                                                          tessAbsDir)
    headerFiles = []
    sourceFiles = []
    sortedList = list(extraFiles)
    sortedList.sort()
    for filename in sortedList:
        root, ext = os.path.splitext(filename.lower())
        if ext == ".h":
            headerFiles.append(filename)
        else:
            sourceFiles.append(filename)
        print " %s " % filename
    print
    print "%2d new header file items written to %s" % (len(headerFiles),
                                                       NEWHEADERS_FILENAME)
    headerFiles.sort()
    with open(NEWHEADERS_FILENAME, "w") as f:
        for filename in headerFiles:
            f.write(fileNodeTemplate % filename)
    print "%2d new source file items written to %s" % (len(sourceFiles),
                                                       NEWSOURCES_FILENAME)
    sourceFiles.sort()
    with open(NEWSOURCES_FILENAME, "w") as f:
        for filename in sourceFiles:
            f.write(fileNodeTemplate % filename)
    print
    # In the project but not on disk:
    deadFiles = projectFilesSet - tessFiles
    print "%2d Dead files (in Project but not in %s" % (len(deadFiles),
                                                        tessAbsDir)
    sortedList = list(deadFiles)
    sortedList.sort()
    for filename in sortedList:
        print " %s " % filename
# ====================================================================
def tessReport(tessDir):
    """Report summary stats on "sub-library" files and libtesseract Project file.

    Prints a per-directory table of header/source counts for the files on
    disk, then totals for the files listed in the .vcproj.
    """
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 also strips the path separator that follows the root prefix.
    nTrimChars = len(tessAbsDir)+1
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    print 'Summary stats for "%s" library directories' % tessAbsDir
    # Per-directory Counter of file extensions (without the dot).
    folderCounters = {}
    for tessFile in tessFiles:
        tessFile = tessFile.lower()
        folder, head = os.path.split(tessFile)
        file, ext = os.path.splitext(head)
        typeCounter = folderCounters.setdefault(folder, collections.Counter())
        typeCounter[ext[1:]] += 1
    # NOTE(review): Python 2 only -- keys() must be a list for .sort();
    # under Python 3 this would be sorted(folderCounters).
    folders = folderCounters.keys()
    folders.sort()
    totalFiles = 0
    totalH = 0
    totalCPP = 0
    totalOther = 0
    print
    print " total h cpp"
    print " ----- --- ---"
    for folder in folders:
        counters = folderCounters[folder]
        nHFiles = counters['h']
        nCPPFiles = counters['cpp']
        total = nHFiles + nCPPFiles
        totalFiles += total
        totalH += nHFiles
        totalCPP += nCPPFiles
        print " %5d %3d %3d %s" % (total, nHFiles, nCPPFiles, folder)
    print " ----- --- ---"
    print " %5d %3d %3d" % (totalFiles, totalH, totalCPP)
    print
    print 'Summary stats for VS2008 Project "%s"' % libProjectFile
    print " %5d %s" %(len(projectHFiles), "Header files")
    print " %5d %s" % (len(projectCFiles), "Source files")
    print " %5d %s" % (len(projectRFiles), "Resource files")
    print " -----"
    print " %5d" % (len(projectHFiles) + len(projectCFiles) + len(projectRFiles), )
# ====================================================================
def copyIncludes(fileSet, description, tessDir, includeDir):
    """Copy a set of tessDir-relative files into includeDir.

    Missing source files are reported (and collected) instead of
    aborting the copy; shutil.copy2 preserves file metadata.  Prints are
    parenthesized so output is identical on Python 2 and valid on 3.
    """
    print("")
    print('Copying libtesseract "%s" headers to %s' % (description, includeDir))
    print("")
    count = 0
    errList = []
    # sorted() replaces the manual list(...)/.sort() pair.
    for includeFile in sorted(fileSet):
        filepath = os.path.join(tessDir, includeFile)
        if os.path.isfile(filepath):
            shutil.copy2(filepath, includeDir)
            print("Copied: %s" % includeFile)
            count += 1
        else:
            print('***Error: "%s" doesn\'t exist"' % filepath)
            errList.append(filepath)
    print('%d header files successfully copied to "%s"' % (count, includeDir))
    if errList:
        # Bug fix: the %d placeholder had no argument, so the literal
        # "%d" was printed instead of the count.
        print("The following %d files were not copied:" % len(errList))
        for filepath in errList:
            print(" %s" % filepath)
def tessCopy(tessDir, includeDir):
    '''Copy all "public" libtesseract Project header files to include directory.
    Preserves directory hierarchy.

    Creates includeDir/tesseract (aborting if a plain file of that name
    exists), copies the selected header groups into it flat, and copies
    the extra .vsprops files into includeDir itself.
    '''
    # Headers needed by baseapi.h users.
    baseIncludeSet = {
        r"api\baseapi.h",
        r"api\capi.h",
        r"api\apitypes.h",
        r"ccstruct\publictypes.h",
        r"ccmain\thresholder.h",
        r"ccutil\host.h",
        r"ccutil\basedir.h",
        r"ccutil\tesscallback.h",
        r"ccutil\unichar.h",
        r"ccutil\platform.h",
        }
    # Headers pulled in by strngs.h.
    strngIncludeSet = {
        r"ccutil\strngs.h",
        r"ccutil\memry.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\errcode.h",
        r"ccutil\fileerr.h",
        #r"ccutil\genericvector.h",
        }
    # Headers pulled in by the result-iterator API.
    resultIteratorIncludeSet = {
        r"ccmain\ltrresultiterator.h",
        r"ccmain\pageiterator.h",
        r"ccmain\resultiterator.h",
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        r"ccutil\params.h",
        r"ccutil\unicharmap.h",
        r"ccutil\unicharset.h",
        }
    # Headers pulled in by genericvector.h (currently unused selection).
    genericVectorIncludeSet = {
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        }
    # Headers pulled in by blobs.h (currently unused selection).
    blobsIncludeSet = {
        r"ccstruct\blobs.h",
        r"ccstruct\rect.h",
        r"ccstruct\points.h",
        r"ccstruct\ipoints.h",
        r"ccutil\elst.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\lsterr.h",
        r"ccutil\ndminx.h",
        r"ccutil\tprintf.h",
        r"ccutil\params.h",
        r"viewer\scrollview.h",
        r"ccstruct\vecfuncs.h",
        }
    # Non-header build files copied to includeDir itself.
    extraFilesSet = {
        #r"vs2008\include\stdint.h",
        r"vs2008\include\leptonica_versionnumbers.vsprops",
        r"vs2008\include\tesseract_versionnumbers.vsprops",
        }
    tessIncludeDir = os.path.join(includeDir, "tesseract")
    if os.path.isfile(tessIncludeDir):
        print 'Aborting: "%s" is a file not a directory.' % tessIncludeDir
        return
    if not os.path.exists(tessIncludeDir):
        os.mkdir(tessIncludeDir)
    #fileSet = baseIncludeSet | strngIncludeSet | genericVectorIncludeSet | blobsIncludeSet
    fileSet = baseIncludeSet | strngIncludeSet | resultIteratorIncludeSet
    copyIncludes(fileSet, "public", tessDir, tessIncludeDir)
    copyIncludes(extraFilesSet, "extra", tessDir, includeDir)
# ====================================================================
def tessClean(tessDir):
    '''Clean vs2008 folder of all build directories and certain temp files.

    Interactive: asks for confirmation first, then offers a dry-run
    ("list only") mode.  Removes build output directories anywhere under
    vs2008, non-whitelisted files at the vs2008 top level, and editor/IDE
    temp files (.suo/.ncb/.user/backup "~") everywhere below it.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    vs2008AbsDir = os.path.abspath(vs2008Dir)
    answer = raw_input(
        'Are you sure you want to clean the\n "%s" folder (Yes/No) [No]? ' %
        vs2008AbsDir)
    # Anything but an explicit "yes" aborts (default is No).
    if answer.lower() not in ("yes",):
        return
    answer = raw_input('Only list the items to be deleted (Yes/No) [Yes]? ')
    answer = answer.strip()
    # Default is listing only; deletion requires an explicit "no".
    listOnly = answer.lower() not in ("no",)
    for rootDir, dirs, files in os.walk(vs2008AbsDir):
        for buildDir in ("LIB_Release", "LIB_Debug", "DLL_Release", "DLL_Debug"):
            if buildDir in dirs:
                # Removing from dirs both marks it for deletion and
                # prevents os.walk from descending into it.
                dirs.remove(buildDir)
                absBuildDir = os.path.join(rootDir, buildDir)
                if listOnly:
                    print "Would remove: %s" % absBuildDir
                else:
                    print "Removing: %s" % absBuildDir
                    shutil.rmtree(absBuildDir)
        if rootDir == vs2008AbsDir:
            # At the top level keep only the whitelisted project files.
            for file in files:
                if file.lower() not in ("tesseract.sln",
                                        "tesshelper.py",
                                        "readme.txt"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
        else:
            # In subdirectories remove IDE temp files and "~" backups.
            for file in files:
                root, ext = os.path.splitext(file)
                if ext.lower() in (".suo",
                                   ".ncb",
                                   ".user",
                                   ) or (
                    len(ext)>0 and ext[-1] == "~"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
# ====================================================================
def validateTessDir(tessDir):
    """Validate a tesseract installation directory for argparse.

    Returns tessDir unchanged when it exists and contains the expected
    libtesseract project file; otherwise raises
    argparse.ArgumentTypeError so argparse reports a usage error.
    """
    if os.path.isdir(tessDir):
        projFile = os.path.join(tessDir, PROJ_SUBDIR, PROJFILE)
        if os.path.isfile(projFile):
            return tessDir
        raise argparse.ArgumentTypeError('Project file "%s" doesn\'t exist.' % projFile)
    raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % tessDir)
def validateDir(dir):
    """Validate an include directory for argparse.

    The directory must exist and its final path component must be named
    "include" (case-insensitive).  Returns the argument unchanged on
    success; raises argparse.ArgumentTypeError otherwise.
    """
    if not os.path.isdir(dir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % dir)
    # basename of the absolute path == second half of os.path.split().
    tail = os.path.basename(os.path.abspath(dir))
    if tail.lower() == "include":
        return dir
    raise argparse.ArgumentTypeError('Include directory "%s" must be named "include".' % tail)
def main ():
    """Parse the command line and dispatch to the selected subcommand.

    Subcommands: compare (default), report, copy, clean.  Each subcommand
    stores its handler function via set_defaults(func=...), so dispatch is
    a single call at the end.  Relies on module-level epilogStr, VERSION,
    tessCompare and tessReport defined elsewhere in this file.
    """
    parser = argparse.ArgumentParser(
        epilog=epilogStr,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + VERSION)
    parser.add_argument('tessDir', type=validateTessDir,
                        help="tesseract installation directory")
    subparsers = parser.add_subparsers(
        dest="subparser_name",
        title="Commands")
    parser_changes = subparsers.add_parser('compare',
                                           help="compare libtesseract Project with tessDir")
    parser_changes.set_defaults(func=tessCompare)
    parser_report = subparsers.add_parser('report',
                                          help="report libtesseract summary stats")
    parser_report.set_defaults(func=tessReport)
    parser_copy = subparsers.add_parser('copy',
                                        help="copy public libtesseract header files to includeDir")
    parser_copy.add_argument('includeDir', type=validateDir,
                             help="Directory to copy header files to.")
    parser_copy.set_defaults(func=tessCopy)
    parser_clean = subparsers.add_parser('clean',
                                         help="clean vs2008 folder of build folders and .user files")
    parser_clean.set_defaults(func=tessClean)
    #kludge because argparse has no ability to set default subparser
    # (with only tessDir given, pretend the user typed "compare")
    if (len(sys.argv) == 2):
        sys.argv.append("compare")
    args = parser.parse_args()
    #handle commands
    # Only the copy command takes a second positional argument.
    if args.func == tessCopy:
        args.func(args.tessDir, args.includeDir)
    else:
        args.func(args.tessDir)
# Standard script entry point.
if __name__ == '__main__' :
    main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
TESSDATA_PREFIX = "../"
if sys.platform == "win32":
libname = libpath_w + "libtesseract302.dll"
libname_alt = "libtesseract302.dll"
os.environ["PATH"] += os.pathsep + libpath_w
else:
libname = libpath + "libtesseract.so.3.0.2"
libname_alt = "libtesseract.so.3"
try:
tesseract = ctypes.cdll.LoadLibrary(libname)
except:
try:
tesseract = ctypes.cdll.LoadLibrary(libname_alt)
except WindowsError, err:
print("Trying to load '%s'..." % libname)
print("Trying to load '%s'..." % libname_alt)
print(err)
exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
print("Found tesseract-ocr library version %s." % tesseract_version)
print("C-API is present only in version 3.02!")
exit(2)
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
tesseract.TessBaseAPIDelete(api)
print("Could not initialize tesseract.\n")
exit(3)
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| Python |
# Django settings for #Freedom99 project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('root', 'c-x@email.cz'),
)
MANAGERS = ADMINS

DATABASE_ENGINE = 'sqlite3'    # 'postgresql_psycopg2', 'postgresql', 'mysql', '' or 'oracle'.
DATABASE_NAME = 'db'           # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Prague'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'cs-cz'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
MEDIA_ROOT = 'static'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'Do_not_fuck_with_us_at_this_moment!_We_now_this_is_public_so..!'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'freedom99.urls'

# Fix: this setting must be a tuple (or list) of directories.  The original
# value ('templates') was missing the trailing comma, which makes it a
# plain string rather than a one-element tuple.
TEMPLATE_DIRS = (
    'templates',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'articles',
    'linkovac',
    'django.contrib.admin',
)
| Python |
from django.db import models
#Waasxasx! | Python |
# Create your views here.
| Python |
#! /usr/bin/python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse
from django.template import Context, loader
__author__="c-x"
__NAME__ = "index file for Freedom99 web portal"
__description__ = ""
__version__ = "0.0"
__contact__ = {
'email' : 'c-x@email.cz',
'jabber' : 'c-x@jabber.cz || jabber.org || gajim.org',
'website' : 'SFPS-Tecora.biz'}
def main(request):
    """Render the site's index page with an empty template context."""
    template = loader.get_template('index.html')
    context = Context({})
    return HttpResponse(template.render(context))
#!/usr/bin/env python
from django.core.management import execute_manager
# Standard Django 1.x manage.py boilerplate: import the sibling settings
# module and hand control to Django's command dispatcher.
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    (r'^admin/(.*)', admin.site.root), # Automatic admin interface
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': '/media/vault/pyScripts/freedom99/static/'}), # Folder for static content such as images
#    (r'^article/$', 'freedom99.articles.views.list'), # Article list view
#    (r'^article/(?P<articleID>\d+)/$', "freedom99.articles.views.detail"), # Article detail view
#    (r'^article/(?P<articleID>\d+)/Addcomment/$', "freedom99.articles.views.AddComment"), # Page for adding comments
#    (r'^linkovac/AddLink/$', "freedom99.linkovac.views.AddLink"), # Page for adding links
#    (r'^linkovac/$', 'freedom99.linkovac.views.list'), # List of all links
#    (r'^linkovac/(?P<linkID>\d+)/$', "freedom99.linkovac.views.detail"), # Link detail view
    (r'^$', 'freedom99.index.main'), # Main page
)
| Python |
from django.db import models
class Article(models.Model):
    """An article published on the site, optionally hidden via `public`."""
    title = models.CharField(max_length=200)
    # author = models. blabla
    date = models.DateTimeField("date")
    # NOTE(review): max_length on a TextField has no database-level effect
    # in Django -- confirm whether a limit was really intended here.
    text = models.TextField(max_length=200000)
    # Whether the article should be visible to visitors.
    public = models.BooleanField()
    def __unicode__(self):
        # Human-readable label used by the admin and shell.
        return self.title
class Comment(models.Model):
    """A visitor comment attached to a single Article."""
    article = models.ForeignKey(Article)
    # Display name of the commenter.
    name = models.CharField(max_length=20)
    # Free-form contact string supplied by the commenter.
    contact = models.CharField(max_length=150)
    text = models.TextField(max_length=2000)
    def __unicode__(self):
        # Human-readable label used by the admin and shell.
        return self.name
from django.http import HttpResponse
from freedom99.articles.models import Article, Comment
from django.template import Context, loader
from django.http import Http404
from django.shortcuts import get_object_or_404
def list(request):
    """Render the list of all articles.

    Note: the view is deliberately named `list` because URLconf entries
    refer to it by that name.
    """
    articles = Article.objects.all()
    template = loader.get_template('articleList.html')
    context = Context({'list': articles})
    return HttpResponse(template.render(context))
def detail(request, articleID):
    """Render one article plus its comments; 404 when the id is unknown."""
    article = get_object_or_404(Article, id=articleID)
    context = Context({
        "article": article,
        "comments": Comment.objects.filter(article=article),
    })
    template = loader.get_template('article.html')
    return HttpResponse(template.render(context))
def AddComment(request, articleID):
    """Store a comment submitted for an article, then re-render the article.

    Raises Http404 for an unknown article id; a missing POST field
    propagates as KeyError (unchanged from the original behavior).
    """
    article = get_object_or_404(Article, id=articleID)
    form = request.POST
    new_comment = Comment(article=article, contact=form["contact"],
                          name=form["name"], text=form["text"])
    new_comment.save()
    return detail(request, articleID)
| Python |
from freedom99.articles.models import Article, Comment
from django.contrib import admin
# Expose both article models in the Django admin site.
admin.site.register(Article)
admin.site.register(Comment)
| Python |
__author__="c-x"
__date__ ="$6.8.2009 0:42:34$"
from setuptools import setup,find_packages
setup (
name = 'freedom99',
version = '0.1',
packages = find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires=['foo>=3'],
# Fill in these to make your Egg ready for upload to
# PyPI
author = 'c-x',
author_email = '',
summary = 'Just another Python package for the cheese shop',
url = '',
license = '',
long_description= 'Long description of the package',
# could also include long_description, download_url, classifiers, etc.
) | Python |
#! /usr/bin/python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse
from django.template import Context, loader
__author__="c-x"
__NAME__ = "index file for Freedom99 web portal"
__description__ = ""
__version__ = "0.0"
__contact__ = {
'email' : 'c-x@email.cz',
'jabber' : 'c-x@jabber.cz || jabber.org || gajim.org',
'website' : 'SFPS-Tecora.biz'}
def main(request):
    """Render the site's index page with an empty template context."""
    template = loader.get_template('index.html')
    context = Context({})
    return HttpResponse(template.render(context))
#!/usr/bin/env python
from django.core.management import execute_manager
# Standard Django 1.x manage.py boilerplate: import the sibling settings
# module and hand control to Django's command dispatcher.
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB TECHNOLOGIES BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import unittest
import sys, logging
import freebase
import random
import getlogindetails
from freebase.api import HTTPMetawebSession, MetawebError
# Placeholder credentials; when left at these defaults they are replaced
# interactively (or from a cached file) by getlogindetails.main() below.
USERNAME = 'username'
PASSWORD = 'password'
# All tests run against the sandbox host so they cannot damage live data.
API_HOST = 'http://sandbox-freebase.com'
TEST_QUERY = {'id': 'null', 'name': 'Sting'}
s = HTTPMetawebSession(API_HOST)
if USERNAME == "username" and PASSWORD == "password":
    USERNAME, PASSWORD = getlogindetails.main()
s.login(USERNAME, PASSWORD)
class TestFreebase(unittest.TestCase):
    """Integration tests for the freebase-python client.

    NOTE(review): every test talks to the live sandbox-freebase.com
    service over HTTP and most require the module-level USERNAME/PASSWORD
    credentials, so the suite cannot run offline.  The py2 idioms
    (has_key, assert_) are kept as-is.
    """
    def test_freebase_dot_login_logout(self):
        # Module-level convenience API: login/user_info/loggedin/logout.
        freebase.login(username=USERNAME, password=PASSWORD)
        self.assertNotEqual(freebase.user_info(), None)
        self.assertEqual(freebase.loggedin(), True)
        freebase.logout()
        self.assertRaises(MetawebError, freebase.user_info)
        self.assertEqual(freebase.loggedin(), False)
    def test_login_logout(self):
        # Same flow through an explicit session object.
        mss = HTTPMetawebSession(API_HOST, username=USERNAME,
                                 password=PASSWORD)
        mss.login()
        user_info = mss.user_info()
        self.assertNotEqual(None, user_info)
        self.assertEqual(user_info.code, "/api/status/ok")
        self.assertEqual(mss.loggedin(), True)
        mss.logout()
        self.assertRaises(MetawebError, mss.user_info)
        self.assertEqual(mss.loggedin(), False)
    def test_freebase_dot_read(self):
        query = {'type':'/music/artist','guid':[{}],'name':'Sting', 'album':[{}]}
        result = freebase.mqlread(query)
        self.assertNotEqual(None, result)
        self.assert_(result.has_key('guid'))
        self.assert_(result.has_key('type'))
        self.assert_(result.has_key('name'))
        self.assert_(result.has_key('album'))
        self.assertEqual(type([]), type(result['album']))
        self.assert_(len(result['album']) > 0)
        self.assertEqual( 'Sting', result['name'])
        self.assertEqual('#9202a8c04000641f8000000000092a01', result['guid'][0]['value'])
    def test_freebase_dot_write(self):
        read_query = {'type':'/music/artist','name':'Yanni\'s Cousin Tom', 'id':{}}
        freebase.sandbox.login(username=USERNAME, password=PASSWORD)
        result = freebase.sandbox.mqlread(read_query)
        self.assertEqual(None, result)
        write_query = {'create':'unless_exists', 'type':'/music/artist','name':'Yanni'}
        write_result = freebase.sandbox.mqlwrite(write_query)
        self.assertNotEqual(None, write_result)
        self.assert_(write_result.has_key('create'))
        self.assert_(write_result.has_key('type'))
        self.assert_(write_result.has_key('name'))
        self.assertEqual('existed', write_result['create'])
        self.assertEqual('Yanni', write_result['name'])
        self.assertEqual('/music/artist', write_result['type'])
    def test_read(self):
        query = {'type':'/music/artist','guid':[{}],'name':'Sting', 'album':[{}]}
        mss = HTTPMetawebSession(API_HOST)
        result = mss.mqlread(query)
        self.assertNotEqual(None, result)
        self.assert_(result.has_key('guid'))
        self.assert_(result.has_key('type'))
        self.assert_(result.has_key('name'))
        self.assert_(result.has_key('album'))
        self.assertEqual(type([]), type(result['album']))
        self.assert_(len(result['album']) > 0)
        self.assertEqual( 'Sting', result['name'])
        self.assertEqual('#9202a8c04000641f8000000000092a01', result['guid'][0]['value'])
    def test_mqlreaditer(self):
        filmq = [{'id': None,
                  'initial_release_date>=': '2009',
                  'name': None,
                  'type': '/film/film'
                  }]
        r0 = freebase.mqlreaditer(filmq)
        r1 = freebase.mqlreaditer(filmq[0]) # The difference between [{}] and []. mqlreaditer should be able to handle both
        self.assertNotEqual(r0, None)
        self.assertEqual([a for a in r0], [b for b in r1])
        # and let's test it for mqlread, just in case
        # actually, for mqlread, it must be [{}], because there are lots of elements
        m0 = freebase.mqlread(filmq)
        m1 = lambda : freebase.mqlread(filmq[0])
        self.assertRaises(MetawebError, m1)
        self.assertNotEqual(m0, None)
    def test_ridiculously_long_write(self):
        # Queries longer than 1 KB must still round-trip correctly.
        q = [{
            "id":None,
            "id|=":["/guid/9202a8c04000641f80000000000" + str(a) for a in range(10000,10320)]
        }]
        self.assert_(len(str(q)), 1024)
        self.assertNotEqual(len(freebase.mqlread(q)), 0)
    def test_write(self):
        read_query = {'type':'/music/artist','name':'Yanni\'s Cousin Tom', 'id':{}}
        mss = HTTPMetawebSession(API_HOST, username=USERNAME, password=PASSWORD)
        result = mss.mqlread(read_query)
        self.assertEqual(None, result)
        write_query = {'create':'unless_exists', 'type':'/music/artist','name':'Yanni'}
        mss.login()
        write_result = mss.mqlwrite(write_query)
        self.assertNotEqual(None, write_result)
        self.assert_(write_result.has_key('create'))
        self.assert_(write_result.has_key('type'))
        self.assert_(write_result.has_key('name'))
        self.assertEqual('existed', write_result['create'])
        self.assertEqual('Yanni', write_result['name'])
        self.assertEqual('/music/artist', write_result['type'])
    def test_trans_blurb(self):
        kurt = "/en/kurt_vonnegut"
        blurb = freebase.blurb(kurt)
        self.assert_(blurb.startswith("Kurt Vonnegut"))
        self.assertNotEqual(len(blurb), 0)
        blurb14 = freebase.blurb(kurt, maxlength=14)
        blurb57 = freebase.blurb(kurt, maxlength=57)
        self.assertNotEqual(len(blurb14), len(blurb57))
        blurbpar = freebase.blurb(kurt, break_paragraphs=True, maxlength=20000)
        blurbnopar = freebase.blurb(kurt, break_paragraphs=False, maxlength=20000)
        # self.assertNotEqual(blurbpar, blurbnopar) this doesn't work unless I get a good example
        # of an article with paragraphs.
    def test_trans_raw(self):
        kurt = "/en/kurt_vonnegut"
        # raw() only accepts content ids, not topic ids.
        self.assertRaises(MetawebError, lambda: freebase.raw(kurt))
        r = freebase.mqlread({"id":kurt, "/common/topic/article":[{"id":None, "optional":True, "limit":1}]})
        raw = freebase.raw(r["/common/topic/article"][0].id)
        self.assertNotEqual(len(raw), 0)
        # trans should also work
        trans = freebase.trans(r["/common/topic/article"][0].id)
        self.assertEqual(trans, raw)
    def test_unsafe(self):
        kurt = "/en/kurt_vonnegut"
        self.assertRaises(MetawebError, lambda: freebase.unsafe(kurt))
        r = freebase.mqlread({"id":kurt, "/common/topic/article":[{"id":None, "optional":True, "limit":1}]})
        unsafe = freebase.unsafe(r["/common/topic/article"][0].id)
        self.assertNotEqual(len(unsafe), 0)
        # we need an example of getting unsafe data
        # ...
    def test_trans_image_thumb(self):
        kurt = "/en/kurt_vonnegut"
        r = freebase.mqlread({"id":kurt, "/common/topic/image":[{"id":None, "optional":True, "limit":1}]})
        imageid = r["/common/topic/image"][0].id
        rawimage = freebase.raw(imageid)
        thumbedimage = freebase.image_thumb(imageid, maxheight=99)
        self.assertNotEqual(rawimage, thumbedimage)
    def test_upload(self):
        # Random suffix keeps repeated runs from colliding in the sandbox.
        my_text = "Kurt Vonnegut was an author! " + str(random.random())
        freebase.sandbox.login(USERNAME, PASSWORD)
        response = freebase.sandbox.upload(my_text, "text/plain")
        self.assertEqual(freebase.sandbox.raw(response.id), my_text)
        # since it's text/plain, blurb should also be equal
        self.assertEqual(freebase.sandbox.blurb(response.id), my_text)
    def is_kurt_there(self, results):
        # Helper (not a test): true when a search result is Kurt Vonnegut.
        for result in results:
            if result.name == "Kurt Vonnegut":
                return True
        return False
    def test_search(self):
        r0 = freebase.search("Kurt V")
        self.assertEqual(self.is_kurt_there(r0), True)
        r1 = freebase.search("Kurt V", type=["/location/citytown"])
        self.assertEqual(self.is_kurt_there(r1), False)
        r2 = freebase.search("Kurt V", type=["/location/citytown", "/music/artist"])
        self.assertEqual(self.is_kurt_there(r2), False)
        self.assertNotEqual(len(r0), len(r1))
        self.assertNotEqual(len(r0), len(r2))
        self.assertNotEqual(len(r1), len(r2))
    def test_touch(self):
        # this one's hard to test... let's just make sure it works.
        freebase.touch()
    def test_geosearch(self):
        self.assertRaises(Exception, freebase.geosearch)
        r0 = freebase.geosearch(location="/en/california")
        self.assertNotEqual(len(r0), 0)
        json = freebase.geosearch(location="/en/san_francisco", format="json")
        kml = freebase.geosearch(location="/en/san_francisco", format="kml")
        self.assertNotEqual(json, kml)
    def test_uri_submit(self):
        # test a pdf
        r = freebase.sandbox.uri_submit("http://www.jcbl.or.jp/game/nec/necfest07/nec2007_data/HayashiMiyake.pdf", content_type="application/pdf")
        self.assertEqual(r['/type/content/media_type'], 'application/pdf')
        # test an image
        r = freebase.sandbox.uri_submit("http://datamob.org/media/detail_freebase.png")
        self.assertEqual(r['/type/content/media_type'], 'image/png')
    def test_version(self):
        r = freebase.version()
        self.assertNotEqual(len(r), 0)
    def test_status(self):
        r = freebase.status()
        self.assertNotEqual(len(r), 0)
        self.assertEqual(r["status"], u"200 OK")
    def test_private_domains(self):
        # Create a uniquely-named private domain, verify its metadata,
        # delete it, and verify the deletion really emptied it out.
        freebase.sandbox.login(username=USERNAME, password=PASSWORD)
        r = freebase.sandbox.create_private_domain("superfly" + str(int(random.random() * 1e10)), "Superfly!")
        q = {"id" : r["domain_id"], "*" : None}
        info = freebase.sandbox.mqlread(q)
        self.assertEqual(info["type"], ["/type/domain"])
        self.assertNotEqual(len(info["key"]), 0)
        self.assertEqual(info["attribution"], info["creator"])
        freebase.sandbox.delete_private_domain(info["key"][0])
        deleted = freebase.sandbox.mqlread(q)
        self.assertEqual(len(deleted["key"]), 0)
        self.assertEqual(len(deleted["type"]), 0)
        self.assertEqual(deleted["name"], None)
        self.assertEqual(deleted["creator"], info["attribution"])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| Python |
import os
import os.path
import getpass
import freebase
from freebase.api.session import MetawebError
passwordfile = "test/.password.txt"
def main(create_password_file=False):
    """Obtain (and validate) freebase credentials for the test suite.

    If no cached password file exists, prompt interactively, verify the
    credentials by logging in, and optionally cache them.  Otherwise read
    and re-verify the cached credentials.  Returns (USERNAME, PASSWORD).
    """
    USERNAME, PASSWORD = "", ""
    if not os.path.isfile(passwordfile):
        print "In order to run the tests, we need to use a valid freebase username and password"
        USERNAME = raw_input("Please enter your username: ")
        # getpass may warn when echo can't be disabled; fall back to raw_input.
        try:
            PASSWORD = getpass.getpass("Please enter your password: ")
        except getpass.GetPassWarning:
            PASSWORD = raw_input("Please enter your password: ")
        # A failed login raises here, before anything is cached.
        freebase.login(USERNAME, PASSWORD)
        print "Thanks!"
        if create_password_file:
            writepassword(passwordfile, USERNAME, PASSWORD)
    else:
        # NOTE(review): assumes the cache holds exactly "user\npassword"
        # with no trailing newline -- a trailing newline breaks the unpack.
        pf = open(passwordfile, "r")
        USERNAME, PASSWORD = pf.read().split("\n")
        pf.close()
        try:
            freebase.login(USERNAME, PASSWORD)
        except MetawebError, me:
            print "The username/password in your .password.txt file are incorrect"
            raise me
    return USERNAME, PASSWORD
def writepassword(passwordfile, username, password):
    """Persist *username* and *password* to *passwordfile*, newline-separated.

    The credentials are written in plain text; the test runner is expected
    to delete the file when it is done with it.
    """
    # 'with' guarantees the handle is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open(passwordfile, "w") as fh:
        fh.write(username + "\n" + password)
import unittest
import os
import os.path
import freebase
import getlogindetails
def main():
    """Build the combined freebase test suite and return it.

    Used as the setuptools ``test_suite`` entry point.  Ensures login
    credentials exist (creating a temporary cache file if needed), loads
    the three test modules, and removes the cache afterwards.
    """
    created = False
    passwordfile = "test/.password.txt"
    # setup password stuff
    if not os.path.isfile(passwordfile):
        created = True
        USERNAME, PASSWORD = getlogindetails.main(create_password_file=True)
    # Second call reads (and re-verifies) the now-cached credentials.
    USERNAME, PASSWORD = getlogindetails.main()
    # run tests
    import test_freebase
    import test_schema_manipulation
    import test_hardcore_schema_manipulation
    s1 = unittest.TestLoader().loadTestsFromTestCase(test_freebase.TestFreebase)
    s2 = unittest.TestLoader().loadTestsFromTestCase(test_schema_manipulation.TestSchemaManipulation)
    s3 = unittest.TestLoader().loadTestsFromTestCase(test_hardcore_schema_manipulation.TestHardcoreSchemaManipulation)
    # This is very strange. If you try to do [s1, s2], thereby running freebase tests first,
    # two tests in the testschemamanipulation file fail! They fail because of caching issues; if
    # I check on freebase, the changes are actually there. I have racked my mind for explanations.
    # this is such a hack, and I'm sorry. The tests run 100% correct individually.
    anotherrun = unittest.TestSuite([s1, s3, s2])
    #run = unittest.TestSuite(suites)
    # delete password stuff
    if created: os.remove(passwordfile)
    return anotherrun
# NOTE(review): running this directly only *builds* the suite; main()
# returns it without executing it (setuptools runs the returned suite).
if __name__ == '__main__':
    main()
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Extra install-time requirements for a JSON implementation.
json = []
# if jsonlib2 is already installed, then we're fine
# we don't need anything else
try:
    import jsonlib2
except ImportError:
    # if python version < 2.6, require simplejson
    # if python version >= 2.6, it comes with json
    major, minor, micro, releaselevel, serial = sys.version_info
    if major <= 2 and minor < 6:
        json.append("simplejson")
setup(
name='freebase',
version='1.01',
author='Nick Thompson',
author_email='nix@metaweb.com',
maintainer_email='developers@freebase.com',
license='BSD',
url='http://code.google.com/p/freebase-python/',
description='Python client library for the freebase.com service',
long_description="""A Python library providing a convenient
wrapper around the freebase.com service api, as well as some
utility functions helpful in writing clients of the api.""",
packages=['freebase', 'freebase.api', 'freebase.fcl'],
entry_points = {
'console_scripts': [
'fcl = freebase.fcl.fcl:main',
'fb_save_base = freebase.schema_cmd:fb_save_base',
'fb_save_type = freebase.schema_cmd:fb_save_type',
'fb_restore = freebase.schema_cmd:fb_restore'
]
},
test_suite = "test.runtests.main",
install_requires=[] + json,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python |
#========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB TECHNOLOGIES BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# This is the full "metaweb.py" module from the Metaweb API documentation
#
# In the documentation, each function is presented as a separate
# example. This is the whole file.
#
# If you find any errors or have suggestions for improving this module,
# send them to the Freebase developers mailing list: developers@freebase.com
# You can subscribe to the mailing list at http://lists.freebase.com/
#
import httplib
import urllib # URL encoding
import urllib2 # Higher-level URL content fetching
import simplejson # JSON serialization and parsing
import cookielib # Cookie handling
import os
#
# When experimenting, use the sandbox.freebase.com service.
# Every Monday, sandbox.freebase.com is erased and it is updated
# with a fresh copy of data from www.freebase.com. This makes
# it an ideal place to experiment.
#
# Service endpoints and module-wide defaults.  These names are read (and
# captured as default argument values) by the functions below, so they are
# part of this module's public interface.
host = 'sandbox.freebase.com'            # The Metaweb host
readservice = '/api/service/mqlread'     # Path to mqlread service
loginservice = '/api/account/login'      # Path to login service
writeservice = '/api/service/mqlwrite'   # Path to mqlwrite service
uploadservice = '/api/service/upload'    # Path to upload service
searchservice = '/api/service/search'    # Path to search service
credentials = None   # default credential from login()
escape = False       # default escape, set to 'html' for HTML escaping
permission = None    # default permission used when creating new objects
debug = False        # default debug setting
# Install a CookieProcessor so every urllib2 request made through this
# module sends and stores cookies automatically.
# NOTE(review): os.environ["HOME"] raises KeyError when HOME is unset
# (e.g. on Windows) -- confirm supported platforms.
cookiefile = os.path.join(os.environ["HOME"], ".metaweb.cookies.txt")
cookiejar = cookielib.LWPCookieJar()
if os.path.isfile(cookiefile):
    cookiejar.load(cookiefile)
urllib2.install_opener(
    urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookiejar)))
# If anything goes wrong when talking to a Metaweb service, we raise MQLError.
class MQLError(Exception):
    """Raised when a Metaweb service reports an error for a request."""

    def __init__(self, value):
        # Keep the raw error payload around for callers that want it.
        self.value = value

    def __str__(self):
        # Render the repr of the payload, matching the historical format.
        return repr(self.value)
# Submit the MQL query q and return the result as a Python object.
# If authentication credentials are supplied, use them in a cookie.
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlread returns an HTTP status code other than 200 (which should not happen).
def read(q, credentials=credentials, escape=escape):
    """POST the MQL query *q* to mqlread and return its 'result' payload.

    credentials -- opaque cookie string from login(); sent only if given
                   (mqlread authentication is a temporary restriction).
    escape      -- False disables HTML escaping; 'html' keeps the server
                   default (nothing is sent in that case).

    Raises MQLError when the response envelope reports a non-ok status,
    urllib2.HTTPError on a non-200 HTTP response (should not happen).
    """
    # Wrap the query in the mqlread envelope.
    envelope = {'query': q}
    if escape != 'html':
        # Send an explicit escape value; False turns escaping off.
        envelope['escape'] = escape if escape else False
    # The POST body is form-encoded: query=<json envelope>.
    body = urllib.urlencode({'query': simplejson.dumps(envelope)})
    request = urllib2.Request('http://%s%s' % (host, readservice))
    request.add_header('Content-type', 'application/x-www-form-urlencoded')
    # Pass authentication credentials, if any, as a cookie.
    if credentials:
        request.add_header('Cookie', credentials)
    # Attaching a body automatically turns this request into a POST.
    request.add_data(body)
    f = urllib2.urlopen(request)
    inner = simplejson.load(f)
    # HTTP-level problems raise urllib2.HTTPError above; query-level
    # problems arrive as an error status inside the envelope, which we
    # surface as MQLError.
    if not inner['code'].startswith('/api/status/ok'):
        if debug:
            print(q)
            print(inner)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    return inner['result']
# Submit the MQL query q and return the result as a Python object
# This function behaves like read() above, but uses cursors so that
# it works even for very large result sets. See also the cursor class below.
def readall(q, credentials=credentials, escape=escape):
    """Like read(), but pages through the whole result set using cursors.

    Returns the concatenated list of results; raises MQLError if any
    batch's envelope reports a non-ok status.
    """
    # This is the start of the mqlread URL.
    # We just need to append the envelope to it
    url = 'http://%s%s' % (host, readservice)
    # The query and most of the envelope are constant. We just need to append
    # the encoded cursor value and some closing braces to this prefix string
    jsonq = simplejson.dumps(q)
    # Add escape if needed.  The envelope is assembled by raw string
    # concatenation below, so the escape clause is spliced in as literal JSON.
    if escape != 'html':
        jsonq += ',"escape":' + ('false' if not escape else escape)
    cursor = 'true'   # This is the initial value of the cursor
    results = []      # We accumulate results in this array
    # Loop until mqlread tells us there are no more results
    while cursor:
        # append the cursor and the closing braces to the envelope
        envelope = urllib.urlencode({'query': '{"query":' + jsonq + ',"cursor":' + cursor + '}'})
        # Begin an HTTP request for the URL
        req = urllib2.Request(url)
        # The body of the POST request is encoded URL parameters
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Send our authentication credentials, if any, as a cookie.
        # The need for mqlread authentication is a temporary restriction.
        if credentials:
            req.add_header('Cookie', credentials)
        # Use the encoded envelope as the value of the q parameter in the body
        # of the request. Specifying a body automatically makes this a POST.
        req.add_data(envelope)
        # Read and parse the URL contents
        f = urllib2.urlopen(req)    # Open URL
        inner = simplejson.load(f)  # Parse JSON response
        # Raise a MQLError if there were errors
        if not inner['code'].startswith('/api/status/ok'):
            if debug: print q
            if debug: print inner
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = inner['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Append this batch of results to the main array of results.
        results.extend(inner['result'])
        # Finally, get the new value of the cursor for the next iteration
        cursor = inner['cursor']
        if cursor:                       # If it is not false, put it
            cursor = '"' + cursor + '"'  # in quotes as a JSON string
    # Now that we're done with the loop, return the results array
    return results
# Submit multiple MQL queries and return the result as a Python array.
# If authentication credentials are supplied, use them in a cookie.
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlread returns an HTTP status code other than 200 (which should not happen).
def readmulti(queries, credentials=credentials, escape=escape):
    """Submit several MQL queries in one request; return a list of results.

    Each query gets its own envelope keyed "q0", "q1", ... inside an outer
    'queries' envelope.  Raises MQLError if the outer response or any
    per-query envelope reports an error status; urllib2.HTTPError on a
    non-200 HTTP response (should not happen).
    """
    encoded = ""
    for i in range(0, len(queries)):
        # Put the query in an envelope
        envelope = {'query':queries[i]}
        # Add escape if needed
        if escape != 'html':
            envelope['escape'] = False if not escape else escape
        if i > 0:
            encoded += ","
        encoded += '"q%d":%s' % (i, simplejson.dumps(envelope))
    # URL encode the outer envelope
    encoded = urllib.urlencode({'queries': "{" + encoded + "}"})
    # Build the URL and create a Request object for it
    url = 'http://%s%s' % (host, readservice)
    req = urllib2.Request(url)
    # The body of the POST request is encoded URL parameters
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    # Send our authentication credentials, if any, as a cookie.
    # The need for mqlread authentication is a temporary restriction.
    if credentials: req.add_header('Cookie', credentials)
    # Use the encoded envelope as the value of the q parameter in the body
    # of the request. Specifying a body automatically makes this a POST.
    req.add_data(encoded)
    # Now open the URL and parse its JSON content
    f = urllib2.urlopen(req)    # Open the URL
    inner = simplejson.load(f)  # Parse JSON response to an object
    # If anything was wrong with the invocation, mqlread will return an HTTP
    # error, and the code above will raise urllib2.HTTPError.
    # If anything was wrong with the queries, we won't get an HTTP error, but
    # will get an error status code in the response envelope. In this case
    # we raise our own MQLError exception.
    if not inner['code'].startswith('/api/status/ok'):
        if debug: print queries
        if debug: print inner
        if debug: print f.info()['X-Metaweb-Cost']
        if debug: print f.info()['X-Metaweb-TID']
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    # extract the per-query results, checking each inner envelope as well
    results = []
    for i in range(0, len(queries)):
        result = inner["q%d" % i]
        if not result['code'].startswith('/api/status/ok'):
            if debug: print queries[i]
            if debug: print result
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = result['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        results.append(result['result'])
    # If there was no error, then just return the results
    return results
# Submit the specified username and password to the Metaweb login service.
# Return opaque authentication credentials on success.
# Raise MQLError on failure.
def login(username, password):
    """Submit username/password to the Metaweb login service.

    Returns opaque authentication credentials (a cookie string) on
    success.  Raises MQLError on login failure or an unexpected HTTP
    status.
    """
    # Establish a connection to the server and make a request.
    # Note that we use the low-level httplib library instead of urllib2.
    # This allows us to manage cookies explicitly.
    conn = httplib.HTTPConnection(host)
    conn.request('POST',          # POST the request
                 loginservice,    # The URL path /api/account/login
                 # The body of the request: encoded username/password
                 urllib.urlencode({'username':username, 'password':password}),
                 # This header specifies how the body of the post is encoded.
                 {'Content-type': 'application/x-www-form-urlencoded'})
    # Get the response from the server
    response = conn.getresponse()
    if response.status == 200:  # We get HTTP 200 OK even if login fails
        # Parse response body and raise a MQLError if login failed
        body = simplejson.loads(response.read())
        if not body['code'].startswith('/api/status/ok'):
            # BUGFIX: the debug statements previously referenced the
            # undefined names 'inner' and 'f', raising NameError whenever
            # debug was on and login failed; use the parsed body and the
            # httplib response headers instead.
            if debug:
                print(body)
                print(response.getheader('X-Metaweb-Cost'))
                print(response.getheader('X-Metaweb-TID'))
            error = body['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Otherwise return cookies to serve as authentication credentials.
        # The set-cookie header holds one or more cookie specifications,
        # separated by commas. Each specification is a name, an equal
        # sign, a value, and one or more trailing clauses that consist
        # of a semicolon and some metadata. We don't care about the
        # metadata. We just want to return a comma-separated list of
        # name=value pairs.
        cookies = response.getheader('set-cookie').split(',')
        return ';'.join([c[0:c.index(';')] for c in cookies])
    else:  # This should never happen
        raise MQLError('HTTP Error: %d %s' % (response.status, response.reason))
# Submit the MQL write q and return the result as a Python object.
# Authentication credentials are required, obtained from login()
# Raises MQLError if the query was invalid. Raises urllib2.HTTPError if
# mqlwrite returns an HTTP status code other than 200
def write(query, credentials=credentials, escape=escape, permission=permission):
    """Submit the MQL write *query* via mqlwrite; return the result payload.

    Authentication credentials (obtained from login()) are required by the
    service.  Raises MQLError on an error envelope; urllib2.HTTPError on a
    non-200 HTTP response.
    """
    # We're requesting this URL
    req = urllib2.Request('http://%s%s' % (host, writeservice))
    # Send our authentication credentials as a cookie
    if credentials:
        req.add_header('Cookie', credentials)
    # This custom header is required and guards against XSS attacks
    req.add_header('X-Metaweb-Request', 'True')
    # The body of the POST request is encoded URL parameters
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    # Wrap the query object in a query envelope keyed 'qname'
    envelope = {'qname': {'query': query}}
    # Add escape if needed
    if escape != 'html':
        envelope['qname']['escape'] = (False if not escape else escape)
    # Add permissions if needed
    if permission:
        envelope['qname']['use_permission_of'] = permission
    # JSON encode the envelope
    encoded = simplejson.dumps(envelope)
    # Use the encoded envelope as the value of the 'queries' parameter in
    # the body of the request. A body automatically makes this a POST.
    req.add_data(urllib.urlencode({'queries':encoded}))
    # Now do the POST
    f = urllib2.urlopen(req)
    response = simplejson.load(f)  # Parse HTTP response as JSON
    inner = response['qname']      # Open outer envelope; get inner envelope
    # HTTP-level problems raise urllib2.HTTPError above; query-level
    # problems arrive as an error status inside the inner envelope, which
    # we surface as MQLError.
    if not inner['code'].startswith('/api/status/ok'):
        if debug: print query
        if debug: print inner
        if debug: print f.info()['X-Metaweb-Cost']
        if debug: print f.info()['X-Metaweb-TID']
        error = inner['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    # save cookie
    cookiejar.save(cookiefile)
    # If there was no error, then just return the result from the envelope
    return inner['result']
# Upload the specified content (and give it the specified type).
# Return the guid of the /type/content object that represents it.
# The returned guid can be used to retrieve the content with /api/trans/raw.
def upload(content, type, credentials=credentials):
    """Upload *content* with MIME type *type*; return its content id.

    The returned id names the /type/content object that represents the
    upload and can be used to retrieve it with /api/trans/raw.  Raises
    MQLError on an error envelope.
    """
    # This is the URL we POST content to
    url = 'http://%s%s' % (host, uploadservice)
    # Build the HTTP request
    req = urllib2.Request(url, content)          # URL and content to POST
    req.add_header('Content-Type', type)         # Content type header
    if credentials:
        req.add_header('Cookie', credentials)    # Authentication header
    req.add_header('X-Metaweb-Request', 'True')  # Guard against XSS attacks
    f = urllib2.urlopen(req)                     # POST the request
    response = simplejson.load(f)                # Parse the response
    if not response['code'].startswith('/api/status/ok'):
        # BUGFIX: the debug output referenced the undefined name 'inner',
        # raising NameError whenever debug was on; print the parsed
        # response instead.
        if debug:
            print(response)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = response['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    return response['result']['id']              # Extract and return content id
# Search for topics
def search(query, type=None, start=0, limit=0):
    """Search for topics matching *query*; return the result payload.

    type  -- optional type id used to restrict the match.
    start -- paging offset, sent only when positive.
    limit -- maximum number of results, sent only when positive.

    Raises MQLError on an error envelope.
    """
    args = {"query": query}
    if type:
        args["type"] = type
    if start > 0:
        args["start"] = start
    if limit > 0:
        args["limit"] = limit
    # The search service takes a GET request with URL-encoded parameters.
    url = 'http://%s%s?%s' % (host, searchservice, urllib.urlencode(args))
    f = urllib2.urlopen(url)
    response = simplejson.load(f)  # Parse the response
    if not response['code'].startswith('/api/status/ok'):
        # BUGFIX: the debug output referenced the undefined name 'inner',
        # raising NameError whenever debug was on; print the parsed
        # response instead.
        if debug:
            print(query)
            print(response)
            print(f.info()['X-Metaweb-Cost'])
            print(f.info()['X-Metaweb-TID'])
        error = response['messages'][0]
        raise MQLError('%s: %s' % (error['code'], error['message']))
    return response['result']
# Cursor for iterating over large data sets
# For example:
# query = {"name": None, "type":"/type/media_type"}
# for row in metaweb.cursor([query]):
# print row
class cursor:
    """Iterator over large mqlread result sets, fetching batches on demand.

    Example:
        query = {"name": None, "type": "/type/media_type"}
        for row in metaweb.cursor([query]):
            print row

    Uses the Python 2 iterator protocol: next() serves results from the
    current batch and requests the next batch (via the mqlread cursor)
    when the batch runs out.
    """
    def __init__(self, query, credentials=credentials, escape=escape):
        self.query = query
        self.credentials = credentials
        self.index = 0        # position within the current batch
        self.results = []     # current batch of results
        self.cursor = 'true'  # mqlread cursor, kept as a literal JSON token
        self.url = 'http://%s%s' % (host, readservice)
        # Pre-serialize the query; the envelope is assembled by raw string
        # concatenation in next(), so the escape clause is literal JSON.
        self.jsonq = simplejson.dumps(self.query)
        if escape != 'html':
            self.jsonq += ',"escape":' + ('false' if not escape else escape)

    def __iter__(self):
        return self

    def next(self):
        # return the next value from the already-fetched batch, if any
        if self.index < len(self.results):
            result = self.results[self.index]
            self.index = self.index + 1
            return result
        # no batch left and no cursor: iteration is complete
        if not self.cursor:
            raise StopIteration
        # append the cursor and the closing braces to the envelope
        envelope = urllib.urlencode({'query': '{"query":' + self.jsonq + ',"cursor":' + self.cursor + '}'})
        # Begin an HTTP request for the URL
        req = urllib2.Request(self.url)
        # The body of the POST request is encoded URL parameters
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        # Send our authentication credentials, if any, as a cookie.
        # The need for mqlread authentication is a temporary restriction.
        if self.credentials: req.add_header('Cookie', self.credentials)
        # Use the encoded envelope as the value of the q parameter in the body
        # of the request. Specifying a body automatically makes this a POST.
        req.add_data(envelope)
        # Read and parse the URL contents
        f = urllib2.urlopen(req)    # Open URL
        inner = simplejson.load(f)  # Parse JSON response
        # Raise a MQLError if there were errors
        if not inner['code'].startswith('/api/status/ok'):
            if debug: print self.query
            if debug: print inner
            if debug: print f.info()['X-Metaweb-Cost']
            if debug: print f.info()['X-Metaweb-TID']
            error = inner['messages'][0]
            raise MQLError('%s: %s' % (error['code'], error['message']))
        # Remember the next cursor
        self.cursor = inner['cursor']
        if self.cursor:                            # If it is not false, put it
            self.cursor = '"' + self.cursor + '"'  # in quotes as a JSON string
        # Replace the exhausted batch with the newly fetched results.
        self.results = inner['result']
        if len(self.results) == 0:
            raise StopIteration
        # Return the first result of the new batch
        self.index = 1
        return self.results[0]
| Python |
# Optional HTTP client backends: each import is guarded so this module
# still loads when a backend (App Engine urlfetch / httplib2 / urllib2)
# is unavailable in the current environment.
# BUGFIX: the guards used bare 'except:', which also swallows
# KeyboardInterrupt/SystemExit and unrelated errors raised while
# importing; only ImportError should be tolerated here.
try:
    from google.appengine.api import urlfetch
    from cookie_handlers import CookiefulUrlfetch
except ImportError:
    pass
try:
    import httplib2
    from cookie_handlers import CookiefulHttp
except ImportError:
    pass
try:
    import urllib2
    import socket
except ImportError:
    pass
import logging
import re
class Urllib2Client(object):
    """HTTP client backend built on urllib2 with cookie support."""

    def __init__(self, cookiejar, rse):
        # rse: callable(url, status, content_type, body) that raises the
        # service error; stored so __call__ can report HTTP failures.
        cookiespy = urllib2.HTTPCookieProcessor(cookiejar)
        self.opener = urllib2.build_opener(cookiespy)
        self._raise_service_error = rse
        self.log = logging.getLogger()

    def __call__(self, url, method, body, headers):
        """Issue the request; return (response, body_text)."""
        req = urllib2.Request(url, body, headers)
        try:
            resp = self.opener.open(req)
        except socket.error as e:
            # BUGFIX: socket.error has no '.fp' attribute, so the old
            # 'e.fp.read()' raised AttributeError here and hid the real
            # network failure; log the exception itself.
            self.log.error('SOCKET FAILURE: %s', e)
            raise MetawebError('failed contacting %s: %s' % (url, str(e)))
        except urllib2.HTTPError as e:
            self.log.error('HTTP ERROR: %s', e)
            self._raise_service_error(url, e.code, e.info().type, e.fp.read())
        # Record the transaction id the service sends back, for debugging.
        for header in resp.info().headers:
            self.log.debug('HTTP HEADER %s', header)
            name, value = re.split("[:\n\r]", header, 1)
            if name.lower() == 'x-metaweb-tid':
                self.tid = value.strip()
        return (resp, resp.read())
class Httplib2Client(object):
    """HTTP client backend built on httplib2 (CookiefulHttp)."""

    def __init__(self, cookiejar, rse):
        self.cookiejar = cookiejar
        self._raise_service_error = rse
        self.httpclient = CookiefulHttp(cookiejar=self.cookiejar)
        # BUGFIX: self.log was never initialized, so the socket.error
        # handler in __call__ raised AttributeError instead of logging.
        self.log = logging.getLogger()

    def __call__(self, url, method, body, headers):
        """Issue the request; return (response, content)."""
        try:
            resp, content = self.httpclient.request(url, method=method,
                                                    body=body, headers=headers)
            if (resp.status != 200):
                self._raise_service_error(url, resp.status,
                                          resp['content-type'], content)
        except socket.error as e:
            # BUGFIX: socket.error has no '.fp'; log the exception itself.
            self.log.error('SOCKET FAILURE: %s', e)
            raise MetawebError('failed contacting %s: %s' % (url, str(e)))
        except httplib2.HttpLib2ErrorWithResponse as e:
            # NOTE(review): 'resp'/'content' may be unbound if request()
            # itself raised -- confirm against httplib2's behavior.
            self._raise_service_error(url, resp.status,
                                      resp['content-type'], content)
        except httplib2.HttpLib2Error as e:
            raise MetawebError(u'HTTP error: %s' % (e,))
        #tid = resp.get('x-metaweb-tid', None)
        return (resp, content)
class UrlfetchClient(object):
    """HTTP client backend for Google App Engine's urlfetch service."""

    def __init__(self, cookiejar, rse):
        self.cookiejar = cookiejar
        self._raise_service_error = rse
        self.httpclient = CookiefulUrlfetch(cookiejar=self.cookiejar)

    def __call__(self, url, method, body, headers):
        """Issue the request; return (response, content)."""
        resp = self.httpclient.request(url, payload=body,
                                       method=method, headers=headers)
        status = resp.status_code
        if status != 200:
            # Delegate error reporting to the session-supplied callback.
            ctype = resp.headers['content-type']
            self._raise_service_error(url, status, ctype, resp.body)
        return (resp, resp.content)
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import re
import string
import urllib  # used by urlencode_pathseg (urllib.quote)
def quotekey(ustr):
    """
    quote a unicode string to turn it into a valid namespace key
    """
    always_ok = string.ascii_letters + string.digits
    interior_ok = always_ok + '_-'

    # Normalize the input to unicode; only utf-8 str or unicode is accepted.
    if isinstance(ustr, str):
        s = unicode(ustr, 'utf-8')
    elif isinstance(ustr, unicode):
        s = ustr
    else:
        raise ValueError('quotekey() expects utf-8 string or unicode')

    def escaped(ch):
        # $XXXX escape for characters not allowed literally at this position.
        return '$%04X' % ord(ch)

    pieces = []
    # First character: letters/digits only, everything else escaped
    # ('_' and '-' are interior-only).
    pieces.append(s[0] if s[0] in always_ok else escaped(s[0]))
    # Interior characters may additionally be '_' or '-'.
    for ch in s[1:-1]:
        pieces.append(ch if ch in interior_ok else escaped(ch))
    # Last character follows the same rule as the first.
    if len(s) > 1:
        pieces.append(s[-1] if s[-1] in always_ok else escaped(s[-1]))
    return str(''.join(pieces))
def unquotekey(key, encoding=None):
    """
    Unquote a namespace key (see quotekey()) back into a unicode string.

    key      -- quoted key: letters/digits anywhere, '_'/'-' only in the
                interior, and $XXXX escapes for everything else.
    encoding -- if given, the result is encoded with it before returning.

    Raises ValueError for a character that is invalid at its position.
    """
    valid_always = string.ascii_letters + string.digits
    output = []
    i = 0
    while i < len(key):
        if key[i] in valid_always:
            output.append(key[i])
            i += 1
        # BUGFIX: '_' and '-' are interior-only (quotekey escapes them at
        # the ends), but the old test was 'i != len(key)', which is always
        # true inside this loop, so a trailing '_'/'-' was silently
        # accepted; compare against the last index instead.
        elif key[i] in '_-' and i != 0 and i != len(key) - 1:
            output.append(key[i])
            i += 1
        elif key[i] == '$' and i + 4 < len(key):
            # may raise ValueError if there are invalid hex characters
            output.append(unichr(int(key[i+1:i+5], 16)))
            i += 5
        else:
            raise ValueError("unquote key saw invalid character '%s' at position %d" % (key[i], i))
    ustr = u''.join(output)
    if encoding is None:
        return ustr
    return ustr.encode(encoding)
# should this also include "'()" into safe?
def urlencode_pathseg(data):
    '''
    urlencode for placement between slashes in an url.
    '''
    # Percent-encode, keeping characters that are safe inside a path
    # segment.  NOTE(review): this module only imports 'string' and 're';
    # 'urllib' must be imported at module level for this call to resolve
    # -- verify the module's import list.
    if isinstance(data, unicode):
        data = data.encode('utf_8')
    return urllib.quote(data, "~:@$!*,;=&+")
def id_to_urlid(id):
    """
    convert a mql id to an id suitable for embedding in a url path.

    Accepts guid ids ('~...'), hashes ('#...'), and slash-paths ('/a/b').
    Raises ValueError for any other format.
    """
    # BUGFIX: validate before use -- the assert previously ran *after*
    # id.split('/'), so a non-string argument crashed with AttributeError
    # instead of reporting the intended assertion message.
    assert isinstance(id, str) and id != '', 'bad id "%s"' % id
    segs = id.split('/')
    if id[0] == '~':
        assert len(segs) == 1
        # assume valid, should check
        return id
    if id[0] == '#':
        assert len(segs) == 1
        # assume valid, should check; '#' itself must be percent-encoded
        return '%23' + id[1:]
    if id[0] != '/':
        raise ValueError('unknown id format %s' % id)
    # ok, we have a slash-path:
    # requote components as keys and rejoin.
    # urlids do not have leading slashes!!!
    return '/'.join(urlencode_pathseg(unquotekey(seg)) for seg in segs[1:])
| Python |
# ==================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
"""
declarations for external metaweb api.
from metaweb.api import HTTPMetawebSession
mss = HTTPMetawebSession('sandbox.freebase.com')
print mss.mqlread([dict(name=None, type='/type/type')])
"""
__all__ = ['MetawebError', 'MetawebSession', 'HTTPMetawebSession', 'attrdict']
__version__ = '1.0'
import os, sys, re
import cookielib
SEPARATORS = (",", ":")
# json libraries rundown
# jsonlib2 is the fastest, but it's written in C, so not as
# accessible. json is included in python2.6. simplejson
# is the same as json.
try:
import jsonlib2 as json
except ImportError:
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
# appengine provides simplejson at django.utils.simplejson
from django.utils import simplejson as json
except ImportError:
raise Exception("unable to import neither json, simplejson, jsonlib2, or django.utils.simplejson")
try:
from urllib import quote as urlquote
except ImportError:
from urlib_stub import quote as urlquote
import pprint
import socket
import logging
# Type ids whose values are literals (numbers, strings, dates, ...) rather
# than references to other objects; used for membership tests elsewhere.
LITERAL_TYPE_IDS = set([
    "/type/int",
    "/type/float",
    "/type/boolean",
    "/type/rawstring",
    "/type/uri",
    "/type/text",
    "/type/datetime",
    "/type/bytestring",
    "/type/id",
    "/type/key",
    "/type/value",
    "/type/enumeration"
])
class Delayed(object):
    """
    Lazily-evaluated wrapper for callables used in log statements.

    The wrapped callable only runs when the object is converted to a
    string, so an expensive format (e.g. json.dumps) is skipped whenever
    the logger discards the record without formatting it:

        >>> logging.debug(Delayed(json.dumps, q))   # dumps never called
        >>> logging.warn(Delayed(json.dumps, q))    # dumps actually called
    """

    def __init__(self, f, *args, **kwds):
        self.f = f
        self.args = args
        self.kwds = kwds

    def __str__(self):
        outcome = self.f(*self.args, **self.kwds)
        return str(outcome)
def logformat(result):
    """
    Format the dict/list as a json object
    """
    text = json.dumps(result, indent=2)
    # For dicts, strip the outer braces (and the final newline) so the
    # body lines up nicely inside a surrounding log message.
    if text.startswith('{'):
        text = text[1:-2]
    return text
from httpclients import Httplib2Client, Urllib2Client, UrlfetchClient
# Check for urlfetch first so that urlfetch is used when running the appengine SDK
try:
import google.appengine.api.urlfetch
from cookie_handlers import CookiefulUrlfetch
http_client = UrlfetchClient
except ImportError:
try:
import httplib2
from cookie_handlers import CookiefulHttp
http_client = Httplib2Client
except ImportError:
import urllib2
httplib2 = None
CookiefulHttp = None
http_client = Urllib2Client
def urlencode_weak(s):
    """Percent-encode s, leaving ',', '/', ':' and '$' untouched."""
    return urlquote(s, safe=',/:$')
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
    """A dict whose entries are also reachable as member attributes.

    >>> d = attrdict(a=1, b=2)
    >>> d['c'] = 3
    >>> print d.a, d.b, d.c
    1 2 3
    >>> d.b = 10
    >>> print d['b']
    10

    Beware: assigning a key such as 'get' shadows the dict method of the
    same name, so later method calls may fail.
    (Recipe: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668)
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the attribute namespace to the mapping itself so item
        # access and attribute access share a single store.
        self.__dict__ = self
# TODO expose the common parts of the result envelope
class MetawebError(Exception):
    """
    an error report from the metaweb service.
    """
    # No extra state; the message carries the service's report.
    pass
# TODO right now this is a completely unnecessary superclass.
# is there enough common behavior between session types
# to justify it?
class MetawebSession(object):
    """
    Abstract base for metaweb sessions, subclassed per connection type.

    Only the HTTP flavor is available externally; this class is an
    interface placeholder rather than shared behavior.
    """
    # interface definition here...
# from httplib2
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
class HTTPMetawebSession(MetawebSession):
"""
a MetawebSession is a request/response queue.
this version uses the HTTP api, and is synchronous.
"""
# share cookies across sessions, so that different sessions can
# see each other's writes immediately.
_default_cookiejar = cookielib.CookieJar()
def __init__(self, service_url, username=None, password=None, prev_session=None, cookiejar=None, cookiefile=None):
    """
    create a new MetawebSession for interacting with the Metaweb.

    a new session will inherit state from prev_session if present:
    its service_url, and (absent an explicit cookiejar/cookiefile)
    its cookiejar.
    """
    super(HTTPMetawebSession, self).__init__()
    self.log = logging.getLogger()
    assert not service_url.endswith('/')
    if not '/' in service_url:  # plain host:port
        service_url = 'http://' + service_url
    self.service_url = service_url
    self.username = username
    self.password = password
    self.tid = None
    if prev_session:
        # BUGFIX: this line previously read 'prev.service_url'; 'prev'
        # is not defined anywhere, so passing prev_session always raised
        # NameError.
        self.service_url = prev_session.service_url
    # Cookie jar precedence: explicit file > explicit jar >
    # previous session's jar > class-shared default jar.
    if cookiefile is not None:
        cookiejar = self.open_cookie_file(cookiefile)
    if cookiejar is not None:
        self.cookiejar = cookiejar
    elif prev_session:
        self.cookiejar = prev_session.cookiejar
    else:
        self.cookiejar = self._default_cookiejar
    self._http_request = http_client(self.cookiejar, self._raise_service_error)
def open_cookie_file(self, cookiefile=None):
    """Load (or create) an LWP cookie jar backed by *cookiefile*.

    Defaults to $HOME/.pyfreebase/cookiejar.  Raises MetawebError when no
    path is given and $HOME is not set.
    """
    if cookiefile is None or cookiefile == '':
        if 'HOME' in os.environ:
            cookiefile = os.path.join(os.environ['HOME'], '.pyfreebase/cookiejar')
        else:
            # BUGFIX: the old message string contained no %s placeholder,
            # so applying '% cookiefile' to it raised TypeError instead
            # of the intended MetawebError.
            raise MetawebError("no cookiefile specified and $HOME is not set")
    cookiejar = cookielib.LWPCookieJar(cookiefile)
    if os.path.exists(cookiefile):
        cookiejar.load(ignore_discard=True)
    return cookiejar
def _httpreq(self, service_path, method='GET', body=None, form=None,
             headers=None):
    """
    make an http request to the service.

    form arguments are encoded in the url, even for POST, if a non-form
    content-type is given for the body.

    returns a pair (resp, body)

    resp is the response object and may be different depending
    on whether urllib2 or httplib2 is in use?
    """
    if method == 'GET':
        assert body is None
    if method != "GET" and method != "POST":
        assert 0, 'unknown method %s' % method
    url = self.service_url + service_path
    if headers is None:
        headers = {}
    else:
        headers = _normalize_headers(headers)
    # this is a lousy way to parse Content-Type, where is the library?
    ct = headers.get('content-type', None)
    if ct is not None:
        ct = ct.split(';')[0]
    if body is not None:
        # if body is provided, content-type had better be too
        assert ct is not None
    if form is not None:
        qstr = '&'.join(['%s=%s' % (urlencode_weak(unicode(k)), urlencode_weak(unicode(v)))
                         for k,v in form.items()])
        if method == 'POST':
            # put the args on the url if we're putting something else
            # in the body. this is used to add args to raw uploads.
            if body is not None:
                url += '?' + qstr
            else:
                if ct is None:
                    # form bodies default to urlencoded
                    ct = 'application/x-www-form-urlencoded'
                    headers['content-type'] = ct + '; charset=utf-8'
                if ct == 'multipart/form-encoded':
                    # TODO handle this case
                    raise NotImplementedError
                elif ct == 'application/x-www-form-urlencoded':
                    body = qstr
        else:
            # for all methods other than POST, use the url
            url += '?' + qstr
    # assure the service that this isn't a CSRF form submission
    headers['x-metaweb-request'] = 'Python'
    if 'user-agent' not in headers:
        headers['user-agent'] = 'python freebase.api-%s' % __version__
    #if self.tid is not None:
    #    headers['x-metaweb-tid'] = self.tid
    ####### DEBUG MESSAGE - should check log level before generating
    if form is None:
        formstr = ''
    else:
        formstr = '\nFORM:\n  ' + '\n  '.join(['%s=%s' % (k,v)
                                               for k,v in form.items()])
    if headers is None:
        headerstr = ''
    else:
        headerstr = '\nHEADERS:\n  ' + '\n  '.join([('%s: %s' % (k,v))
                                                    for k,v in headers.items()])
    self.log.info('%s %s%s%s', method, url, formstr, headerstr)
    #######
    # just in case you decide to make SUPER ridiculous GET queries:
    # fall back to POST when the URL would be too long.
    # NOTE(review): url.split("?") breaks if a value itself contains '?';
    # confirm the long-GET fallback against real query contents.
    if len(url) > 1000 and method == "GET":
        method = "POST"
        url, body = url.split("?")
        ct = 'application/x-www-form-urlencoded'
        headers['content-type'] = ct + '; charset=utf-8'
    return self._http_request(url, method, body, headers)
def _raise_service_error(self, url, status, ctype, body):
    """Translate an HTTP error response into a MetawebError and raise it."""
    # A JSON body on a 400 carries a structured message list we can surface.
    looks_like_json = ctype.endswith('javascript') or ctype.endswith('json')
    if str(status) == '400' and looks_like_json:
        parsed = self._loadjson(body)
        msg = parsed.messages[0]
        raise MetawebError(u'%s %s %r' % (msg.get('code',''), msg.message, msg.info))
    # Anything else: report the raw status and body.
    raise MetawebError('request failed: %s: %s\n%s' % (url, status, body))
def _httpreq_json(self, *args, **kws):
    """Issue an HTTP request and parse the response body as JSON."""
    response, body = self._httpreq(*args, **kws)
    return self._loadjson(body)
def _loadjson(self, json_input):
    """Parse a JSON string into nested attrdicts.

    dicts (including subclasses) become attrdicts, lists are rebuilt
    with their elements converted recursively; other values pass
    through unchanged.  Raises MetawebError on empty or invalid input.
    """
    # TODO really this should be accomplished by hooking
    # simplejson to create attrdicts instead of dicts.
    def to_attrdict(value):
        if isinstance(value, dict):
            return attrdict([(k, to_attrdict(v)) for k, v in value.items()])
        if isinstance(value, list):
            return [to_attrdict(item) for item in value]
        return value

    if json_input == '':
        self.log.error('the empty string is not valid json')
        raise MetawebError('the empty string is not valid json')
    try:
        parsed = json.loads(json_input)
    except ValueError as e:
        self.log.error('error parsing json string %r' % json_input)
        raise MetawebError('error parsing JSON string: %s' % e)
    return to_attrdict(parsed)
def _check_mqlerror(self, r):
    """Raise MetawebError if the mql envelope reports a non-ok status."""
    if r.code == '/api/status/ok':
        return
    for msg in r.messages:
        self.log.error('mql error: %s %s %r' % (msg.code, msg.message, msg.get('query', None)))
    first = r.messages[0]
    raise MetawebError('query failed: %s\n%s\n%s'
                       % (first.code, first.message,
                          json.dumps(first.get('query', None), indent=2)))
def _mqlresult(self, r):
    """Validate the envelope *r* and return its result payload."""
    self._check_mqlerror(r)
    # logformat is evaluated lazily via Delayed so we only pay for
    # formatting when the log level is actually enabled
    self.log.info('result: %s', Delayed(logformat, r))
    return r.result
def login(self, username=None, password=None):
    """sign in to the service. For a more complete description,
    see http://www.freebase.com/view/en/api_account_login"""
    service = '/api/account/login'
    user = username or self.username
    pw = password or self.password
    assert user is not None
    assert pw is not None
    self.log.debug('LOGIN USERNAME: %s', user)
    r = self._httpreq_json(service, 'POST',
                           form=dict(username=user, password=pw))
    if r.code != '/api/status/ok':
        raise MetawebError(u'%s %r' % (r.get('code',''), r.messages))
    self.log.debug('LOGIN RESP: %r', r)
    self.log.debug('LOGIN COOKIES: %s', self.cookiejar)
def logout(self):
    """logout of the service. For a more complete description,
    see http://www.freebase.com/view/en/api_account_logout"""
    service = '/api/account/logout'
    self.log.debug("LOGOUT")
    r = self._httpreq_json(service, 'GET')
    if r.code != '/api/status/ok':
        # this should never happen
        raise MetawebError(u'%s %r' % (r.get('code',''), r.messages))
def user_info(self, mql_output=None):
    """get user_info. For a more complete description,
    see http://www.freebase.com/view/guid/9202a8c04000641f800000000c36a842"""
    service = "/api/service/user_info"
    qstr = json.dumps(mql_output, separators=SEPARATORS)
    return self._httpreq_json(service, 'POST', form=dict(mql_output=qstr))
def loggedin(self):
    """check to see whether a user is logged in or not. For a
    more complete description, see http://www.freebase.com/view/en/api_account_loggedin"""
    service = "/api/account/loggedin"
    try:
        resp = self._httpreq_json(service, 'GET')
    except MetawebError:
        return False
    if resp.code == "/api/status/ok":
        return True
    # NOTE: falls through to None (falsy) when the status is anything
    # other than ok, preserving this method's historical behavior
    return None
def create_private_domain(self, domain_key, display_name):
    """create a private domain. For a more complete description,
    see http://www.freebase.com/edit/topic/en/api_service_create_private_domain"""
    service = "/api/service/create_private_domain"
    return self._httpreq_json(service, 'POST',
                              form=dict(domain_key=domain_key,
                                        display_name=display_name))
def delete_private_domain(self, domain_key):
    """delete a private domain. For a more complete description,
    see http://www.freebase.com/edit/topic/en/api_service_delete_private_domain"""
    service = "/api/service/delete_private_domain"
    return self._httpreq_json(service, 'POST', form=dict(domain_key=domain_key))
def mqlreaditer(self, sq, asof=None):
    """read a structure query, yielding results one item at a time
    and transparently following the service's paging cursor."""
    service = '/api/service/mqlread'
    if isinstance(sq, (tuple, list)):
        if len(sq) > 1:
            raise MetawebError("You cannot ask mqlreaditer a query in the form: [{}, {}, ...], just [{}] or {}")
        sq = sq[0]
    cursor = True  # True requests the first page; later pages use the returned token
    while True:
        envelope = dict(query=[sq], cursor=cursor, escape=False)
        if asof:
            envelope['as_of_time'] = asof
        qstr = json.dumps(envelope, separators=SEPARATORS)
        r = self._httpreq_json(service, form=dict(query=qstr))
        for item in self._mqlresult(r):
            yield item
        cursor = r['cursor']
        if not cursor:
            return
        self.log.info('CONTINUING with %s', cursor)
def mqlread(self, sq, asof=None):
    """read a structure query. For a more complete description,
    see http://www.freebase.com/view/en/api_service_mqlread"""
    service = '/api/service/mqlread'
    envelope = dict(query=sq, escape=False)
    if asof:
        envelope['as_of_time'] = asof
    if isinstance(sq, list):
        envelope['cursor'] = True
    self.log.info('%s: %s', service, Delayed(logformat, sq))
    qstr = json.dumps(envelope, separators=SEPARATORS)
    return self._mqlresult(self._httpreq_json(service, form=dict(query=qstr)))
def mqlreadmulti(self, queries, asof=None):
    """read several structure queries with one request; results come
    back in the same order as *queries*."""
    keys = ['q%d' % i for i in range(len(queries))]
    envelope = {}
    for key, sq in zip(keys, queries):
        subq = dict(query=sq, escape=False)
        if asof:
            subq['as_of_time'] = asof
        # XXX put this back once mqlreadmulti is working in general
        #if isinstance(sq, list):
        #    subq['cursor'] = True
        envelope[key] = subq
    service = '/api/service/mqlread'
    self.log.info('%s: %s', service, Delayed(logformat, envelope))
    qstr = json.dumps(envelope, separators=SEPARATORS)
    rs = self._httpreq_json(service, form=dict(queries=qstr))
    self.log.info('%s result: %s', service, Delayed(json.dumps, rs, indent=2))
    return [self._mqlresult(rs[key]) for key in keys]
def trans(self, guid):
    """Fetch the raw blob for *guid*; thin alias for raw()."""
    return self.raw(guid)
def raw(self, id):
    """translate blob from id. For a more complete description,
    see http://www.freebase.com/view/en/api_trans_raw"""
    url = '/api/trans/raw' + urlquote(id)
    self.log.info(url)
    response, body = self._httpreq(url)
    self.log.info('raw is %d bytes' % len(body))
    return body
def blurb(self, id, break_paragraphs=False, maxlength=200):
    """translate only the text in blob from id. For a more
    complete description, see http://www.freebase.com/view/en/api_trans_blurb"""
    url = '/api/trans/blurb' + urlquote(id)
    self.log.info(url)
    form = dict(break_paragraphs=break_paragraphs, maxlength=maxlength)
    response, body = self._httpreq(url, form=form)
    self.log.info('blurb is %d bytes' % len(body))
    return body
def unsafe(self, id):
    """ unsafe raw """
    url = '/api/trans/unsafe' + urlquote(id)
    self.log.info(url)
    response, body = self._httpreq(url, headers={'x-metaweb-request' : 'Python'})
    self.log.info('unsafe is %d bytes' % len(body))
    return body
def image_thumb(self, id, maxwidth=None, maxheight=None, mode="fit", onfail=None):
    """given the id of an image, return the bytes of a thumbnail of it.
    The full details of how the image is cropped and finessed is detailed at
    http://www.freebase.com/view/en/api_trans_image_thumb"""
    service = "/api/trans/image_thumb"
    assert mode in ("fit", "fill", "fillcrop", "fillcropmid")
    form = dict(mode=mode)
    # only forward the sizing parameters that were actually supplied
    for nm, val in (("maxwidth", maxwidth), ("maxheight", maxheight), ("onfail", onfail)):
        if val is not None:
            form[nm] = val
    response, body = self._httpreq(service + urlquote(id), form=form)
    self.log.info('image is %d bytes' % len(body))
    return body
def mqlwrite(self, sq, use_permission_of=None):
    """do a mql write. For a more complete description,
    see http://www.freebase.com/view/en/api_service_mqlwrite"""
    service = '/api/service/mqlwrite'
    envelope = dict(query=sq, escape=False)
    if use_permission_of:
        envelope['use_permission_of'] = use_permission_of
    qstr = json.dumps(envelope, separators=SEPARATORS)
    self.log.debug('MQLWRITE: %s', qstr)
    self.log.info('%s: %s', service, Delayed(logformat, sq))
    r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
    self.log.debug('MQLWRITE RESP: %r', r)
    return self._mqlresult(r)
def mqlcheck(self, sq):
    """See if a write is valid, and see what would happen, but do not
    actually do the write."""
    service = '/api/service/mqlcheck'
    envelope = dict(query=sq, escape=False)
    qstr = json.dumps(envelope, separators=SEPARATORS)
    self.log.debug('MQLCHECK: %s', qstr)
    self.log.info('%s: %s', service, Delayed(logformat, sq))
    r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
    self.log.debug('MQLCHECK RESP: %r', r)
    return self._mqlresult(r)
def mqlflush(self):
    """ask the service not to hand us old data"""
    self.log.debug('MQLFLUSH')
    r = self._httpreq_json('/api/service/touch')
    self._check_mqlerror(r)
    return True
def touch(self):
    """make sure you are accessing the most recent data. For a more
    complete description, see http://www.freebase.com/view/en/api_service_touch"""
    return self.mqlflush()
def upload(self, body, content_type, document_id=False, permission_of=False):
    """upload to the metaweb. For a more complete description,
    see http://www.freebase.com/view/en/api_service_upload

    document_id/permission_of use False as the "not given" sentinel so
    that None can be passed as a meaningful value (a blank document).
    """
    service = '/api/service/upload'
    self.log.info('POST %s: %s (%d bytes)',
                  service, content_type, len(body))
    headers = {}
    if content_type is not None:
        headers['content-type'] = content_type
    form = {}
    if document_id is not False:
        form['document'] = '' if document_id is None else document_id
    if permission_of is not False:
        form['permission_of'] = permission_of
    if not form:
        form = None
    # note the use of both body and form.
    # form parameters get encoded into the URL in this case
    r = self._httpreq_json(service, 'POST',
                           headers=headers, body=body, form=form)
    return self._mqlresult(r)
def uri_submit(self, URI, document=None, content_type=None):
    """submit a URI to freebase. For a more complete description,
    see http://www.freebase.com/edit/topic/en/api_service_uri_submit"""
    service = "/api/service/uri_submit"
    form = dict(uri=URI)
    for nm, val in (("document", document), ("content_type", content_type)):
        if val is not None:
            form[nm] = val
    r = self._httpreq_json(service, 'POST', form=form)
    return self._mqlresult(r)
def search(self, query, format=None, prefixed=None, limit=20, start=0,
           type=None, type_strict="any", domain=None, domain_strict=None,
           escape="html", timeout=None, mql_filter=None, mql_output=None):
    """search freebase.com. For a more complete description,
    see http://www.freebase.com/view/en/api_service_search"""
    service = "/api/service/search"
    form = dict(query=query)
    # every other parameter is forwarded only when truthy
    optional = (
        ("format", format), ("prefixed", prefixed), ("limit", limit),
        ("start", start), ("type", type), ("type_strict", type_strict),
        ("domain", domain), ("domain_strict", domain_strict),
        ("escape", escape), ("timeout", timeout),
        ("mql_filter", mql_filter), ("mql_output", mql_output),
    )
    for nm, val in optional:
        if val:
            form[nm] = val
    r = self._httpreq_json(service, 'POST', form=form)
    return self._mqlresult(r)
def geosearch(self, location=None, location_type=None, mql_input=None, limit=20,
              start=0, type=None, geometry_type=None, intersect=None, mql_filter=None,
              within=None, inside=None, order_by=None, count=None, format="json", mql_output=None):
    """perform a geosearch. For a more complete description,
    see http://www.freebase.com/api/service/geosearch?help"""
    service = "/api/service/geosearch"
    if location is None and location_type is None and mql_input is None:
        raise Exception("You have to give it something to work with")
    form = dict()
    # all parameters are forwarded only when truthy
    optional = (
        ("location", location), ("location_type", location_type),
        ("mql_input", mql_input), ("limit", limit), ("start", start),
        ("type", type), ("geometry_type", geometry_type),
        ("intersect", intersect), ("mql_filter", mql_filter),
        ("within", within), ("inside", inside), ("order_by", order_by),
        ("count", count), ("format", format), ("mql_output", mql_output),
    )
    for nm, val in optional:
        if val:
            form[nm] = val
    # only a json-formatted response can be parsed; other formats are
    # returned as the raw (response, body) pair
    if format == "json":
        return self._httpreq_json(service, 'POST', form=form)
    return self._httpreq(service, 'POST', form=form)
def version(self):
    """get versions for various parts of freebase. For a more
    complete description, see http://www.freebase.com/view/en/api_version"""
    return self._httpreq_json("/api/version")
def status(self):
    """get the status for various parts of freebase. For a more
    complete description, see http://www.freebase.com/view/en/api_status"""
    return self._httpreq_json("/api/status")
### DEPRECATED IN API
def reconcile(self, name, etype=('/common/topic',)):
    """DEPRECATED: reconcile name to guid. For a more complete description,
    see http://www.freebase.com/view/en/dataserver_reconciliation"""
    # bug fix: the default was a mutable list shared across calls;
    # a tuple is equivalent for ','.join and safe as a default.
    service = '/dataserver/reconciliation'
    r = self._httpreq_json(service, 'GET', form={'name':name, 'types':','.join(etype)})
    # TODO non-conforming service, fix later
    #self._mqlresult(r)
    return r
### SCHEMA MANIPULATION ###
# Object helpers
def create_object(self, name="", path=None, key=None, namespace=None, included_types=None, create="unless_exists", extra=None):
    """Create (or, with the default create mode, find) an object.

    Give either *path* or a *key*/*namespace* pair; *included_types*
    may be a single type id or a list, and their own included types
    are pulled in as well.  *extra* is merged into the final write.
    """
    # bug fix: the original tested `type(included_types) is str`, which
    # missed unicode ids; the rest of this module tests basestring.
    if isinstance(included_types, basestring):
        included_types = [included_types]
    if path and (key or namespace):
        raise Exception("You can't specify both the path and a key and namespace.")
    if path:
        key, namespace = get_key_namespace(path)
    if included_types:
        its = set(included_types)
        q = [{
            "id|=" : included_types,
            "/freebase/type_hints/included_types" : [{"id" : None}]
        }]
        # expand with the included types of the requested types
        for res in self.mqlread(q):
            its.update([t["id"] for t in res["/freebase/type_hints/included_types"]])
    wq = {
        "id" : None,
        "name" : name,
        "key" : {
            "namespace" : namespace,
            "value" : key,
        },
        "create" : create
    }
    if included_types:
        wq.update(type = [{ "id" : it, "connect" : "insert" } for it in its])
    if extra:
        wq.update(extra)
    return self.mqlwrite(wq)
def connect_object(self, id, newpath, extra=None):
    """Add a key at *newpath* to the object *id*."""
    key, namespace = get_key_namespace(newpath)
    wq = {
        "id" : id,
        "key" : {
            "namespace" : namespace,
            "value" : key,
            "connect" : "insert",
        },
    }
    if extra:
        wq.update(extra)
    return self.mqlwrite(wq)
def disconnect_object(self, id, extra=None):
    """Remove the key named by *id* from that object."""
    key, namespace = get_key_namespace(id)
    wq = {
        "id" : id,
        "key" : {
            "namespace" : namespace,
            "value" : key,
            "connect" : "delete",
        },
    }
    if extra:
        wq.update(extra)
    return self.mqlwrite(wq)
def move_object(self, oldpath, newpath):
    """Give the object at *oldpath* a key at *newpath*, then remove
    the old key; returns both write results as a pair."""
    connected = self.connect_object(oldpath, newpath)
    disconnected = self.disconnect_object(oldpath)
    return connected, disconnected
def get_key_namespace(path):
    """Split an id path into (key, namespace).

    The key is the final path segment; the namespace is everything
    before it, with the root spelled "/".
    """
    # be careful with /common
    idx = path.rindex("/")
    return (path[idx + 1:], path[:idx] or "/")
if __name__ == '__main__':
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
mss = HTTPMetawebSession('sandbox.freebase.com')
self.mss.log.setLevel(logging.DEBUG)
self.mss.log.addHandler(console)
print mss.mqlread([dict(name=None, type='/type/type')])
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
#
# httplib2cookie.py allows you to use python's standard
# CookieJar class with httplib2.
#
#
import re
try:
from google.appengine.api import urlfetch
Http = object
except ImportError:
pass
try:
from httplib2 import Http
except ImportError:
pass
try:
import urllib
except ImportError:
import urllib_stub as urllib
import cookielib
class DummyRequest(object):
    """Simulated urllib2.Request object for httplib2

    implements only what's necessary for cookielib.CookieJar to work
    """
    def __init__(self, url, headers=None):
        self.url = url
        self.headers = headers
        # request_host() only needs get_full_url()/get_header(), which
        # are already usable at this point
        self.origin_req_host = cookielib.request_host(self)
        self.type, remainder = urllib.splittype(url)
        self.host, remainder = urllib.splithost(remainder)
        if self.host:
            self.host = urllib.unquote(self.host)

    def get_full_url(self):
        return self.url

    def get_origin_req_host(self):
        # TODO to match urllib2 this should be different for redirects
        return self.origin_req_host

    def get_type(self):
        return self.type

    def get_host(self):
        return self.host

    def get_header(self, key, default=None):
        return self.headers.get(key.lower(), default)

    def has_header(self, key):
        return key in self.headers

    def add_unredirected_header(self, key, val):
        # TODO this header should not be sent on redirect
        self.headers[key.lower()] = val

    def is_unverifiable(self):
        # TODO to match urllib2, this should be set to True when the
        # request is the result of a redirect
        return False
class DummyHttplib2Response(object):
    """Adapts an httplib2 response to the single piece of the urllib2
    response interface that cookielib.CookieJar needs: info()."""
    def __init__(self, response):
        self.response = response

    def info(self):
        return DummyHttplib2Message(self.response)
class DummyUrlfetchResponse(object):
    """Adapts a google.appengine urlfetch response to the single piece
    of the urllib2 response interface that cookielib.CookieJar needs:
    info()."""
    def __init__(self, response):
        self.response = response

    def info(self):
        return DummyUrlfetchMessage(self.response)
class DummyHttplib2Message(object):
    """Simulated mimetools.Message object for httplib2

    implements only what's necessary for cookielib.CookieJar to work
    """
    # httplib2 joins multiple values for the same header
    # using ','. but the netscape cookie format uses ','
    # as part of the expires= date format. so we have
    # to split carefully here - header.split(',') won't do it.
    # (compiled once at class level instead of per getheaders() call)
    HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header *k* (case-insensitive)."""
        k = k.lower()
        # cleanup: the original also computed an unused `v` lookup here
        if k not in self.response:
            return []
        return [h[0] for h in self.HEADERVAL.findall(self.response[k])]
class DummyUrlfetchMessage(object):
    """Simulated mimetools.Message object for urlfetch responses

    implements only what's necessary for cookielib.CookieJar to work
    """
    # httplib2 joins multiple values for the same header
    # using ','. but the netscape cookie format uses ','
    # as part of the expires= date format. so we have
    # to split carefully here - header.split(',') won't do it.
    # (compiled once at class level instead of per getheaders() call)
    HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header *k* (case-insensitive)."""
        k = k.lower()
        # cleanup: the original also computed an unused `v` lookup here
        if k not in self.response.headers:
            return []
        return [h[0] for h in self.HEADERVAL.findall(self.response.headers[k])]
class CookiefulHttp(Http):
    """Subclass of httplib2.Http that keeps cookie state

    constructor takes an optional cookiejar=cookielib.CookieJar
    currently this does not handle redirects completely correctly:
    if the server redirects to a different host the original
    cookies will still be sent to that host.
    """
    def __init__(self, cookiejar=None, **kws):
        # note that httplib2.Http is not a new-style-class
        Http.__init__(self, **kws)
        if cookiejar is None:
            cookiejar = cookielib.CookieJar()
        self.cookiejar = cookiejar

    def request(self, uri, **kws):
        """Like Http.request, but attaches stored cookies to the
        outgoing request and harvests cookies from the response."""
        req = DummyRequest(uri, kws.pop('headers', None))
        self.cookiejar.add_cookie_header(req)
        (resp, content) = Http.request(self, uri, headers=req.headers, **kws)
        self.cookiejar.extract_cookies(DummyHttplib2Response(resp), req)
        return (resp, content)
class CookiefulUrlfetch(object):
    """Class that keeps cookie state

    constructor takes an optional cookiejar=cookielib.CookieJar
    """
    # TODO refactor CookiefulHttp so that CookiefulUrlfetch can be a subclass of it
    def __init__(self, cookiejar=None, **kws):
        # bug fix: the original assigned the freshly created jar to a
        # misspelled local ("cookejar") and then stored the still-None
        # argument, so a default-constructed instance had no jar at all.
        if cookiejar is None:
            cookiejar = cookielib.CookieJar()
        # NOTE: the attribute keeps the historical (misspelled) name
        # "cookejar" since request() and any external users read it.
        self.cookejar = cookiejar

    def request(self, uri, **kws):
        """Issue a urlfetch request, attaching stored cookies and
        harvesting new ones from the response."""
        headers = kws.pop('headers', None)
        req = DummyRequest(uri, headers)
        self.cookejar.add_cookie_header(req)
        headers = req.headers
        r = urlfetch.fetch(uri, headers=headers, **kws)
        self.cookejar.extract_cookies(DummyUrlfetchResponse(r), req)
        return r
| Python |
from session import HTTPMetawebSession, MetawebError, attrdict, LITERAL_TYPE_IDS
from mqlkey import quotekey, unquotekey | Python |
from freebase.api.session import HTTPMetawebSession
from freebase.api.session import get_key_namespace, LITERAL_TYPE_IDS
"""
NOTE
----
graph is used freely in this file. Some information:
- It refers to an internal representation of a group of types.
- It resembles a mqlread result, but it is not a mqlread result
- It also has some helper variables like __requires and __related.
- It is produced by _get_graph
- It can be converted into valid json (json.dumps(graph, indent=2))
Its structure is as follows:
"type_id" : {
"name" : "My Type",
"id" : "/base_id/type_id"
...
"__requires" : ["/base_id/type_id2"]
"__properties" : [
{
"id" : "/base_id/type_id/my_prop"
...
},
{
...
}
]
},
"type_id2" : {...}
...
"""
class DelegationError(Exception):
    """Raised when setting an expected_type on a delegated property
    whose master's expected_type is a primitive (they must match)."""
class CVTError(Exception):
    """Raised when follow_types is False but a cvt is involved; a cvt
    requires fetching all the relevant types, so follow_types must be
    True."""
def key_exists(s, k):
    """Return True if an object with id *k* exists (mqlread hit)."""
    query = {
        "id" : k,
        "guid" : None
    }
    return s.mqlread(query) is not None
def type_object(s, id, type_id):
    """Add *type_id* -- plus all of its included types -- to object *id*."""
    q = {
        "id" : type_id,
        "/freebase/type_hints/included_types" : [{"id" : None}]
    }
    hints = s.mqlread(q)["/freebase/type_hints/included_types"]
    included = [t["id"] for t in hints]
    wq = {
        "id" : id,
        "type" : [{
            "id" : t,
            "connect" : "insert"
        } for t in included + [type_id]]
    }
    return s.mqlwrite(wq)
def copy_property(s, id, newid, **extra):
    """Create a duplicate of property *id* at *newid*; keyword extras
    are merged into the raw write payload."""
    newname, newschema = get_key_namespace(newid)
    info = get_property_info(s, id)
    info["__raw"].update(extra)
    create_property(s, info["name"], newname, newschema, info["expected_type"],
                    info["unique"], info["/freebase/property_hints/disambiguator"],
                    info["/freebase/documented_object/tip"], info["__raw"])
def move_property(s, id, newid, **extra):
    """Copy property *id* to *newid*, then detach the old key and its
    schema link."""
    copy_property(s, id, newid, **extra)
    old_schema = "/".join(id.split("/")[:-1])
    disconnect_schema = {
        "type" : "/type/property",
        "schema" : {"connect" : "delete", "id" : old_schema},
    }
    s.disconnect_object(id, extra=disconnect_schema)
def get_property_info(s, prop_id):
    """Read a property's schema facts and return them as a plain dict.

    Extracts name/schema/keys/tip plus the scalar hints listed below;
    everything not explicitly consumed is left in info["__raw"] so it
    can be replayed by create_property() (see copy_property).
    """
    # bug fix: copy before updating -- the original called
    # q.update(id=...) directly on the shared module-level
    # PROPERTY_QUERY template, mutating it in place.
    q = dict(PROPERTY_QUERY)
    q.update(id=prop_id)
    res = s.mqlread(q)
    info = {}
    info["name"] = res["name"]["value"]
    if res["schema"]:
        info["schema"] = res["schema"]["id"]
    else:
        info["schema"] = None
    if res["key"]:
        info["keys"] = [(k["value"], k["namespace"]) for k in res["key"]]
    else:
        # bug fix: the original set info["key"] here, so the "keys"
        # entry was simply missing whenever the property had no keys.
        info["keys"] = None
    if res["/freebase/documented_object/tip"]:
        info["/freebase/documented_object/tip"] = res["/freebase/documented_object/tip"]["value"]
    else:
        info["/freebase/documented_object/tip"] = None
    for i in ["delegated", "enumeration", "expected_type", "id", "master_property", "unique", "unit",
              "/freebase/property_hints/disambiguator", "/freebase/property_hints/display_none",
              "/freebase/property_hints/display_orientation", "/freebase/property_hints/enumeration",
              "/freebase/property_hints/dont_display_in_weblinks", "/freebase/property_hints/inverse_description"]:
        if res[i]:
            if isinstance(res[i], basestring):
                info[i] = res[i]
            elif isinstance(res[i], bool):
                info[i] = res[i]
            elif "id" in res[i]:
                info[i] = res[i]["id"]
            elif "value" in res[i]:
                info[i] = res[i]["value"]
            else:
                raise ValueError("There is a problem with getting the property value.")
        else:
            info[i] = None
    # delete the properties that are going to be asked for in create_property,
    # plus other useless things, so __raw can be replayed verbatim
    for consumed in ("name", "schema", "key", "expected_type", "unique",
                     "/freebase/property_hints/disambiguator",
                     "/freebase/documented_object/tip", "id"):
        del res[consumed]
    for i in [k for k, v in res.iteritems() if v is None]:
        del res[i]
    info["__raw"] = res
    return info
# Create Type
def create_type(s, name, key, ns, cvt=False, tip=None, included=None, extra=None):
    """Create type *key* in namespace *ns*; a no-op when the key
    already exists.  *included* types (and their own included types)
    are attached as type hints; *extra* is merged into the write."""
    if key_exists(s, ns + "/" + key):
        return
    # assert isinstance(name, basestring) # name could be mqlish
    assert isinstance(key, basestring)
    assert isinstance(ns, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert included is None or isinstance(included, (basestring, list, tuple))
    assert extra is None or isinstance(extra, dict)
    q = {
        "create" : "unconditional",
        "type" : "/type/type",
        "/type/type/domain" : { "connect" : "insert", "id" : ns },
        "name" : {"connect" : "insert", "value" : name, "lang" : "/lang/en" },
        "key" : {
            "connect" : "insert",
            "value" : key,
            "namespace" : ns
        }
    }
    if included:
        if isinstance(included, basestring):
            included = [included]
        # also pull in the included types of the requested types
        itsq = [{
            "id|=" : included,
            "/freebase/type_hints/included_types" : [{"id" : None}]
        }]
        r = s.mqlread(itsq)
        included_types = set(included)
        if r:
            for group in r:
                included_types.update([t["id"] for t in group["/freebase/type_hints/included_types"]])
        q['/freebase/type_hints/included_types'] = [
            {"connect" : "insert", "id" : t} for t in included_types]
    # TODO: enum
    if cvt:
        q['/freebase/type_hints/mediator'] = { "connect" : "update", "value" : True }
    if tip:
        q['/freebase/documented_object/tip'] = { "connect" : "update", "value" : tip, "lang" : "/lang/en" }
    if extra:
        q.update(extra)
    return s.mqlwrite(q, use_permission_of=ns)
# Create Property
def create_property(s, name, key, schema, expected, unique=False, disambig=False, tip=None, extra=None):
    """Create property *key* on type *schema* with expected type
    *expected*; a no-op when the key already exists."""
    if key_exists(s, schema + "/" + key):
        return
    # validate parameters
    # assert isinstance(name, basestring) # could be mql
    assert isinstance(key, basestring)
    assert isinstance(schema, basestring)
    assert isinstance(expected, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert extra is None or isinstance(extra, dict)
    q = {
        "create" : "unconditional",
        "type" : "/type/property",
        "name" : name,
        "key" : {
            "connect" : "insert",
            "value" : key,
            "namespace" : { "id" : schema },
        },
        "schema" : { "connect" : "insert", "id" : schema },
        "expected_type" : { "connect" : "insert", "id" : expected }
    }
    if unique:
        q['unique'] = { "connect" : "update", "value" : unique }
    if tip:
        q['/freebase/documented_object/tip'] = { "connect" : "update", "value" : tip, "lang" : "/lang/en" }
    if disambig:
        q['/freebase/property_hints/disambiguator'] = { "connect" : "update", "value" : True }
    if extra:
        q.update(extra)
    return s.mqlwrite(q, use_permission_of=schema)
def delegate_property(s, p, schema, name=None, key=None, expected=None, tip=None, extra=None):
    """Create a property on *schema* that delegates to master property *p*.

    name/key/tip default to the master's values when not given.
    Raises DelegationError when *expected* conflicts with a primitive
    master expected_type (see the branch comments below).
    """
    assert isinstance(p, basestring)
    assert isinstance(schema, basestring)
    #assert name is None or isinstance(name, basestring)
    assert key is None or isinstance(key, basestring)
    assert expected is None or isinstance(expected, basestring)
    assert tip is None or isinstance(tip, basestring)
    # read the master property's current facts
    q = {
        "id" : p,
        "type" : "/type/property",
        "name" : None,
        "unique" : None,
        "expected_type" : {"id" : None},
        "key" : None,
        "/freebase/documented_object/tip" : None,
        "/freebase/property_hints/disambiguator" : None
    }
    r = s.mqlread(q)
    # If the expected_type of the delegator(master) is a primitive, the delegated's
    # expected_type must be the same
    if r["expected_type"]["id"] in LITERAL_TYPE_IDS:
        if expected:
            if expected != r["expected_type"]["id"]:
                raise DelegationError("You can't set the expected_type if the expected_type of the delegated (master) is a primitive")
        expected = r["expected_type"]["id"]
    # If the expected_type of the delegator(master) is not a primitive, the delegated's
    # expected_type can be different
    elif expected is None:
        expected = r["expected_type"]["id"]
    # fall back to the master's documentation and naming where absent
    if not tip and r["/freebase/documented_object/tip"]:
        tip = r["/freebase/documented_object/tip"]
    if name is None:
        name = r["name"]
    if key is None:
        key = r["key"]
    delegate = { "/type/property/delegated" : p}
    if extra: delegate.update(extra)
    return create_property(s, name, key, schema, expected, r['unique'],
                           r["/freebase/property_hints/disambiguator"],
                           tip,
                           delegate)
def reciprocate_property(s, name, key, master, unique=False, disambig=False, tip=None, extra=None):
    """Create the reciprocal (inverse) of the *master* property.

    Example: /visual_art/art_period_movement/associated_artworks has
    schema /visual_art/art_period_movement and expected type
    /visual_art/artwork; its reciprocal,
    /visual_art/artwork/period_or_movement, has those two roles
    swapped.  So, given a master, the reciprocal's schema is the
    master's expected type and the reciprocal's expected type is the
    master's schema.
    """
    # assert isinstance(name, basestring) # name could be mqlish
    assert isinstance(key, basestring)
    assert isinstance(master, basestring)
    assert tip is None or isinstance(tip, basestring)
    assert extra is None or isinstance(extra, dict)
    r = s.mqlread({
        "id" : master,
        "/type/property/expected_type" : None,
        "/type/property/schema" : None })
    ect = r["/type/property/expected_type"]
    schema = r["/type/property/schema"]
    master = {"master_property" : master}
    if extra:
        master.update(extra)
    # NOTE: ect and schema are deliberately swapped; see docstring
    return create_property(s, name, key, ect, schema, unique, disambig, tip,
                           extra = master)
# dump / restore types
def dump_base(s, base_id):
    """Return the type graph for every type in the domain *base_id*."""
    domain = s.mqlread({"id" : base_id, "/type/domain/types" : [{"id" : None}]})
    type_ids = [t["id"] for t in domain["/type/domain/types"]]
    return _get_graph(s, type_ids, True)
def dump_type(s, type_id, follow_types=True):
    """Return the type graph for one type, optionally following the
    types it references."""
    return _get_graph(s, [type_id], follow_types)
def restore(s, graph, new_location, ignore_types=None):
    """Recreate a dumped type graph (from dump_base/dump_type) under new_location.

    Types are created first, then properties, each in dependency order so that
    anything an item requires already exists when the item is written.
    NOTE(review): ignore_types is accepted but never used in this body.
    """
    follow_types = graph.get("__follow_types", True)
    # create type dependencies
    type_requires_graph = {}
    # create prop dependencies
    prop_requires_graph = {}
    # maps each property id back to the type (schema) that owns it
    prop_to_type_map = {}
    for type_id, type_information in graph.iteritems():
        if not type_id.startswith("__"): # not a real type, but rather a helper
            # type dependency generation
            type_requires_graph[type_id] = type_information["__requires"]
            # prop dependency generation
            for prop in type_information["properties"]:
                prop_requires_graph[prop["id"]] = prop["__requires"]
                prop_to_type_map[prop["id"]] = type_id
    # topological creation order for both graphs
    types_to_create = _generate_dependency_creation_order(type_requires_graph)
    props_to_create = _generate_dependency_creation_order(prop_requires_graph)
    # one round trip: the origin domain of the dump and the resolved target id
    origin_id, new_location_id = s.mqlreadmulti([{"id" : types_to_create[0], "type" : "/type/type", "domain" : {"id" : None}},
                                                 {"id" : new_location, "a:id" : None}])
    origin_id = origin_id["domain"]["id"]
    new_location_id = new_location_id["a:id"]
    # only ids in this list get rebased from origin_id onto new_location_id
    only_include = types_to_create + props_to_create
    for type_id in types_to_create:
        # let's find the type's key
        key = None
        for group in graph[type_id]["key"]:
            if group["namespace"] == origin_id:
                key = group["value"]
                break
        if key is None: # this shouldn't happen
            key = graph[type_id]["id"].split("/")[-1] # this can be wrong... different key than typeid
        tip = None
        if graph[type_id]["/freebase/documented_object/tip"]:
            tip = graph[type_id]["/freebase/documented_object/tip"]["value"]
        # keys handled explicitly above; everything else becomes "extra"
        ignore = ("name", "domain", "key", "type", "id", "properties", "/freebase/type_hints/enumeration",
                  "/freebase/type_hints/included_types", "/freebase/type_hints/mediator", "/freebase/documented_object/tip")
        extra = _generate_extra_properties(graph[type_id], ignore)
        name = graph[type_id]["name"]["value"]
        # included types must be rebased too, if they were part of the dump
        included = [_convert_name_to_new(included_type["id"], origin_id, new_location_id, only_include) for included_type in graph[type_id]["/freebase/type_hints/included_types"]]
        cvt = graph[type_id]["/freebase/type_hints/mediator"]
        create_type(s, name, key, new_location_id, included=included, cvt=cvt, tip=tip, extra=extra)
    for prop_id in props_to_create: #* prop_id
        type_id = prop_to_type_map[prop_id]
        all_properties_for_type = graph[type_id]["properties"]
        for prop in all_properties_for_type:
            if prop["id"] == prop_id: # good, we are dealing with our specific property
                new_schema = _convert_name_to_new(type_id, origin_id, new_location_id, only_include)
                name = prop["name"]
                expected = None
                if prop["expected_type"]:
                    expected = _convert_name_to_new(prop["expected_type"], origin_id, new_location_id, only_include)
                # NOTE(review): if no key group matches type_id, `key` keeps its
                # value from a previous loop iteration — TODO confirm intended.
                for group in prop["key"]:
                    if group["namespace"] == type_id:
                        key = group["value"]
                        break
                tip = None
                if prop["/freebase/documented_object/tip"]:
                    tip = prop["/freebase/documented_object/tip"]["value"]
                disambig = prop["/freebase/property_hints/disambiguator"]
                unique = prop["unique"]
                ignore = ("name", "expected_type", "key", "id", "master_property", "delegated", "unique", "type", "schema",
                          "/freebase/property_hints/disambiguator", "enumeration", "/freebase/property_hints/enumeration",
                          "/freebase/documented_object/tip")
                extra = _generate_extra_properties(prop, ignore)
                if prop['master_property']:
                    # cvt-style property: recreate it as the reciprocal of its
                    # (already rebased) master property
                    converted_master_property = _convert_name_to_new(prop["master_property"], origin_id, new_location_id, only_include)
                    if converted_master_property == prop["master_property"]:
                        raise CVTError("You can't set follow_types to False if there's a cvt. A cvt requires you get all the relevant types. Set follow_types to true.\n" + \
                                       "The offending property was %s, whose master was %s." % (prop["id"], prop["master_property"]))
                    reciprocate_property(s, name, key, converted_master_property,
                                         unique, disambig=disambig, tip=tip, extra=extra)
                elif prop['delegated']:
                    delegate_property(s, _convert_name_to_new(prop['delegated'], origin_id, new_location_id, only_include), new_schema,
                                      expected=expected, tip=tip, extra=extra)
                else:
                    create_property(s, name, key, new_schema, expected, unique,
                                    disambig=disambig, tip=tip, extra=extra)
def _get_graph(s, initial_types, follow_types):
    """ get the graph of dependencies of all the types involved, starting with a list supplied """
    assert isinstance(initial_types, (list, tuple))
    graph = {}
    to_update = set(initial_types)
    done = set()
    while len(to_update) > 0:
        new = to_update.pop()
        # Fix: mark the node itself as processed. Previously only its
        # relatives entered `done`, so two mutually-related types kept
        # re-queueing each other and were fetched from the service twice.
        done.add(new)
        graph[new] = _get_needed(s, new)
        if follow_types:
            # queue any related types we haven't already handled
            for related_id in graph[new]["__related"]:
                if related_id not in done:
                    to_update.add(related_id)
            done.update(graph[new]["__related"])
        if not follow_types:
            # we have to check that there are no cvts attached to us, or else
            # ugly things happen (we can't include the cvt because the cvt won't link to us.)
            for prop in graph[new]["properties"]:
                if prop["master_property"]:
                    raise CVTError("You can't set follow_types to False if there's a cvt. A cvt requires you get all the relevant types. Set follow_types to true.\n" + \
                                   "The offending property was %s, whose master was %s." % (prop["id"], prop["master_property"]))
    graph["__follow_types"] = follow_types
    return graph
def _convert_name_to_new(old_name, operating_base, new_base, only_include=None):
if old_name in only_include and old_name.startswith(operating_base):
return new_base + old_name.replace(operating_base, "", 1)
else:
return old_name
def _generate_dependency_creation_order(requires_graph):
# This is a naive topographical sort to determine
# in what order to create types or properties so
# that the other type/properties they rely on
# are already created
# This function is called with the type dependencies
# and then the property dependencies.
# we sort the dependency_list because its a good idea
# to create the guys with zero dependencies before the
# guys with one.. it's just a simple optimization to
# the topographical sort
dependency_list = [(len(requires), name) for (name, requires) in requires_graph.iteritems()]
dependency_list.sort()
creation_order_list = []
while len(dependency_list) > 0:
number_of_requirements, id = dependency_list.pop(0)
if number_of_requirements == 0:
creation_order_list.append(id)
continue
else:
are_the_types_dependencies_already_resolved = True
for requirement in requires_graph[id]:
if requirement not in creation_order_list:
are_the_types_dependencies_already_resolved = False
continue
if are_the_types_dependencies_already_resolved:
creation_order_list.append(id)
else:
dependency_list.append((number_of_requirements, id))
return creation_order_list
def _generate_extra_properties(dictionary_of_values, ignore):
    """Flatten a raw query result into {key: scalar} for create_* calls.

    Keys listed in ``ignore``, bookkeeping ``__*`` keys and falsy values are
    dropped. Strings and bools pass through; dict values collapse to their
    "id" (preferred) or "value" entry.
    """
    extra = {}
    for prop_key, prop_value in dictionary_of_values.iteritems():
        if prop_key in ignore or prop_key.startswith("__") or not prop_value:
            continue
        if isinstance(prop_value, basestring) or isinstance(prop_value, bool):
            extra[prop_key] = prop_value
        elif prop_value.has_key("id"):
            extra[prop_key] = prop_value["id"]
        elif prop_value.has_key("value"):
            extra[prop_key] = prop_value["value"]
        else:
            raise ValueError("There is a problem with getting the property value.")
    return extra
def _get_needed(s, type_id):
    """Fetch one type's full schema plus the __related/__requires bookkeeping
    used by _get_graph/restore.

    Returns the mqlread result dict augmented with:
      __related  - in-domain types reachable from here (subgraph building)
      __requires - in-domain included types (type creation ordering)
    and each property gains a __requires list (property creation ordering).
    """
    # Fix: copy the template — the original did ``q = TYPE_QUERY`` and then
    # mutated the shared module-level dict in place with update().
    q = dict(TYPE_QUERY)
    q.update(id=type_id)
    r = s.mqlread(q)
    properties = r["properties"]
    # let's identify who the parent is in order to only include
    # other types in that domain. We don't want to go around including
    # all of commons because someone's a /people/person
    parents = [r["domain"]["id"]]
    included_types = [included["id"] for included in r["/freebase/type_hints/included_types"]]
    related_types = set(included_types)
    for prop in properties:
        if prop["expected_type"]:
            related_types.add(prop["expected_type"])
    # we have two different types of relationships: required and related.
    # related can be used to generate subgraphs of types
    # required is used to generate the dependency graph of types
    related = _return_relevant(related_types, parents)
    requires = _return_relevant(included_types, parents)
    # per-property dependencies: a property needs its master (cvt) and/or
    # its delegation target to exist before it can be created
    for prop in properties:
        dependent_on = set()
        if prop["master_property"]:
            dependent_on.add(prop["master_property"])
        if prop["delegated"]:
            dependent_on.add(prop["delegated"])
        prop["__requires"] = _return_relevant(dependent_on, parents)
    # return all the information along with our special __* properties
    info = r
    info.update(__related=related, __requires=requires, __properties=properties)
    return info
def _return_relevant(start_list, parents):
final = []
for item in start_list:
indomain = False
for parent in parents:
if item.startswith(parent):
indomain = True
continue
if indomain:
final.append(item)
return final
# MQL query template for one property of a type. A None/[]/{} value asks the
# service to fill that slot in; _get_needed flattens the result afterwards.
PROPERTY_QUERY = {
    "optional" : True,
    "type" : "/type/property",
    "delegated" : None,
    "enumeration" : None,
    "expected_type" : None,
    "id" : None,
    # all (namespace, value) keys — restore picks the one under the schema
    "key" : [{
        "namespace" : None,
        "value" : None
    }],
    #"link" : [{}],
    "master_property" : None,
    "name" : {"value" : None, "lang" : "/lang/en", "optional":True},
    "schema" : {"id" : None, "name" : None},
    "unique" : None,
    "unit" : None,
    "/freebase/documented_object/tip" : {"value" : None, "limit":1, "optional" : True},
    "/freebase/property_hints/disambiguator" : None,
    "/freebase/property_hints/display_none" : None,
    "/freebase/property_hints/display_orientation" : None,
    "/freebase/property_hints/enumeration" : None,
    "/freebase/property_hints/dont_display_in_weblinks" : None,
    "/freebase/property_hints/inverse_description" : None,
    }
# MQL query template for a whole type, including all of its properties.
# NOTE: treat as read-only — callers should copy before adding an "id".
TYPE_QUERY = {
    "type" : "/type/type",
    "domain" : {},
    "key" : [{"namespace" : None, "value" : None}],
    "name" : {"value" : None, "lang" : "/lang/en", "optional":True},
    "/freebase/type_hints/included_types" : [{"id" : None, "optional" : True}],
    "/freebase/type_hints/mediator" : None,
    "/freebase/type_hints/enumeration" : None,
    "/freebase/type_hints/minor" : None,
    "/freebase/documented_object/tip" : {"value" : None, "limit":1, "optional":True},
    }
TYPE_QUERY.update(properties=[PROPERTY_QUERY])
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
from freebase.api.session import HTTPMetawebSession
# Module-level convenience: expose every public method of a default
# (sandbox) session as a plain function on the ``freebase`` package, so
# callers can write freebase.func() instead of building a session first.
_base = HTTPMetawebSession("sandbox.freebase.com")
__all__ = ["HTTPMetawebSession"]
# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()
# a little trick to refer to ourselves
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)
# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
# NOTE(review): assumes the loop bound funcname/func at least once;
# if dir(_base) had no public names this del would raise NameError.
del funcname, func
| Python |
from optparse import OptionParser
import getpass
import sys
from freebase.api import HTTPMetawebSession
from freebase.schema import dump_base, dump_type, restore
try:
import jsonlib2 as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
def fb_save_base():
    """Console entry point: dump a whole base (domain) as JSON to stdout.

    Usage: fb_save_base [options] baseid
    """
    op = OptionParser(usage='%prog [options] baseid')
    # stop option parsing at the first positional argument
    op.disable_interspersed_args()
    op.add_option('-s', '--service', dest='service_host',
                  metavar='HOST',
                  default="freebase.com",
                  help='Freebase HTTP service address:port')
    op.add_option('-S', '--sandbox', dest='use_sandbox',
                  default=False, action='store_true',
                  help='shortcut for --service=sandbox-freebase.com')
    options, args = op.parse_args()
    service_host = options.service_host
    # -S overrides any explicit -s value
    if options.use_sandbox:
        service_host = "sandbox-freebase.com"
    if len(args) < 1:
        op.error('Required baseid missing')
    if len(args) > 1:
        op.error('Too many arguments')
    s = HTTPMetawebSession(service_host)
    print >> sys.stdout, json.dumps(dump_base(s, args[0]), indent=2)
def fb_save_type():
    """Console entry point: dump one type (optionally following its related
    types) as JSON to stdout.

    Usage: fb_save_type [options] typeid
    """
    op = OptionParser(usage='%prog [options] typeid ')
    op.disable_interspersed_args()
    op.add_option('-s', '--service', dest='service_host',
                  metavar='HOST',
                  default="freebase.com",
                  help='Freebase HTTP service address:port')
    op.add_option('-S', '--sandbox', dest='use_sandbox',
                  default=False, action='store_true',
                  help='shortcut for --service=sandbox-freebase.com')
    # -n / -f toggle the same dest; the last one given on the command line wins
    op.add_option('-n', '--no-follow', dest='follow',
                  default=False, action='store_false',
                  help="Don't follow types, only copy the one specified.")
    op.add_option('-f', '--follow', dest="follow",
                  default=True, action="store_true",
                  help="Follow the types (you might end up copying multiple types)")
    options,args = op.parse_args()
    service_host = options.service_host
    if options.use_sandbox:
        service_host = "sandbox-freebase.com"
    if len(args) < 1:
        op.error('Required typeid missing')
    if len(args) > 1:
        op.error('Too many arguments')
    s = HTTPMetawebSession(service_host)
    print >> sys.stdout, json.dumps(dump_type(s, args[0], options.follow), indent=2)
def fb_restore():
    """Console entry point: restore a dumped type graph to a new location."""
    op = OptionParser(usage='%prog [options] new_location graph_output_from_dump*_command')
    op.disable_interspersed_args()
    op.add_option('-s', '--service', dest='service_host',
                  metavar='HOST',
                  help='Freebase HTTP service address:port')
    op.add_option('-S', '--sandbox', dest='use_sandbox',
                  default=False, action='store_true',
                  help='shortcut for --service=sandbox-freebase.com (default)')
    op.add_option('-F', '--freebase', dest='use_freebase',
                  default=False, action='store_true',
                  help='shortcut for --service=freebase.com (not default)')
    op.add_option('-u', '--username', dest='username',
                  action='store',
                  help='username for freebase service')
    op.add_option('-p', '--password', dest='password',
                  action='store',
                  help='password for freebase service')
    options, args = op.parse_args()
    # credentials must be supplied together or not at all
    if (options.username and not options.password) or (not options.username and options.password):
        op.error("You must supply both a username and password")
    if options.use_sandbox and options.use_freebase:
        op.error("You can't use both freebase and sandbox!")
    if options.service_host and (options.use_sandbox or options.use_freebase):
        op.error("You can't specify both --service and --freebase or --sandbox")
    if not (options.service_host or options.use_sandbox or options.use_freebase):
        op.error("You have to specify to upload to sandbox or production (freebase)")
    service_host = options.service_host
    if options.use_sandbox:
        service_host = "sandbox-freebase.com"
    if options.use_freebase:
        service_host = "freebase.com"
    s = login(service_host, username=options.username, password=options.password)
    newlocation = args[0]
    # a missing file argument, or the literal "-", means read from stdin
    graphfile = args[1] if len(args) > 1 else "-"
    if graphfile == "-":
        graph = json.load(sys.stdin)
    else:
        handle = open(graphfile, "r")
        graph = json.load(handle)
        handle.close()
    restore(s, graph, newlocation, ignore_types=None)
def login(api_host, username=None, password=None):
    """Create an HTTPMetawebSession for api_host and log it in.

    When no username is supplied, prompts interactively for both username
    and password. Returns the logged-in session.
    NOTE(review): a username given without a password sends password=None
    to s.login — presumably callers always pass both; verify.
    """
    s = HTTPMetawebSession(api_host)
    if not username:
        print "In order to perform this operation, we need to use a valid freebase username and password"
        username = raw_input("Please enter your username: ")
        try:
            password = getpass.getpass("Please enter your password: ")
        except getpass.GetPassWarning:
            # getpass couldn't suppress echo; fall back to a plain prompt
            password = raw_input("Please enter your password: ")
    s.login(username, password)
    print "Thanks!"
    return s
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time
from fbutil import *
import simplejson
import freebase.rison as rison
from freebase.api import attrdict
def cmd_help(fb, command=None):
    """get help on commands
    %prog help [cmd]
    """
    # NOTE: these cmd_* docstrings double as the CLI help text (cmd.doc /
    # cmd.shortdoc), so they must not be reworded casually.
    # With an explicit command, print just that command's help and stop.
    if command is not None:
        cmd = fb.commands[command]
        print ' %s: %s' % (cmd.name, cmd.shortdoc)
        print cmd.doc
        return
    print """
the interface to this tool is loosely modeled on the
"svn" command-line tool. many commands work slightly
differently from their unix and svn equivalents -
read the doc for the command first.
use "%s help <subcommand>" for help on a particular
subcommand.
""" % fb.progpath
    fb.oparser.print_help()
    # one summary line per subcommand, alphabetical
    print 'available subcommands:'
    cmds = sorted(fb.commands.keys())
    for fk in cmds:
        cmd = fb.commands[fk]
        print ' %s: %s' % (cmd.name, cmd.shortdoc)
def cmd_wikihelp(fb):
    """get help on commands in mediawiki markup format
    %prog wikihelp
    """
    print """usage: %s subcommand [ args ... ]
the interface to this tool is loosely modeled on the
"svn" command-line tool. many commands work slightly
differently from their unix and svn equivalents -
read the doc for the command first.
""" % fb.progpath
    print 'subcommands:'
    # emit one "==== prog cmd: summary ====" section per subcommand
    for fk in sorted(fb.commands.keys()):
        cmd = fb.commands[fk]
        # mediawiki markup cares about the difference between
        # a blank line and a blank line with a bunch of
        # spaces at the start of it. ewww.
        doc = re.sub(r'\r?\n {0,8}', '\n ', cmd.doc)
        print '==== %s %s: %s ====' % (fb.progpath, cmd.name, cmd.shortdoc)
        print doc
        print
# this command is disabled because it relies on some svn glue that
# was not sufficiently well thought out.
# (the "old_" prefix keeps it out of the command table.)
def old_cmd_pwid(fb):
    """show the "current working namespace id"
    %prog pwid
    by default, relative ids can't be resolved. however, if you
    run the fb tool from a directory that has the svn property
    'freebase:id' set, relative ids will be resolved relative
    to that id. the idea is that you can create an svn tree
    that parallels the freebase namespace tree.
    for example:
    $ %prog pwid
    $ svn propset freebase:id "/freebase" .
    $ %prog pwid
    /freebase
    """
    # print the cached working id, or an empty line when none is set
    if fb.cwid is not None:
        print fb.cwid
    else:
        print ''
def cmd_ls(fb, path=None):
    """list the keys in a namespace
    %prog ls [id]
    """
    path = fb.absid(path)
    # fetch every key directly under this namespace
    q = {'id': path,
         '/type/namespace/keys': [{'value': None,
                                   'namespace': {
                                       'id': None,
                                       'type': []
                                   },
                                   'optional':True
                                   }]
         }
    r = fb.mss.mqlread(q)
    if r is None:
        raise CmdException('query for id %r failed' % path)
    #sys.stdout.write(' '.join([mk.value for mk in r['/type/namespace/keys']]))
    for mk in r['/type/namespace/keys']:
        print mk.value
    # dead debug code below (if 0): would append '/' to namespace/domain
    # entries, ls -F style. Never executes.
    if 0:
        suffix = ''
        if ('/type/namespace' in mk.namespace.type
                or '/type/domain' in mk.namespace.type):
            suffix = '/'
        print mk.value, mk.namespace.id+suffix
def cmd_mkdir(fb, path):
    """create a new freebase namespace
    %prog mkdir id
    create a new instance of /type/namespace at the given
    point in id space. if id already exists, it should be
    a namespace.
    """
    path = fb.absid(path)
    # split into the parent namespace and the new key under it
    parent_ns, leaf_key = dirsplit(path)
    write_query = {
        'create': 'unless_exists',
        'key': {
            'connect': 'insert',
            'namespace': parent_ns,
            'value': leaf_key,
        },
        'name': path,
        'type': '/type/namespace',
    }
    fb.mss.mqlwrite(write_query)
def cmd_ln(fb, src, dst):
    """create a namespace key
    %prog ln srcid dstid
    create a new namespace link at dstid to the object
    currently at srcid.
    """
    # resolve both ids, then attach a key for the source object
    # inside dstid's parent namespace
    src = fb.absid(src)
    dst = fb.absid(dst)
    parent_ns, leaf_key = dirsplit(dst)
    link_query = {
        'id': src,
        'key': {
            'connect': 'insert',
            'namespace': parent_ns,
            'value': leaf_key,
        },
    }
    fb.mss.mqlwrite(link_query)
def cmd_rm(fb, path):
    """unlink a namespace key
    %prog rm id
    remove the /type/key that connects the given id to its
    parent. id must be a path for this to make any sense.
    note that is like unix 'unlink' rather than 'rm'.
    it won't complain if the 'subdirectory' contains data,
    since that data will still be accessible to other queries.
    it's not like 'rm -rf' either, because it doesn't
    disturb anything other than the one directory entry.
    """
    path = fb.absid(path)
    parent_ns, leaf_key = dirsplit(path)
    # delete only the single key linking the object into its parent
    unlink_query = {
        'id': path,
        'key': {
            'connect': 'delete',
            'namespace': parent_ns,
            'value': leaf_key,
        },
    }
    fb.mss.mqlwrite(unlink_query)
def cmd_mv(fb, src, dst):
    """rename srcid to dstid.
    %prog mv srcid dstid
    equivalent to:
    $ fb ln <srcid> <dstid>
    $ fb rm <srcid>
    """
    # create the new key first, then drop the old one, so the object is
    # never left without a key if the second write fails
    cmd_ln(fb, src, dst)
    cmd_rm(fb, src)
def cmd_cat(fb, id, include_headers=False):
    """download a document from freebase to stdout
    %prog cat id
    equivalent to "%prog get id -".
    """
    # delegate to cmd_get with stdout ('-') as the destination
    return cmd_get(fb, id, '-', include_headers)
def cmd_get(fb, id, localfile=None, include_headers=False):
    """download a file from freebase
    %prog get id [localfile]
    download the document or image with the given id from freebase
    into localfile. localfile '-' means stdout. localfile
    defaults to a file in the current directory with the same name
    as the last key in the path, possibly followed by a metadata
    extension like .html or .txt.
    """
    id = fb.absid(id)
    dir,file = dirsplit_unsafe(id)
    def read_content(id, content_only=False):
        # Resolve id to a /type/content description (media type + blob id),
        # following one /common/document -> content hop if necessary.
        c = attrdict(id=id)
        cq = { 'id': id,
               'type': [],
               '/common/document/content': None,
               '/common/document/source_uri': None,
               '/type/content/media_type': { 'name':None,
                                             'optional': True },
               #'/type/content/text_encoding': { 'name':None },
               '/type/content/blob_id':None,
             }
        cd = fb.mss.mqlread(cq)
        if '/type/content' in cd.type:
            c.media_type = cd['/type/content/media_type'].name
            #c.text_encoding = cd['/type/content/text_encoding'].name
            c.sha256 = cd['/type/content/blob_id']
            return c
        if content_only:
            raise CmdException('%s is not a content id' % id)
        cid = cd['/common/document/content']
        if cid is not None:
            return read_content(cid, content_only=True)
        # in this case we don't have a content object
        if cd['/common/document/source_uri'] is not None:
            return None
        raise CmdException('%s is not a content or document id' % id)
    content = read_content(id)
    log.debug('COBJ %r' % content)
    # pick a file extension from the media type, if we learned one
    if content is not None:
        fileext = media_type_to_extension.get(content.media_type, None)
    else:
        fileext = None
    if localfile == '-':
        ofp = sys.stdout
    else:
        if localfile is None:
            implicit_outfile = True
            localfile = file
        elif re.match(r'[/\\]$', localfile):
            # NOTE(review): re.match anchors at the start, so this only
            # matches a one-character "/" or "\" — re.search for a
            # trailing slash was probably intended. TODO confirm.
            implicit_outfile = True
            localfile = localfile + file
        else:
            implicit_outfile = False
        localfile = os.path.abspath(localfile)
        # add file extension based on content-type:
        # should be an option to disable this
        if implicit_outfile and fileext is not None:
            localfile += '.' + fileext
        # if we didn't explicitly name the output file,
        # don't destroy an existing file
        localfile_base = localfile
        count = 0
        while implicit_outfile and os.path.exists(localfile):
            count += 1
            localfile = '%s.%d' % (localfile_base, count)
        ofp = open(localfile, 'wb')
    body = fb.mss.trans(id)
    if include_headers:
        # XXX show content-type, what else?
        pass
    ofp.write(body)
    # only report and close for real files, never for stdout
    if localfile != '-':
        print ('%s saved (%d bytes)' % (localfile, len(body)))
        ofp.close()
def cmd_put(fb, localfile, id=None, content_type=None):
    """upload a document to freebase -- EXPERIMENTAL
    %prog put localfile [id] [content-type]
    upload the document or image in localfile to given freebase
    id. if localfile is '-' the data will be read from stdin.
    if id is missing or empty, a new document will be created.
    later the id might default to something computed from localfile
    and any svn attributes it has.
    output: a single line, the id of the document.
    """
    # NOTE(review): the stdin ('-') case promised in the help text is not
    # implemented below — open(localfile) is called unconditionally.
    if content_type is None:
        # infer the media type from the filename extension
        ext = re.sub('^.*\.([^/.]+)$', r'\1', localfile)
        media_type = extension_to_media_type.get(ext, None)
        if media_type is None:
            raise CmdException('could not infer a media type from extension %r: please specify it'
                               % ext)
        if media_type.startswith('text/'):
            # this is a bad assumption. should sniff it?
            text_encoding = 'utf-8'
            content_type = '%s;charset=%s' % (media_type, text_encoding)
        else:
            content_type = media_type
    # if the requested id doesn't exist yet, upload anonymously and then
    # link the new document at that id afterwards
    new_id = None
    if id is not None:
        idinfo = fb.mss.mqlread({ 'id': id, 'type': '/common/document' })
        if idinfo is None:
            new_id = id
            id = None
    body = open(localfile, 'rb').read()
    r = fb.mss.upload(body, content_type, document_id=id)
    if new_id is None:
        print r.document
    else:
        cmd_ln(fb, r.document, new_id)
        print new_id
def cmd_dump(fb, id):
    """show all properties of a freebase object
    %prog dump object_id
    """
    id = fb.absid(id)
    # NOTE(review): this is a local "inspect" module providing
    # inspect_object — not the stdlib inspect. Verify import resolution.
    import inspect
    r = inspect.inspect_object(fb.mss, id)
    if r is None:
        raise CmdException('no match for id %r' % id)
    # one table row per (property, value) pair
    for k in sorted(r.keys()):
        vs = r[k]
        for v in vs:
            id = v.get('id', '')
            name = '%r' % (v.get('name') or v.get('value'))
            if name == 'None': name = ''
            type = v.get('type', '')
            # the "extra" column depends on the value's type
            if type == '/type/text':
                extra = v.get('lang', '')
            elif type == '/type/key':
                extra = v.get('namespace', '')
            else:
                extra = ''
            fb.trow(k, id, name, type, extra)
def cmd_pget(fb, id, propid):
    """get a property of a freebase object -- EXPERIMENTAL
    %prog pget object_id property_id
    get the property named by property_id from the object.
    XXX output quoting is not well specified.
    property_id must be a fully qualified id for now.
    prints one line for each match.
    if propid ends in '*' this does a wildcard for a particular type.
    """
    id = fb.absid(id)
    proptype, propkey = dirsplit(propid)
    if propkey != '*':
        # look up the single named prop
        q = { 'id': id,
              propid: [{}],
            }
        r = fb.mss.mqlread(q)
        for v in r[propid]:
            # value properties print the literal, link properties the id
            if 'value' in v:
                print v.value
            else:
                print v.id
    else:
        # wildcard: all props, optionally restricted to proptype
        q = { 'id': id,
              '*': [{}],
            }
        if isinstance(proptype, basestring):
            q['type'] = proptype
        r = fb.mss.mqlread(q)
        for k in sorted(r.keys()):
            v = r[k];
            if 'value' in v:
                print '%s %s' % (k, v.value)
            else:
                print '%s %s' % (k, v.id)
def cmd_pdel(fb, id, propid, oldval):
    """delete a property of a freebase object -- EXPERIMENTAL
    %prog pdel object_id property_id oldvalue
    set the property named by property_id on the object.
    value is an id or a json value. XXX this is ambiguous.
    property_id must be a fully qualified id for now.
    for now you need to provide a "oldval" argument,
    later this tool will query and perhaps prompt if the
    deletion is ambiguous.
    prints a single line, either 'deleted' or 'missing'
    """
    # deletion is cmd_pset with val=None and the value to remove as oldval
    return cmd_pset(fb, id, propid, None, oldval)
def cmd_touch(fb):
    """bypass any cached query results the service may have. use sparingly.
    """
    # delegate straight to the session's mqlflush
    fb.mss.mqlflush()
def cmd_pset(fb, id, propkey, val, oldval=None, extra=None):
    """set a property of a freebase object -- EXPERIMENTAL
    %prog pset object_id property_id value
    set the property named by property_id on the object.
    value is an id or a json value. XXX this is ambiguous.
    property_id must be a fully qualified id for now.
    if the property should be a unique property, this will
    write with 'connect:update'. if the property may have
    multiple, it is written with 'connect:insert'.
    prints a single line, either 'inserted' or 'present'
    """
    id = fb.absid(id)
    propid = fb.absprop(propkey)
    # look up the prop so we know its uniqueness and expected type
    pq = { 'id': propid,
           'type': '/type/property',
           'name': None,
           'unique': None,
           'expected_type': {
               'id': None,
               'name': None,
               'default_property': None,
               'optional': True,
           },
         }
    prop = fb.mss.mqlread(pq)
    if prop is None:
        raise CmdException('can\'t resolve property key %r - use an absolute id' % propid);
    # /type/object/* and /type/value/* props are written by bare key name
    if propid.startswith('/type/object/') or propid.startswith('/type/value/'):
        propkey = re.sub('/type/[^/]+/', '', propid);
    else:
        propkey = propid
    wq = { 'id': id,
           propkey: {
           }
         }
    # choose the write directive: delete (val None, used by cmd_pdel),
    # update (unique prop) or insert (multi-valued prop)
    if val is None:
        val = oldval
        wq[propkey]['connect'] = 'delete'
    elif prop.unique:
        wq[propkey]['connect'] = 'update'
    else:
        wq[propkey]['connect'] = 'insert'
    # link properties get an 'id' clause, literal properties a 'value'
    if prop.expected_type is None:
        wq[propkey]['id'] = val
    elif prop.expected_type.id not in value_types:
        wq[propkey]['id'] = val
    else:
        wq[propkey]['value'] = val
        # /type/text needs a lang; `extra` may override the default
        if prop.expected_type.id == '/type/text':
            if extra is not None:
                lang = extra
            else:
                lang = '/lang/en'
            wq[propkey]['lang'] = lang
        # /type/key needs a namespace, supplied via `extra`
        if prop.expected_type.id == '/type/key':
            if extra is not None:
                wq[propkey]['namespace'] = extra
            else:
                raise CmdException('must specify a namespace to pset /type/key')
    r = fb.mss.mqlwrite(wq)
    print r[propkey]['connect']
def cmd_login(fb, username=None, password=None):
    """login to the freebase service
    %prog login [username [password]]
    cookies are maintained in a file so
    they are available to the next invocation.
    prompts for username and password if not given
    """
    import getpass
    if username is None:
        sys.stdout.write('freebase.com username: ')
        username = sys.stdin.readline()
        if not username:
            # EOF on stdin: nothing to log in with.
            # (fix: error message previously said "usernmae")
            raise CmdException('username required for login')
        # strip the trailing newline readline leaves on the input
        username = re.compile('\n$').sub('', username)
    if password is None:
        password = getpass.getpass('freebase.com password: ')
    # stash credentials on the session, then authenticate
    fb.mss.username = username
    fb.mss.password = password
    fb.mss.login()
def cmd_logout(fb):
    """logout from the freebase service
    %prog logout
    deletes the login cookies
    """
    # cookie domains never carry a port, so strip any ":port" suffix
    cookie_domain = fb.service_host.split(':')[0]
    fb.cookiejar.clear(domain=cookie_domain)
def cmd_find(fb, qstr):
    """print all ids matching a given constraint.
    if the query string starts with "{" it is treated as json.
    otherwise it is treated as o-rison.
    %prog find
    """
    if qstr.startswith('{'):
        q = simplejson.loads(qstr)
    else:
        # o-rison: rison body without the surrounding parens
        q = rison.loads('(' + qstr + ')')
    # make sure the query asks for ids so we have something to print
    if 'id' not in q:
        q['id'] = None
    results = fb.mss.mqlreaditer(q)
    for r in results:
        print r.id
def cmd_q(fb, qstr):
    """run a freebase query.
    if the query string starts with "{" it is treated as json.
    otherwise it is treated as o-rison.
    dump the result as json.
    %prog q
    """
    if qstr.startswith('{'):
        q = simplejson.loads(qstr)
    else:
        # o-rison: rison body without the surrounding parens
        q = rison.loads('(' + qstr + ')')
    # results could be streamed with a little more work
    results = fb.mss.mqlreaditer(q)
    print simplejson.dumps(list(results), indent=2)
def cmd_open(fb, id):
    """open a web browser on the given id. works on OSX only for now.
    %prog open /some/id
    """
    # Build argv directly instead of interpolating into a shell string:
    # previously an id containing a quote could break out of the single
    # quotes and run arbitrary shell commands.
    import subprocess
    subprocess.call(['open', 'http://www.freebase.com/view%s' % id])
def cmd_log(fb, id):
    """log changes pertaining to a given id.
    INCOMPLETE
    %prog log /some/id
    """
    # NOTE(review): the id argument is never used below — consistent with
    # the INCOMPLETE marker in the help text.
    # aliases so the query dicts below read like raw JSON
    null = None
    true = True
    false = False
    # fields common to both link queries, newest changes first
    baseq = {
        'type': '/type/link',
        'source': null,
        'master_property': null,
        'attribution': null,
        'timestamp': null,
        'operation': null,
        'valid': null,
        'sort': '-timestamp'
    };
    # two variants: links carrying a literal value, and plain object links
    queries = [
        {
            'target_value': { '*': null },
            'target': { 'id': null, 'name': null, 'optional': true },
        },
        {
            'target': { 'id': null, 'name': null },
        }]
    # merge the shared fields in and wrap each query as a list query
    for i,q in list(enumerate(queries)):
        q.update(baseq)
        queries[i] = [q]
    valuesfrom,linksfrom = fb.mss.mqlreadmulti(queries)
    for link in linksfrom:
        # fb.trow(link.master_property.id, ...)
        print simplejson.dumps(link, indent=2)
    for link in valuesfrom:
        # fb.trow(link.master_property.id, ...)
        print simplejson.dumps(link, indent=2)
def cmd_search(fb, what):
"""Search freebase for "query" and print out 10 matching ids
%prog search "some query"
"""
r = fb.mss.search(what, format="ids", limit=10)
for id in r:
print id
| Python |
#!/usr/bin/env python
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time, stat
from optparse import OptionParser
import getpass
import cookielib
import logging
import simplejson
from fbutil import FbException, CmdException, log, default_propkeys
console = logging.StreamHandler()
log.addHandler(console)
from freebase.api import HTTPMetawebSession, MetawebError, attrdict
# directory for per-user state (the login cookie jar);
# stays None when $HOME is not set
_cookiedir = None
if os.environ.has_key('HOME'):
    _cookiedir = os.path.join(os.environ['HOME'], '.pyfreebase')
class Command(object):
    """A registered fcl subcommand: wraps a cmd_* function together
    with short and long help text derived from its docstring."""
    def __init__(self, module, name, func):
        self.module = module
        self.name = name
        self.func = func
        prog_rx = re.compile(r'\%prog')
        newline_rx = re.compile(r'(?:\r?\n)+')
        # derive help text from the docstring: first line becomes the
        # short description, everything after it the long help
        docstring = func.__doc__
        if isinstance(docstring, basestring):
            expanded = prog_rx.sub('fcl', docstring + '\n')
            self.shortdoc, self.doc = newline_rx.split(expanded, 1)
        else:
            self.shortdoc = '(missing documentation)'
            self.doc = '(missing documentation)'
class FbCommandHandler(object):
    """Top-level driver for the fcl command-line tool.

    Owns the Metaweb HTTP session, the login cookie jar, and the
    registry of subcommands populated by import_commands().
    """
    def __init__(self):
        # default service endpoint, overridable with -s/--service or -S
        self.service_host = 'www.freebase.com'
        self.cookiejar = None
        # "current working id" - relative-id resolution is disabled (see absid)
        self.cwid = ''
        self.progpath = 'fcl'
        # maps subcommand name -> Command instance
        self.commands = {}
        self.cookiefile = None
        if _cookiedir is not None:
            self.cookiefile = os.path.join(_cookiedir, 'cookiejar')
    def init(self):
        """Load saved cookies (if any) and open the HTTP session."""
        if self.cookiefile is not None:
            self.cookiejar = cookielib.LWPCookieJar(self.cookiefile)
            if os.path.exists(self.cookiefile):
                try:
                    self.cookiejar.load(ignore_discard=True)
                except cookielib.LoadError:
                    # a corrupt cookie file is not fatal; start with an empty jar
                    log.warn('error loading cookies')
        #print 'start cookies %r' % self.cookiejar
        self.mss = HTTPMetawebSession(self.service_host,
                                      cookiejar=self.cookiejar)
    def absid(self, path):
        """Resolve *path* to an absolute id.

        Absolute ids (leading '/') pass through unchanged; relative ids
        would require a valid self.cwid, which is currently unsupported,
        so they raise CmdException.
        """
        if path is None:
            path = ''
        if path.startswith('/'):
            return path
        if not isinstance(self.cwid, basestring) or not self.cwid.startswith('/'):
            # svn cwid support is disabled because it relies on some svn glue that
            # was not sufficiently well thought out.
            # raise CmdException("can't resolve relative id %r without cwid - see 'fcl help pwid'" % (path))
            raise CmdException("no support for relative id %r" % (path))
        if path == '' or path == '.':
            return self.cwid
        return self.cwid + '/' + path
    def absprop(self, propkey):
        """Resolve a property key to an absolute property id, expanding
        well-known short names (id, name, value, ...) first."""
        if propkey.startswith('/'):
            return propkey
        # check schemas of /type/object and /type/value,
        # as well as other reserved names
        if propkey in default_propkeys:
            return default_propkeys[propkey]
        return self.absid(propkey)
    def thead(self, *args):
        # print a tab-separated header row with repr-quoted cells
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def trow(self, *args):
        # print a tab-separated data row
        print '\t'.join(args)
        return
        # NOTE(review): the early return above makes the repr-quoting
        # variant below (mirroring thead) dead code
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def save(self):
        """Persist session state (currently just cookies) to disk,
        creating a private cookie dir on first use."""
        #print 'end cookies %r' % self.cookiejar
        if _cookiedir and self.cookiefile.startswith(_cookiedir):
            # create private cookiedir if needed
            if not os.path.exists(_cookiedir):
                os.mkdir(_cookiedir, 0700)
                os.chmod(_cookiedir, stat.S_IRWXU)
        if self.cookiejar is None:
            return
        self.cookiejar.save(ignore_discard=True)
        # save the cwd and other state too
    def import_commands(self, modname):
        """
        import new fb commands from a file
        """
        # import the module for its side effects, then register every
        # callable named cmd_* under its name minus the cmd_ prefix
        namespace = {}
        pyimport = 'from %s import *' % modname
        exec pyimport in namespace
        mod = sys.modules.get(modname)
        commands = [Command(mod, k[4:], getattr(mod, k))
                    for k in getattr(mod, '__all__', dir(mod))
                    if (k.startswith('cmd_')
                        and callable(getattr(mod, k)))]
        for cmd in commands:
            log.info('importing %r' % ((cmd.name, cmd.func),))
            self.commands[cmd.name] = cmd
        log.info('imported %r from %r' % (modname, mod.__file__))
    def dispatch(self, cmd, args):
        """Invoke subcommand *cmd*, reporting the known exception types
        as one-line stderr messages, then persist session state."""
        if cmd in self.commands:
            try:
                self.commands[cmd].func(self, *args)
            except KeyboardInterrupt, e:
                sys.stderr.write('%s\n' % (str(e),))
            except FbException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except CmdException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except MetawebError, e:
                sys.stderr.write('%s\n' % (str(e),))
        else:
            self.oparser.error('unknown subcommand %r, try "%s help"' % (cmd, self.progpath))
        self.save()
    def cmdline_main(self):
        """Parse global options, configure logging, open the session,
        load the builtin command modules, and dispatch the subcommand."""
        op = OptionParser(usage='%prog [options] command [args...] ')
        self.oparser = op
        # global options must precede the subcommand name
        op.disable_interspersed_args()
        op.add_option('-d', '--debug', dest='debug',
                      default=False, action='store_true',
                      help='turn on debugging output')
        op.add_option('-v', '--verbose', dest='verbose',
                      default=False, action='store_true',
                      help='verbose output')
        op.add_option('-V', '--very-verbose', dest='very_verbose',
                      default=False, action='store_true',
                      help='lots of debug output')
        op.add_option('-s', '--service', dest='service_host',
                      metavar='HOST',
                      default=self.service_host,
                      help='Freebase HTTP service address:port')
        op.add_option('-S', '--sandbox', dest='use_sandbox',
                      default=False, action='store_true',
                      help='shortcut for --service=sandbox.freebase.com')
        op.add_option('-c', '--cookiejar', dest='cookiefile',
                      metavar='FILE',
                      default=self.cookiefile,
                      help='Cookie storage file (will be created if missing)')
        options,args = op.parse_args()
        if len(args) < 1:
            op.error('required subcommand missing')
        loglevel = logging.WARNING
        if options.verbose:
            loglevel = logging.INFO
        if options.very_verbose:
            loglevel = logging.DEBUG
        console.setLevel(loglevel)
        log.setLevel(loglevel)
        if options.use_sandbox:
            self.service_host = 'sandbox.freebase.com'
        else:
            self.service_host = options.service_host
        self.cookiefile = options.cookiefile
        #self.progpath = sys.argv[0]
        self.init()
        # route the session's own logging through our console handler
        self.mss.log.setLevel(loglevel)
        self.mss.log.addHandler(console)
        self.import_commands('freebase.fcl.commands')
        self.import_commands('freebase.fcl.mktype')
        cmd = args.pop(0)
        self.dispatch(cmd, args)
# entry point for script
def main():
    """Script entry point: force binary stdout/stderr on windows, then
    run the command handler."""
    try:
        # turn off crlf output on windows so we work properly
        # with unix tools.
        import msvcrt
        for stream in (sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        # not windows - nothing to do
        pass
    handler = FbCommandHandler()
    handler.cmdline_main()

if __name__ == '__main__':
    main()
| Python |
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import re
import logging
log = logging.getLogger()
class FbException(Exception):
    """Base error raised by the freebase command-line utilities."""
    pass
class CmdException(Exception):
    """Error in how a subcommand was invoked (bad arguments, etc.)."""
    pass
# file extension -> list of media types; the first entry in each list
# is the canonical media type for that extension
media_types = {
    'html': ['text/html'],
    'txt': ['text/plain'],
    'xml': ['text/xml',
            'application/xml'],
    'atom': ['application/atom+xml'],
    'js': ['text/javascript',
           'application/javascript',
           'application/x-javascript'],
    'json': ['application/json'],
    'jpg': ['image/jpeg',
            'image/pjpeg'],
    'gif': ['image/gif'],
    'png': ['image/png'],
}
# forward map: extension -> canonical (first-listed) media type
extension_to_media_type = dict((ext, mtypes[0])
                               for ext, mtypes in media_types.items())
# reverse map: every known media type -> its extension
media_type_to_extension = dict((mtype, ext)
                               for ext, mtypes in media_types.items()
                               for mtype in mtypes)
# matches "<parent>/<leaf>" where the leaf holds no slash
DIRSPLIT = re.compile(r'^(.+)/([^/]+)$')

def dirsplit_unsafe(id):
    """Split '/a/b/c' into ('/a/b', 'c').

    Returns (None, id) when the id has no slash-separated parent.
    """
    match = DIRSPLIT.match(id)
    if match is not None:
        return match.group(1), match.group(2)
    return (None, id)
def dirsplit(id):
    """Like dirsplit_unsafe, but reject /guid ids, which are not
    freebase keypaths."""
    parent, leaf = dirsplit_unsafe(id)
    if parent == '/guid':
        raise FbException('%r is not a freebase keypath' % (id,))
    return (parent, leaf)
# MQL value types: properties whose expected_type is one of these hold
# literal values rather than links to other objects
value_types = [
    '/type/text',
    '/type/key',
    '/type/rawstring',
    '/type/float',
    '/type/int',
    '/type/boolean',
    '/type/uri',
    '/type/datetime',
    '/type/id',
    '/type/enumeration',
    ]
# short property names that expand to well-known /type/object and
# /type/value property ids (used by FbCommandHandler.absprop)
default_propkeys = {
    'value': '/type/value/value',
    'id': '/type/object/id',
    'guid': '/type/object/guid',
    'type': '/type/object/type',
    'name': '/type/object/name',
    'key': '/type/object/key',
    'timestamp': '/type/object/timestamp',
    'permission': '/type/object/permission',
    'creator': '/type/object/creator',
    'attribution': '/type/object/attribution'
    };
| Python |
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
#
#
# wrap all the nastiness needed for a general mql inspect query
#
#
import os, sys, re
# json-style aliases so the query below can be pasted to/from MQL as-is
null = None
true = True
false = False
# one big reflection query: for a given object id, pull every outgoing
# link, every incoming link, and every value.  /type/text values are
# queried a second time (the 't:' clause) so the lang can be captured,
# plus creator/timestamp/keys which /type/reflect does not enumerate.
inspect_query = {
    'name': null,
    'type': [],
    '/type/reflect/any_master': [{
        'optional':true,
        'id': null,
        'name': null,
        'link': {
            'master_property': {
                'id': null,
                'schema': null
            }
        }
    }],
    '/type/reflect/any_reverse': [{
        'optional':true,
        'id': null,
        'name': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null,
                'expected_type': null,
                'reverse_property': {
                    'id': null,
                    'schema': null,
                    'optional': true
                }
            }
        }
    }],
    '/type/reflect/any_value': [{
        'optional':true,
        'value': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null,
                'expected_type': null
            },
        }
    }],
    't:/type/reflect/any_value': [{
        'optional':true,
        'type': '/type/text',
        'value': null,
        'lang': null,
        'link': {
            'master_property': {
                'id':null,
                'schema': null
            },
        }
    }],
    '/type/object/creator': [{
        'optional':true,
        'id':null,
        'name':null
    }],
    '/type/object/timestamp': [{
        'optional':true,
        'value': null,
    }],
    '/type/object/key': [{
        'optional':true,
        'value': null,
        'namespace': null
    }],
    '/type/namespace/keys': [{
        'optional':true,
        'value': null,
        'namespace': null
    }]
}
def transform_result(result):
    """Flatten a raw inspect_query result into {property_id: [values]}.

    Values arrive bucketed by /type/reflect clause; each value dict is
    shallow-copied with its 'link' wrapper removed.  proptypes (the same
    data grouped by schema) is built alongside but currently unused -
    only props is returned (see the commented-out return below).
    """
    proptypes = {}
    props = {}
    # copy a property from a /type/reflect clause
    def pushtype(propdesc, prop):
        tid = propdesc['schema']
        propid = propdesc['id']
        if isinstance(prop, dict):
            # copy so we do not mutate the caller's result structure
            prop = dict(prop)
            if 'link' in prop:
                prop.pop('link')
        if tid not in proptypes:
            proptypes[tid] = {}
        if propid not in proptypes[tid]:
            proptypes[tid][propid] = []
        if propid not in props:
            props[propid] = []
        props[propid].append(prop)
    # copy a property that isn't enumerated by /type/reflect
    def pushprop(propid):
        ps = result[propid]
        if ps is None:
            return
        # hack to infer the schema from id, not always reliable!
        schema = re.sub(r'/[^/]+$', '', propid)
        keyprop = dict(id=propid, schema=schema)
        for p in ps:
            pushtype(keyprop, p)
    # outgoing object links
    ps = result['/type/reflect/any_master'] or []
    for p in ps:
        propdesc = p.link.master_property
        pushtype(propdesc, p)
    # non-text non-key values
    ps = result['/type/reflect/any_value'] or []
    for p in ps:
        propdesc = p.link.master_property
        # /type/text values are queried specially
        # so that we can get the lang, so ignore
        # them here.
        if propdesc.expected_type == '/type/text':
            continue
        pushtype(propdesc, p)
    # text values
    ps = result['t:/type/reflect/any_value'] or []
    for p in ps:
        propdesc = p.link.master_property
        pushtype(propdesc, p)
    pushprop('/type/object/creator')
    pushprop('/type/object/timestamp')
    pushprop('/type/object/key')
    pushprop('/type/namespace/keys')
    # now the reverse properties
    ps = result['/type/reflect/any_reverse'] or []
    for prop in ps:
        propdesc = prop.link.master_property.reverse_property
        # synthetic property descriptor for the reverse of
        # a property with no reverse descriptor.
        # note the bogus id starting with '-'.
        if propdesc is None:
            # schema = prop.link.master_property.expected_type
            # if schema is None:
            #     schema = 'other'
            schema = 'other'
            propdesc = dict(id='-' + prop.link.master_property.id,
                            schema=schema)
        pushtype(propdesc, prop)
    #return proptypes
    return props
def inspect_object(mss, id):
    """Run the inspect query against *id* on session *mss*.

    Returns a {property_id: [values]} dict, or None when the id does
    not match anything.
    """
    # shallow copy so the shared template stays pristine
    query = dict(inspect_query)
    query['id'] = id
    result = mss.mqlread(query)
    if result is None:
        return None
    return transform_result(result)
| Python |
#!/usr/bin/env python
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time, stat
from optparse import OptionParser
import getpass
import cookielib
import logging
import simplejson
from fbutil import FbException, CmdException, log, default_propkeys
console = logging.StreamHandler()
log.addHandler(console)
from freebase.api import HTTPMetawebSession, MetawebError, attrdict
# directory for per-user state (the login cookie jar);
# stays None when $HOME is not set
_cookiedir = None
if os.environ.has_key('HOME'):
    _cookiedir = os.path.join(os.environ['HOME'], '.pyfreebase')
class Command(object):
    """A registered fcl subcommand: wraps a cmd_* function together
    with short and long help text derived from its docstring."""
    def __init__(self, module, name, func):
        self.module = module
        self.name = name
        self.func = func
        prog_rx = re.compile(r'\%prog')
        newline_rx = re.compile(r'(?:\r?\n)+')
        # derive help text from the docstring: first line becomes the
        # short description, everything after it the long help
        docstring = func.__doc__
        if isinstance(docstring, basestring):
            expanded = prog_rx.sub('fcl', docstring + '\n')
            self.shortdoc, self.doc = newline_rx.split(expanded, 1)
        else:
            self.shortdoc = '(missing documentation)'
            self.doc = '(missing documentation)'
class FbCommandHandler(object):
    """Top-level driver for the fcl command-line tool.

    Owns the Metaweb HTTP session, the login cookie jar, and the
    registry of subcommands populated by import_commands().
    """
    def __init__(self):
        # default service endpoint, overridable with -s/--service or -S
        self.service_host = 'www.freebase.com'
        self.cookiejar = None
        # "current working id" - relative-id resolution is disabled (see absid)
        self.cwid = ''
        self.progpath = 'fcl'
        # maps subcommand name -> Command instance
        self.commands = {}
        self.cookiefile = None
        if _cookiedir is not None:
            self.cookiefile = os.path.join(_cookiedir, 'cookiejar')
    def init(self):
        """Load saved cookies (if any) and open the HTTP session."""
        if self.cookiefile is not None:
            self.cookiejar = cookielib.LWPCookieJar(self.cookiefile)
            if os.path.exists(self.cookiefile):
                try:
                    self.cookiejar.load(ignore_discard=True)
                except cookielib.LoadError:
                    # a corrupt cookie file is not fatal; start with an empty jar
                    log.warn('error loading cookies')
        #print 'start cookies %r' % self.cookiejar
        self.mss = HTTPMetawebSession(self.service_host,
                                      cookiejar=self.cookiejar)
    def absid(self, path):
        """Resolve *path* to an absolute id.

        Absolute ids (leading '/') pass through unchanged; relative ids
        would require a valid self.cwid, which is currently unsupported,
        so they raise CmdException.
        """
        if path is None:
            path = ''
        if path.startswith('/'):
            return path
        if not isinstance(self.cwid, basestring) or not self.cwid.startswith('/'):
            # svn cwid support is disabled because it relies on some svn glue that
            # was not sufficiently well thought out.
            # raise CmdException("can't resolve relative id %r without cwid - see 'fcl help pwid'" % (path))
            raise CmdException("no support for relative id %r" % (path))
        if path == '' or path == '.':
            return self.cwid
        return self.cwid + '/' + path
    def absprop(self, propkey):
        """Resolve a property key to an absolute property id, expanding
        well-known short names (id, name, value, ...) first."""
        if propkey.startswith('/'):
            return propkey
        # check schemas of /type/object and /type/value,
        # as well as other reserved names
        if propkey in default_propkeys:
            return default_propkeys[propkey]
        return self.absid(propkey)
    def thead(self, *args):
        # print a tab-separated header row with repr-quoted cells
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def trow(self, *args):
        # print a tab-separated data row
        print '\t'.join(args)
        return
        # NOTE(review): the early return above makes the repr-quoting
        # variant below (mirroring thead) dead code
        strs = ['%r' % arg
                for arg in args]
        print '\t'.join(strs)
    def save(self):
        """Persist session state (currently just cookies) to disk,
        creating a private cookie dir on first use."""
        #print 'end cookies %r' % self.cookiejar
        if _cookiedir and self.cookiefile.startswith(_cookiedir):
            # create private cookiedir if needed
            if not os.path.exists(_cookiedir):
                os.mkdir(_cookiedir, 0700)
                os.chmod(_cookiedir, stat.S_IRWXU)
        if self.cookiejar is None:
            return
        self.cookiejar.save(ignore_discard=True)
        # save the cwd and other state too
    def import_commands(self, modname):
        """
        import new fb commands from a file
        """
        # import the module for its side effects, then register every
        # callable named cmd_* under its name minus the cmd_ prefix
        namespace = {}
        pyimport = 'from %s import *' % modname
        exec pyimport in namespace
        mod = sys.modules.get(modname)
        commands = [Command(mod, k[4:], getattr(mod, k))
                    for k in getattr(mod, '__all__', dir(mod))
                    if (k.startswith('cmd_')
                        and callable(getattr(mod, k)))]
        for cmd in commands:
            log.info('importing %r' % ((cmd.name, cmd.func),))
            self.commands[cmd.name] = cmd
        log.info('imported %r from %r' % (modname, mod.__file__))
    def dispatch(self, cmd, args):
        """Invoke subcommand *cmd*, reporting the known exception types
        as one-line stderr messages, then persist session state."""
        if cmd in self.commands:
            try:
                self.commands[cmd].func(self, *args)
            except KeyboardInterrupt, e:
                sys.stderr.write('%s\n' % (str(e),))
            except FbException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except CmdException, e:
                sys.stderr.write('%s\n' % (str(e),))
            except MetawebError, e:
                sys.stderr.write('%s\n' % (str(e),))
        else:
            self.oparser.error('unknown subcommand %r, try "%s help"' % (cmd, self.progpath))
        self.save()
    def cmdline_main(self):
        """Parse global options, configure logging, open the session,
        load the builtin command modules, and dispatch the subcommand."""
        op = OptionParser(usage='%prog [options] command [args...] ')
        self.oparser = op
        # global options must precede the subcommand name
        op.disable_interspersed_args()
        op.add_option('-d', '--debug', dest='debug',
                      default=False, action='store_true',
                      help='turn on debugging output')
        op.add_option('-v', '--verbose', dest='verbose',
                      default=False, action='store_true',
                      help='verbose output')
        op.add_option('-V', '--very-verbose', dest='very_verbose',
                      default=False, action='store_true',
                      help='lots of debug output')
        op.add_option('-s', '--service', dest='service_host',
                      metavar='HOST',
                      default=self.service_host,
                      help='Freebase HTTP service address:port')
        op.add_option('-S', '--sandbox', dest='use_sandbox',
                      default=False, action='store_true',
                      help='shortcut for --service=sandbox.freebase.com')
        op.add_option('-c', '--cookiejar', dest='cookiefile',
                      metavar='FILE',
                      default=self.cookiefile,
                      help='Cookie storage file (will be created if missing)')
        options,args = op.parse_args()
        if len(args) < 1:
            op.error('required subcommand missing')
        loglevel = logging.WARNING
        if options.verbose:
            loglevel = logging.INFO
        if options.very_verbose:
            loglevel = logging.DEBUG
        console.setLevel(loglevel)
        log.setLevel(loglevel)
        if options.use_sandbox:
            self.service_host = 'sandbox.freebase.com'
        else:
            self.service_host = options.service_host
        self.cookiefile = options.cookiefile
        #self.progpath = sys.argv[0]
        self.init()
        # route the session's own logging through our console handler
        self.mss.log.setLevel(loglevel)
        self.mss.log.addHandler(console)
        self.import_commands('freebase.fcl.commands')
        self.import_commands('freebase.fcl.mktype')
        cmd = args.pop(0)
        self.dispatch(cmd, args)
# entry point for script
def main():
    """Script entry point: force binary stdout/stderr on windows, then
    run the command handler."""
    try:
        # turn off crlf output on windows so we work properly
        # with unix tools.
        import msvcrt
        for stream in (sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        # not windows - nothing to do
        pass
    handler = FbCommandHandler()
    handler.cmdline_main()

if __name__ == '__main__':
    main()
| Python |
# ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import os, sys, re, time
from fbutil import *
def cmd_mkobj(fb, id, typeid='/common/topic', name=''):
"""create a new object with a given type -- EXPERIMENTAL
%prog mkobj new_id typeid name
create a new object with type typeid at the given
namespace location.
if present, name gives the display name of the new object.
"""
id = fb.absid(id)
nsid, key = dirsplit(id)
typeid = fb.absid(typeid)
if name == '':
name = key
wq = { 'create': 'unless_exists',
'id': None,
'name': name,
'type': typeid,
'key':{
'namespace': nsid,
'value': key
},
}
# TODO add included types
r = fb.mss.mqlwrite(wq)
print r.id,r.create
def cmd_mktype(fb, id, name=''):
"""create a new type -- EXPERIMENTAL
%prog mktype new_id name
create a new object with type Type at the given
namespace location.
this doesn't create any type hints.
if present, name gives the display name of the new property
"""
id = fb.absid(id)
nsid, key = dirsplit(id)
if name == '':
name = key
wq = { 'create': 'unless_exists',
'id': None,
'name': name,
'type': '/type/type',
'key':{
'namespace': nsid,
'value': key
},
}
r = fb.mss.mqlwrite(wq)
print r.id,r.create
def mkprop(fb, typeid, key, name='', vtype=None, master_property=None):
    """helper to create a new property

    Builds and runs the mqlwrite for a /type/property on *typeid*
    keyed by *key*; the expected type and master property clauses are
    included only when given.
    """
    # an empty name defaults to the property's key
    if name == '':
        name = key
    write_query = {
        'create': 'unless_exists',
        'id': None,
        'type': '/type/property',
        'name': name,
        'schema': typeid,
        'key': {
            'namespace': typeid,
            'value': key
        }
    }
    if vtype is not None:
        write_query['expected_type'] = vtype
    if master_property is not None:
        write_query['master_property'] = master_property
    return fb.mss.mqlwrite(write_query)
def cmd_mkprop(fb, id, name='', vtype=None, revkey=None, revname=''):
"""create a new property -- EXPERIMENTAL
%prog mkprop new_id [name] [expected_type] [reverse_property] [reverse_name]
create a new object with type Property at the given
location. creates both the "schema" and "key" links
for the property, but doesn't create any freebase property
hints.
if present, name gives the display name of the new property
"""
id = fb.absid(id)
if vtype is not None:
vtype = fb.absid(vtype)
typeid, key = dirsplit(id)
r = mkprop(fb, typeid, key, name, vtype)
# write the reverse property if specified
print r.id, r.create
if revkey is None:
return
assert vtype is not None
rr = mkprop(fb, vtype, revkey, revname, typeid, id)
print rr.id, rr.create
def cmd_publish_type(fb, typeid):
"""try to publish a freebase type for the client
%prog publish_type typeid
set /freebase/type_profile/published to the /freebase/type_status
instance named 'Published'
should also try to set the domain to some namespace that
has type:/type/domain
"""
id = fb.absid(typeid)
w = {
'id': id,
'/freebase/type_profile/published': {
'connect': 'insert',
'type': '/freebase/type_status',
'name': 'Published'
}
}
r = fb.mss.mqlwrite(w)
print r['/freebase/type_profile/published']['connect']
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys
from freebase.api.session import HTTPMetawebSession
# a default session pointed at the sandbox service; its public methods
# are re-exported below as module-level functions
_base = HTTPMetawebSession("sandbox.freebase.com")
__all__ = ["HTTPMetawebSession"]
# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()
# a little trick to refer to ourselves
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)
# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
# NOTE(review): this del raises NameError if the session exposed no
# public callables (funcname/func never bound) - confirm that cannot happen
del funcname, func
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# URI Templating in Python
#
# see http://bitworking.org/projects/URI-Templates/
# and http://bitworking.org/news/URI_Templates
#
# note that this implementation may go away soon in
# favor of joe gregorio's own code:
# http://code.google.com/p/uri-templates/
#
#
# this implementation can also parse URIs, as long as the
# template is sufficiently specific. to allow this to work
# the '/' character is forbidden in keys when parsing.
# later it should be possible to loosen this restriction.
#
#
# example:
# from whatever.uritemplate import expand_uri_template
# expand_uri_template('http://{host}/{file}',
# dict(host='example.org',
# file='fred'))
#
# TODO:
# allow parsing to be aware of http://www. and trailing /
# nail down quoting issues
#
import os, sys, re
import urllib
__all__ = ['expand_uri_template', 'URITemplate']
def expand_uri_template(template, args):
    """Expand a URI template using the given args dictionary."""
    compiled = URITemplate(template)
    return compiled.run(args)
def _uri_encode_var(v):
    # percent-encode a template variable value; the generous safe set
    # leaves URI structural punctuation (/, ?, #, :, etc.) unescaped.
    # NOTE(review): urllib.quote is the python2 spelling (urllib.parse.quote
    # in python3) - confirm the target runtime
    return urllib.quote(v, safe="-_.~!$&'()*+,;=:/?[]#@")
class URITemplate(object):
    """A URI with simple {name} variable substitution.

    Can both expand (run) and, when the template is specific enough,
    parse a concrete URI back into variable values.
    """
    # a variable reference looks like {name}
    VARREF = re.compile(r'\{([0-9a-zA-Z_]+)\}')
    def __init__(self, s):
        """Compile a URITemplate from a string."""
        self.template = s
        self.params = []
        # re.split alternates literal text (even slots) and variable
        # names (odd slots); build a matching regexp as we go
        pieces = self.VARREF.split(s)
        pattern = ['^']
        for idx, piece in enumerate(pieces):
            if idx % 2:
                self.params.append(piece)
                # variables match anything except '/' - this is imperfect...
                pattern.append('([^/]*)')
            else:
                # literal text, quoted against regexp interpretation
                pattern.append(re.escape(piece))
        pattern.append('$')
        self._parser = re.compile(''.join(pattern))
    def __repr__(self):
        return '<URITemplate %r>' % self.template
    def run(self, args):
        """Expand the template using the given args."""
        def substitute(m):
            return _uri_encode_var(args.get(m.group(1), ''))
        #if self.parse(uri) is None:
        #    print 're-parsing generated uri failed: %r, %r' % (uri, self.template)
        return self.VARREF.sub(substitute, self.template)
    def parse(self, uri):
        """Try to parse a URI, extracting variable values."""
        m = self._parser.match(uri)
        if m is None:
            return None
        return dict(zip(self.params, m.groups()))
if __name__ == '__main__':
    # Self-test: fetch the shared URI-Template test suite (from the CPAN
    # URI::Template distribution) and run every case through URITemplate.
    # NOTE: needs network access and Python 2 (urllib2, print statement).
    import urllib2, simplejson
    fp = urllib2.urlopen('http://search.cpan.org/src/BRICAS/URI-Template-0.09/t/data/spec.json')
    testsuite = simplejson.loads(fp.read())
    # suite keys arrive as unicode; encode to utf-8 byte strings
    vars = dict([(k.encode('utf-8'),v) for k,v in testsuite['variables'].items()])
    nsucceed = 0
    nfail = 0
    for test in testsuite['tests']:
        ut = URITemplate(test['template'])
        uri = ut.run(vars)
        if uri != test['expected']:
            print 'FAILED %r expected %r' % (uri, test['expected'])
            print ' vars: %r' % (vars,)
            nfail += 1
        else:
            nsucceed += 1
    print 'tests: %d succeeded, %d failed' % (nsucceed, nfail)
| Python |
# Package initializer: re-exports every public method of a default
# HTTPMetawebSession as a module-level function, so callers can write
# freebase.func() without creating a session themselves.
import sys
from freebase.api.session import HTTPMetawebSession
import sandbox
__all__ = ["HTTPMetawebSession", "sandbox"]
# shared default session against the production service
_base = HTTPMetawebSession("freebase.com")
# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()
# a little trick to refer to __init__
# self isn't defined because __init__ is in
# a world in and of itself
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)
# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
# NOTE(review): assumes _base exposes at least one public callable;
# otherwise funcname/func are unbound and this del raises NameError.
del funcname, func
#
# rison for python (parser only so far)
# see http://mjtemplate.org/examples/rison.html for more info
#
######################################################################
#
# the rison parser is based on javascript openlaszlo-json:
# Author: Oliver Steele
# Copyright: Copyright 2006 Oliver Steele. All rights reserved.
# Homepage: http:#osteele.com/sources/openlaszlo/json
# License: MIT License.
# Version: 1.0
#
# hacked by nix for use in uris
# ported to python by nix
#
# TODO
#
# switch to unicode
# fall through to simplejson if first char is not in '!(' -
# this allows code to use just one parser
#
import os, sys, re
#import simplejson
simplejson = None
class ParserException(Exception):
    """Raised when a rison string cannot be parsed."""
    pass
class Parser(object):
    """Recursive-descent parser for rison, a compact URI-friendly
    encoding of JSON-like values.

    Usage: Parser().parse("(a:0,b:1)") -> {'a': 0, 'b': 1}
    Raises ParserException on malformed input.
    """
    # characters skipped between tokens (rison itself is whitespace-free)
    WHITESPACE = ''
    #WHITESPACE = " \t\n\r\f"

    # we divide the uri-safe glyphs into three sets
    # <rison> and <reserved> classes are illegal in ids.
    #   <rison>    - used by rison (possibly later)
    #   <reserved> - not common in strings, reserved
    idchar_punctuation = '_-./~'
    # every ASCII char that may NOT appear in an id: anything that is
    # neither alphanumeric nor one of '_-./~'.
    # BUG FIX: the literal is repeated inside the genexp because class-body
    # comprehensions cannot see class-scope names on Python 3 (NameError at
    # import time); this form works identically on Python 2.
    not_idchar = ''.join(c for c in map(chr, range(127))
                         if not (c.isalnum() or c in '_-./~'))
    # additionally, we need to distinguish ids and numbers by first char
    not_idstart = "-0123456789"
    # regexp string matching a valid id
    idrx = ('[^' + not_idstart + not_idchar +
            '][^' + not_idchar + ']*')
    # regexp to check for valid rison ids
    id_ok_re = re.compile('^' + idrx + '$', re.M)
    # regexp to find the end of an id when parsing
    next_id_re = re.compile(idrx, re.M)

    def parse_json(self, str):
        """Parse rison or plain json: strings not starting with '!' or
        '(' are handed to simplejson, everything else to parse()."""
        if len(str) > 0 and str[0] not in '!(':
            return simplejson.loads(str)
        return self.parse(str)

    def parse(self, str):
        """Parse a rison string and return the python value.

        Raises ParserException on malformed input or trailing garbage.
        """
        self.string = str
        self.index = 0
        value = self.readValue()
        if self.next():
            # a complete value was read but characters remain
            raise ParserException("unable to parse rison string %r" % (str,))
        return value

    def readValue(self):
        """Dispatch on the next character to the right sub-parser."""
        c = self.next()
        if c is None:
            # BUG FIX: empty input used to fall through to the
            # `c in '-0123456789'` test with None and die with TypeError.
            raise ParserException("empty expression")
        if c == '!':
            return self.parse_bang()
        if c == '(':
            return self.parse_open_paren()
        if c == "'":
            return self.parse_single_quote()
        if c in '-0123456789':
            return self.parse_number()
        # fell through table, parse as an id
        s = self.string
        i = self.index - 1
        m = self.next_id_re.match(s, i)
        if m:
            id = m.group(0)
            self.index = i + len(id)
            return id  # a string
        raise ParserException("invalid character: '" + c + "'")

    def parse_array(self):
        """Parse the elements of '!(' ... ')'; the '!(' is consumed."""
        ar = []
        while 1:
            c = self.next()
            if c == ')':
                return ar
            if c is None:
                raise ParserException("unmatched '!('")
            if len(ar):
                if c != ',':
                    raise ParserException("missing ','")
            elif c == ',':
                raise ParserException("extra ','")
            else:
                # not a separator: push the character back for readValue
                self.index -= 1
            n = self.readValue()
            ar.append(n)

    def parse_bang(self):
        """Parse a '!' literal: !t, !f, !n or a !( array."""
        s = self.string
        if self.index >= len(s):
            # BUG FIX: previously indexed past the end (IndexError) --
            # the old `c is None` guard could never fire since s[i]
            # raises rather than returning None.
            raise ParserException('"!" at end of input')
        c = s[self.index]
        self.index += 1
        if c not in self.bangs:
            raise ParserException('unknown literal: "!' + c + '"')
        x = self.bangs[c]
        if callable(x):
            return x(self)
        return x

    def parse_open_paren(self):
        """Parse the key:value pairs of '(' ... ')' into a dict."""
        count = 0
        o = {}
        while 1:
            c = self.next()
            if c == ')':
                return o
            if c is None:
                # BUG FIX: an unterminated '(' used to rewind the index
                # below zero and misbehave instead of failing cleanly.
                raise ParserException("unmatched '('")
            if count:
                if c != ',':
                    raise ParserException("missing ','")
            elif c == ',':
                raise ParserException("extra ','")
            else:
                self.index -= 1
            k = self.readValue()
            if self.next() != ':':
                raise ParserException("missing ':'")
            v = self.readValue()
            o[k] = v
            count += 1

    def parse_single_quote(self):
        """Parse a quoted string; '!' escapes an embedded ' or !."""
        s = self.string
        i = self.index
        start = i
        segments = []
        while 1:
            if i >= len(s):
                raise ParserException('unmatched "\'"')
            c = s[i]
            i += 1
            if c == "'":
                break
            if c == '!':
                if start < i-1:
                    segments.append(s[start:i-1])
                if i >= len(s):
                    # BUG FIX: a string ending in a bare '!' used to
                    # raise IndexError here instead of a ParserException.
                    raise ParserException('unmatched "\'"')
                c = s[i]
                i += 1
                if c in "!'":
                    segments.append(c)
                else:
                    raise ParserException('invalid string escape: "!'+c+'"')
                start = i
        if start < i-1:
            segments.append(s[start:i-1])
        self.index = i
        return ''.join(segments)

    # Also any number start (digit or '-')
    def parse_number(self):
        """Parse an int or float (with optional fraction/exponent);
        entered after readValue consumed the first digit or '-'."""
        s = self.string
        i = self.index
        start = i - 1
        state = 'int'
        permittedSigns = '-'
        transitions = {
            'int+.': 'frac',
            'int+e': 'exp',
            'frac+e': 'exp'
        }
        while 1:
            if i >= len(s):
                i += 1
                break
            c = s[i]
            i += 1
            if '0' <= c and c <= '9':
                continue
            if permittedSigns.find(c) >= 0:
                # a '-' is allowed only once, right after an 'e'
                permittedSigns = ''
                continue
            state = transitions.get(state + '+' + c.lower(), None)
            if state is None:
                break
            if state == 'exp':
                permittedSigns = '-'
        self.index = i - 1
        s = s[start:self.index]
        if s == '-':
            raise ParserException("invalid number")
        if re.search('[.e]', s):
            return float(s)
        return int(s)

    # return the next non-whitespace character, or None at end of input
    def next(self):
        s = self.string
        i = self.index
        while 1:
            if i == len(s):
                return None
            c = s[i]
            i += 1
            if c not in self.WHITESPACE:
                break
        self.index = i
        return c

    # literal table for '!' escapes; '!(' starts an array
    bangs = {
        't': True,
        'f': False,
        'n': None,
        '(': parse_array
    }
def loads(s):
    """Parse a rison-encoded string and return the python value."""
    parser = Parser()
    return parser.parse(s)
if __name__ == '__main__':
    # Smoke test: run the documented rison examples through the parser
    # and print each input with its parsed value (Python 2 prints).
    p = Parser()
    rison_examples = [
        "(a:0,b:1)",
        "(a:0,b:foo,c:'23skidoo')",
        "!t",
        "!f",
        "!n",
        "''",
        "0",
        "1.5",
        "-3",
        "1e30",
        "1e-30",
        "G.",
        "a",
        "'0a'",
        "'abc def'",
        "()",
        "(a:0)",
        "(id:!n,type:/common/document)",
        "!()",
        "!(!t,!f,!n,'')",
        "'-h'",
        "a-z",
        "'wow!!'",
        "domain.com",
        "'user@domain.com'",
        "'US $10'",
        "'can!'t'",
        ];
    for s in rison_examples:
        print
        print '*'*70
        print
        print s
        print '%r' % (p.parse(s),)
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
# Example: list every album by The Beatles with its track count, using a
# single MQL read (Python 2, needs network access to the Freebase API).
import freebase
# For the topic /en/the_beatles viewed as /music/artist, fetch each
# album's name and release date; {"return": "count"} collapses the track
# list to a number, and "sort" orders the albums by release date.
query = {
    "id" : "/en/the_beatles",
    "type" : "/music/artist",
    "album" : [{
        "name" : None,
        "release_date" : None,
        "track": {
            "return" : "count"
        },
        "sort" : "release_date"
    }]
}
results = freebase.mqlread(query)
# result records support attribute access (album.name, album.track)
for album in results.album:
    print "Album \"" + album.name + "\" has " + str(album.track) + " tracks."
# Album "Please Please Me" has 14 tracks.
# Album "From Me to You" has 2 tracks.
# Album "Introducing... The Beatles" has 24 tracks.
# Album "She Loves You" has 2 tracks.
# Album "With the Beatles" has 14 tracks.
# Album "I Want to Hold Your Hand" has 2 tracks.
# Album "Meet the Beatles" has 12 tracks.
# Album "Second Album" has 11 tracks.
# Album "Something New" has 11 tracks.
# Album "A Hard Day's Night" has 36 tracks.
# Album "I Feel Fine" has 2 tracks.
# Album "Beatles for Sale" has 14 tracks.
# Album "Beatles '65" has 11 tracks.
# Album "The Early Beatles" has 11 tracks.
# Album "Beatles VI" has 11 tracks.
# Album "Help! / I'm Down" has 2 tracks.
# Album "Help!" has 14 tracks.
# Album "Rubber Soul" has 14 tracks.
# Album "We Can Work It Out / Day Tripper" has 2 tracks.
# Album "Paperback Writer / Rain" has 2 tracks.
# Album "Yesterday... and Today" has 11 tracks.
# Album "Revolver" has 14 tracks.
# Album "Strawberry Fields Forever / Penny Lane" has 2 tracks.
# Album "Sgt. Pepper's Lonely Hearts Club Band" has 24 tracks.
# Album "Magical Mystery Tour" has 11 tracks.
# Album "Hello, Goodbye" has 2 tracks.
# Album "Magical Mystery Tour" has 6 tracks.
# Album "Lady Madonna" has 2 tracks.
# Album "Hey Jude" has 2 tracks.
# Album "The White Album" has 81 tracks.
# Album "Yellow Submarine" has 13 tracks.
# Album "Get Back" has 2 tracks.
# Album "Abbey Road" has 17 tracks.
# Album "Hey Jude" has 10 tracks.
# Album "Let It Be" has 12 tracks.
# Album "1962-1966" has 26 tracks.
# Album "1967-1970" has 28 tracks.
# Album "Love Songs" has 25 tracks.
# Album "The Beatles at the Hollywood Bowl" has 13 tracks.
# Album "Sgt. Pepper's Lonely Hearts Club Band" has 3 tracks.
# Album "Past Masters, Volume One" has 18 tracks.
# Album "Past Masters, Volume Two" has 15 tracks.
# Album "Rockin' at the Star-Club" has 16 tracks.
# Album "The Early Tapes of the Beatles" has 14 tracks.
# Album "Live at the BBC" has 69 tracks.
# Album "Anthology 1" has 60 tracks.
# Album "Free as a Bird" has 4 tracks.
# Album "The Best Of [26 Unforgetable Hit Songs]" has 26 tracks.
# Album "Real Love" has 4 tracks.
# Album "Anthology 2" has 45 tracks.
# Album "Anthology 3" has 50 tracks.
# Album "Yellow Submarine Songtrack" has 15 tracks.
# Album "1" has 27 tracks.
# Album "Let It Be... Naked" has 12 tracks.
# Album "The Capitol Albums, Volume 1" has 90 tracks.
# Album "The Capitol Albums, Volume 2" has 68 tracks.
# Album "16 Superhits, Volume 1" has 16 tracks.
# Album "16 Superhits, Volume 2" has 16 tracks.
# Album "16 Superhits, Volume 3" has 16 tracks.
# Album "16 Superhits, Volume 4" has 16 tracks.
# Album "1962 Live at Star Club in Hamburg" has 24 tracks.
# Album "1962 Live Recordings" has 30 tracks.
# Album "1962-1966 (Red Album)" has 26 tracks.
# Album "1962-1970" has 18 tracks.
# Album "A Collection of Beatles Oldies (UK Mono LP)" has 16 tracks.
# Album "Alternate Rubber Soul" has 28 tracks.
# Album "Anthology (disc 3)" has 22 tracks.
# Album "Beatles Tapes III: The 1964 World Tour" has 19 tracks.
# Album "Beatles VI (Stereo and Mono)" has 22 tracks.
# Album "Best Selection 1962-1968 Part 3" has 20 tracks.
# Album "Best, Volume 4: 1964" has 12 tracks.
# Album "Best, Volume 9: 1966" has 12 tracks.
# Album "Christmas" has 20 tracks.
# Album "Complete Rooftop Concert 1" has 22 tracks.
# Album "EP Collection (disc 1)" has 4 tracks.
# Album "EP Collection (disc 10)" has 4 tracks.
# Album "EP Collection (disc 11: Yesterday)" has 4 tracks.
# Album "EP Collection (disc 12)" has 4 tracks.
# Album "EP Collection" has 12 tracks.
# Album "EP Collection (disc 2: Twist and Shout)" has 4 tracks.
# Album "EP Collection (disc 3)" has 4 tracks.
# Album "EP Collection (disc 4)" has 4 tracks.
# Album "EP Collection (disc 5)" has 4 tracks.
# Album "EP Collection (disc 6)" has 4 tracks.
# Album "EP Collection (disc 7)" has 4 tracks.
# Album "EP Collection (disc 8)" has 4 tracks.
# Album "EP Collection (disc 9)" has 4 tracks.
# Album "EP Collection, Volume 1" has 32 tracks.
# Album "EP Collection, Volume 2" has 26 tracks.
# Album "From Yesterday Forever" has 22 tracks.
# Album "Get Back" has 20 tracks.
# Album "Golden Best 20, Volume 1" has 19 tracks.
# Album "In the Beginning" has 12 tracks.
# Album "Introducing the Beatles (Us Mono Ver. 2)" has 12 tracks.
# Album "Live at Star Club 1962, Volume 1" has 11 tracks.
# Album "Live in Adelaide & Houston, Texas" has 18 tracks.
# Album "Live in Japan 1964" has 22 tracks.
# Album "Live in Paris 1965 2" has 24 tracks.
# Album "Live in Paris, 1965" has 23 tracks.
# Album "Mythology 2 (disc 1)" has 21 tracks.
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
# Example: list every album by The Beatles with its track count, using a
# single MQL read (Python 2, needs network access to the Freebase API).
import freebase
# For the topic /en/the_beatles viewed as /music/artist, fetch each
# album's name and release date; {"return": "count"} collapses the track
# list to a number, and "sort" orders the albums by release date.
query = {
    "id" : "/en/the_beatles",
    "type" : "/music/artist",
    "album" : [{
        "name" : None,
        "release_date" : None,
        "track": {
            "return" : "count"
        },
        "sort" : "release_date"
    }]
}
results = freebase.mqlread(query)
# result records support attribute access (album.name, album.track)
for album in results.album:
    print "Album \"" + album.name + "\" has " + str(album.track) + " tracks."
# Album "Please Please Me" has 14 tracks.
# Album "From Me to You" has 2 tracks.
# Album "Introducing... The Beatles" has 24 tracks.
# Album "She Loves You" has 2 tracks.
# Album "With the Beatles" has 14 tracks.
# Album "I Want to Hold Your Hand" has 2 tracks.
# Album "Meet the Beatles" has 12 tracks.
# Album "Second Album" has 11 tracks.
# Album "Something New" has 11 tracks.
# Album "A Hard Day's Night" has 36 tracks.
# Album "I Feel Fine" has 2 tracks.
# Album "Beatles for Sale" has 14 tracks.
# Album "Beatles '65" has 11 tracks.
# Album "The Early Beatles" has 11 tracks.
# Album "Beatles VI" has 11 tracks.
# Album "Help! / I'm Down" has 2 tracks.
# Album "Help!" has 14 tracks.
# Album "Rubber Soul" has 14 tracks.
# Album "We Can Work It Out / Day Tripper" has 2 tracks.
# Album "Paperback Writer / Rain" has 2 tracks.
# Album "Yesterday... and Today" has 11 tracks.
# Album "Revolver" has 14 tracks.
# Album "Strawberry Fields Forever / Penny Lane" has 2 tracks.
# Album "Sgt. Pepper's Lonely Hearts Club Band" has 24 tracks.
# Album "Magical Mystery Tour" has 11 tracks.
# Album "Hello, Goodbye" has 2 tracks.
# Album "Magical Mystery Tour" has 6 tracks.
# Album "Lady Madonna" has 2 tracks.
# Album "Hey Jude" has 2 tracks.
# Album "The White Album" has 81 tracks.
# Album "Yellow Submarine" has 13 tracks.
# Album "Get Back" has 2 tracks.
# Album "Abbey Road" has 17 tracks.
# Album "Hey Jude" has 10 tracks.
# Album "Let It Be" has 12 tracks.
# Album "1962-1966" has 26 tracks.
# Album "1967-1970" has 28 tracks.
# Album "Love Songs" has 25 tracks.
# Album "The Beatles at the Hollywood Bowl" has 13 tracks.
# Album "Sgt. Pepper's Lonely Hearts Club Band" has 3 tracks.
# Album "Past Masters, Volume One" has 18 tracks.
# Album "Past Masters, Volume Two" has 15 tracks.
# Album "Rockin' at the Star-Club" has 16 tracks.
# Album "The Early Tapes of the Beatles" has 14 tracks.
# Album "Live at the BBC" has 69 tracks.
# Album "Anthology 1" has 60 tracks.
# Album "Free as a Bird" has 4 tracks.
# Album "The Best Of [26 Unforgetable Hit Songs]" has 26 tracks.
# Album "Real Love" has 4 tracks.
# Album "Anthology 2" has 45 tracks.
# Album "Anthology 3" has 50 tracks.
# Album "Yellow Submarine Songtrack" has 15 tracks.
# Album "1" has 27 tracks.
# Album "Let It Be... Naked" has 12 tracks.
# Album "The Capitol Albums, Volume 1" has 90 tracks.
# Album "The Capitol Albums, Volume 2" has 68 tracks.
# Album "16 Superhits, Volume 1" has 16 tracks.
# Album "16 Superhits, Volume 2" has 16 tracks.
# Album "16 Superhits, Volume 3" has 16 tracks.
# Album "16 Superhits, Volume 4" has 16 tracks.
# Album "1962 Live at Star Club in Hamburg" has 24 tracks.
# Album "1962 Live Recordings" has 30 tracks.
# Album "1962-1966 (Red Album)" has 26 tracks.
# Album "1962-1970" has 18 tracks.
# Album "A Collection of Beatles Oldies (UK Mono LP)" has 16 tracks.
# Album "Alternate Rubber Soul" has 28 tracks.
# Album "Anthology (disc 3)" has 22 tracks.
# Album "Beatles Tapes III: The 1964 World Tour" has 19 tracks.
# Album "Beatles VI (Stereo and Mono)" has 22 tracks.
# Album "Best Selection 1962-1968 Part 3" has 20 tracks.
# Album "Best, Volume 4: 1964" has 12 tracks.
# Album "Best, Volume 9: 1966" has 12 tracks.
# Album "Christmas" has 20 tracks.
# Album "Complete Rooftop Concert 1" has 22 tracks.
# Album "EP Collection (disc 1)" has 4 tracks.
# Album "EP Collection (disc 10)" has 4 tracks.
# Album "EP Collection (disc 11: Yesterday)" has 4 tracks.
# Album "EP Collection (disc 12)" has 4 tracks.
# Album "EP Collection" has 12 tracks.
# Album "EP Collection (disc 2: Twist and Shout)" has 4 tracks.
# Album "EP Collection (disc 3)" has 4 tracks.
# Album "EP Collection (disc 4)" has 4 tracks.
# Album "EP Collection (disc 5)" has 4 tracks.
# Album "EP Collection (disc 6)" has 4 tracks.
# Album "EP Collection (disc 7)" has 4 tracks.
# Album "EP Collection (disc 8)" has 4 tracks.
# Album "EP Collection (disc 9)" has 4 tracks.
# Album "EP Collection, Volume 1" has 32 tracks.
# Album "EP Collection, Volume 2" has 26 tracks.
# Album "From Yesterday Forever" has 22 tracks.
# Album "Get Back" has 20 tracks.
# Album "Golden Best 20, Volume 1" has 19 tracks.
# Album "In the Beginning" has 12 tracks.
# Album "Introducing the Beatles (Us Mono Ver. 2)" has 12 tracks.
# Album "Live at Star Club 1962, Volume 1" has 11 tracks.
# Album "Live in Adelaide & Houston, Texas" has 18 tracks.
# Album "Live in Japan 1964" has 22 tracks.
# Album "Live in Paris 1965 2" has 24 tracks.
# Album "Live in Paris, 1965" has 23 tracks.
# Album "Mythology 2 (disc 1)" has 21 tracks.
| Python |
import cgi
import os
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
import freebase
class Search(db.Model):
    # Datastore record of one search performed by a signed-in user.
    author = db.UserProperty()   # the Google account that searched
    id = db.StringProperty()     # the searched topic id (request 'id' param)
    url = db.StringProperty()    # relative link back to the result page
    name = db.StringProperty()   # human-readable topic name from mqlread
    date = db.DateTimeProperty(auto_now_add=True)  # set on first put()
class MainPage(webapp.RequestHandler):
    """Front page: renders index.html with a login/logout link and, for
    signed-in users, their five most recent searches."""

    def get(self):
        searches = []
        searches_avail = False
        user = users.get_current_user()
        if user:
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
            searches = Search.gql("where author = :author order by date desc limit 5", author=user)
            # Probe the query: indexing succeeds only if a row exists.
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception while
            # keeping the deliberate best-effort behaviour.
            try:
                searches[0]
                searches_avail = True
            except Exception:
                pass
        else:
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login to Google'
        template_values = {
            'user' : user,
            'searches': searches,
            'searches_avail' : searches_avail,
            'url': url,
            'url_linktext': url_linktext,
        }
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(path, template_values))
class Name(webapp.RequestHandler):
    """Responds with the display name of the topic id given in the
    'id' query parameter."""
    def get(self):
        topic_id = cgi.escape(self.request.get('id'))
        result = freebase.mqlread({ "id" : topic_id, "name" : None})
        self.response.out.write(result.name)
class Recents(webapp.RequestHandler):
    """Emits an HTML fragment linking to the signed-in user's five most
    recent searches; empty output when logged out or nothing stored."""
    def get(self):
        user = users.get_current_user()
        output = ""
        if user:
            recent = Search.gql("where author = :author order by date desc limit 5", author=user)
            links = ["<a href='?id=" + s.id + "'>" + s.name + "</a>"
                     for s in recent]
            if links:
                output = "<strong>Your Recent Searches:</strong> " + ", ".join(links)
        self.response.out.write(output)
class Info(webapp.RequestHandler):
    """Looks up the requested topic, records the search for the current
    user, then renders image thumbnails of topics that link to it."""
    def get(self):
        # building blocks for page links and thumbnail URLs
        VIEW = "?id="
        BASE = "http://www.freebase.com/api/trans/image_thumb"
        OPTIONS = "?maxheight=300"
        id = cgi.escape(self.request.get('id'))
        if id != "":
            info_query = {
                "id" : id,
                "name" : None,
            }
            info = freebase.mqlread(info_query)
            user = users.get_current_user()
            search = Search()
            if user:
                # remember this search for the "recents" list
                search.author = user
                search.id = id
                search.name = info.name
                search.url = VIEW + id
                search.put()
            # reflect query: topics that link *to* this one, each with
            # up to five attached images
            query = {
                "id": id,
                "/type/reflect/any_reverse": [{
                    "id" : None,
                    "/common/topic/image": [{
                        "id": None,
                        "optional": True,
                        "limit": 5
                    }]
                }]
            }
            results = freebase.mqlread(query)
            # map linking-page URL -> thumbnail URL
            images = dict()
            for thing in results["/type/reflect/any_reverse"]:
                imgroup = thing['/common/topic/image']
                if len(imgroup) > 0:
                    for image in imgroup:
                        images[VIEW + thing["id"]] = BASE + image['id'] + OPTIONS
            #template_values = {"images": images}
            #path = os.path.join(os.path.dirname(__file__), 'images.html')
            #self.response.out.write(template.render(path, template_values))
            final = ""
            for page, image in images.items():
                final += "<a href='" + page + "'><img src='" + image + "' alt='' /></a>"
            if len(final) == 0:
                final = "Sorry, there are no images to show."
            self.response.out.write(final)
# URL routing table for the WSGI application.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/info', Info),
     ('/name', Name),
     ('/recents', Recents)],
    debug=True)
def main():
    # CGI entry point invoked by the App Engine runtime.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
# Optional transport backends: each group is probed independently so the
# module loads on App Engine (urlfetch), with httplib2, or with plain
# urllib2.  BUG FIX: the guards were bare `except:` clauses, which also
# hid SystemExit/KeyboardInterrupt and unrelated errors raised while
# importing; narrowed to ImportError.
try:
    from google.appengine.api import urlfetch
    from cookie_handlers import CookiefulUrlfetch
except ImportError:
    pass
try:
    import httplib2
    from cookie_handlers import CookiefulHttp
except ImportError:
    pass
try:
    import urllib2
    import socket
except ImportError:
    pass
import logging
import re
class Urllib2Client(object):
def __init__(self, cookiejar, rse):
cookiespy = urllib2.HTTPCookieProcessor(cookiejar)
self.opener = urllib2.build_opener(cookiespy)
self._raise_service_error = rse
self.log = logging.getLogger()
def __call__(self, url, method, body, headers):
req = urllib2.Request(url, body, headers)
try:
resp = self.opener.open(req)
except socket.error, e:
self.log.error('SOCKET FAILURE: %s', e.fp.read())
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except urllib2.HTTPError, e:
self.log.error('HTTP ERROR: %s', e)
self._raise_service_error(url, e.code, e.info().type, e.fp.read())
for header in resp.info().headers:
self.log.debug('HTTP HEADER %s', header)
name, value = re.split("[:\n\r]", header, 1)
if name.lower() == 'x-metaweb-tid':
self.tid = value.strip()
return (resp, resp.read())
class Httplib2Client(object):
def __init__(self, cookiejar, rse):
self.cookiejar = cookiejar
self._raise_service_error = rse
self.httpclient = CookiefulHttp(cookiejar=self.cookiejar)
def __call__(self, url, method, body, headers):
try:
resp, content = self.httpclient.request(url, method=method,
body=body, headers=headers)
if (resp.status != 200):
self._raise_service_error(url, resp.status, resp['content-type'], content)
except socket.error, e:
self.log.error('SOCKET FAILURE: %s', e.fp.read())
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except httplib2.HttpLib2ErrorWithResponse, e:
self._raise_service_error(url, resp.status, resp['content-type'], content)
except httplib2.HttpLib2Error, e:
raise MetawebError(u'HTTP error: %s' % (e,))
#tid = resp.get('x-metaweb-tid', None)
return (resp, content)
class UrlfetchClient(object):
    """HTTP client backed by App Engine's urlfetch, with cookie support.

    Instances are callable: (url, method, body, headers) -> (resp, content).
    """
    def __init__(self, cookiejar, rse):
        self.cookiejar = cookiejar
        self._raise_service_error = rse
        self.httpclient = CookiefulUrlfetch(cookiejar=self.cookiejar)

    def __call__(self, url, method, body, headers):
        response = self.httpclient.request(url, payload=body,
                                           method=method, headers=headers)
        if response.status_code != 200:
            self._raise_service_error(url, response.status_code,
                                      response.headers['content-type'],
                                      response.body)
        return (response, response.content)
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import re
import string
import urllib
def quotekey(ustr):
    """
    quote a unicode string to turn it into a valid namespace key

    First and last characters must be alphanumeric; interior characters
    may additionally be '_' or '-'.  Anything else becomes '$XXXX'
    (uppercase hex of the code point).  Accepts a utf-8 str or unicode;
    always returns a plain str.
    """
    valid_always = string.ascii_letters + string.digits
    valid_interior_only = valid_always + '_-'
    if isinstance(ustr, str):
        s = unicode(ustr,'utf-8')
    elif isinstance(ustr, unicode):
        s = ustr
    else:
        raise ValueError('quotekey() expects utf-8 string or unicode')
    # BUG FIX: an empty input used to die with IndexError on s[0];
    # reject it explicitly with a meaningful error instead.
    if not s:
        raise ValueError('quotekey() expects a nonempty string')
    output = []
    # first character: must be from the always-valid set or be escaped
    if s[0] in valid_always:
        output.append(s[0])
    else:
        output.append('$%04X' % ord(s[0]))
    # interior characters may also be '_' or '-'
    for c in s[1:-1]:
        if c in valid_interior_only:
            output.append(c)
        else:
            output.append('$%04X' % ord(c))
    # last character: same restriction as the first
    if len(s) > 1:
        if s[-1] in valid_always:
            output.append(s[-1])
        else:
            output.append('$%04X' % ord(s[-1]))
    return str(''.join(output))
def unquotekey(key, encoding=None):
    """
    unquote a namespace key and turn it into a unicode string

    Inverse of quotekey(): '$XXXX' sequences decode to the code point;
    '_' and '-' are accepted only in the interior.  Returns unicode, or
    a str encoded with *encoding* when given.
    """
    valid_always = string.ascii_letters + string.digits
    output = []
    i = 0
    while i < len(key):
        if key[i] in valid_always:
            output.append(key[i])
            i += 1
        elif key[i] in '_-' and i != 0 and i != len(key) - 1:
            # BUG FIX: the bound used to be `i != len(key)`, which is
            # always true inside the loop, so a trailing '_' or '-' was
            # silently accepted even though quotekey() never emits one.
            # Mirrors the `i != 0` check that forbids a leading '_'/'-'.
            output.append(key[i])
            i += 1
        elif key[i] == '$' and i+4 < len(key):
            # may raise ValueError if there are invalid characters
            output.append(unichr(int(key[i+1:i+5],16)))
            i += 5
        else:
            raise ValueError("unquote key saw invalid character '%s' at position %d" % (key[i], i))
    ustr = u''.join(output)
    if encoding is None:
        return ustr
    return ustr.encode(encoding)
# should this also include "'()" into safe?
def urlencode_pathseg(data):
    '''
    urlencode for placement between slashes in an url.
    '''
    # unicode values are utf-8 encoded before quoting (Python 2 semantics)
    if isinstance(data, unicode):
        data = data.encode('utf_8')
    # keep path-legal sub-delims and a few other characters unescaped
    return urllib.quote(data, "~:@$!*,;=&+")
def id_to_urlid(id):
    """
    convert a mql id to an id suitable for embedding in a url path.

    '~'-ids pass through unchanged, '#'-guids have the '#' percent-
    encoded, and '/'-paths are re-quoted segment by segment (the result
    carries no leading slash).
    """
    # BUG FIX: this validation used to run *after* id.split('/'), so a
    # non-string argument failed with an unrelated AttributeError first.
    assert isinstance(id, str) and id != '', 'bad id "%s"' % id
    segs = id.split('/')
    if id[0] == '~':
        assert len(segs) == 1
        # assume valid, should check
        return id
    if id[0] == '#':
        assert len(segs) == 1
        # assume valid, should check
        return '%23' + id[1:]
    if id[0] != '/':
        raise ValueError('unknown id format %s' % id)
    # ok, we have a slash-path
    # requote components as keys and rejoin.
    # urlids do not have leading slashes!!!
    return '/'.join(urlencode_pathseg(unquotekey(seg)) for seg in segs[1:])
| Python |
# ==================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
"""
declarations for external metaweb api.
from metaweb.api import HTTPMetawebSession
mss = HTTPMetawebSession('sandbox.freebase.com')
print mss.mqlread([dict(name=None, type='/type/type')])
"""
__all__ = ['MetawebError', 'MetawebSession', 'HTTPMetawebSession', 'attrdict']
__version__ = '0.1'
import os, sys, re
import cookielib
try:
import simplejson
except ImportError:
try:
# appengine provides simplejson at django.utils.simplejson
from django.utils import simplejson
except ImportError:
raise Exception("unable to import simplejson")
try:
from urllib import quote as urlquote
except ImportError:
from urlib_stub import quote as urlquote
import pprint
import socket
import logging
class Delayed(object):
    """Lazy wrapper for callables used in log statements.

    The wrapped call is deferred until the wrapper is turned into a
    string, so a suppressed log level never pays for the call:

    >>> logging.debug(Delayed(simplejson.dumps, q))   # dumps not called
    >>> logging.warn(Delayed(simplejson.dumps, q))    # dumps called
    """
    def __init__(self, f, *args, **kwds):
        self.f = f
        self.args = args
        self.kwds = kwds

    def __str__(self):
        # only here does the wrapped callable actually run
        result = self.f(*self.args, **self.kwds)
        return str(result)
def logformat(result):
    """Render *result* as pretty-printed JSON for logging; when the top
    level is an object, strip its outer braces so it reads as bare
    key/value lines."""
    text = simplejson.dumps(result, indent=2)
    if text.startswith('{'):
        # drop the leading '{' and the trailing '\n}'
        text = text[1:-2]
    return text
# Transport selection: pick the best available HTTP client wrapper at
# import time.  Preference order: urlfetch (AppEngine), httplib2, urllib2.
from httpclients import Httplib2Client, Urllib2Client, UrlfetchClient

# Check for urlfetch first so that urlfetch is used when running the appengine SDK
try:
    import google.appengine.api.urlfetch
    from cookie_handlers import CookiefulUrlfetch
    http_client = UrlfetchClient
except ImportError:
    try:
        import httplib2
        from cookie_handlers import CookiefulHttp
        http_client = Httplib2Client
    except ImportError:
        import urllib2
        # fall back to the stdlib client; mark the optional deps absent
        httplib2 = None
        CookiefulHttp = None
        http_client = Urllib2Client

# remove whitespace from json encoded output
simplejson.JSONEncoder.item_separator = ','
simplejson.JSONEncoder.key_separator = ':'

# don't escape slashes, we're not pasting into script tags here.
if simplejson.encoder.ESCAPE_DCT.get('/', None) == r'\/':
    simplejson.encoder.ESCAPE_DCT['/'] = '/'
def urlencode_weak(s):
    """Percent-encode *s* for a query string, but leave ',', '/', ':'
    and '$' untouched (the metaweb service expects them literal)."""
    safe_chars = ',/:$'
    return urlquote(s, safe=safe_chars)
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
    """Dictionary whose entries are also reachable as attributes.

    ``d.key`` and ``d['key']`` name the same slot, in both directions.
    Beware: storing a key that collides with a dict method name (e.g.
    ``'get'``) shadows that method on the instance.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # aliasing __dict__ to the mapping itself is what makes attribute
        # and item access interchangeable
        self.__dict__ = self
# TODO expose the common parts of the result envelope
class MetawebError(Exception):
    """Raised when the metaweb service reports a failure."""
# TODO right now this is a completely unnecessary superclass.
# is there enough common behavior between session types
# to justify it?
class MetawebSession(object):
    """Abstract base for metaweb sessions.

    Concrete subclasses implement a particular transport; only the HTTP
    one is available externally.  This is more of an interface marker
    than a class.
    """
    # interface definition here...
# from httplib2
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
class HTTPMetawebSession(MetawebSession):
    """
    a MetawebSession is a request/response queue.

    this version uses the HTTP api, and is synchronous.
    """
    # share cookies across sessions, so that different sessions can
    # see each other's writes immediately.
    _default_cookiejar = cookielib.CookieJar()

    def __init__(self, service_url, username=None, password=None,
                 prev_session=None, cookiejar=None, cookiefile=None):
        """
        create a new MetawebSession for interacting with the Metaweb.

        a new session will inherit state from prev_session if present.
        """
        super(HTTPMetawebSession, self).__init__()
        self.log = logging.getLogger()

        assert not service_url.endswith('/')
        if '/' not in service_url:  # plain host:port
            service_url = 'http://' + service_url

        self.service_url = service_url
        self.username = username
        self.password = password
        self.tid = None

        if prev_session:
            # BUG FIX: this previously read the undefined name 'prev'
            # ('prev.service_url'), raising NameError whenever a
            # prev_session was supplied.
            self.service_url = prev_session.service_url

        if cookiefile is not None:
            cookiejar = self.open_cookie_file(cookiefile)

        # cookie jar precedence: explicit jar, then the previous
        # session's jar, then the shared module-level default.
        if cookiejar is not None:
            self.cookiejar = cookiejar
        elif prev_session:
            self.cookiejar = prev_session.cookiejar
        else:
            self.cookiejar = self._default_cookiejar

        self._http_request = http_client(self.cookiejar, self._raise_service_error)
def open_cookie_file(self, cookiefile=None):
    """Load (or create) an LWP cookie jar backed by *cookiefile*.

    With no filename, defaults to ``$HOME/.pyfreebase/cookiejar``;
    raises MetawebError when $HOME is not set.  Existing files are
    loaded including discardable cookies.
    """
    if cookiefile is None or cookiefile == '':
        if 'HOME' in os.environ:
            cookiefile = os.path.join(os.environ['HOME'], '.pyfreebase/cookiejar')
        else:
            # BUG FIX: the old message applied '%' to a format string with
            # no conversion specifiers ("..." % cookiefile), which raised
            # TypeError instead of the intended MetawebError.
            raise MetawebError("no cookiefile specified and no $HOME is set")
    cookiejar = cookielib.LWPCookieJar(cookiefile)
    if os.path.exists(cookiefile):
        cookiejar.load(ignore_discard=True)
    return cookiejar
def _httpreq(self, service_path, method='GET', body=None, form=None,
             headers=None):
    """
    make an http request to the service.

    form arguments are encoded in the url, even for POST, if a non-form
    content-type is given for the body.

    returns a pair (resp, body)

    resp is the response object and may be different depending
    on whether urllib2 or httplib2 is in use?
    """
    # only GET and POST are supported; GET may not carry a body
    if method == 'GET':
        assert body is None
    if method != "GET" and method != "POST":
        assert 0, 'unknown method %s' % method

    url = self.service_url + service_path

    if headers is None:
        headers = {}
    else:
        headers = _normalize_headers(headers)

    # this is a lousy way to parse Content-Type, where is the library?
    ct = headers.get('content-type', None)
    if ct is not None:
        ct = ct.split(';')[0]

    if body is not None:
        # if body is provided, content-type had better be too
        assert ct is not None

    if form is not None:
        # weakly url-encode keys and values; ','  '/'  ':'  '$' stay literal
        qstr = '&'.join(['%s=%s' % (urlencode_weak(unicode(k)), urlencode_weak(unicode(v)))
                         for k,v in form.items()])
        if method == 'POST':
            # put the args on the url if we're putting something else
            # in the body. this is used to add args to raw uploads.
            if body is not None:
                url += '?' + qstr
            else:
                # NOTE(review): indentation reconstructed from a mangled
                # source -- assumes the charset header is (re)written only
                # when no content-type was supplied; confirm upstream.
                if ct is None:
                    ct = 'application/x-www-form-urlencoded'
                    headers['content-type'] = ct + '; charset=utf-8'

                if ct == 'multipart/form-encoded':
                    # TODO handle this case
                    raise NotImplementedError
                elif ct == 'application/x-www-form-urlencoded':
                    body = qstr
        else:
            # for all methods other than POST, use the url
            url += '?' + qstr

    # assure the service that this isn't a CSRF form submission
    headers['x-metaweb-request'] = 'Python'

    if 'user-agent' not in headers:
        headers['user-agent'] = 'python freebase.api-%s' % __version__

    #if self.tid is not None:
    #    headers['x-metaweb-tid'] = self.tid

    ####### DEBUG MESSAGE - should check log level before generating
    if form is None:
        formstr = ''
    else:
        formstr = '\nFORM:\n ' + '\n '.join(['%s=%s' % (k,v)
                                             for k,v in form.items()])
    if headers is None:
        headerstr = ''
    else:
        headerstr = '\nHEADERS:\n ' + '\n '.join([('%s: %s' % (k,v))
                                                  for k,v in headers.items()])
    self.log.info('%s %s%s%s', method, url, formstr, headerstr)
    #######

    return self._http_request(url, method, body, headers)
def _raise_service_error(self, url, status, ctype, body):
    """Turn a failed HTTP response into a MetawebError."""
    looks_like_json = ctype.endswith('javascript') or ctype.endswith('json')
    if str(status) == '400' and looks_like_json:
        # a structured 400 carries the real error in its first message
        envelope = self._loadjson(body)
        msg = envelope.messages[0]
        raise MetawebError(u'%s %s %r'
                           % (msg.get('code', ''), msg.message, msg.info))
    raise MetawebError('request failed: %s: %r %r' % (url, status, body))
def _httpreq_json(self, *args, **kws):
resp, body = self._httpreq(*args, **kws)
return self._loadjson(body)
def _loadjson(self, json):
    """Parse a JSON string, converting every dict into an attrdict."""
    # TODO really this should be accomplished by hooking
    # simplejson to create attrdicts instead of dicts.
    def convert(node):
        # copy the structure, descending dicts and lists (including
        # subclasses) and rebuilding dicts as attrdicts
        if isinstance(node, dict):
            return attrdict([(key, convert(val)) for key, val in node.items()])
        if isinstance(node, list):
            return [convert(item) for item in node]
        return node

    if json == '':
        self.log.error('the empty string is not valid json')
        raise MetawebError('the empty string is not valid json')

    try:
        parsed = simplejson.loads(json)
    except ValueError as e:
        self.log.error('error parsing json string %r' % json)
        raise MetawebError('error parsing JSON string: %s' % e)
    return convert(parsed)
def _check_mqlerror(self, r):
if r.code != '/api/status/ok':
for msg in r.messages:
self.log.error('mql error: %s %s %r' % (msg.code, msg.message, msg.get('query', None)))
raise MetawebError, 'query failed: %s\n%r' % (r.messages[0].code, r.messages[0].get('query', None))
def _mqlresult(self, r):
    """Validate the envelope and unwrap its result payload."""
    self._check_mqlerror(r)
    # Delayed: only rendered if INFO logging is actually enabled
    self.log.info('result: %s', Delayed(logformat, r))
    return r.result
def login(self, username=None, password=None):
    """sign in to the service. For a more complete description,
    see http://www.freebase.com/view/en/api_account_login"""
    service = '/api/account/login'
    # fall back to the credentials stored on the session
    username = username or self.username
    password = password or self.password
    assert username is not None
    assert password is not None

    self.log.debug('LOGIN USERNAME: %s', username)
    credentials = dict(username=username, password=password)
    r = self._httpreq_json(service, 'POST', form=credentials)
    if r.code != '/api/status/ok':
        raise MetawebError(u'%s %r' % (r.get('code', ''), r.messages))

    self.log.debug('LOGIN RESP: %r', r)
    self.log.debug('LOGIN COOKIES: %s', self.cookiejar)
def logout(self):
    """logout of the service. For a more complete description,
    see http://www.freebase.com/view/en/api_account_logout"""
    service = '/api/account/logout'
    self.log.debug("LOGOUT")
    r = self._httpreq_json(service, 'GET')
    if r.code != '/api/status/ok':
        # this should never happen
        raise MetawebError(u'%s %r' % (r.get('code', ''), r.messages))
def user_info(self, mql_output=None):
    """ get user_info. For a more complete description,
    see http://www.freebase.com/view/en/api_service_user_info"""
    service = "/api/service/user_info"
    # the optional output spec travels as a JSON-encoded form field
    outspec = simplejson.dumps(mql_output)
    return self._httpreq_json(service, 'POST', form=dict(mql_output=outspec))
def loggedin(self):
    """check to see whether a user is logged in or not. For a
    more complete description, see http://www.freebase.com/view/en/api_account_loggedin

    Returns True or False.  (The old code fell through and implicitly
    returned None for a non-ok status; both are falsy, so boolean
    callers are unaffected.)
    """
    service = "/api/account/loggedin"
    try:
        r = self._httpreq_json(service, 'GET')
        return r.code == "/api/status/ok"
    except MetawebError:
        # the service answers a logged-out session with an error
        return False
def mqlreaditer(self, sq, asof=None):
    """read a structure query.

    Generator: yields result items one at a time, transparently
    following the server-side cursor across multiple mqlread calls.
    """
    # True asks the service to open a cursor; afterwards we pass back
    # whatever cursor token the previous envelope returned.
    cursor = True
    while 1:
        subq = dict(query=[sq], cursor=cursor, escape=False)
        if asof:
            subq['as_of_time'] = asof
        qstr = simplejson.dumps(subq)

        service = '/api/service/mqlread'
        r = self._httpreq_json(service, form=dict(query=qstr))
        for item in self._mqlresult(r):
            yield item
        if r['cursor']:
            # a truthy cursor token means more pages remain
            cursor = r['cursor']
            self.log.info('CONTINUING with %s', cursor)
        else:
            return
def mqlread(self, sq, asof=None):
    """read a structure query. For a more complete description,
    see http://www.freebase.com/view/en/api_service_mqlread"""
    envelope = dict(query=sq, escape=False)
    if asof:
        envelope['as_of_time'] = asof
    if isinstance(sq, list):
        # list queries page their results through a cursor
        envelope['cursor'] = True

    service = '/api/service/mqlread'
    self.log.info('%s: %s', service, Delayed(logformat, sq))

    qstr = simplejson.dumps(envelope)
    r = self._httpreq_json(service, form=dict(query=qstr))
    return self._mqlresult(r)
def mqlreadmulti(self, queries, asof=None):
    """read a structure query"""
    # one sub-envelope per query, keyed q0, q1, ...
    keys = ['q%d' % i for i in range(len(queries))]
    envelope = {}
    for key, sq in zip(keys, queries):
        subq = dict(query=sq, escape=False)
        if asof:
            subq['as_of_time'] = asof
        # XXX put this back once mqlreadmulti is working in general
        #if isinstance(sq, list):
        #    subq['cursor'] = True
        envelope[key] = subq

    service = '/api/service/mqlread'
    self.log.info('%s: %s', service, Delayed(logformat, envelope))

    qstr = simplejson.dumps(envelope)
    rs = self._httpreq_json(service, form=dict(queries=qstr))
    self.log.info('%s result: %s', service,
                  Delayed(simplejson.dumps, rs, indent=2))
    # unwrap each sub-result in the original query order
    return [self._mqlresult(rs[key]) for key in keys]
def raw(self, id):
    """translate blob from id. For a more complete description,
    see http://www.freebase.com/view/en/api_trans_raw"""
    path = '/api/trans/raw' + urlquote(id)
    self.log.info(path)

    response, payload = self._httpreq(path)
    self.log.info('raw is %d bytes' % len(payload))
    return payload
def blurb(self, id, break_paragraphs=False, maxlength=200):
    """translate only the text in blob from id. For a more
    complete description, see http://www.freebase.com/view/en/api_trans_blurb"""
    path = '/api/trans/blurb' + urlquote(id)
    self.log.info(path)

    options = dict(break_paragraphs=break_paragraphs, maxlength=maxlength)
    response, payload = self._httpreq(path, form=options)
    self.log.info('blurb is %d bytes' % len(payload))
    return payload
def image_thumb(self, id, maxwidth=None, maxheight=None, mode="fit", onfail=None):
    """ given the id of an image, this will return a URL of a thumbnail of the image.
    The full details of how the image is cropped and finessed is detailed at
    http://www.freebase.com/view/en/api_trans_image_thumb """
    service = "/api/trans/image_thumb"
    assert mode in ["fit", "fill", "fillcrop", "fillcropmid"]

    # only forward the optional knobs the caller actually set
    form = dict(mode=mode)
    for key, value in (("maxwidth", maxwidth),
                       ("maxheight", maxheight),
                       ("onfail", onfail)):
        if value is not None:
            form[key] = value

    resp, body = self._httpreq(service + urlquote(id), form=form)
    self.log.info('image is %d bytes' % len(body))
    return body
def mqlwrite(self, sq):
    """do a mql write. For a more complete description,
    see http://www.freebase.com/view/en/api_service_mqlwrite"""
    envelope = dict(query=sq, escape=False)
    qstr = simplejson.dumps(envelope)

    self.log.debug('MQLWRITE: %s', qstr)
    service = '/api/service/mqlwrite'
    self.log.info('%s: %s', service, Delayed(logformat, sq))

    r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
    self.log.debug('MQLWRITE RESP: %r', r)
    return self._mqlresult(r)
def mqlcheck(self, sq):
    """ See if a write is valid, and see what would happen, but do not
    actually do the write """
    envelope = dict(query=sq, escape=False)
    qstr = simplejson.dumps(envelope)

    self.log.debug('MQLCHECK: %s', qstr)
    service = '/api/service/mqlcheck'
    self.log.info('%s: %s', service, Delayed(logformat, sq))

    r = self._httpreq_json(service, 'POST', form=dict(query=qstr))
    self.log.debug('MQLCHECK RESP: %r', r)
    return self._mqlresult(r)
def mqlflush(self):
    """ask the service not to hand us old data"""
    self.log.debug('MQLFLUSH')
    r = self._httpreq_json('/api/service/touch')
    self._check_mqlerror(r)
    return True
def touch(self):
    """ make sure you are accessing the most recent data. For a more
    complete description, see http://www.freebase.com/view/en/api_service_touch"""
    # touch is simply an alias for mqlflush
    return self.mqlflush()
def upload(self, body, content_type, document_id=False, permission_of=False):
    """upload to the metaweb. For a more complete description,
    see http://www.freebase.com/view/en/api_service_upload"""
    service = '/api/service/upload'
    self.log.info('POST %s: %s (%d bytes)', service, content_type, len(body))

    headers = {}
    if content_type is not None:
        headers['content-type'] = content_type

    # build the form only from the options the caller supplied;
    # document_id=None means "create a new document"
    form = None
    if document_id is not False:
        form = {'document': document_id if document_id is not None else ''}
    if permission_of is not False:
        if form:
            form['permission_of'] = permission_of
        else:
            form = {'permission_of': permission_of}

    # note the use of both body and form.
    # form parameters get encoded into the URL in this case
    r = self._httpreq_json(service, 'POST',
                           headers=headers, body=body, form=form)
    return self._mqlresult(r)
def uri_submit(self, URI, document=None, content_type=None):
    """ submit a URI to freebase. For a more complete description,
    see http://www.freebase.com/edit/topic/en/api_service_uri_submit """
    service = "/api/service/uri_submit"
    form = dict(uri=URI)
    # only forward the optional fields the caller set
    for key, value in (("document", document), ("content_type", content_type)):
        if value is not None:
            form[key] = value
    r = self._httpreq_json(service, 'POST', form=form)
    return self._mqlresult(r)
def search(self, query, format=None, prefixed=None, limit=20, start=0,
           type=None, type_strict="any", domain=None, domain_strict=None,
           escape="html", timeout=None, mql_filter=None, mql_output=None):
    """ search freebase.com. For a more complete description,
    see http://www.freebase.com/view/en/api_service_search"""
    service = "/api/service/search"
    form = dict(query=query)
    # forward every optional parameter that has a truthy value
    optional = (("format", format), ("prefixed", prefixed),
                ("limit", limit), ("start", start), ("type", type),
                ("type_strict", type_strict), ("domain", domain),
                ("domain_strict", domain_strict), ("escape", escape),
                ("timeout", timeout), ("mql_filter", mql_filter),
                ("mql_output", mql_output))
    for key, value in optional:
        if value:
            form[key] = value
    r = self._httpreq_json(service, 'POST', form=form)
    return self._mqlresult(r)
def geosearch(self, location=None, location_type=None, mql_input=None, limit=20,
              start=0, type=None, geometry_type=None, intersect=None, mql_filter=None,
              within=None, inside=None, order_by=None, count=None, format="json", mql_output=None):
    """ perform a geosearch. For a more complete description,
    see http://www.freebase.com/api/service/geosearch?help """
    service = "/api/service/geosearch"
    if location is None and location_type is None and mql_input is None:
        raise Exception("You have to give it something to work with")

    # forward every optional parameter that has a truthy value
    form = dict()
    optional = (("location", location), ("location_type", location_type),
                ("mql_input", mql_input), ("limit", limit), ("start", start),
                ("type", type), ("geometry_type", geometry_type),
                ("intersect", intersect), ("mql_filter", mql_filter),
                ("within", within), ("inside", inside), ("order_by", order_by),
                ("count", count), ("format", format), ("mql_output", mql_output))
    for key, value in optional:
        if value:
            form[key] = value

    # json responses are decoded; any other format is returned raw
    if format == "json":
        return self._httpreq_json(service, 'POST', form=form)
    return self._httpreq(service, 'POST', form=form)
def version(self):
    """ get versions for various parts of freebase. For a more
    complete description, see http://www.freebase.com/view/en/api_version"""
    return self._httpreq_json("/api/version")
### DEPRECATED IN API
def reconcile(self, name, etype=None):
    """DEPRECATED: reconcile name to guid. For a more complete description,
    see http://www.freebase.com/view/en/dataserver_reconciliation

    *etype* defaults to ['/common/topic'].
    """
    # FIX: the default was previously a mutable list literal in the
    # signature (shared across calls); use the None-sentinel idiom.
    if etype is None:
        etype = ['/common/topic']
    service = '/dataserver/reconciliation'
    form = {'name': name, 'types': ','.join(etype)}
    r = self._httpreq_json(service, 'GET', form=form)
    # TODO non-conforming service, fix later
    #self._mqlresult(r)
    return r
if __name__ == '__main__':
    # smoke test: dump all type names from the sandbox graph with
    # debug logging routed to the console
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)

    mss = HTTPMetawebSession('sandbox.freebase.com')
    # BUG FIX: these two lines previously read 'self.mss.log...', but
    # 'self' is undefined at module scope and raised NameError.
    mss.log.setLevel(logging.DEBUG)
    mss.log.addHandler(console)
    print(mss.mqlread([dict(name=None, type='/type/type')]))
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
#
# httplib2cookie.py allows you to use python's standard
# CookieJar class with httplib2.
#
#
import re
try:
from google.appengine.api import urlfetch
Http = object
except ImportError:
pass
try:
from httplib2 import Http
except ImportError:
pass
try:
import urllib
except ImportError:
import urllib_stub as urllib
import cookielib
class DummyRequest(object):
    """Simulated urllib2.Request object for httplib2

    implements only what's necessary for cookielib.CookieJar to work
    """
    def __init__(self, url, headers=None):
        self.url = url
        self.headers = headers
        # request_host only needs get_full_url(), which is ready above
        self.origin_req_host = cookielib.request_host(self)

        scheme, remainder = urllib.splittype(url)
        host, remainder = urllib.splithost(remainder)
        self.type = scheme
        self.host = host
        if self.host:
            self.host = urllib.unquote(self.host)

    def get_full_url(self):
        return self.url

    def get_origin_req_host(self):
        # TODO to match urllib2 this should be different for redirects
        return self.origin_req_host

    def get_type(self):
        return self.type

    def get_host(self):
        return self.host

    def get_header(self, key, default=None):
        return self.headers.get(key.lower(), default)

    def has_header(self, key):
        return key in self.headers

    def add_unredirected_header(self, key, val):
        # TODO this header should not be sent on redirect
        self.headers[key.lower()] = val

    def is_unverifiable(self):
        # TODO to match urllib2, this should be set to True when the
        # request is the result of a redirect
        return False
class DummyHttplib2Response(object):
    """Wrap an httplib2 response so cookielib.CookieJar can consume it.

    implements only the info() accessor CookieJar.extract_cookies needs
    """
    def __init__(self, response):
        self.response = response

    def info(self):
        # CookieJar expects a mimetools.Message-like object here
        return DummyHttplib2Message(self.response)
class DummyUrlfetchResponse(object):
    """Wrap a urlfetch response so cookielib.CookieJar can consume it.

    implements only the info() accessor CookieJar.extract_cookies needs
    """
    def __init__(self, response):
        self.response = response

    def info(self):
        # CookieJar expects a mimetools.Message-like object here
        return DummyUrlfetchMessage(self.response)
class DummyHttplib2Message(object):
    """Simulated mimetools.Message object for httplib2

    implements only what's necessary for cookielib.CookieJar to work
    """
    # httplib2 joins multiple values for the same header using ','.
    # but the netscape cookie format uses ',' as part of the expires=
    # date format, so split only on commas NOT followed by a digit.
    # Compiled once at class level instead of on every getheaders call.
    _HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header *k* (case-insensitive)."""
        k = k.lower()
        if k not in self.response:
            return []
        # (removed a dead 'v = self.response.get(...)' whose result was
        # never used)
        return [m[0] for m in self._HEADERVAL.findall(self.response[k])]
class DummyUrlfetchMessage(object):
    """Simulated mimetools.Message object for urlfetch responses

    implements only what's necessary for cookielib.CookieJar to work
    """
    # repeated headers are folded into one comma-joined value, but the
    # netscape cookie format uses ',' inside expires= dates, so split
    # only on commas NOT followed by a digit.
    # Compiled once at class level instead of on every getheaders call.
    _HEADERVAL = re.compile(r'\s*(([^,]|(,\s*\d))+)')

    def __init__(self, response):
        self.response = response

    def getheaders(self, k):
        """Return the list of values for header *k* (case-insensitive)."""
        k = k.lower()
        headers = self.response.headers
        if k not in headers:
            return []
        # (removed a dead 'v = ...headers.get(...)' whose result was
        # never used)
        return [m[0] for m in self._HEADERVAL.findall(headers[k])]
class CookiefulHttp(Http):
    """Subclass of httplib2.Http that keeps cookie state

    constructor takes an optional cookiejar=cookielib.CookieJar

    currently this does not handle redirects completely correctly:
    if the server redirects to a different host the original
    cookies will still be sent to that host.
    """
    def __init__(self, cookiejar=None, **kws):
        # note that httplib2.Http is not a new-style-class
        Http.__init__(self, **kws)
        if cookiejar is None:
            cookiejar = cookielib.CookieJar()
        self.cookiejar = cookiejar

    def request(self, uri, **kws):
        # attach any stored cookies to the outgoing request...
        headers = kws.pop('headers', None)
        req = DummyRequest(uri, headers)
        self.cookiejar.add_cookie_header(req)
        (r, body) = Http.request(self, uri, headers=req.headers, **kws)
        # ...and harvest whatever cookies the response set
        resp = DummyHttplib2Response(r)
        self.cookiejar.extract_cookies(resp, req)
        return (r, body)
class CookiefulUrlfetch(object):
    """urlfetch wrapper that keeps cookie state across requests

    constructor takes an optional cookiejar=cookielib.CookieJar
    """
    # TODO refactor CookiefulHttp so that CookiefulUrlfetch can be a
    # subclass of it
    def __init__(self, cookiejar=None, **kws):
        if cookiejar is None:
            # BUG FIX: the default jar was previously assigned to a
            # misspelled local ('cookejar') and then thrown away, leaving
            # self.cookejar = None and breaking every request().
            cookiejar = cookielib.CookieJar()
        # keep the historical (misspelled) attribute for any external
        # readers, and add the correctly spelled alias matching
        # CookiefulHttp.
        self.cookejar = cookiejar
        self.cookiejar = cookiejar

    def request(self, uri, **kws):
        # attach stored cookies, issue the fetch, then harvest whatever
        # cookies the response set
        headers = kws.pop('headers', None)
        req = DummyRequest(uri, headers)
        self.cookejar.add_cookie_header(req)
        headers = req.headers
        r = urlfetch.fetch(uri, headers=headers, **kws)
        self.cookejar.extract_cookies(DummyUrlfetchResponse(r), req)
        return r
| Python |
from session import HTTPMetawebSession, MetawebError, attrdict
from mqlkey import quotekey, unquotekey
| Python |
#!/usr/bin/python
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import sys

from freebase.api.session import HTTPMetawebSession

# module-level session that backs the convenience functions exported below
_base = HTTPMetawebSession("sandbox.freebase.com")

__all__ = ["HTTPMetawebSession"]

# we want to add base's functions to __init__.py
# so that we can say freebase.func() and really
# just call base.func()

# a little trick to refer to ourselves
self = sys.modules[__name__]
for funcname in dir(_base):
    # we only want the 'real' functions
    if not funcname.startswith("_"):
        func = getattr(_base, funcname)
        # let's make sure we're getting functions
        # instead of constants or whatever
        if callable(func):
            # we're setting these functions
            # so that they can be called like
            # freebase.funcname -> base.func()
            setattr(self, funcname, func)
            # make sure we import the base's
            # functions if we import freebase
            __all__.append(funcname)

# we don't want any self-referencing
# business going. Plus, this is cleaner.
del self
# we want dir(freebase) to be clean
del funcname, func
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.