code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# Written by Bram Cohen
# see LICENSE.txt for license information
from zurllib import urlopen
from urlparse import urljoin
from btformats import check_message
from Choker import Choker
from Storage import Storage
from StorageWrapper import StorageWrapper
from Uploader import Upload
from Downloader import Downloader
from Connecter import Connecter
from Encrypter import Encoder
from RawServer import RawServer
from Rerequester import Rerequester
from DownloaderFeedback import DownloaderFeedback
from RateMeasure import RateMeasure
from CurrentRateMeasure import Measure
from PiecePicker import PiecePicker
from bencode import bencode, bdecode
from __init__ import version
from binascii import b2a_hex
from sha import sha
from os import path, makedirs
from parseargs import parseargs, formatDefinitions
from socket import error as socketerror
from random import seed
from threading import Thread, Event
from time import time
# os.getpid is unavailable on some minimal platforms; fall back to a
# constant so the peer-id hash below can still be generated.
try:
    from os import getpid
except ImportError:
    def getpid():
        return 1
# Command-line option definitions consumed by parseargs() / formatDefinitions():
# each entry is (option name, default value, help text).
defaults = [
    ('max_uploads', 7,
        "the maximum number of uploads to allow at once."),
    ('keepalive_interval', 120.0,
        'number of seconds to pause between sending keepalives'),
    ('download_slice_size', 2 ** 14,
        "How many bytes to query for per request."),
    ('request_backlog', 5,
        "how many requests to keep in a single pipe at once."),
    ('max_message_length', 2 ** 23,
        "maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped."),
    ('ip', '',
        "ip to report you have to the tracker."),
    ('minport', 6881, 'minimum port to listen on, counts up if unavailable'),
    ('maxport', 6999, 'maximum port to listen on'),
    ('responsefile', '',
        'file the server response was stored in, alternative to url'),
    ('url', '',
        'url to get file from, alternative to responsefile'),
    ('saveas', '',
        'local file name to save the file as, null indicates query user'),
    ('timeout', 300.0,
        'time to wait between closing sockets which nothing has been received on'),
    ('timeout_check_interval', 60.0,
        'time to wait between checking if any connections have timed out'),
    ('max_slice_length', 2 ** 17,
        "maximum length slice to send to peers, larger requests are ignored"),
    ('max_rate_period', 20.0,
        "maximum amount of time to guess the current rate estimate represents"),
    ('bind', '',
        'ip to bind to locally'),
    ('upload_rate_fudge', 5.0,
        'time equivalent of writing to kernel-level TCP buffer, for rate adjustment'),
    ('display_interval', .5,
        'time between updates of displayed information'),
    ('rerequest_interval', 5 * 60,
        'time to wait between requesting more peers'),
    ('min_peers', 20,
        'minimum number of peers to not do rerequesting'),
    ('http_timeout', 60,
        'number of seconds to wait before assuming that an http connection has timed out'),
    ('max_initiate', 35,
        'number of peers at which to stop initiating new connections'),
    ('max_allow_in', 55,
        'maximum number of connections to allow, after this new incoming connections will be immediately closed'),
    ('check_hashes', 1,
        'whether to check hashes on disk'),
    ('max_upload_rate', 0,
        'maximum kB/s to upload at, 0 means no limit'),
    ('snub_time', 30.0,
        "seconds to wait for data to come in over a connection before assuming it's semi-permanently choked"),
    ('spew', 0,
        "whether to display diagnostic info to stdout"),
    ('rarest_first_cutoff', 4,
        "number of downloads at which to switch from random to rarest first"),
    ('min_uploads', 4,
        "the number of uploads to fill out to with extra optimistic unchokes"),
    ('report_hash_failures', 0,
        "whether to inform the user that hash failures occur. They're non-fatal."),
    ]
def download(params, filefunc, statusfunc, finfunc, errorfunc, doneflag, cols, pathFunc = None, paramfunc = None, spewflag = Event()):
    """Top-level single-torrent download driver.

    Parses command-line params, fetches and validates the .torrent metainfo,
    allocates files on disk, wires together all of the engine components
    (storage, choker, up/downloaders, encoder, tracker rerequester), then
    blocks in the RawServer event loop until doneflag is set.

    params     -- argv-style list of options/arguments (see `defaults`)
    filefunc   -- callback asked where to save (name, length, saveas, is_dir);
                  returning None aborts the download
    statusfunc -- callback fed dicts of progress/activity info for the UI
    finfunc    -- callback invoked once the download completes
    errorfunc  -- callback for user-visible error strings
    doneflag   -- threading.Event which, when set, shuts everything down
    cols       -- terminal width used when formatting the help text
    pathFunc   -- optional callback told the final (possibly adjusted) path
    paramfunc  -- optional callback handed a dict of live-tunable hooks
    spewflag   -- Event toggling per-connection diagnostic spew

    Returns None; all results are delivered through the callbacks.
    """
    if len(params) == 0:
        errorfunc('arguments are -\n' + formatDefinitions(defaults, cols))
        return
    try:
        config, args = parseargs(params, defaults, 0, 1)
        if args:
            # NOTE(review): this condition looks inverted relative to its
            # message - it raises when responsefile was NOT also given as a
            # parameter. Since parseargs fills defaults with '', the get()
            # likely never returns None, making this branch dead. Confirm
            # against parseargs before changing.
            if config.get('responsefile', None) == None:
                raise ValueError, 'must have responsefile as arg or parameter, not both'
            if path.isfile(args[0]):
                config['responsefile'] = args[0]
            else:
                config['url'] = args[0]
        # exactly one of responsefile / url must be supplied
        if (config['responsefile'] == '') == (config['url'] == ''):
            raise ValueError, 'need responsefile or url'
    except ValueError, e:
        errorfunc('error: ' + str(e) + '\nrun with no args for parameter explanations')
        return

    # Fetch the raw .torrent data from disk or over HTTP.
    try:
        if config['responsefile'] != '':
            h = open(config['responsefile'], 'rb')
        else:
            h = urlopen(config['url'])
        response = h.read()
        h.close()
    except IOError, e:
        if config['responsefile'] != '' and config['responsefile'].find('Temporary Internet Files') != -1:
            errorfunc('BitTorrent was passed a filename that doesn\'t exist.  ' +
                'Either clear your Temporary Internet Files or right-click the link ' +
                'and save the .torrent to disk first.')
        else:
            errorfunc('problem getting response info - ' + str(e))
        return

    # Decode and structurally validate the metainfo.
    try:
        response = bdecode(response)
        check_message(response)
    except ValueError, e:
        errorfunc("got bad file info - " + str(e))
        return

    # Decide where to store the download and pre-create directories.
    try:
        def make(f, forcedir = False):
            # ensure the directory for f exists (f itself if forcedir)
            if not forcedir:
                f = path.split(f)[0]
            if f != '' and not path.exists(f):
                makedirs(f)
        info = response['info']
        if info.has_key('length'):
            # single-file torrent
            file_length = info['length']
            file = filefunc(info['name'], file_length, config['saveas'], False)
            if file is None:
                return
            make(file)
            files = [(file, file_length)]
        else:
            # multi-file torrent: total length is the sum of all entries
            file_length = 0
            for x in info['files']:
                file_length += x['length']
            file = filefunc(info['name'], file_length, config['saveas'], True)
            if file is None:
                return
            # if this path exists, and no files from the info dict exist, we assume it's a new download and
            # the user wants to create a new directory with the default name
            existing = 0
            if path.exists(file):
                for x in info['files']:
                    if path.exists(path.join(file, x['path'][0])):
                        existing = 1
                if not existing:
                    file = path.join(file, info['name'])
            make(file, True)
            # alert the UI to any possible change in path
            if pathFunc != None:
                pathFunc(file)
            files = []
            for x in info['files']:
                n = file
                for i in x['path']:
                    n = path.join(n, i)
                files.append((n, x['length']))
                make(n)
    except OSError, e:
        errorfunc("Couldn't allocate dir - " + str(e))
        return

    finflag = Event()
    # ann[0] holds the tracker announce function once it exists; a one-slot
    # list so the finished() closure below can see the later assignment
    ann = [None]
    # peer id: 'M' + dashed version, padded, plus 6 random-ish hash bytes
    myid = 'M' + version.replace('.', '-')
    myid = myid + ('-' * (8 - len(myid))) + b2a_hex(sha(repr(time()) + ' ' + str(getpid())).digest()[-6:])
    seed(myid)
    # split the concatenated SHA1 digests into one 20-byte hash per piece
    pieces = [info['pieces'][x:x+20] for x in xrange(0,
        len(info['pieces']), 20)]
    def failed(reason, errorfunc = errorfunc, doneflag = doneflag):
        doneflag.set()
        if reason is not None:
            errorfunc(reason)
    rawserver = RawServer(doneflag, config['timeout_check_interval'], config['timeout'], errorfunc = errorfunc, maxconnects = config['max_allow_in'])
    try:
        try:
            storage = Storage(files, open, path.exists, path.getsize)
        except IOError, e:
            errorfunc('trouble accessing files - ' + str(e))
            return
        def finished(finfunc = finfunc, finflag = finflag,
                ann = ann, storage = storage, errorfunc = errorfunc):
            # called by StorageWrapper when the last piece hashes clean
            finflag.set()
            try:
                storage.set_readonly()
            except (IOError, OSError), e:
                errorfunc('trouble setting readonly at end - ' + str(e))
            if ann[0] is not None:
                ann[0](1)  # announce 'completed' to the tracker
            finfunc()
        # rm[0] becomes ratemeasure.data_rejected once it is constructed
        rm = [None]
        def data_flunked(amount, rm = rm, errorfunc = errorfunc, report_hash_failures = config['report_hash_failures']):
            if rm[0] is not None:
                rm[0](amount)
            if report_hash_failures:
                errorfunc('a piece failed hash check, re-downloading it')
        storagewrapper = StorageWrapper(storage,
            config['download_slice_size'], pieces,
            info['piece length'], finished, failed,
            statusfunc, doneflag, config['check_hashes'], data_flunked)
    except ValueError, e:
        failed('bad data - ' + str(e))
    except IOError, e:
        failed('IOError - ' + str(e))
    if doneflag.isSet():
        return

    # Find a free listening port in [minport, maxport].
    e = 'maxport less than minport - no ports to check'
    for listen_port in xrange(config['minport'], config['maxport'] + 1):
        try:
            rawserver.bind(listen_port, config['bind'])
            break
        except socketerror, e:
            pass
    else:
        errorfunc("Couldn't listen - " + str(e))
        return

    # Wire together the engine components.
    choker = Choker(config['max_uploads'], rawserver.add_task, finflag.isSet,
        config['min_uploads'])
    upmeasure = Measure(config['max_rate_period'],
        config['upload_rate_fudge'])
    downmeasure = Measure(config['max_rate_period'])
    def make_upload(connection, choker = choker,
            storagewrapper = storagewrapper,
            max_slice_length = config['max_slice_length'],
            max_rate_period = config['max_rate_period'],
            fudge = config['upload_rate_fudge']):
        return Upload(connection, choker, storagewrapper,
            max_slice_length, max_rate_period, fudge)
    ratemeasure = RateMeasure(storagewrapper.get_amount_left())
    rm[0] = ratemeasure.data_rejected
    picker = PiecePicker(len(pieces), config['rarest_first_cutoff'])
    for i in xrange(len(pieces)):
        if storagewrapper.do_I_have(i):
            picker.complete(i)
    downloader = Downloader(storagewrapper, picker,
        config['request_backlog'], config['max_rate_period'],
        len(pieces), downmeasure, config['snub_time'],
        ratemeasure.data_came_in)
    connecter = Connecter(make_upload, downloader, choker,
        len(pieces), upmeasure, config['max_upload_rate'] * 1024, rawserver.add_task)
    infohash = sha(bencode(info)).digest()
    encoder = Encoder(connecter, rawserver,
        myid, config['max_message_length'], rawserver.add_task,
        config['keepalive_interval'], infohash, config['max_initiate'])
    rerequest = Rerequester(response['announce'], config['rerequest_interval'],
        rawserver.add_task, connecter.how_many_connections,
        config['min_peers'], encoder.start_connection,
        rawserver.add_task, storagewrapper.get_amount_left,
        upmeasure.get_total, downmeasure.get_total, listen_port,
        config['ip'], myid, infohash, config['http_timeout'], errorfunc,
        config['max_initiate'], doneflag, upmeasure.get_rate, downmeasure.get_rate,
        encoder.ever_got_incoming)
    if config['spew']:
        spewflag.set()
    DownloaderFeedback(choker, rawserver.add_task, statusfunc,
        upmeasure.get_rate, downmeasure.get_rate,
        upmeasure.get_total, downmeasure.get_total, ratemeasure.get_time_left,
        ratemeasure.get_size_left, file_length, finflag,
        config['display_interval'], spewflag)

    # useful info and functions for the UI
    if paramfunc:
        paramfunc({ 'max_upload_rate' : connecter.change_max_upload_rate,  # change_max_upload_rate(<int bytes/sec>)
                    'max_uploads': choker.change_max_uploads,  # change_max_uploads(<int max uploads>)
                    'listen_port' : listen_port,  # int
                    'peer_id' : myid,  # string
                    'info_hash' : infohash,  # string
                    'start_connection' : encoder._start_connection  # start_connection((<string ip>, <int port>), <peer id>)
                    })

    statusfunc({"activity" : 'connecting to peers'})
    ann[0] = rerequest.announce
    rerequest.begin()
    # blocks until doneflag is set, then clean up
    rawserver.listen_forever(encoder)
    storage.close()
    rerequest.announce(2)  # tell the tracker we stopped
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from types import StringType, LongType, IntType, ListType, DictType
from re import compile
# Name whitelist: first character must not be '/', '\', '.' or '~', and no
# path separators anywhere - blocks path-traversal via torrent file names.
reg = compile(r'^[^/\\.~][^/\\]*$')

# bdecoded integers may be int or long in Python 2; accept both.
ints = (LongType, IntType)
def check_info(info):
if type(info) != DictType:
raise ValueError, 'bad metainfo - not a dictionary'
pieces = info.get('pieces')
if type(pieces) != StringType or len(pieces) % 20 != 0:
raise ValueError, 'bad metainfo - bad pieces key'
piecelength = info.get('piece length')
if type(piecelength) not in ints or piecelength <= 0:
raise ValueError, 'bad metainfo - illegal piece length'
name = info.get('name')
if type(name) != StringType:
raise ValueError, 'bad metainfo - bad name'
if not reg.match(name):
raise ValueError, 'name %s disallowed for security reasons' % name
if info.has_key('files') == info.has_key('length'):
raise ValueError, 'single/multiple file mix'
if info.has_key('length'):
length = info.get('length')
if type(length) not in ints or length < 0:
raise ValueError, 'bad metainfo - bad length'
else:
files = info.get('files')
if type(files) != ListType:
raise ValueError
for f in files:
if type(f) != DictType:
raise ValueError, 'bad metainfo - bad file value'
length = f.get('length')
if type(length) not in ints or length < 0:
raise ValueError, 'bad metainfo - bad length'
path = f.get('path')
if type(path) != ListType or path == []:
raise ValueError, 'bad metainfo - bad path'
for p in path:
if type(p) != StringType:
raise ValueError, 'bad metainfo - bad path dir'
if not reg.match(p):
raise ValueError, 'path %s disallowed for security reasons' % p
for i in xrange(len(files)):
for j in xrange(i):
if files[i]['path'] == files[j]['path']:
raise ValueError, 'bad metainfo - duplicate path'
def check_message(message):
if type(message) != DictType:
raise ValueError
check_info(message.get('info'))
announce = message.get('announce')
if type(announce) != StringType or len(announce) == 0:
raise ValueError, 'bad torrent file - announce is invalid'
def check_peers(message):
    """Validate a bdecoded tracker response.

    Accepts either the dict-list peers form or the compact string form
    (6 bytes per peer: 4 IP + 2 port), plus the optional bookkeeping
    fields.  Raises ValueError on any malformed field.
    """
    if type(message) != DictType:
        raise ValueError
    if message.has_key('failure reason'):
        if type(message['failure reason']) != StringType:
            raise ValueError
        return
    peers = message.get('peers')
    if type(peers) == ListType:
        # non-compact form: list of dicts with 'ip', 'port', optional 'peer id'
        for p in peers:
            if type(p) != DictType:
                raise ValueError
            if type(p.get('ip')) != StringType:
                raise ValueError
            port = p.get('port')
            # bug fix: was "p <= 0", comparing the peer *dict* to zero
            # (always false in Python 2), so bad ports were never rejected
            if type(port) not in ints or port <= 0:
                raise ValueError
            if p.has_key('peer id'):
                id = p.get('peer id')
                if type(id) != StringType or len(id) != 20:
                    raise ValueError
    # compact form: packed string, 6 bytes per peer
    elif type(peers) != StringType or len(peers) % 6 != 0:
        raise ValueError
    interval = message.get('interval', 1)
    if type(interval) not in ints or interval <= 0:
        raise ValueError
    minint = message.get('min interval', 1)
    if type(minint) not in ints or minint <= 0:
        raise ValueError
    if type(message.get('tracker id', '')) != StringType:
        raise ValueError
    npeers = message.get('num peers', 0)
    if type(npeers) not in ints or npeers < 0:
        raise ValueError
    dpeers = message.get('done peers', 0)
    if type(dpeers) not in ints or dpeers < 0:
        raise ValueError
    last = message.get('last', 0)
    if type(last) not in ints or last < 0:
        raise ValueError
| Python |
version = '3.4.2'
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from zurllib import urlopen, quote
from btformats import check_peers
from bencode import bdecode
from threading import Thread, Lock
from socket import error
from time import time
from random import randrange
from binascii import b2a_hex
class Rerequester:
    """Periodically announces to the tracker and feeds new peers to the engine.

    All timing goes through the two scheduler callbacks: `sched` runs
    callables on the network thread, `externalsched` hands results back from
    the worker thread that performs the blocking HTTP request.
    """
    def __init__(self, url, interval, sched, howmany, minpeers,
            connect, externalsched, amount_left, up, down,
            port, ip, myid, infohash, timeout, errorfunc, maxpeers, doneflag,
            upratefunc, downratefunc, ever_got_incoming):
        # Base announce URL with the per-session random 'key' baked in.
        self.url = ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
            (url, quote(infohash), quote(myid), str(port),
            b2a_hex(''.join([chr(randrange(256)) for i in xrange(4)]))))
        if ip != '':
            self.url += '&ip=' + quote(ip)
        self.interval = interval            # re-check interval (may be raised by tracker 'min interval')
        self.last = None                    # tracker 'last' token, echoed back
        self.trackerid = None               # tracker id token, echoed back
        self.announce_interval = 30 * 60    # full announce period (tracker 'interval')
        self.sched = sched
        self.howmany = howmany              # callback: current connection count
        self.minpeers = minpeers
        self.connect = connect              # callback: open connection to (dns, id)
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.timeout = timeout
        self.errorfunc = errorfunc
        self.maxpeers = maxpeers
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.ever_got_incoming = ever_got_incoming
        self.last_failed = True
        self.last_time = 0

    def c(self):
        # Self-rescheduling tick: announce early when short on peers.
        self.sched(self.c, self.interval)
        if self.ever_got_incoming():
            # reachable from outside - only top up when very low
            getmore = self.howmany() <= self.minpeers / 3
        else:
            getmore = self.howmany() < self.minpeers
        if getmore or time() - self.last_time > self.announce_interval:
            self.announce()

    def begin(self):
        # Start the periodic tick and send the initial 'started' announce.
        self.sched(self.c, self.interval)
        self.announce(0)

    def announce(self, event = None):
        # event: None = regular, 0 = started, 1 = completed, 2 = stopped.
        self.last_time = time()
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
            (self.url, str(self.up()), str(self.down()),
            str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.maxpeers:
            s += '&numwant=0'   # full up - don't ask for peers
        else:
            s += '&compact=1'
        if event != None:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        # SetOnce arbitrates between the worker thread finishing and the
        # timeout firing: whichever calls set() first wins.
        set = SetOnce().set
        def checkfail(self = self, set = set):
            if set():
                # only nag the user when transfers are also stalled
                if self.last_failed and self.upratefunc() < 100 and self.downratefunc() < 100:
                    self.errorfunc('Problem connecting to tracker - timeout exceeded')
                self.last_failed = True
        self.sched(checkfail, self.timeout)
        Thread(target = self.rerequest, args = [s, set]).start()

    def rerequest(self, url, set):
        # Runs on a worker thread; results are marshalled back via
        # externalsched so state is only touched on the main thread.
        try:
            h = urlopen(url)
            r = h.read()
            h.close()
            if set():
                def add(self = self, r = r):
                    self.last_failed = False
                    self.postrequest(r)
                self.externalsched(add, 0)
        except (IOError, error), e:
            if set():
                def fail(self = self, r = 'Problem connecting to tracker - ' + str(e)):
                    if self.last_failed:
                        self.errorfunc(r)
                    self.last_failed = True
                self.externalsched(fail, 0)

    def postrequest(self, data):
        """Parse a raw tracker response and connect to any new peers."""
        try:
            r = bdecode(data)
            check_peers(r)
            if r.has_key('failure reason'):
                self.errorfunc('rejected by tracker - ' + r['failure reason'])
            else:
                if r.has_key('warning message'):
                    self.errorfunc('warning from tracker - ' + r['warning message'])
                self.announce_interval = r.get('interval', self.announce_interval)
                self.interval = r.get('min interval', self.interval)
                self.trackerid = r.get('tracker id', self.trackerid)
                self.last = r.get('last')
                p = r['peers']
                peers = []
                if type(p) == type(''):
                    # compact form: 6 bytes per peer (4 IP octets + 2-byte port)
                    for x in xrange(0, len(p), 6):
                        ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                        port = (ord(p[x+4]) << 8) | ord(p[x+5])
                        peers.append((ip, port, None))
                else:
                    for x in p:
                        peers.append((x['ip'], x['port'], x.get('peer id')))
                ps = len(peers) + self.howmany()
                if ps < self.maxpeers:
                    # still short of peers - clear 'last' so the next
                    # announce asks the tracker for a fresh batch
                    if self.doneflag.isSet():
                        if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                            self.last = None
                    else:
                        if r.get('num peers', 1000) > ps * 1.2:
                            self.last = None
                for x in peers:
                    self.connect((x[0], x[1]), x[2])
        except ValueError, e:
            if data != '':
                self.errorfunc('bad data from tracker - ' + str(e))
class SetOnce:
    """Thread-safe one-shot flag: set() returns True only for the first caller."""

    def __init__(self):
        self.lock = Lock()
        self.first = True

    def set(self):
        # Whichever thread gets the lock first sees True; everyone else False.
        with self.lock:
            was_first, self.first = self.first, False
            return was_first
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from time import time
class RateMeasure:
    """Estimates download rate and remaining time from incoming data amounts."""

    def __init__(self, left):
        self.start = None          # notional transfer start timestamp
        self.last = None           # timestamp of the most recent update
        self.rate = 0
        self.remaining = None      # estimated seconds left, None if unknown
        self.left = left           # bytes still to download
        self.broke = False         # True once a >20s measurement window was seen
        self.got_anything = False

    def data_came_in(self, amount):
        if self.got_anything:
            self.update(time(), amount)
            return
        # First data ever: pretend the transfer started two seconds ago so
        # the initial rate estimate isn't a divide-by-zero blowup.
        self.got_anything = True
        self.start = time() - 2
        self.last = self.start
        self.left -= amount

    def data_rejected(self, amount):
        # A piece failed its hash check; its bytes must be fetched again.
        self.left += amount

    def get_time_left(self):
        if not self.got_anything:
            return None
        now = time()
        if now - self.last > 15:
            # stale estimate - decay it with a zero-byte update
            self.update(now, 0)
        return self.remaining

    def get_size_left(self):
        return self.left

    def update(self, t, amount):
        self.left -= amount
        try:
            # Weighted running average over the (last - start) window.
            window_bytes = (self.rate * (self.last - self.start)) + amount
            self.rate = window_bytes / (t - self.start)
            self.last = t
            self.remaining = self.left / self.rate
            # Keep the window no wider than the projected remaining time.
            if self.start < self.last - self.remaining:
                self.start = self.last - self.remaining
        except ZeroDivisionError:
            self.remaining = None
        # Once a 20-second window has been observed, never let it shrink
        # below 20 seconds again (smooths the estimate).
        if self.broke and self.last - self.start < 20:
            self.start = self.last - 20
        if self.last - self.start > 20:
            self.broke = True
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from sha import sha
from threading import Event
from bitfield import Bitfield
def dummy_status(fractionDone = None, activity = None):
    # default no-op status callback for StorageWrapper
    pass

def dummy_data_flunked(size):
    # default no-op hash-failure callback for StorageWrapper
    pass
class StorageWrapper:
    """Piece-level view over a flat Storage object.

    Tracks which pieces are complete, hands out sub-piece (begin, length)
    requests, hash-checks completed pieces, and - because pieces may arrive
    out of order before their final location is writable - maintains a
    `places` mapping from piece index to the piece-slot it currently
    occupies on disk, shuffling data between slots as holes fill in.
    """
    def __init__(self, storage, request_size, hashes,
            piece_size, finished, failed,
            statusfunc = dummy_status, flag = Event(), check_hashes = True,
            data_flunked = dummy_data_flunked):
        # storage      -- underlying flat byte store (read/write/was_preallocated)
        # request_size -- preferred sub-piece request length in bytes
        # hashes       -- list of 20-byte SHA1 digests, one per piece
        # finished     -- callback fired when the download completes
        # failed       -- callback fired on fatal errors (takes a reason string)
        # flag         -- Event checked during the startup hash scan to allow aborts
        # data_flunked -- callback(amount) fired when a piece fails its hash
        self.storage = storage
        self.request_size = request_size
        self.hashes = hashes
        self.piece_size = piece_size
        self.data_flunked = data_flunked
        self.total_length = storage.get_total_length()
        self.amount_left = self.total_length
        # sanity-check total length against the piece count
        if self.total_length <= piece_size * (len(hashes) - 1):
            raise ValueError, 'bad data from tracker - total too small'
        if self.total_length > piece_size * len(hashes):
            raise ValueError, 'bad data from tracker - total too big'
        self.finished = finished
        self.failed = failed
        self.numactive = [0] * len(hashes)           # outstanding requests per piece
        # per piece: 1 = untouched, list = remaining (begin, length) requests,
        # None = piece complete
        self.inactive_requests = [1] * len(hashes)
        self.amount_inactive = self.total_length
        self.endgame = False
        self.have = Bitfield(len(hashes))
        self.waschecked = [check_hashes] * len(hashes)
        self.places = {}                             # piece index -> slot on disk
        self.holes = []                              # unallocated slots, in order
        if len(hashes) == 0:
            finished()
            return
        # Startup scan: identify pieces already on disk, possibly sitting in
        # the wrong slot (targets maps hash -> slots wanting that piece).
        targets = {}
        total = len(hashes)
        for i in xrange(len(hashes)):
            if not self._waspre(i):
                targets.setdefault(hashes[i], []).append(i)
                total -= 1
        numchecked = 0.0
        if total and check_hashes:
            statusfunc({"activity" : 'checking existing file',
                "fractionDone" : 0})
        def markgot(piece, pos, self = self, check_hashes = check_hashes):
            # record that `piece` is complete and currently stored in slot `pos`
            self.places[piece] = pos
            self.have[piece] = True
            self.amount_left -= self._piecelen(piece)
            self.amount_inactive -= self._piecelen(piece)
            self.inactive_requests[piece] = None
            self.waschecked[piece] = check_hashes
        lastlen = self._piecelen(len(hashes) - 1)
        for i in xrange(len(hashes)):
            if not self._waspre(i):
                self.holes.append(i)
            elif not check_hashes:
                markgot(i, i)   # trust the data without hashing
            else:
                # sp hashes only the first lastlen bytes, so a short final
                # piece parked in this slot can also be recognized
                sh = sha(self.storage.read(piece_size * i, lastlen))
                sp = sh.digest()
                sh.update(self.storage.read(piece_size * i + lastlen, self._piecelen(i) - lastlen))
                s = sh.digest()
                if s == hashes[i]:
                    markgot(i, i)
                elif targets.get(s) and self._piecelen(i) == self._piecelen(targets[s][-1]):
                    # the data here belongs to some other (unallocated) piece
                    markgot(targets[s].pop(), i)
                elif not self.have[len(hashes) - 1] and sp == hashes[-1] and (i == len(hashes) - 1 or not self._waspre(len(hashes) - 1)):
                    # the final (short) piece is parked in this slot
                    markgot(len(hashes) - 1, i)
                else:
                    self.places[i] = i
                if flag.isSet():
                    return      # caller aborted the scan
                numchecked += 1
                statusfunc({'fractionDone': 1 - float(self.amount_left) / self.total_length})
        if self.amount_left == 0:
            finished()

    def _waspre(self, piece):
        # was this piece's slot already allocated on disk?
        return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece))

    def _piecelen(self, piece):
        # actual length of a piece (the final piece may be short)
        if piece < len(self.hashes) - 1:
            return self.piece_size
        else:
            return self.total_length - piece * self.piece_size

    def get_amount_left(self):
        return self.amount_left

    def do_I_have_anything(self):
        return self.amount_left < self.total_length

    def _make_inactive(self, index):
        # split a piece into request_size (begin, length) chunks
        length = min(self.piece_size, self.total_length - self.piece_size * index)
        l = []
        x = 0
        while x + self.request_size < length:
            l.append((x, self.request_size))
            x += self.request_size
        l.append((x, length - x))
        self.inactive_requests[index] = l

    def is_endgame(self):
        return self.endgame

    def get_have_list(self):
        return self.have.tostring()

    def do_I_have(self, index):
        return self.have[index]

    def do_I_have_requests(self, index):
        return not not self.inactive_requests[index]

    def new_request(self, index):
        # returns (begin, length)
        if self.inactive_requests[index] == 1:
            self._make_inactive(index)
        self.numactive[index] += 1
        rs = self.inactive_requests[index]
        r = min(rs)
        rs.remove(r)
        self.amount_inactive -= r[1]
        if self.amount_inactive == 0:
            self.endgame = True
        return r

    def piece_came_in(self, index, begin, piece):
        # wrapper turning disk errors into a failed() call
        try:
            return self._piece_came_in(index, begin, piece)
        except IOError, e:
            self.failed('IO Error ' + str(e))
            return True

    def _piece_came_in(self, index, begin, piece):
        """Store an incoming sub-piece; returns False if the piece flunked its hash."""
        if not self.places.has_key(index):
            # Piece has no slot yet: take the next hole, evicting/relocating
            # whatever currently occupies the slots involved.
            n = self.holes.pop(0)
            if self.places.has_key(n):
                # slot n's rightful piece is parked elsewhere - move it home
                oldpos = self.places[n]
                old = self.storage.read(self.piece_size * oldpos, self._piecelen(n))
                if self.have[n] and sha(old).digest() != self.hashes[n]:
                    self.failed('data corrupted on disk - maybe you have two copies running?')
                    return True
                self.storage.write(self.piece_size * n, old)
                self.places[n] = n
                if index == oldpos or index in self.holes:
                    self.places[index] = oldpos
                else:
                    # swap: relocate the piece currently using slot `index`
                    for p, v in self.places.items():
                        if v == index:
                            break
                    self.places[index] = index
                    self.places[p] = oldpos
                    old = self.storage.read(self.piece_size * index, self.piece_size)
                    self.storage.write(self.piece_size * oldpos, old)
            elif index in self.holes or index == n:
                # piece can live in its own slot; pre-fill if never allocated
                if not self._waspre(n):
                    self.storage.write(self.piece_size * n, self._piecelen(n) * chr(0xFF))
                self.places[index] = n
            else:
                # slot `index` is taken by another piece - move that piece to n
                for p, v in self.places.items():
                    if v == index:
                        break
                self.places[index] = index
                self.places[p] = n
                old = self.storage.read(self.piece_size * index, self._piecelen(n))
                self.storage.write(self.piece_size * n, old)
        self.storage.write(self.places[index] * self.piece_size + begin, piece)
        self.numactive[index] -= 1
        if not self.inactive_requests[index] and not self.numactive[index]:
            # last outstanding chunk arrived - hash-check the whole piece
            if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() == self.hashes[index]:
                self.have[index] = True
                self.inactive_requests[index] = None
                self.waschecked[index] = True
                self.amount_left -= self._piecelen(index)
                if self.amount_left == 0:
                    self.finished()
            else:
                # hash failure: reset the piece so it gets re-requested
                self.data_flunked(self._piecelen(index))
                self.inactive_requests[index] = 1
                self.amount_inactive += self._piecelen(index)
                return False
        return True

    def request_lost(self, index, begin, length):
        # a peer dropped a pending request - make it available again
        self.inactive_requests[index].append((begin, length))
        self.amount_inactive += length
        self.numactive[index] -= 1

    def get_piece(self, index, begin, length):
        # wrapper turning disk errors into a failed() call
        try:
            return self._get_piece(index, begin, length)
        except IOError, e:
            self.failed('IO Error ' + str(e))
            return None

    def _get_piece(self, index, begin, length):
        """Return `length` bytes at `begin` within a piece, or None if unavailable."""
        if not self.have[index]:
            return None
        if not self.waschecked[index]:
            # lazily verify pieces accepted without hashing at startup
            if sha(self.storage.read(self.piece_size * self.places[index], self._piecelen(index))).digest() != self.hashes[index]:
                self.failed('told file complete on start-up, but piece failed hash check')
                return None
            self.waschecked[index] = True
        if begin + length > self._piecelen(index):
            return None
        return self.storage.read(self.piece_size * self.places[index] + begin, length)
class DummyStorage:
    """In-memory stand-in for Storage, used by the unit tests below."""

    def __init__(self, total, pre = False, ranges = []):
        self.pre = pre          # reported by was_preexisting()
        self.ranges = ranges    # (begin, length) spans counted as preallocated
        self.s = chr(0xFF) * total
        self.done = False       # set by finished()

    def was_preexisting(self):
        return self.pre

    def was_preallocated(self, begin, length):
        # preallocated iff [begin, begin+length) lies inside some range
        end = begin + length
        return any(b <= begin and end <= b + l for b, l in self.ranges)

    def get_total_length(self):
        return len(self.s)

    def read(self, begin, length):
        return self.s[begin:begin + length]

    def write(self, begin, piece):
        head = self.s[:begin]
        tail = self.s[begin + len(piece):]
        self.s = head + piece + tail

    def finished(self):
        self.done = True
def test_basic():
    """Single 3-byte piece: request splitting, lost-request recycling, completion."""
    ds = DummyStorage(3)
    sw = StorageWrapper(ds, 2, [sha('abc').digest()], 4, ds.finished, None)
    assert sw.get_amount_left() == 3
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert sw.do_I_have_requests(0)
    x = []
    x.append(sw.new_request(0))
    assert sw.do_I_have_requests(0)
    x.append(sw.new_request(0))
    assert not sw.do_I_have_requests(0)
    x.sort()
    assert x == [(0, 2), (2, 1)]
    sw.request_lost(0, 2, 1)
    del x[-1]
    assert sw.do_I_have_requests(0)
    x.append(sw.new_request(0))
    assert x == [(0, 2), (2, 1)]
    assert not sw.do_I_have_requests(0)
    sw.piece_came_in(0, 0, 'ab')
    assert not sw.do_I_have_requests(0)
    assert sw.get_amount_left() == 3
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert not ds.done
    sw.piece_came_in(0, 2, 'c')
    assert not sw.do_I_have_requests(0)
    assert sw.get_amount_left() == 0
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0x80)
    assert sw.get_piece(0, 0, 3) == 'abc'
    assert sw.get_piece(0, 1, 2) == 'bc'
    assert sw.get_piece(0, 0, 2) == 'ab'
    assert sw.get_piece(0, 1, 1) == 'b'
    assert ds.done
def test_two_pieces():
    """Two pieces (one short final piece): independent request/complete cycles."""
    ds = DummyStorage(4)
    sw = StorageWrapper(ds, 3, [sha('abc').digest(),
        sha('d').digest()], 3, ds.finished, None)
    assert sw.get_amount_left() == 4
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert sw.do_I_have_requests(0)
    assert sw.do_I_have_requests(1)
    assert sw.new_request(0) == (0, 3)
    assert sw.get_amount_left() == 4
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert not sw.do_I_have_requests(0)
    assert sw.do_I_have_requests(1)
    assert sw.new_request(1) == (0, 1)
    assert sw.get_amount_left() == 4
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert not sw.do_I_have_requests(0)
    assert not sw.do_I_have_requests(1)
    sw.piece_came_in(0, 0, 'abc')
    assert sw.get_amount_left() == 1
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0x80)
    assert not sw.do_I_have_requests(0)
    assert not sw.do_I_have_requests(1)
    assert sw.get_piece(0, 0, 3) == 'abc'
    assert not ds.done
    sw.piece_came_in(1, 0, 'd')
    assert ds.done
    assert sw.get_amount_left() == 0
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0xC0)
    assert not sw.do_I_have_requests(0)
    assert not sw.do_I_have_requests(1)
    assert sw.get_piece(1, 0, 1) == 'd'
def test_hash_fail():
    """A flunked piece is re-requested and can complete on the second try."""
    ds = DummyStorage(4)
    sw = StorageWrapper(ds, 4, [sha('abcd').digest()], 4, ds.finished, None)
    assert sw.get_amount_left() == 4
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert sw.do_I_have_requests(0)
    assert sw.new_request(0) == (0, 4)
    sw.piece_came_in(0, 0, 'abcx')
    assert sw.get_amount_left() == 4
    assert not sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0)
    assert sw.do_I_have_requests(0)
    assert sw.new_request(0) == (0, 4)
    assert not ds.done
    sw.piece_came_in(0, 0, 'abcd')
    assert ds.done
    assert sw.get_amount_left() == 0
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0x80)
    assert not sw.do_I_have_requests(0)

def test_lazy_hashing():
    """With check_hashes off, a bad preallocated piece fails on first read."""
    ds = DummyStorage(4, ranges = [(0, 4)])
    flag = Event()
    sw = StorageWrapper(ds, 4, [sha('abcd').digest()], 4, ds.finished, lambda x, flag = flag: flag.set(), check_hashes = False)
    assert sw.get_piece(0, 0, 2) is None
    assert flag.isSet()

def test_lazy_hashing_pass():
    """With check_hashes off, a good (unallocated) piece read does not fail."""
    ds = DummyStorage(4)
    flag = Event()
    sw = StorageWrapper(ds, 4, [sha(chr(0xFF) * 4).digest()], 4, ds.finished, lambda x, flag = flag: flag.set(), check_hashes = False)
    assert sw.get_piece(0, 0, 2) is None
    assert not flag.isSet()
def test_preexisting():
    """A piece already valid on disk is detected during the startup scan."""
    ds = DummyStorage(4, True, [(0, 4)])
    sw = StorageWrapper(ds, 2, [sha(chr(0xFF) * 2).digest(),
        sha('ab').digest()], 2, ds.finished, None)
    assert sw.get_amount_left() == 2
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0x80)
    assert not sw.do_I_have_requests(0)
    assert sw.do_I_have_requests(1)
    assert sw.new_request(1) == (0, 2)
    assert not ds.done
    sw.piece_came_in(1, 0, 'ab')
    assert ds.done
    assert sw.get_amount_left() == 0
    assert sw.do_I_have_anything()
    assert sw.get_have_list() == chr(0xC0)
    assert not sw.do_I_have_requests(0)
    assert not sw.do_I_have_requests(1)

def test_total_too_short():
    """Total length below what the piece count requires raises ValueError."""
    ds = DummyStorage(4)
    try:
        StorageWrapper(ds, 4, [sha(chr(0xff) * 4).digest(),
            sha(chr(0xFF) * 4).digest()], 4, ds.finished, None)
        raise 'fail'
    except ValueError:
        pass

def test_total_too_big():
    """Total length beyond piece_size * num_pieces raises ValueError."""
    ds = DummyStorage(9)
    try:
        sw = StorageWrapper(ds, 4, [sha('qqqq').digest(),
            sha(chr(0xFF) * 4).digest()], 4, ds.finished, None)
        raise 'fail'
    except ValueError:
        pass

def test_end_above_total_length():
    """Reads past the end of the data return None."""
    ds = DummyStorage(3, True)
    sw = StorageWrapper(ds, 4, [sha('qqq').digest()], 4, ds.finished, None)
    assert sw.get_piece(0, 0, 4) == None

def test_end_past_piece_end():
    """Reads past the end of a piece return None."""
    ds = DummyStorage(4, True, ranges = [(0, 4)])
    sw = StorageWrapper(ds, 4, [sha(chr(0xFF) * 2).digest(),
        sha(chr(0xFF) * 2).digest()], 2, ds.finished, None)
    assert ds.done
    assert sw.get_piece(0, 0, 3) == None
from random import shuffle
def test_alloc_random():
ds = DummyStorage(101)
sw = StorageWrapper(ds, 1, [sha(chr(i)).digest() for i in xrange(101)], 1, ds.finished, None)
for i in xrange(100):
assert sw.new_request(i) == (0, 1)
r = range(100)
shuffle(r)
for i in r:
sw.piece_came_in(i, 0, chr(i))
for i in xrange(100):
assert sw.get_piece(i, 0, 1) == chr(i)
assert ds.s[:100] == ''.join([chr(i) for i in xrange(100)])
def test_alloc_resume():
ds = DummyStorage(101)
sw = StorageWrapper(ds, 1, [sha(chr(i)).digest() for i in xrange(101)], 1, ds.finished, None)
for i in xrange(100):
assert sw.new_request(i) == (0, 1)
r = range(100)
shuffle(r)
for i in r[:50]:
sw.piece_came_in(i, 0, chr(i))
assert ds.s[50:] == chr(0xFF) * 51
ds.ranges = [(0, 50)]
sw = StorageWrapper(ds, 1, [sha(chr(i)).digest() for i in xrange(101)], 1, ds.finished, None)
for i in r[50:]:
sw.piece_came_in(i, 0, chr(i))
assert ds.s[:100] == ''.join([chr(i) for i in xrange(100)])
def test_last_piece_pre():
ds = DummyStorage(3, ranges = [(2, 1)])
ds.s = chr(0xFF) + chr(0xFF) + 'c'
sw = StorageWrapper(ds, 2, [sha('ab').digest(), sha('c').digest()], 2, ds.finished, None)
assert not sw.do_I_have_requests(1)
assert sw.do_I_have_requests(0)
def test_not_last_pre():
    # A middle piece is already present; its neighbours remain requestable.
    storage = DummyStorage(3, ranges = [(1, 1)])
    storage.s = chr(0xFF) + 'a' + chr(0xFF)
    wrapper = StorageWrapper(storage, 1, [sha('a').digest()] * 3, 1, storage.finished, None)
    assert not wrapper.do_I_have_requests(1)
    assert wrapper.do_I_have_requests(0)
    assert wrapper.do_I_have_requests(2)
def test_last_piece_not_pre():
    # Only the short last piece is pre-allocated; deliver it first, then
    # the full-size pieces in random order.
    storage = DummyStorage(51, ranges = [(50, 1)])
    wrapper = StorageWrapper(storage, 2, [sha('aa').digest()] * 25 + [sha('b').digest()], 2, storage.finished, None)
    for i in xrange(25):
        assert wrapper.new_request(i) == (0, 2)
    assert wrapper.new_request(25) == (0, 1)
    wrapper.piece_came_in(25, 0, 'b')
    order = range(25)
    shuffle(order)
    for i in order:
        wrapper.piece_came_in(i, 0, 'aa')
    assert storage.done
    assert storage.s == 'a' * 50 + 'b'
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from parseargs import parseargs, formatDefinitions
from RawServer import RawServer
from HTTPHandler import HTTPHandler
from NatCheck import NatCheck
from threading import Event
from bencode import bencode, bdecode, Bencached
from zurllib import urlopen, quote, unquote
from urlparse import urlparse
from os import rename
from os.path import exists, isfile
from cStringIO import StringIO
from time import time, gmtime, strftime
from random import shuffle
from sha import sha
from types import StringType, IntType, LongType, ListType, DictType
from binascii import b2a_hex, a2b_hex, a2b_base64
import sys
from __init__ import version
# Tracker command-line option definitions: (name, default, help text)
# triples consumed by parseargs()/formatDefinitions() in track() below.
defaults = [
    ('port', 80, "Port to listen on."),
    ('dfile', None, 'file to store recent downloader info in'),
    ('bind', '', 'ip to bind to locally'),
    ('socket_timeout', 15, 'timeout for closing connections'),
    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
    ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
    ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
    ('response_size', 50, 'number of peers to send in an info message'),
    ('timeout_check_interval', 5,
        'time to wait between checking if any connections have timed out'),
    ('nat_check', 3,
        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
    ('min_time_between_log_flushes', 3.0,
        'minimum time it must have been since the last flush to do another one'),
    ('allowed_dir', '', 'only allow downloads for .torrents in this dir'),
    ('parse_allowed_interval', 15, 'minutes between reloading of allowed_dir'),
    ('show_names', 1, 'whether to display names from allowed dir'),
    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
    ('only_local_override_ip', 1, "ignore the ip GET parameter from machines which aren't on local network IPs"),
    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
    ('max_give', 200, 'maximum number of peers to give with any one request'),
    ]
def statefiletemplate(x):
    """Validate the structure of a bdecoded tracker state file.

    The state is a dict with optional 'peers' and 'completed' sections.
    Raises ValueError on any structural problem so a corrupt dfile is
    rejected before the tracker starts serving from it.
    """
    if type(x) != DictType:
        raise ValueError
    for cname, cinfo in x.items():
        if cname == 'peers':
            # The 'peers' key is a dictionary of SHA hashes (torrent ids)
            # -> {peer id -> info dict}.  Previously a non-dict value here
            # crashed with AttributeError on .values(); raise the documented
            # ValueError instead, mirroring the 'completed' branch below.
            if type(cinfo) != DictType:
                raise ValueError
            for y in cinfo.values():
                if type(y) != DictType:
                    raise ValueError
                for id, info in y.items():  # client ids interested in that torrent
                    if (len(id) != 20):  # peer ids are always 20 bytes
                        raise ValueError
                    if type(info) != DictType:
                        raise ValueError
                    # each info dict has an IP, a port and a bytes-left count
                    if type(info.get('ip', '')) != StringType:
                        raise ValueError
                    port = info.get('port')
                    if type(port) not in (IntType, LongType) or port < 0:
                        raise ValueError
                    left = info.get('left')
                    if type(left) not in (IntType, LongType) or left < 0:
                        raise ValueError
        elif cname == 'completed':
            # The 'completed' key maps torrent ids -> total completion count
            if (type(cinfo) != DictType):
                raise ValueError
            for y in cinfo.values():
                if type(y) not in (IntType, LongType):
                    raise ValueError
def parseTorrents(dir):
import os
a = {}
for f in os.listdir(dir):
if f[-8:] == '.torrent':
try:
p = os.path.join(dir,f)
d = bdecode(open(p, 'rb').read())
h = sha(bencode(d['info'])).digest()
i = d['info']
a[h] = {}
a[h]['name'] = i.get('name', f)
a[h]['file'] = f
a[h]['path'] = p
l = 0
if i.has_key('length'):
l = i.get('length',0)
elif i.has_key('files'):
for li in i['files']:
if li.has_key('length'):
l = l + li['length']
a[h]['length'] = l
except:
# what now, boss?
print "Error parsing " + f, sys.exc_info()[0]
return a
# Response body returned for any path the tracker does not serve (404).
alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
def isotime(secs = None):
    """Format *secs* (a POSIX timestamp, default: now) as 'YYYY-MM-DD HH:MM UTC'."""
    when = secs
    if when == None:
        when = time()
    return strftime('%Y-%m-%d %H:%M UTC', gmtime(when))
def compact_peer_info(ip, port):
return ''.join([chr(int(i)) for i in ip.split('.')]) + chr((port & 0xFF00) >> 8) + chr(port & 0xFF)
class Tracker:
    """BitTorrent tracker.

    Keeps the per-torrent peer tables, answers announce/scrape HTTP
    requests, renders an HTML status page, and periodically persists
    its state to the configured dfile.
    """
    def __init__(self, config, rawserver):
        # config: dict built from `defaults`; rawserver drives all timers/IO.
        self.response_size = config['response_size']
        self.dfile = config['dfile']
        self.natcheck = config['nat_check']
        self.max_give = config['max_give']
        self.reannounce_interval = config['reannounce_interval']
        self.save_dfile_interval = config['save_dfile_interval']
        self.show_names = config['show_names']
        self.only_local_override_ip = config['only_local_override_ip']
        favicon = config['favicon']
        self.favicon = None
        if favicon:
            if isfile(favicon):
                h = open(favicon, 'rb')
                self.favicon = h.read()
                h.close()
            else:
                print "**warning** specified favicon file -- %s -- does not exist." % favicon
        self.rawserver = rawserver
        # becache1/becache2: per-torrent caches of confirmed (non-NATed)
        # peers, as pre-bencoded dicts and as compact 6-byte strings.
        # cache1/cache2 are the shuffled pools peers are handed out from.
        self.becache1 = {}
        self.becache2 = {}
        self.cache1 = {}
        self.cache2 = {}
        self.times = {}
        # Restore previously saved state, if any.
        if exists(self.dfile):
            h = open(self.dfile, 'rb')
            ds = h.read()
            h.close()
            tempstate = bdecode(ds)
        else:
            tempstate = {}
        # Older dfiles were just the peers dict; wrap them.
        if tempstate.has_key('peers'):
            self.state = tempstate
        else:
            self.state = {}
            self.state['peers'] = tempstate
        self.downloads = self.state.setdefault('peers', {})
        self.completed = self.state.setdefault('completed', {})
        statefiletemplate(self.state)
        # Rebuild the peer caches from restored, already-NAT-checked peers.
        for x, dl in self.downloads.items():
            self.times[x] = {}
            for y, dat in dl.items():
                self.times[x][y] = 0
                if not dat.get('nat',1):
                    ip = dat['ip']
                    gip = dat.get('given ip')
                    if gip and is_valid_ipv4(gip) and (not self.only_local_override_ip or is_local_ip(ip)):
                        ip = gip
                    self.becache1.setdefault(x,{})[y] = Bencached(bencode({'ip': ip,
                        'port': dat['port'], 'peer id': y}))
                    self.becache2.setdefault(x,{})[y] = compact_peer_info(ip, dat['port'])
        rawserver.add_task(self.save_dfile, self.save_dfile_interval)
        self.prevtime = time()
        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
        self.logfile = None
        self.log = None
        # Optionally redirect stdout to a log file.
        if (config['logfile'] != '') and (config['logfile'] != '-'):
            try:
                self.logfile = config['logfile']
                self.log = open(self.logfile,'a')
                sys.stdout = self.log
                print "# Log Started: ", isotime()
            except:
                print "Error trying to redirect stdout to log file:", sys.exc_info()[0]
        self.allow_get = config['allow_get']
        if config['allowed_dir'] != '':
            self.allowed_dir = config['allowed_dir']
            self.parse_allowed_interval = config['parse_allowed_interval']
            self.parse_allowed()
        else:
            self.allowed = None
        # Detect platforms where unquote() does not decode '+' as space.
        if unquote('+') != ' ':
            self.uq_broken = 1
        else:
            self.uq_broken = 0
        self.keep_dead = config['keep_dead']
    def get(self, connection, path, headers):
        """Handle one HTTP GET; returns a (code, msg, headers, body) tuple
        or calls connection.answer() itself for announces."""
        try:
            (scheme, netloc, path, pars, query, fragment) = urlparse(path)
            if self.uq_broken == 1:
                path = path.replace('+',' ')
                query = query.replace('+',' ')
            path = unquote(path)[1:]
            params = {}
            for s in query.split('&'):
                if s != '':
                    i = s.index('=')
                    params[unquote(s[:i])] = unquote(s[i+1:])
        except ValueError, e:
            return (400, 'Bad Request', {'Content-Type': 'text/plain'},
                'you sent me garbage - ' + str(e))
        # --- HTML status page ---
        if path == '' or path == 'index.html':
            s = StringIO()
            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
                '<html><head><title>BitTorrent download info</title>\n')
            if self.favicon != None:
                s.write('<link rel="shortcut icon" href="/favicon.ico" />\n')
            s.write('</head>\n<body>\n' \
                '<h3>BitTorrent download info</h3>\n'\
                '<ul>\n'
                '<li><strong>tracker version:</strong> %s</li>\n' \
                '<li><strong>server time:</strong> %s</li>\n' \
                '</ul>\n' % (version, isotime()))
            names = self.downloads.keys()
            if names:
                names.sort()
                tn = 0  # total completions
                tc = 0  # total complete (seeding) connections
                td = 0  # total downloading connections
                tt = 0  # total transferred
                ts = 0  # total size
                nf = 0  # number of files displayed
                uc = {}  # unique seeding IPs
                ud = {}  # unique downloading IPs
                if self.allowed != None and self.show_names:
                    s.write('<table summary="files" border="1">\n' \
                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
                else:
                    s.write('<table summary="files">\n' \
                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
                for name in names:
                    l = self.downloads[name]
                    n = self.completed.get(name, 0)
                    tn = tn + n
                    lc = []
                    for i in l.values():
                        if type(i) == DictType:
                            if i['left'] == 0:
                                lc.append(1)
                                uc[i['ip']] = 1
                            else:
                                ud[i['ip']] = 1
                    c = len(lc)
                    tc = tc + c
                    d = len(l) - c
                    td = td + d
                    if self.allowed != None and self.show_names:
                        if self.allowed.has_key(name):
                            nf = nf + 1
                            sz = self.allowed[name]['length'] # size
                            ts = ts + sz
                            szt = sz * n # Transferred for this torrent
                            tt = tt + szt
                            if self.allow_get == 1:
                                linkname = '<a href="/file?info_hash=' + b2a_hex(name) + '">' + self.allowed[name]['name'] + '</a>'
                            else:
                                linkname = self.allowed[name]['name']
                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
                                % (b2a_hex(name), linkname, size_format(sz), c, d, n, size_format(szt)))
                    else:
                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
                            % (b2a_hex(name), c, d, n))
                ttn = 0
                for i in self.completed.values():
                    ttn = ttn + i
                if self.allowed != None and self.show_names:
                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i/%i</td><td align="right">%i/%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
                        % (nf, size_format(ts), len(uc), tc, len(ud), td, tn, ttn, size_format(tt)))
                else:
                    s.write('<tr><td align="right">%i files</td><td align="right">%i/%i</td><td align="right">%i/%i</td><td align="right">%i/%i</td></tr>\n'
                        % (nf, len(uc), tc, len(ud), td, tn, ttn))
                s.write('</table>\n' \
                    '<ul>\n' \
                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
                    '<li><em>complete:</em> number of connected clients with the complete file (total: unique IPs/total connections)</li>\n' \
                    '<li><em>downloading:</em> number of connected clients still downloading (total: unique IPs/total connections)</li>\n' \
                    '<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
                    '</ul>\n')
            else:
                s.write('<p>not tracking any files yet...</p>\n')
            s.write('</body>\n' \
                '</html>\n')
            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
        # --- scrape: per-torrent statistics ---
        elif path == 'scrape':
            fs = {}
            names = []
            if params.has_key('info_hash'):
                if self.downloads.has_key(params['info_hash']):
                    names = [ params['info_hash'] ]
                # else return nothing
            else:
                names = self.downloads.keys()
                names.sort()
            for name in names:
                l = self.downloads[name]
                n = self.completed.get(name, 0)
                c = len([1 for i in l.values() if type(i) == DictType and i['left'] == 0])
                d = len(l) - c
                fs[name] = {'complete': c, 'incomplete': d, 'downloaded': n}
                if (self.allowed is not None) and self.allowed.has_key(name) and self.show_names:
                    fs[name]['name'] = self.allowed[name]['name']
            r = {'files': fs}
            return (200, 'OK', {'Content-Type': 'text/plain'}, bencode(r))
        # --- optional .torrent download (allow_get) ---
        elif (path == 'file') and (self.allow_get == 1) and params.has_key('info_hash') and self.allowed.has_key(a2b_hex(params['info_hash'])):
            hash = a2b_hex(params['info_hash'])
            fname = self.allowed[hash]['file']
            fpath = self.allowed[hash]['path']
            return (200, 'OK', {'Content-Type': 'application/x-bittorrent', 'Content-Disposition': 'attachment; filename=' + fname}, open(fpath, 'rb').read())
        elif path == 'favicon.ico' and self.favicon != None:
            return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
        if path != 'announce':
            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
        # --- announce: validate parameters ---
        try:
            if not params.has_key('info_hash'):
                raise ValueError, 'no info hash'
            if params.has_key('ip') and not is_valid_ipv4(params['ip']):
                raise ValueError('DNS name or invalid IP address given for IP')
            infohash = params['info_hash']
            if self.allowed != None:
                if not self.allowed.has_key(infohash):
                    return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode({'failure reason':
                    'Requested download is not authorized for use with this tracker.'}))
            ip = connection.get_ip()
            ip_override = 0
            # honor the client-supplied ip= only from local machines unless configured otherwise
            if params.has_key('ip') and is_valid_ipv4(params['ip']) and (
                not self.only_local_override_ip or is_local_ip(ip)):
                ip_override = 1
            if params.has_key('event') and params['event'] not in ['started', 'completed', 'stopped']:
                raise ValueError, 'invalid event'
            port = long(params.get('port', ''))
            uploaded = long(params.get('uploaded', ''))
            downloaded = long(params.get('downloaded', ''))
            left = long(params.get('left', ''))
            myid = params.get('peer_id', '')
            if len(myid) != 20:
                raise ValueError, 'id not of length 20'
            rsize = self.response_size
            if params.has_key('numwant'):
                rsize = min(long(params['numwant']), self.max_give)
        except ValueError, e:
            return (400, 'Bad Request', {'Content-Type': 'text/plain'},
                'you sent me garbage - ' + str(e))
        # --- announce: update peer tables ---
        peers = self.downloads.setdefault(infohash, {})
        self.completed.setdefault(infohash, 0)
        ts = self.times.setdefault(infohash, {})
        # confirm: only let a peer update its own record (matching key or IP)
        confirm = 0
        if peers.has_key(myid):
            myinfo = peers[myid]
            if myinfo.has_key('key'):
                if params.get('key') != myinfo['key']:
                    return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                        bencode({'failure reason': 'key did not match key supplied earlier'}))
                confirm = 1
            elif myinfo['ip'] == ip:
                confirm = 1
        else:
            confirm = 1
        if params.get('event', '') != 'stopped' and confirm:
            ts[myid] = time()
            if not peers.has_key(myid):
                # new peer for this torrent
                peers[myid] = {'ip': ip, 'port': port, 'left': left}
                if params.has_key('key'):
                    peers[myid]['key'] = params['key']
                if params.has_key('ip') and is_valid_ipv4(params['ip']):
                    peers[myid]['given ip'] = params['ip']
                mip = ip
                if ip_override:
                    mip = params['ip']
                # if NAT checking is off (or the IP was overridden), trust
                # the peer immediately and add it to the hand-out caches
                if not self.natcheck or ip_override:
                    self.becache1.setdefault(infohash,{})[myid] = Bencached(bencode({'ip': mip, 'port': port, 'peer id': myid}))
                    self.becache2.setdefault(infohash,{})[myid] = compact_peer_info(mip, port)
            else:
                peers[myid]['left'] = left
                peers[myid]['ip'] = ip
            if params.get('event', '') == 'completed':
                self.completed[infohash] = 1 + self.completed[infohash]
            if port == 0:
                # port 0 means unreachable; mark with a huge NAT fail count
                peers[myid]['nat'] = 2**30
            elif self.natcheck and not ip_override:
                to_nat = peers[myid].get('nat', -1)
                if to_nat and to_nat < self.natcheck:
                    NatCheck(self.connectback_result, infohash, myid, ip, port, self.rawserver)
                else:
                    peers[myid]['nat'] = 0
        elif confirm:
            # 'stopped' event from a confirmed peer: drop it everywhere
            if peers.has_key(myid):
                if self.becache1[infohash].has_key(myid):
                    del self.becache1[infohash][myid]
                    del self.becache2[infohash][myid]
                del peers[myid]
                del ts[myid]
        # --- announce: build the peer list response ---
        data = {'interval': self.reannounce_interval}
        if params.get('compact', 0):
            if rsize == 0:
                data['peers'] = ''
            else:
                cache = self.cache2.setdefault(infohash, [])
                if len(cache) < rsize:
                    # refill and reshuffle the pool; also flush the other
                    # format's pool so both are rebuilt from the same peers
                    del cache[:]
                    cache.extend(self.becache2.setdefault(infohash, {}).values())
                    shuffle(cache)
                    del self.cache1.get(infohash, [])[:]
                data['peers'] = ''.join(cache[-rsize:])
                del cache[-rsize:]
        else:
            if rsize == 0:
                data['peers'] = []
            else:
                cache = self.cache1.setdefault(infohash, [])
                if len(cache) < rsize:
                    del cache[:]
                    cache.extend(self.becache1.setdefault(infohash, {}).values())
                    shuffle(cache)
                    del self.cache2.get(infohash, [])[:]
                data['peers'] = cache[-rsize:]
                del cache[-rsize:]
        connection.answer((200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data)))
    def connectback_result(self, result, downloadid, peerid, ip, port):
        """NatCheck callback: record reachability and cache reachable peers."""
        record = self.downloads.get(downloadid, {}).get(peerid)
        # ignore stale results for peers that changed or disappeared
        if record is None or record['ip'] != ip or record['port'] != port:
            return
        if not record.has_key('nat'):
            record['nat'] = int(not result)
        else:
            if result:
                record['nat'] = 0
            else:
                record['nat'] += 1
        if result:
            self.becache1.setdefault(downloadid,{})[peerid] = Bencached(bencode({'ip': ip, 'port': port, 'peer id': peerid}))
            self.becache2.setdefault(downloadid,{})[peerid] = compact_peer_info(ip, port)
    def save_dfile(self):
        """Periodic task: persist current state to the dfile."""
        self.rawserver.add_task(self.save_dfile, self.save_dfile_interval)
        h = open(self.dfile, 'wb')
        h.write(bencode(self.state))
        h.close()
    def parse_allowed(self):
        """Periodic task: reload the allowed_dir torrent whitelist."""
        self.rawserver.add_task(self.parse_allowed, self.parse_allowed_interval * 60)
        self.allowed = parseTorrents(self.allowed_dir)
    def expire_downloaders(self):
        """Periodic task: drop peers that have not announced since last pass."""
        for x in self.times.keys():
            for myid, t in self.times[x].items():
                if t < self.prevtime:
                    if self.becache1.get(x, {}).has_key(myid):
                        del self.becache1[x][myid]
                        del self.becache2[x][myid]
                    del self.times[x][myid]
                    del self.downloads[x][myid]
        self.prevtime = time()
        if (self.keep_dead != 1):
            # also drop torrents that have no peers left
            for key, value in self.downloads.items():
                if len(value) == 0:
                    del self.times[key]
                    del self.downloads[key]
        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
def is_valid_ipv4(ip):
    """Return True iff *ip* packs into the 6-byte compact format, i.e. it
    is a well-formed dotted-quad IPv4 address."""
    try:
        packed = compact_peer_info(ip, 0)
    except (ValueError, IndexError):
        # non-numeric or out-of-range octet
        return False
    return len(packed) == 6
def is_local_ip(ip):
    """Return 1 if *ip* (dotted-quad string) is a loopback or RFC 1918 /
    link-local address, else 0.

    Recognized: 10/8, 127/8, 192.168/16, 169.254/16, 172.16/12.
    Malformed input returns 0 instead of crashing.
    """
    try:
        # int() auto-promotes to long in Python 2, so long() is unnecessary
        v = [int(x) for x in ip.split('.')]
        if v[0] == 10 or v[0] == 127 or v[:2] in ([192, 168], [169, 254]):
            return 1
        if v[0] == 172 and v[1] >= 16 and v[1] <= 31:
            return 1
    except (ValueError, IndexError):
        # previously only ValueError was caught, so an input like '172'
        # raised an uncaught IndexError on v[1]
        return 0
    # previously fell through returning None; 0 is the documented falsy value
    return 0
def track(args):
if len(args) == 0:
print formatDefinitions(defaults, 80)
return
try:
config, files = parseargs(args, defaults, 0, 0)
except ValueError, e:
print 'error: ' + str(e)
print 'run with no arguments for parameter explanations'
return
r = RawServer(Event(), config['timeout_check_interval'], config['socket_timeout'])
t = Tracker(config, r)
r.bind(config['port'], config['bind'], True)
r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
t.save_dfile()
print '# Shutting down: ' + isotime()
def size_format(s):
    """Render a byte count as a human-readable string (B/KiB/MiB/GiB/TiB)."""
    if s < 1024:
        return str(s) + 'B'
    if s < 1048576:
        return str(int(s / 1024)) + 'KiB'
    if s < 1073741824:
        return str(int(s / 1048576)) + 'MiB'
    if s < 1099511627776:
        # two decimal places, truncated (not rounded)
        return str(int((s / 1073741824.0) * 100.0) / 100.0) + 'GiB'
    return str(int((s / 1099511627776.0) * 100.0) / 100.0) + 'TiB'
| Python |
# Written by Michael Janssen
# See LICENSE.txt for license information
def fmttime(n, compact = 0):
    """Format an ETA of *n* seconds for display.

    n == -1 means no progress, n == 0 means done; *compact* selects the
    short form used in narrow displays.
    """
    if n == -1:
        if compact:
            return '(no seeds?)'
        return 'download not progressing (no seeds?)'
    if n == 0:
        if compact:
            return "complete"
        return 'download complete!'
    secs = int(n)
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    if hours > 1000000:
        # absurdly large estimate: not worth displaying
        return 'n/a'
    stamp = '%d:%02d:%02d' % (hours, minutes, seconds)
    if compact:
        return stamp
    return 'finishing in ' + stamp
def fmtsize(n, baseunit = 0, padded = 1):
unit = [' B', ' K', ' M', ' G', ' T', ' P', ' E', ' Z', ' Y']
i = baseunit
while i + 1 < len(unit) and n >= 999:
i += 1
n = float(n) / (1 << 10)
size = ''
if padded:
if n < 10:
size = ' '
elif n < 100:
size = ' '
if i != 0:
size += '%.1f %s' % (n, unit[i])
else:
if padded:
size += '%.0f %s' % (n, unit[i])
else:
size += '%.0f %s' % (n, unit[i])
return size
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from cStringIO import StringIO
from socket import error as socketerror
# Handshake protocol identifier sent on the wire.
protocol_name = 'BitTorrent protocol'
# header, reserved, download id, my id, [length, message]
class NatCheck:
    """Connect back to an announcing peer to verify it is reachable.

    Opens a connection, sends the BitTorrent handshake, and parses the
    peer's reply with a small state machine.  Calls resultfunc(result,
    downloadid, peerid, ip, port) exactly once: True when the peer's id
    is verified, False on any mismatch, error, or lost connection.
    """
    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
        self.resultfunc = resultfunc
        self.downloadid = downloadid
        self.peerid = peerid
        self.ip = ip
        self.port = port
        self.closed = False
        self.buffer = StringIO()
        # state machine: expect one length byte first
        self.next_len = 1
        self.next_func = self.read_header_len
        try:
            self.connection = rawserver.start_connection((ip, port), self)
            # send our half of the handshake immediately
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                (chr(0) * 8) + downloadid)
        except socketerror:
            self.answer(False)
        except IOError:
            self.answer(False)
    def answer(self, result):
        # Deliver the verdict once and tear the probe connection down.
        self.closed = True
        try:
            self.connection.close()
        except AttributeError:
            # start_connection failed, so self.connection was never set
            pass
        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)
    # Each read_* handler returns (next expected length, next handler),
    # or None to signal a protocol mismatch.
    def read_header_len(self, s):
        if ord(s) != len(protocol_name):
            return None
        return len(protocol_name), self.read_header
    def read_header(self, s):
        if s != protocol_name:
            return None
        return 8, self.read_reserved
    def read_reserved(self, s):
        # reserved bytes are ignored
        return 20, self.read_download_id
    def read_download_id(self, s):
        if s != self.downloadid:
            return None
        return 20, self.read_peer_id
    def read_peer_id(self, s):
        if s != self.peerid:
            return None
        # peer id matches: the peer is reachable
        self.answer(True)
        return None
    def data_came_in(self, connection, s):
        # Accumulate bytes until the current handler's expected length is
        # reached, then advance the state machine.
        while True:
            if self.closed:
                return
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            self.buffer.reset()
            self.buffer.truncate()
            x = self.next_func(m)
            if x is None:
                if not self.closed:
                    self.answer(False)
                return
            self.next_len, self.next_func = x
    def connection_lost(self, connection):
        # Peer dropped us before the handshake completed.
        if not self.closed:
            self.closed = True
            self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)
    def connection_flushed(self, connection):
        pass
| Python |
# Written by Bram Cohen
# see LICENSE.txt for license information
from CurrentRateMeasure import Measure
class Upload:
    """Upload side of a single peer connection.

    Queues incoming piece requests while unchoked and streams the data
    out as the connection's send buffer drains; tracks the upload rate
    and the peer's interested/choked state for the Choker.
    """
    def __init__(self, connection, choker, storage,
            max_slice_length, max_rate_period, fudge):
        self.connection = connection
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        # new connections start choked and not interested
        self.choked = True
        self.interested = False
        # pending (index, begin, length) requests, in arrival order
        self.buffer = []
        self.measure = Measure(max_rate_period, fudge)
        # advertise what we already have, but only if it's non-empty
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())
    def got_not_interested(self):
        if self.interested:
            self.interested = False
            # pending requests from a no-longer-interested peer are moot
            del self.buffer[:]
            self.choker.not_interested(self.connection)
    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connection)
    def flushed(self):
        # Send queued pieces for as long as the connection can take them.
        while len(self.buffer) > 0 and self.connection.is_flushed():
            index, begin, length = self.buffer[0]
            del self.buffer[0]
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                # request was out of range: drop the peer
                self.connection.close()
                return
            self.measure.update_rate(len(piece))
            self.connection.send_piece(index, begin, piece)
    def got_request(self, index, begin, length):
        # Requests from peers that never declared interest, or that exceed
        # the slice limit, are protocol violations.
        if not self.interested or length > self.max_slice_length:
            self.connection.close()
            return
        # requests received while choking are silently ignored
        if not self.choked:
            self.buffer.append((index, begin, length))
            self.flushed()
    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            # already sent or never queued; nothing to cancel
            pass
    def choke(self):
        if not self.choked:
            self.choked = True
            # choking invalidates everything the peer had requested
            del self.buffer[:]
            self.connection.send_choke()
    def unchoke(self):
        if self.choked:
            self.choked = False
            self.connection.send_unchoke()
    def is_choked(self):
        return self.choked
    def is_interested(self):
        return self.interested
    def has_queries(self):
        return len(self.buffer) > 0
    def get_rate(self):
        return self.measure.get_rate()
class DummyConnection:
    """Connection stand-in that records every call into a shared event list."""
    def __init__(self, events):
        self.events = events
        # tests toggle this to simulate a drained send buffer
        self.flushed = False
    def is_flushed(self):
        return self.flushed
    def send_bitfield(self, bitfield):
        self.events.append(('bitfield', bitfield))
    def send_piece(self, index, begin, piece):
        self.events.append(('piece', index, begin, piece))
    def send_choke(self):
        self.events.append('choke')
    def send_unchoke(self):
        self.events.append('unchoke')
    def close(self):
        self.events.append('closed')
class DummyChoker:
    """Choker stand-in that records interest changes into a shared event list."""
    def __init__(self, events):
        self.events = events
    def interested(self, connection):
        self.events.append('interested')
    def not_interested(self, connection):
        self.events.append('not interested')
class DummyStorage:
    """Storage stand-in: logs calls; length-4 requests deliberately fail."""
    def __init__(self, events):
        self.events = events
    def do_I_have_anything(self):
        self.events.append('do I have')
        return True
    def get_have_list(self):
        self.events.append('get have list')
        return [False, True]
    def get_piece(self, index, begin, length):
        self.events.append(('get piece', index, begin, length))
        if length != 4:
            return 'a' * length
        # length 4 simulates an invalid request
        return None
def test_skip_over_choke():
    # A request received while choked is silently dropped.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    assert up.is_choked()
    assert not up.is_interested()
    up.got_interested()
    assert up.is_interested()
    up.got_request(0, 0, 3)
    conn.flushed = True
    up.flushed()
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'interested']
def test_bad_piece():
    # A request that storage rejects (returns None) closes the connection.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    assert up.is_choked()
    assert not up.is_interested()
    up.got_interested()
    assert up.is_interested()
    up.unchoke()
    assert not up.is_choked()
    up.got_request(0, 0, 4)
    conn.flushed = True
    up.flushed()
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'interested', 'unchoke',
        ('get piece', 0, 0, 4), 'closed']
def test_still_rejected_after_unchoke():
    # Choking clears the queue, so the earlier request must not survive
    # a subsequent unchoke.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    assert up.is_choked()
    assert not up.is_interested()
    up.got_interested()
    assert up.is_interested()
    up.unchoke()
    assert not up.is_choked()
    up.got_request(0, 0, 3)
    up.choke()
    up.unchoke()
    conn.flushed = True
    up.flushed()
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'interested', 'unchoke',
        'choke', 'unchoke']
def test_sends_when_flushed():
    # Queued requests are sent once the connection drains; a second
    # flush with an empty queue is a no-op.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.unchoke()
    up.got_interested()
    up.got_request(0, 1, 3)
    conn.flushed = True
    up.flushed()
    up.flushed()
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'unchoke', 'interested',
        ('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_sends_immediately():
    # If the connection is already drained, the piece goes out during
    # got_request itself.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.unchoke()
    up.got_interested()
    conn.flushed = True
    up.got_request(0, 1, 3)
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'unchoke', 'interested',
        ('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_cancel():
    # A matching cancel removes the queued request; a non-matching one
    # is ignored.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.unchoke()
    up.got_interested()
    up.got_request(0, 1, 3)
    up.got_cancel(0, 1, 3)
    up.got_cancel(0, 1, 2)
    up.flushed()
    conn.flushed = True
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'unchoke', 'interested']
def test_clears_on_not_interested():
    # Losing interest clears any queued requests.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.unchoke()
    up.got_interested()
    up.got_request(0, 1, 3)
    up.got_not_interested()
    conn.flushed = True
    up.flushed()
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'unchoke', 'interested',
        'not interested']
def test_close_when_sends_on_not_interested():
    # A request from a peer that never declared interest closes the
    # connection.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.got_request(0, 1, 3)
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'closed']
def test_close_over_max_length():
    # A request larger than max_slice_length closes the connection.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    up = Upload(conn, choker, store, 100, 20, 5)
    up.got_interested()
    up.got_request(0, 1, 101)
    assert log == ['do I have', 'get have list',
        ('bitfield', [False, True]), 'interested', 'closed']
def test_no_bitfield_on_start_empty():
    # No bitfield is advertised when we have nothing yet.
    log = []
    conn = DummyConnection(log)
    choker = DummyChoker(log)
    store = DummyStorage(log)
    store.do_I_have_anything = lambda: False
    up = Upload(conn, choker, store, 100, 20, 5)
    assert log == []
| Python |
"""
xbee.py
By Paul Malmsten, 2010
Inspired by code written by Amit Synderman and Marco Sangalli
pmalmsten@gmail.com
XBee superclass module
This class defines data and methods common to all XBee modules.
This class should be subclassed in order to provide
series-specific functionality.
"""
import struct, threading, time
from xbee.frame import APIFrame
class ThreadQuitException(Exception):
    """Raised inside the background reader thread to make run() exit cleanly."""
    pass
class XBeeBase(threading.Thread):
"""
Abstract base class providing command generation and response
parsing methods for XBee modules.
Constructor arguments:
ser: The file-like serial port to use.
shorthand: boolean flag which determines whether shorthand command
calls (i.e. xbee.at(...) instead of xbee.send("at",...)
are allowed.
callback: function which should be called with frame data
whenever a frame arrives from the serial port.
When this is not None, a background thread to monitor
the port and call the given function is automatically
started.
escaped: boolean flag which determines whether the library should
operate in escaped mode. In this mode, certain data bytes
in the output and input streams will be escaped and unescaped
in accordance with the XBee API. This setting must match
the appropriate api_mode setting of an XBee device; see your
XBee device's documentation for more information.
"""
def __init__(self, ser, shorthand=True, callback=None, escaped=False):
super(XBeeBase, self).__init__()
self.serial = ser
self.shorthand = shorthand
self._callback = None
self._thread_continue = False
self._escaped = escaped
if callback:
self._callback = callback
self._thread_continue = True
self._thread_quit = threading.Event()
self.start()
def halt(self):
"""
halt: None -> None
If this instance has a separate thread running, it will be
halted. This method will wait until the thread has cleaned
up before returning.
"""
if self._callback:
self._thread_continue = False
self._thread_quit.wait()
def _write(self, data):
"""
_write: binary data -> None
Packages the given binary data in an API frame and writes the
result to the serial port
"""
frame = APIFrame(data, self._escaped).output()
self.serial.write(frame)
def run(self):
"""
run: None -> None
This method overrides threading.Thread.run() and is automatically
called when an instance is created with threading enabled.
"""
while True:
try:
self._callback(self.wait_read_frame())
except ThreadQuitException:
break
self._thread_quit.set()
def _wait_for_frame(self, t_max = 250):
"""
_wait_for_frame: None -> binary data
_wait_for_frame will read from the serial port until a valid
API frame arrives. It will then return the binary data
contained within the frame.
If this method is called as a separate thread
and self.thread_continue is set to False, the thread will
exit by raising a ThreadQuitException.
"""
frame = APIFrame(escaped=self._escaped)
t_overflow = 0
while t_overflow < t_max:
if self._callback and not self._thread_continue:
raise ThreadQuitException
if self.serial.inWaiting() == 0:
time.sleep(.01)
t_overflow+= 1
continue
byte = self.serial.read()
if byte != APIFrame.START_BYTE:
continue
# Save all following bytes
frame.fill(byte)
while(frame.remaining_bytes() > 0):
frame.fill(self.serial.read())
try:
# Try to parse and return result
frame.parse()
return frame
except ValueError:
# Bad frame, so restart
#frame = APIFrame(escaped=self._escaped)
frame = None
def _build_command(self, cmd, **kwargs):
"""
_build_command: string (binary data) ... -> binary data
_build_command will construct a command packet according to the
specified command's specification in api_commands. It will expect
named arguments for all fields other than those with a default
value or a length of 'None'.
Each field will be written out in the order they are defined
in the command definition.
"""
try:
cmd_spec = self.api_commands[cmd]
except AttributeError:
raise NotImplementedError("API command specifications could not be found; use a derived class which defines 'api_commands'.")
packet = ''
for field in cmd_spec:
try:
# Read this field's name from the function arguments dict
data = kwargs[field['name']]
except KeyError:
# Data wasn't given
# Only a problem if the field has a specific length
if field['len'] is not None:
# Was a default value specified?
default_value = field['default']
if default_value:
# If so, use it
data = default_value
else:
# Otherwise, fail
raise KeyError(
"The expected field %s of length %d was not provided"
% (field['name'], field['len']))
else:
# No specific length, ignore it
data = None
# Ensure that the proper number of elements will be written
if field['len'] and len(data) != field['len']:
raise ValueError(
"The data provided for '%s' was not %d bytes long"\
% (field['name'], field['len']))
# Add the data to the packet, if it has been specified
# Otherwise, the parameter was of variable length, and not
# given
if data:
packet += data
return packet
def _split_response(self, data):
    """
    _split_response: binary data -> {'id':str,
                                     'param':binary data,
                                     ...}

    _split_response takes a data packet received from an XBee device
    and converts it into a dictionary. This dictionary provides
    names for each segment of binary data as specified in the
    api_responses spec.

    Raises KeyError for an unrecognized packet id byte, and
    ValueError when the packet length disagrees with the structure
    specification.
    """
    # Fetch the first byte, identify the packet
    # If the spec doesn't exist, raise exception
    # NOTE(review): data[0] is used directly as the api_responses key;
    # this assumes indexing yields the same type as the spec's keys
    # (true for Python 2 str data) -- confirm for Python 3 bytes input,
    # where data[0] is an int.
    packet_id = data[0]
    try:
        packet = self.api_responses[packet_id]
    except AttributeError:
        raise NotImplementedError("API response specifications could not be found; use a derived class which defines 'api_responses'.")
    except KeyError:
        raise KeyError(
            "Unrecognized response packet with id byte %s"
            % data[0])

    # Current byte index in the data stream
    index = 1

    # Result info
    info = {'id':packet['name']}
    packet_spec = packet['structure']

    # Parse the packet in the order specified
    for field in packet_spec:
        if field['len'] == 'null_terminated':
            # Variable-length field: consume bytes up to (but not
            # including) the terminating null byte
            field_data = ''

            while data[index] != '\x00':
                field_data += data[index]
                index += 1

            # Step past the null terminator itself
            index += 1
            info[field['name']] = field_data
        elif field['len'] is not None:
            # Store the number of bytes specified

            # Are we trying to read beyond the last data element?
            if index + field['len'] > len(data):
                raise ValueError(
                    "Response packet was shorter than expected")

            field_data = data[index:index + field['len']]
            info[field['name']] = field_data

            index += field['len']
        # If the data field has no length specified, store any
        #  leftover bytes and quit
        else:
            field_data = data[index:]

            # Were there any remaining bytes?
            if field_data:
                # If so, store them
                info[field['name']] = field_data
                index += len(field_data)
            break

    # If there are more bytes than expected, raise an exception
    if index < len(data):
        raise ValueError(
            "Response packet was longer than expected; expected: %d, got: %d bytes" % (index,
                                                                                       len(data)))

    # Check if this packet was an IO sample
    # If so, process the sample data
    if 'parse_as_io_samples' in packet:
        field_to_process = packet['parse_as_io_samples']
        info[field_to_process] = self._parse_samples(
            info[field_to_process])

    return info
def _parse_samples_header(self, io_bytes):
"""
_parse_samples_header: binary data in XBee IO data format ->
(int, [int ...], [int ...], int, int)
_parse_samples_header will read the first three bytes of the
binary data given and will return the number of samples which
follow, a list of enabled digital inputs, a list of enabled
analog inputs, the dio_mask, and the size of the header in bytes
"""
header_size = 3
# number of samples (always 1?) is the first byte
sample_count = ord(io_bytes[0])
# part of byte 1 and byte 2 are the DIO mask ( 9 bits )
dio_mask = (ord(io_bytes[1]) << 8 | ord(io_bytes[2])) & 0x01FF
# upper 7 bits of byte 1 is the AIO mask
aio_mask = (ord(io_bytes[1]) & 0xFE) >> 1
# sorted lists of enabled channels; value is position of bit in mask
dio_chans = []
aio_chans = []
for i in range(0,9):
if dio_mask & (1 << i):
dio_chans.append(i)
dio_chans.sort()
for i in range(0,7):
if aio_mask & (1 << i):
aio_chans.append(i)
aio_chans.sort()
return (sample_count, dio_chans, aio_chans, dio_mask, header_size)
def _parse_samples(self, io_bytes):
"""
_parse_samples: binary data in XBee IO data format ->
[ {"dio-0":True,
"dio-1":False,
"adc-0":100"}, ...]
_parse_samples reads binary data from an XBee device in the IO
data format specified by the API. It will then return a
dictionary indicating the status of each enabled IO port.
"""
sample_count, dio_chans, aio_chans, dio_mask, header_size = \
self._parse_samples_header(io_bytes)
samples = []
# split the sample data into a list, so it can be pop()'d
sample_bytes = [ord(c) for c in io_bytes[header_size:]]
# repeat for every sample provided
for sample_ind in range(0, sample_count):
tmp_samples = {}
if dio_chans:
# we have digital data
digital_data_set = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
digital_values = dio_mask & digital_data_set
for i in dio_chans:
tmp_samples['dio-%d' % i] = True if (digital_values >> i) & 1 else False
for i in aio_chans:
# only first 10 bits are significant
analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0)) & 0x03FF
tmp_samples['adc-%d' % i] = analog_sample
samples.append(tmp_samples)
return samples
def send(self, cmd, **kwargs):
    """
    send: string param=binary data ... -> None

    When send is called with the proper arguments, an API command
    will be written to the serial port for this XBee device
    containing the proper instructions and data.

    This method must be called with named arguments in accordance
    with the api_command specification. Arguments matching all
    field names other than those in reserved_names (like 'id' and
    'order') should be given, unless they are of variable length
    (of 'None' in the specification. Those are optional).
    """
    # Build the packet from the keyword arguments, then frame and send it
    frame_data = self._build_command(cmd, **kwargs)
    self._write(frame_data)
def wait_read_frame(self, t = 250):
    """
    wait_read_frame: None -> frame info dictionary

    wait_read_frame calls XBee._wait_for_frame() and waits until a
    valid frame appears on the serial port. Once it receives a frame,
    wait_read_frame attempts to parse the data contained within it
    and returns the resulting dictionary
    """
    frame = self._wait_for_frame(t)
    if frame is None:
        # The reader timed out without a valid frame
        return {}
    return self._split_response(frame.data)
def __getattr__(self, name):
"""
If a method by the name of a valid api command is called,
the arguments will be automatically sent to an appropriate
send() call
"""
# If api_commands is not defined, raise NotImplementedError\
# If its not defined, _getattr__ will be called with its name
if name == 'api_commands':
raise NotImplementedError("API command specifications could not be found; use a derived class which defines 'api_commands'.")
# Is shorthand enabled, and is the called name a command?
if self.shorthand and name in self.api_commands:
# If so, simply return a function which passes its arguments
# to an appropriate send() call
return lambda **kwargs: self.send(name, **kwargs)
else:
raise AttributeError("XBee has no attribute '%s'" % name)
| Python |
"""
zigbee.py
By Greg Rapp, 2010
Inspired by code written by Paul Malmsten, 2010
Inspired by code written by Amit Synderman and Marco Sangalli
gdrapp@gmail.com
This module implements an XBee ZB (ZigBee) API library.
"""
import struct
from xbee.base import XBeeBase
from xbee.python2to3 import byteToInt, intToByte
class ZigBee(XBeeBase):
    """
    Provides an implementation of the XBee API for XBee ZB (ZigBee) modules
    with recent firmware.

    Commands may be sent to a device by instantiating this class with
    a serial port object (see PySerial) and then calling the send
    method with the proper information specified by the API. Data may
    be read from a device synchronously by calling wait_read_frame.
    For asynchronous reads, see the definition of XBeeBase.
    """
    # Packets which can be sent to an XBee

    # Format:
    #        {name of command:
    #           [{name:field name, len:field length, default: default value sent}
    #            ...
    #            ]
    #         ...
    #         }
    api_commands = {"at":
                        [{'name':'id',              'len':1,        'default':b'\x08'},
                         {'name':'frame_id',        'len':1,        'default':b'\x01'},
                         {'name':'command',         'len':2,        'default':None},
                         {'name':'parameter',       'len':None,     'default':None}],
                    "queued_at":
                        [{'name':'id',              'len':1,        'default':b'\x09'},
                         {'name':'frame_id',        'len':1,        'default':b'\x01'},
                         {'name':'command',         'len':2,        'default':None},
                         {'name':'parameter',       'len':None,     'default':None}],
                    "remote_at":
                        [{'name':'id',              'len':1,        'default':b'\x17'},
                         {'name':'frame_id',        'len':1,        'default':b'\x00'},
                         # dest_addr_long is 8 bytes (64 bits), so use an unsigned long long
                         {'name':'dest_addr_long',  'len':8,        'default':struct.pack('>Q', 0)},
                         {'name':'dest_addr',       'len':2,        'default':b'\xFF\xFE'},
                         {'name':'options',         'len':1,        'default':b'\x02'},
                         {'name':'command',         'len':2,        'default':None},
                         {'name':'parameter',       'len':None,     'default':None}],
                    "tx":
                        [{'name':'id',              'len':1,        'default':b'\x10'},
                         {'name':'frame_id',        'len':1,        'default':b'\x01'},
                         {'name':'dest_addr_long',  'len':8,        'default':None},
                         {'name':'dest_addr',       'len':2,        'default':None},
                         {'name':'broadcast_radius','len':1,        'default':b'\x00'},
                         {'name':'options',         'len':1,        'default':b'\x00'},
                         {'name':'data',            'len':None,     'default':None}],
                    "tx_explicit":
                        [{'name':'id',              'len':1,        'default':b'\x11'},
                         {'name':'frame_id',        'len':1,        'default':b'\x00'},
                         {'name':'dest_addr_long',  'len':8,        'default':None},
                         {'name':'dest_addr',       'len':2,        'default':None},
                         {'name':'src_endpoint',    'len':1,        'default':None},
                         {'name':'dest_endpoint',   'len':1,        'default':None},
                         {'name':'cluster',         'len':2,        'default':None},
                         {'name':'profile',         'len':2,        'default':None},
                         {'name':'broadcast_radius','len':1,        'default':b'\x00'},
                         {'name':'options',         'len':1,        'default':b'\x00'},
                         {'name':'data',            'len':None,     'default':None}]
                    }

    # Packets which can be received from an XBee

    # Format:
    #        {id byte received from XBee:
    #           {name: name of response
    #            structure:
    #                [ {'name': name of field, 'len':length of field}
    #                  ...
    #                  ]
    #            parse_as_io_samples:name of field to parse as io
    #           }
    #           ...
    #        }
    # NOTE(review): some entries below use a 'parsing' key (a list of
    # (field, callable) pairs) rather than 'parse_as_io_samples' --
    # confirm which of the two the frame splitter consumes.
    api_responses = {b"\x90":
                        {'name':'rx',
                         'structure':
                            [{'name':'source_addr_long','len':8},
                             {'name':'source_addr',     'len':2},
                             {'name':'options',         'len':1},
                             {'name':'rf_data',         'len':None}]},
                     b"\x91":
                        {'name':'rx_explicit',
                         'structure':
                            [{'name':'source_addr_long','len':8},
                             {'name':'source_addr',     'len':2},
                             {'name':'source_endpoint', 'len':1},
                             {'name':'dest_endpoint',   'len':1},
                             {'name':'cluster',         'len':2},
                             {'name':'profile',         'len':2},
                             {'name':'options',         'len':1},
                             {'name':'rf_data',         'len':None}]},
                     b"\x92": # Checked by GDR-parse_samples_header function appears to need update to support
                        {'name':'rx_io_data_long_addr',
                         'structure':
                            [{'name':'source_addr_long','len':8},
                             {'name':'source_addr',     'len':2},
                             {'name':'options',         'len':1},
                             {'name':'samples',         'len':None}],
                         'parsing': [('samples',
                                      lambda xbee,original: xbee._parse_samples(original['samples'])
                                     )]},
                     b"\x8b":
                        {'name':'tx_status',
                         'structure':
                            [{'name':'frame_id',        'len':1},
                             {'name':'dest_addr',       'len':2},
                             {'name':'retries',         'len':1},
                             {'name':'deliver_status',  'len':1},
                             {'name':'discover_status', 'len':1}]},
                     b"\x8a":
                        {'name':'status',
                         'structure':
                            [{'name':'status',          'len':1}]},
                     b"\x88":
                        {'name':'at_response',
                         'structure':
                            [{'name':'frame_id',        'len':1},
                             {'name':'command',         'len':2},
                             {'name':'status',          'len':1},
                             {'name':'parameter',       'len':None}],
                         'parsing': [('parameter',
                                       lambda self, original: self._parse_IS_at_response(original)),
                                     ('parameter',
                                       lambda self, original: self._parse_ND_at_response(original))]
                        },
                     b"\x97": #Checked GDR (not sure about parameter, could be 4 bytes)
                        {'name':'remote_at_response',
                         'structure':
                            [{'name':'frame_id',        'len':1},
                             {'name':'source_addr_long','len':8},
                             {'name':'source_addr',     'len':2},
                             {'name':'command',         'len':2},
                             {'name':'status',          'len':1},
                             {'name':'parameter',       'len':None}],
                         'parsing': [('parameter',
                                       lambda self, original: self._parse_IS_at_response(original))]
                        },
                     b"\x95":
                        {'name':'node_id_indicator',
                         'structure':
                            [{'name':'sender_addr_long','len':8},
                             {'name':'sender_addr',     'len':2},
                             {'name':'options',         'len':1},
                             {'name':'source_addr',     'len':2},
                             {'name':'source_addr_long','len':8},
                             {'name':'node_id',         'len':'null_terminated'},
                             {'name':'parent_source_addr','len':2},
                             {'name':'device_type',     'len':1},
                             {'name':'source_event',    'len':1},
                             {'name':'digi_profile_id', 'len':2},
                             {'name':'manufacturer_id', 'len':2}]}
                     }

    def _parse_IS_at_response(self, packet_info):
        """
        If the given packet is a successful remote AT response for an IS
        command, parse the parameter field as IO data. Otherwise, the
        parameter is returned unchanged.
        """
        if packet_info['id'] in ('at_response','remote_at_response') and packet_info['command'] == b'IS' and packet_info['status'] == b'\x00':
            return self._parse_samples(packet_info['parameter'])
        else:
            return packet_info['parameter']

    def _parse_ND_at_response(self, packet_info):
        """
        If the given packet is a successful AT response for an ND
        (node discovery) command, parse the parameter field into its
        component fields. Otherwise, the parameter is returned unchanged.

        Raises ValueError if the parameter length does not match the
        expected ND record layout.
        """
        if packet_info['id'] == 'at_response' and packet_info['command'] == b'ND' and packet_info['status'] == b'\x00':
            result = {}

            # Parse each field directly
            result['source_addr'] = packet_info['parameter'][0:2]
            result['source_addr_long'] = packet_info['parameter'][2:10]

            # Parse the null-terminated node identifier field
            # (slicing one byte at a time keeps the comparison
            # bytes-vs-bytes on Python 3)
            null_terminator_index = 10
            while packet_info['parameter'][null_terminator_index:null_terminator_index+1] != b'\x00':
                null_terminator_index += 1;

            # Parse each field thereafter directly
            result['node_identifier'] = packet_info['parameter'][10:null_terminator_index]
            result['parent_address'] = packet_info['parameter'][null_terminator_index+1:null_terminator_index+3]
            result['device_type'] = packet_info['parameter'][null_terminator_index+3:null_terminator_index+4]
            result['status'] = packet_info['parameter'][null_terminator_index+4:null_terminator_index+5]
            result['profile_id'] = packet_info['parameter'][null_terminator_index+5:null_terminator_index+7]
            result['manufacturer'] = packet_info['parameter'][null_terminator_index+7:null_terminator_index+9]

            # Simple check to ensure a good parse
            if null_terminator_index+9 != len(packet_info['parameter']):
                raise ValueError("Improper ND response length: expected {0}, read {1} bytes".format(len(packet_info['parameter']), null_terminator_index+9))

            return result
        else:
            return packet_info['parameter']

    def __init__(self, *args, **kwargs):
        # Call the super class constructor to save the serial port
        super(ZigBee, self).__init__(*args, **kwargs)

    def _parse_samples_header(self, io_bytes):
        """
        _parse_samples_header: binary data in XBee ZB IO data format ->
                        (int, [int ...], [int ...], int, int)

        _parse_samples_header will read the first three bytes of the
        binary data given and will return the number of samples which
        follow, a list of enabled digital inputs, a list of enabled
        analog inputs, the dio_mask, and the size of the header in bytes

        _parse_samples_header is overloaded here to support the additional
        IO lines offered by the XBee ZB
        """
        # ZB header is one byte longer than the 802.15.4 one (separate AIO byte)
        header_size = 4

        # number of samples (always 1?) is the first byte
        sample_count = byteToInt(io_bytes[0])

        # bytes 1 and 2 are the DIO mask; bits 9 and 8 aren't used
        dio_mask = (byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) & 0x0E7F

        # byte 3 is the AIO mask
        aio_mask = byteToInt(io_bytes[3])

        # sorted lists of enabled channels; value is position of bit in mask
        dio_chans = []
        aio_chans = []

        for i in range(0,13):
            if dio_mask & (1 << i):
                dio_chans.append(i)

        dio_chans.sort()

        for i in range(0,8):
            if aio_mask & (1 << i):
                aio_chans.append(i)

        aio_chans.sort()

        return (sample_count, dio_chans, aio_chans, dio_mask, header_size)
| Python |
"""
dispatch.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Provides the Dispatch class, which allows one to filter incoming data
packets from an XBee device and call an appropriate method when
one arrives.
"""
from xbee import XBee
class Dispatch(object):
    """
    Filters incoming XBee packets and routes each one to the callbacks
    whose filter functions accept it.
    """

    def __init__(self, ser=None, xbee=None):
        # Prefer an explicit XBee; otherwise wrap the given serial port
        if xbee:
            self.xbee = xbee
        elif ser:
            self.xbee = XBee(ser)
        else:
            self.xbee = None

        self.handlers = []
        self.names = set()

    def register(self, name, callback, filter):
        """
        register: string, function: string, data -> None, function: data -> boolean -> None

        Register will save the given name, callback, and filter function
        for use when a packet arrives. When one arrives, the filter
        function will be called to determine whether to call its associated
        callback function. If the filter method returns true, the callback
        method will be called with its associated name string and the packet
        which triggered the call.
        """
        if name in self.names:
            raise ValueError("A callback has already been registered with the name '%s'" % name)

        self.names.add(name)
        self.handlers.append(
            {'name': name, 'callback': callback, 'filter': filter})

    def run(self, oneshot=False):
        """
        run: boolean -> None

        run will read and dispatch any packet which arrives from the
        XBee device
        """
        if not self.xbee:
            raise ValueError("Either a serial port or an XBee must be provided to __init__ to execute run()")

        while True:
            self.dispatch(self.xbee.wait_read_frame())
            if oneshot:
                break

    def dispatch(self, packet):
        """
        dispatch: XBee data dict -> None

        When called, dispatch checks the given packet against each
        registered callback method and calls each callback whose filter
        function returns true.
        """
        accepted = (h for h in self.handlers if h['filter'](packet))
        for handler in accepted:
            # Call the handler method with its associated
            # name and the packet which passed its filter check
            handler['callback'](handler['name'], packet)
| Python |
"""
fake.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Provides fake objects for testing the dispatch package.
"""
class FakeXBee(object):
    """
    Represents an XBee device from which data can be read.
    """

    def __init__(self, data):
        # Canned packet handed back on every read
        self.data = data

    def wait_read_frame(self):
        """Return the canned data this fake was constructed with."""
        return self.data
| Python |
from xbee.helpers.dispatch.dispatch import Dispatch
| Python |
"""
python2to3.py
By Paul Malmsten, 2011
Helper functions for handling Python 2 and Python 3 datatype shenanigans.
"""
def byteToInt(byte):
    """
    byte -> int

    Return the integer value of a single byte, whether it arrives as
    an int (Python 3 bytes indexing), a one-character string
    (Python 2), or a length-one bytes object.
    """
    if hasattr(byte, 'bit_length'):
        # Already an int
        return byte
    if hasattr(byte, 'encode'):
        # A string: take its code point
        return ord(byte)
    # A bytes object: indexing yields the int value
    return byte[0]
def intToByte(i):
    """
    int -> byte

    Convert a small integer to a single-byte bytes object: chr() on
    Python 2 (where bytes is str and has encode()), bytes([i]) on
    Python 3.
    """
    is_python2 = hasattr(bytes(), 'encode')
    if is_python2:
        return chr(i)
    return bytes([i])
def stringToBytes(s):
    """
    string -> bytes

    Converts a string into an appropriate bytes object
    """
    encoded = s.encode('ascii')
    return encoded
| Python |
"""
frame.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Represents an API frame for communicating with an XBee
"""
import struct
from xbee.python2to3 import byteToInt, intToByte
class APIFrame:
    """
    Represents a frame of data to be sent to or which was received
    from an XBee device
    """

    START_BYTE = b'\x7E'
    ESCAPE_BYTE = b'\x7D'
    XON_BYTE = b'\x11'
    XOFF_BYTE = b'\x13'
    ESCAPE_BYTES = (START_BYTE, ESCAPE_BYTE, XON_BYTE, XOFF_BYTE)

    def __init__(self, data=b'', escaped=False):
        # data holds the unescaped payload; raw_data accumulates the
        # bytes read off the wire (length header + payload + checksum)
        self.data = data
        self.raw_data = b''
        self.escaped = escaped
        self._unescape_next_byte = False

    def checksum(self):
        """
        checksum: None -> single checksum byte

        checksum adds all bytes of the binary, unescaped data in the
        frame, saves the last byte of the result, and subtracts it from
        0xFF. The final result is the checksum
        """
        total = 0

        # Add together all bytes
        for byte in self.data:
            total += byteToInt(byte)

        # Only keep the last byte
        total = total & 0xFF

        return intToByte(0xFF - total)

    def verify(self, chksum):
        """
        verify: 1 byte -> boolean

        verify checksums the frame, adds the expected checksum, and
        determines whether the result is correct. The result should
        be 0xFF.
        """
        total = 0

        # Add together all bytes
        for byte in self.data:
            total += byteToInt(byte)

        # Add checksum too
        total += byteToInt(chksum)

        # Only keep low bits
        total &= 0xFF

        # Check result
        return total == 0xFF

    def len_bytes(self):
        """
        len_bytes: None -> (MSB, LSB) 16-bit integer length, two bytes

        len_bytes counts the number of bytes to be sent and encodes the
        data length in two bytes, big-endian (most significant first).
        """
        count = len(self.data)
        return struct.pack("> h", count)

    def output(self):
        """
        output: None -> valid API frame (binary data)

        output will produce a valid API frame for transmission to an
        XBee module.
        """
        # start is one byte long, length is two bytes
        # data is n bytes long (indicated by length)
        # chksum is one byte long
        data = self.len_bytes() + self.data + self.checksum()

        # Only run the escape process if it hasn't been already
        if self.escaped and len(self.raw_data) < 1:
            self.raw_data = APIFrame.escape(data)

        if self.escaped:
            data = self.raw_data

        # Never escape start byte
        return APIFrame.START_BYTE + data

    @staticmethod
    def escape(data):
        """
        escape: byte string -> byte string

        When a 'special' byte is encountered in the given data string,
        it is preceded by an escape byte and XORed with 0x20.
        """
        escaped_data = b""
        for byte in data:
            if intToByte(byteToInt(byte)) in APIFrame.ESCAPE_BYTES:
                escaped_data += APIFrame.ESCAPE_BYTE
                escaped_data += intToByte(0x20 ^ byteToInt(byte))
            else:
                escaped_data += intToByte(byteToInt(byte))

        return escaped_data

    def fill(self, byte):
        """
        fill: byte -> None

        Adds the given raw byte to this APIFrame. If this APIFrame is marked
        as escaped and this byte is an escape byte, the next byte in a call
        to fill() will be unescaped.
        """
        if self._unescape_next_byte:
            byte = intToByte(byteToInt(byte) ^ 0x20)
            self._unescape_next_byte = False
        elif self.escaped and byte == APIFrame.ESCAPE_BYTE:
            self._unescape_next_byte = True
            return

        self.raw_data += intToByte(byteToInt(byte))

    def remaining_bytes(self):
        """
        remaining_bytes: None -> int

        Number of raw bytes still needed before the frame is complete:
        at least the 3-byte prefix (start byte + two length bytes), and,
        once the length header has arrived, the payload plus checksum.
        """
        remaining = 3

        if len(self.raw_data) >= 3:
            # First two bytes are the length of the data
            raw_len = self.raw_data[1:3]
            data_len = struct.unpack("> h", raw_len)[0]

            remaining += data_len

            # Don't forget the checksum
            remaining += 1

        return remaining - len(self.raw_data)

    def parse(self):
        """
        parse: None -> None

        Given a valid API frame, parse extracts the data contained
        inside it and verifies it against its checksum.

        Raises ValueError when fewer than 3 raw bytes have been
        filled, or when the checksum does not match.
        """
        if len(self.raw_data) < 3:
            # BUG FIX: this ValueError was previously constructed but
            # never raised, so parsing continued on too-short data and
            # failed later with a confusing struct error.
            raise ValueError("parse() may only be called on a frame containing at least 3 bytes of raw data (see fill())")

        # First two bytes are the length of the data
        raw_len = self.raw_data[1:3]

        # Unpack it
        data_len = struct.unpack("> h", raw_len)[0]

        # Read the data
        data = self.raw_data[3:3 + data_len]
        chksum = self.raw_data[-1]

        # Checksum check
        self.data = data
        if not self.verify(chksum):
            raise ValueError("Invalid checksum")
| Python |
#! /usr/bin/python
"""
Fake.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Provides fake device objects for other unit tests.
"""
import sys
class FakeDevice(object):
    """
    Represents a fake serial port for testing purposes
    """

    def __init__(self):
        # Holds the most recent chunk written to the port
        self.data = b''

    def write(self, data):
        """
        Writes data to the fake port for later evaluation
        """
        # Each write replaces (does not append to) the stored buffer
        self.data = data
class FakeReadDevice(object):
    """
    Represents a fake serial port which can be read from in a similar
    fashion to the real thing
    """

    def __init__(self, data, silent_on_empty=False):
        self.data = data
        self.read_index = 0
        # When True, exhausting the data exits the process instead of raising
        self.silent_on_empty = silent_on_empty

    def read(self, length=1):
        """
        Read the indicated number of bytes from the port
        """
        end = self.read_index + length

        # If too many bytes would be read, raise exception
        if end > len(self.data):
            if self.silent_on_empty:
                sys.exit(0)
            raise ValueError("Not enough bytes exist!")

        chunk = self.data[self.read_index:end]
        self.read_index = end
        return chunk

    def inWaiting(self):
        """
        Returns the number of bytes available to be read
        """
        return len(self.data) - self.read_index
| Python |
"""
ieee.py
By Paul Malmsten, 2010
Inspired by code written by Amit Synderman and Marco Sangalli
pmalmsten@gmail.com
This module provides an XBee (IEEE 802.15.4) API library.
"""
import struct
from xbee.base import XBeeBase
class XBee(XBeeBase):
    """
    Provides an implementation of the XBee API for IEEE 802.15.4 modules
    with recent firmware.

    Commands may be sent to a device by instantiating this class with
    a serial port object (see PySerial) and then calling the send
    method with the proper information specified by the API. Data may
    be read from a device synchronously by calling wait_read_frame. For
    asynchronous reads, see the definition of XBeeBase.
    """
    # Packets which can be sent to an XBee

    # Format:
    #        {name of command:
    #           [{name:field name, len:field length, default: default value sent}
    #            ...
    #            ]
    #         ...
    #         }
    api_commands = {"at":
                        [{'name':'id',        'len':1,      'default':b'\x08'},
                         {'name':'frame_id',  'len':1,      'default':b'\x00'},
                         {'name':'command',   'len':2,      'default':None},
                         {'name':'parameter', 'len':None,   'default':None}],
                    "queued_at":
                        [{'name':'id',        'len':1,      'default':b'\x09'},
                         {'name':'frame_id',  'len':1,      'default':b'\x00'},
                         {'name':'command',   'len':2,      'default':None},
                         {'name':'parameter', 'len':None,   'default':None}],
                    "remote_at":
                        [{'name':'id',              'len':1,        'default':b'\x17'},
                         {'name':'frame_id',        'len':1,        'default':b'\x00'},
                         # dest_addr_long is 8 bytes (64 bits), so use an unsigned long long
                         {'name':'dest_addr_long',  'len':8,        'default':struct.pack('>Q', 0)},
                         {'name':'dest_addr',       'len':2,        'default':b'\xFF\xFE'},
                         {'name':'options',         'len':1,        'default':b'\x02'},
                         {'name':'command',         'len':2,        'default':None},
                         {'name':'parameter',       'len':None,     'default':None}],
                    "tx_long_addr":
                        [{'name':'id',        'len':1,      'default':b'\x00'},
                         {'name':'frame_id',  'len':1,      'default':b'\x00'},
                         {'name':'dest_addr', 'len':8,      'default':None},
                         {'name':'options',   'len':1,      'default':b'\x00'},
                         {'name':'data',      'len':None,   'default':None}],
                    "tx":
                        [{'name':'id',        'len':1,      'default':b'\x01'},
                         {'name':'frame_id',  'len':1,      'default':b'\x00'},
                         {'name':'dest_addr', 'len':2,      'default':None},
                         {'name':'options',   'len':1,      'default':b'\x00'},
                         {'name':'data',      'len':None,   'default':None}]
                    }

    # Packets which can be received from an XBee

    # Format:
    #        {id byte received from XBee:
    #           {name: name of response
    #            structure:
    #                [ {'name': name of field, 'len':length of field}
    #                  ...
    #                  ]
    #            parsing: [(name of field to parse,
    #                        function which accepts an xbee object and the
    #                        partially-parsed dictionary of data received
    #                        and returns bytes to replace the
    #                        field to parse's data with
    #                      )]},
    #           }
    #           ...
    #        }
    #
    api_responses = {b"\x80":
                        {'name':'rx_long_addr',
                         'structure':
                            [{'name':'source_addr', 'len':8},
                             {'name':'rssi',        'len':1},
                             {'name':'options',     'len':1},
                             {'name':'rf_data',     'len':None}]},
                     b"\x81":
                        {'name':'rx',
                         'structure':
                            [{'name':'source_addr', 'len':2},
                             {'name':'rssi',        'len':1},
                             {'name':'options',     'len':1},
                             {'name':'rf_data',     'len':None}]},
                     b"\x82":
                        {'name':'rx_io_data_long_addr',
                         'structure':
                            [{'name':'source_addr_long','len':8},
                             {'name':'rssi',            'len':1},
                             {'name':'options',         'len':1},
                             {'name':'samples',         'len':None}],
                         'parsing': [('samples',
                                      lambda xbee,original: xbee._parse_samples(original['samples'])
                                     )]},
                     b"\x83":
                        {'name':'rx_io_data',
                         'structure':
                            [{'name':'source_addr', 'len':2},
                             {'name':'rssi',        'len':1},
                             {'name':'options',     'len':1},
                             {'name':'samples',     'len':None}],
                         'parsing': [('samples',
                                      lambda xbee,original: xbee._parse_samples(original['samples'])
                                     )]},
                     b"\x89":
                        {'name':'tx_status',
                         'structure':
                            [{'name':'frame_id',    'len':1},
                             {'name':'status',      'len':1}]},
                     b"\x8a":
                        {'name':'status',
                         'structure':
                            [{'name':'status',      'len':1}]},
                     b"\x88":
                        {'name':'at_response',
                         'structure':
                            [{'name':'frame_id',    'len':1},
                             {'name':'command',     'len':2},
                             {'name':'status',      'len':1},
                             {'name':'parameter',   'len':None}]},
                     b"\x97":
                        {'name':'remote_at_response',
                         'structure':
                            [{'name':'frame_id',        'len':1},
                             {'name':'source_addr_long','len':8},
                             {'name':'source_addr',     'len':2},
                             {'name':'command',         'len':2},
                             {'name':'status',          'len':1},
                             {'name':'parameter',       'len':None}]},
                     }

    def __init__(self, *args, **kwargs):
        # Call the super class constructor to save the serial port
        super(XBee, self).__init__(*args, **kwargs)
| Python |
"""
XBee package initialization file
By Paul Malmsten, 2010
pmalmsten@gmail.com
"""
from xbee.ieee import XBee
from xbee.zigbee import ZigBee
| Python |
#! /usr/bin/python
"""
receive_samples_async.py

By Paul Malmsten, 2010
pmalmsten@gmail.com

This example reads the serial port and asynchronously processes IO data
received from a remote XBee.

NOTE: this script uses Python 2 print-statement syntax.
"""

from xbee import XBee
import time
import serial

PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600

# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)

# Callback invoked on the XBee's background thread for every frame received
def message_received(data):
    print data

# Create API object, which spawns a new thread
xbee = XBee(ser, callback=message_received)

# Do other stuff in the main thread
while True:
    try:
        time.sleep(.1)
    except KeyboardInterrupt:
        break

# halt() must be called before closing the serial
# port in order to ensure proper thread shutdown
xbee.halt()
ser.close()
| Python |
#! /usr/bin/python
"""
dispatch_async.py

By Paul Malmsten, 2010
pmalmsten@gmail.com

This example continuously reads the serial port and dispatches packets
which arrive to appropriate methods for processing in a separate thread.

NOTE: this script uses Python 2 print-statement syntax.
"""

from xbee import XBee
from xbee.helpers.dispatch import Dispatch
import time
import serial

PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600

# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)

# Create handlers for various packet types
def status_handler(name, packet):
    print "Status Update - Status is now: ", packet['status']

def io_sample_handler(name, packet):
    print "Samples Received: ", packet['samples']

# When a Dispatch is created with a serial port, it will automatically
# create an XBee object on your behalf for accessing the device.
# If you wish, you may explicitly provide your own XBee:
#
#  xbee = XBee(ser)
#  dispatch = Dispatch(xbee=xbee)
#
# Functionally, these are the same.
dispatch = Dispatch(ser)

# Register the packet handlers with the dispatch:
#  The string name allows one to distinguish between multiple registrations
#   for a single callback function
#  The second argument is the function to call
#  The third argument is a function which determines whether to call its
#   associated callback when a packet arrives. It should return a boolean.
dispatch.register(
    "status",
    status_handler,
    lambda packet: packet['id']=='status'
)

dispatch.register(
    "io_data",
    io_sample_handler,
    lambda packet: packet['id']=='rx_io_data'
)

# Create API object, which spawns a new thread
# Point the asynchronous callback at Dispatch.dispatch()
#  This method will dispatch a single XBee data packet when called
xbee = XBee(ser, callback=dispatch.dispatch)

# Do other stuff in the main thread
while True:
    try:
        time.sleep(.1)
    except KeyboardInterrupt:
        break

# halt() must be called before closing the serial
# port in order to ensure proper thread shutdown
xbee.halt()
ser.close()
| Python |
#! /usr/bin/python
from xbee import XBee
import serial
"""
serial_example.py
By Paul Malmsten, 2010
Demonstrates reading the low-order address bits from an XBee Series 1
device over a serial port (USB) in API-mode.
"""
def main():
"""
Sends an API AT command to read the lower-order address bits from
an XBee Series 1 and looks for a response
"""
try:
# Open serial port
ser = serial.Serial('/dev/ttyUSB0', 9600)
# Create XBee Series 1 object
xbee = XBee(ser)
# Send AT packet
xbee.send('at', frame_id='A', command='DH')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='B', command='DL')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='C', command='MY')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='D', command='CE')
# Wait for response
response = xbee.wait_read_frame()
print response
except KeyboardInterrupt:
pass
finally:
ser.close()
if __name__ == '__main__':
main()
| Python |
#! /usr/bin/python
"""
receive_samples.py

By Paul Malmsten, 2010
pmalmsten@gmail.com

This example continuously reads the serial port and processes IO data
received from a remote XBee.

NOTE: this script uses Python 2 print-statement syntax.
"""

from xbee import XBee
import serial

PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600

# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)

# Create API object
xbee = XBee(ser)

# Continuously read and print packets
while True:
    try:
        response = xbee.wait_read_frame()
        print response
    except KeyboardInterrupt:
        break

ser.close()
| Python |
#! /usr/bin/python
"""
dispatch.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
This example continuously reads the serial port and dispatches packets
which arrive to appropriate methods for processing.
"""
from xbee.helpers.dispatch import Dispatch
import serial
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
# Create handlers for various packet types
def status_handler(name, packet):
    # Dispatch callback for modem-status frames; `name` is the label
    # given at registration time, `packet` the decoded frame dict.
    print "Status Update - Status is now: ", packet['status']
def io_sample_handler(name, packet):
    # Dispatch callback for IO-sample frames (id == 'rx_io_data').
    print "Samples Received: ", packet['samples']
# When a Dispatch is created with a serial port, it will automatically
# create an XBee object on your behalf for accessing the device.
# If you wish, you may explicitly provide your own XBee:
#
#   xbee = XBee(ser)
#   dispatch = Dispatch(xbee=xbee)
#
# Functionally, these are the same.
dispatch = Dispatch(ser)

# Register the packet handlers with the dispatch. Each entry is
# (label, callback, predicate): the label distinguishes multiple
# registrations of one callback; the predicate is given each packet and
# returns a boolean deciding whether the callback fires for it.
handlers = (
    ("status", status_handler, lambda packet: packet['id'] == 'status'),
    ("io_data", io_sample_handler, lambda packet: packet['id'] == 'rx_io_data'),
)
for label, callback, predicate in handlers:
    dispatch.register(label, callback, predicate)

try:
    # run() will loop infinitely while waiting for and processing
    # packets which arrive. Don't expect it to return (unless an
    # exception occurs).
    dispatch.run()
except KeyboardInterrupt:
    pass
ser.close()
| Python |
#! /usr/bin/python
"""
alarm.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
This module will communicate with a remote XBee device in order to
implement a simple alarm clock with bed occupancy detection.
"""
import serial
from xbee import XBee
class DataSource(object):
    """
    Represents a source from which alarm times may be pulled (i.e. an
    online calendar)
    """

    def next_alarm_time(self, current_time):
        """
        next_alarm_time: datetime -> datetime
        Returns the next time at which the alarm should activate
        """
        # Was `raise NotImplemented()`: NotImplemented is a comparison
        # sentinel, not an exception class, so calling it raised a
        # confusing TypeError instead of signalling "abstract method".
        raise NotImplementedError()
class AlarmDevice(object):
    """
    Wraps a piece of alarm hardware, covering input from and output to
    the real world. Concrete subclasses drive the actual device.
    """

    def __init__(self, hw):
        # Handle to the underlying hardware interface.
        self.hw = hw

    def activate(self):
        """
        activate: None -> None
        Turn on the device's noise-making features.
        """
        raise NotImplementedError()

    def deactivate(self):
        """
        deactivate: None -> None
        Turn off the device's noise-making features.
        """
        raise NotImplementedError()

    def bed_occupied(self):
        """
        bed_occupied: None -> Boolean
        Report whether the bed is currently occupied.
        """
        raise NotImplementedError()
class WakeupRoutine(object):
    """
    Encapsulates a strategy for waking the user with a particular
    AlarmDevice.
    """

    def __init__(self, device):
        # AlarmDevice used to carry out the wakeup.
        self.device = device

    def trigger(self):
        """
        trigger: None -> None
        Run the wakeup process on the configured device; does not
        return until the process is complete.
        """
        raise NotImplementedError()
# ================= Custom Classes =============================
class TestSource(DataSource):
    """Stub DataSource that always reports one fixed alarm time."""

    def __init__(self, time):
        super(TestSource, self).__init__()
        # The single alarm time this stub will ever return.
        self.next_time = time

    def next_alarm_time(self, current_time):
        # current_time is ignored; the stub has exactly one answer.
        return self.next_time
class XBeeAlarm(AlarmDevice):
    """
    AlarmDevice backed by a remote XBee Series 1 node.

    Remote pin usage (as configured by this class): ADC0 reads the bed
    sensor, DIO1 drives the LED (active low), DIO2 drives the buzzer
    (active high).
    """

    # ADC0 reading above which the bed is considered occupied.
    DETECT_THRESH = 350

    def __init__(self, serial_port, remote_addr):
        # serial_port: device name of the local XBee's serial port.
        # remote_addr: 16-bit address of the remote node.
        # Open serial port, construct XBee1, configure remote device,
        # store as hardware
        self.remote_addr = remote_addr
        ser = serial.Serial(serial_port)
        xbee = XBee(ser)
        super(XBeeAlarm, self).__init__(xbee)
        # Reset remote device
        self._reset()

    def _reset(self):
        """
        reset: None -> None
        Resets the remote XBee device to a standard configuration
        """
        # Analog pin 0: parameter '\x02' presumably selects ADC mode on
        # D0 for Series 1 firmware -- TODO confirm against the XBee AT
        # command reference.
        self.hw.remote_at(
            dest_addr=self.remote_addr,
            command='D0',
            parameter='\x02')
        # Disengage remote LED, buzzer
        self.deactivate()
        self._set_send_samples(False)

    def _set_LED(self, status):
        """
        _set_LED: boolean -> None
        Sets the status of the remote LED
        """
        # DIO pin 1 (LED), active low: '\x04' drives low (on),
        # '\x05' drives high (off)
        self.hw.remote_at(
            dest_addr=self.remote_addr,
            command='D1',
            parameter='\x04' if status else '\x05')

    def _set_buzzer(self, status):
        """
        _set_buzzer: boolean -> None
        Sets the status of the remote buzzer
        """
        # DIO pin 2 (buzzer); polarity is the opposite of the LED
        # (the original comment wrongly said "pin 1 (LED)")
        self.hw.remote_at(
            dest_addr=self.remote_addr,
            command='D2',
            parameter='\x05' if status else '\x04')

    def _set_send_samples(self, status):
        """
        _set_send_samples: boolean -> None
        Sets whether the remote device will send data samples
        periodically.
        """
        # NOTE(review): IR is the sample rate; '\xff' is 0xFF (255),
        # which if the unit is milliseconds is ~1/4 s, not "once per
        # second" as the original comment claimed -- confirm units.
        self.hw.remote_at(
            dest_addr=self.remote_addr,
            command='IR',
            parameter='\xff' if status else '\x00')

    def activate(self):
        """
        activate: None -> None
        Remote XBee starts making noise and turns on LED
        """
        self._set_LED(True)
        self._set_buzzer(True)

    def deactivate(self):
        """
        deactivate: None -> None
        Remote XBee stops making noise and turns off the LED
        (original docstring was a copy-paste of activate's)
        """
        self._set_LED(False)
        self._set_buzzer(False)

    def bed_occupied(self):
        """
        bed_occupied: None -> boolean
        Determines whether the bed is currently occupied by requesting
        data from the remote XBee and comparing the analog value with
        a threshold.
        """
        # Receive samples from the remote device
        self._set_send_samples(True)
        # NOTE(review): blocks forever if no frame ever carries an
        # 'adc-0' sample.
        while True:
            packet = self.hw.wait_read_frame()
            if 'adc-0' in packet['samples'][0]:
                # Stop receiving samples from the remote device
                self._set_send_samples(False)
                return packet['samples'][0]['adc-0'] > XBeeAlarm.DETECT_THRESH
class SimpleWakeupRoutine(WakeupRoutine):
    """
    When triggered, sounds the alarm if the bed is occupied and keeps
    escalating until the bed is vacated.
    """

    def trigger(self):
        from time import sleep

        # Guard clause: nothing to do when nobody is in bed.
        if not self.device.bed_occupied():
            return

        # Initial alarm: five short on/off pulses.
        blip = 0.1
        for _ in range(5):
            self.device.activate()
            sleep(blip)
            self.device.deactivate()
            sleep(blip)

        # Allow time to escape
        sleep(30)

        # Extended alarm: the on-time doubles every round, separated by
        # a fixed pause, until the bed is empty.
        on_time = 1
        off_time = 10
        while self.device.bed_occupied():
            self.device.activate()
            sleep(on_time)
            self.device.deactivate()
            sleep(off_time)
            on_time *= 2
def main():
"""
Run through simple demonstration of alarm concept
"""
alarm = XBeeAlarm('/dev/ttyUSB0', '\x56\x78')
routine = SimpleWakeupRoutine(alarm)
from time import sleep
while True:
"""
Run the routine with 10 second delays
"""
try:
print "Waiting 5 seconds..."
sleep(5)
print "Firing"
routine.trigger()
except KeyboardInterrupt:
break
# Script entry point.
if __name__ == '__main__':
    main()
| Python |
#! /usr/bin/python
"""
led_adc_example.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
A simple example which sets up a remote device to read an analog value
on ADC0 and a digital output on DIO1. It will then read voltage
measurements and write an active-low result to the remote DIO1 pin.
"""
from xbee import XBee
import serial
# Local XBee attached over USB serial; the remote node lives at 16-bit
# address '\x56\x78'.
ser = serial.Serial('/dev/ttyUSB0', 9600)
xbee = XBee(ser)

## Set up remote device (one-time provisioning, kept disabled):
#xbee.send('remote_at',
#frame_id='A',
#dest_addr_long='\x00\x00\x00\x00\x00\x00\x00\x00',
#dest_addr='\x56\x78',
#options='\x02',
#command='D0',
#parameter='\x02')
#print xbee.wait_read_frame()['status']
#xbee.send('remote_at',
#frame_id='B',
#dest_addr_long='\x00\x00\x00\x00\x00\x00\x00\x00',
#dest_addr='\x56\x78',
#options='\x02',
#command='D1',
#parameter='\x05')
#print xbee.wait_read_frame()['status']
#xbee.send('remote_at',
#frame_id='C',
#dest_addr_long='\x00\x00\x00\x00\x00\x00\x00\x00',
#dest_addr='\x56\x78',
#options='\x02',
#command='IR',
#parameter='\x32')
#print xbee.wait_read_frame()['status']
#xbee.send('remote_at',
#frame_id='C',
#dest_addr_long='\x00\x00\x00\x00\x00\x00\x00\x00',
#dest_addr='\x56\x78',
#options='\x02',
#command='WR')

# Deactivate alarm pin (D2 low) and persist settings with WR.
xbee.remote_at(
    dest_addr='\x56\x78',
    command='D2',
    parameter='\x04')
xbee.remote_at(
    dest_addr='\x56\x78',
    command='WR')
#print xbee.wait_read_frame()['status']

# Main loop: mirror the remote ADC0 reading onto DIO1 (LED, active low)
# and DIO2 (alarm, active high). The 160 threshold is empirical --
# TODO confirm for the actual sensor.
while True:
    try:
        packet = xbee.wait_read_frame()
        print packet
        # If it's a sample, check it
        if packet['id'] == 'rx_io_data':
            # Set remote LED status
            if packet['samples'][0]['adc-0'] > 160:
                # Active low
                xbee.remote_at(
                    dest_addr='\x56\x78',
                    command='D1',
                    parameter='\x04')
                # Active high alarm pin
                xbee.remote_at(
                    dest_addr='\x56\x78',
                    command='D2',
                    parameter='\x05')
            else:
                xbee.remote_at(
                    dest_addr='\x56\x78',
                    command='D1',
                    parameter='\x05')
                # Deactivate alarm pin
                xbee.remote_at(
                    dest_addr='\x56\x78',
                    command='D2',
                    parameter='\x04')
    except KeyboardInterrupt:
        break
ser.close()
| Python |
"""
distutils_extensions.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Provides distutils extension code for running tests
"""
from distutils.core import Command
from distutils.command.build_py import build_py as _build_py
import sys
class TestCommand(Command):
    """distutils command that runs the automated test suite with nose."""

    description = "Runs automated tests"
    # (long name, short name, help text); 'strict' acts as a flag.
    user_options = [('strict', 's',
        "If a test fails, immediately quits with exit code 1")]

    def initialize_options(self):
        # Default: warn about failures but let the build continue.
        self.strict = False

    def finalize_options(self):
        pass

    def run(self):
        # Run nose if it is installed; otherwise warn and skip tests.
        try:
            import nose
            if not nose.run(argv=['nosetests']):
                message = ["An automated test has failed! Please report this",
                    "failure to a project member. Use at your own risk!"]
                if self.strict:
                    message.append("strict mode is on (see setup.cfg) - setup will now exit")
                self.show_warning(message)
                if self.strict:
                    sys.exit(1)
        except ImportError:
            self.show_warning(
                ["Automated tests have been skipped (install nose and run",
                 "'python setup.py test' to run the tests)"]
            )

    def show_warning(self, lines):
        # Print a banner-framed warning to stderr, one '# ' per line.
        print >> sys.stderr, "#######################################################"
        print >> sys.stderr, "# WARNING"
        for line in lines:
            print >> sys.stderr, "# ", line
        print >> sys.stderr, "#######################################################"
class build_py(_build_py):
    """
    Automatically runs tests during build
    """

    def run(self):
        # Run the 'test' command first, then the stock build_py step.
        # (Explicit base-class call, not super(): distutils commands
        # are old-style classes in Python 2.)
        self.run_command('test')
        _build_py.run(self)
| Python |
#!/usr/bin/env python
"""
shell.py
Amit Snyderman, 2009
<amit@amitsnyderman.com>
Updated by Paul Malmsten, 2010
pmalmsten@gmail.com
Provides a simple shell for testing XBee devices. Currently, the shell
only allows one to parse and print received data; sending is not
supported.
"""
# $Id: xbee-serial-terminal.py 7 2009-12-30 16:25:08Z amitsnyderman $
import sys, time, cmd, serial, binascii
from xbee import XBee1
class XBeeShell(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = "xbee% "
self.serial = serial.Serial()
def default(self, p):
if not self.serial.isOpen():
print "You must set a serial port first."
else:
if p == '+++':
self.serial.write(p)
time.sleep(2)
else:
self.serial.write('%s\r' % p)
time.sleep(0.5)
output = ''
while self.serial.inWaiting():
output += self.serial.read()
print output.replace('\r', '\n').rstrip()
def do_serial(self, p):
"""Set the serial port, e.g.: /dev/tty.usbserial-A4001ib8"""
try:
self.serial.port = p
self.serial.open()
print 'Opening serial port: %s' % p
except Exception, e:
print 'Unable to open serial port: %s' % p
def do_baudrate(self, p):
"""Set the serial port's baud rate, e.g.: 19200"""
self.serial.baudrate = p
def do_watch(self, p):
if not self.serial.isOpen():
print "You must set a serial port first."
else:
while 1:
xbee = XBee1(self.serial)
packet = xbee.wait_read_frame()
print packet
def do_exit(self, p):
"""Exits from the XBee serial console"""
self.serial.close()
return 1
# Start the interactive shell; exits via the 'exit' command or EOF.
if __name__ == '__main__':
    shell = XBeeShell()
    shell.cmdloop()
| Python |
from distutils.core import setup
# Packages included in the distribution; the same list doubles as the
# 'provides' metadata below.
packages = [
    'xbee',
    'xbee.tests',
    'xbee.helpers',
    'xbee.helpers.dispatch',
    'xbee.helpers.dispatch.tests',
]

setup(
    name='XBee',
    version='2.0.0',
    author='Paul Malmsten',
    author_email='pmalmsten@gmail.com',
    packages=packages,
    scripts=[],
    url='http://code.google.com/p/python-xbee/',
    license='LICENSE.txt',
    description='Python tools for working with XBee radios',
    # README.txt must sit next to setup.py at sdist/install time.
    long_description=open('README.txt').read(),
    requires=['serial'],
    provides=packages,
)
| Python |
from naoqi import *
import time
class RecordWavModule(ALModule):
    """
    NAOqi module that records a wav file on the robot through
    ALAudioRecorder.
    """

    def __init__(self, strName, IP, frequency, activeChannels, pathToNao):
        ALModule.__init__(self, strName);
        self.Ip = IP
        self.ALAudioRecProxy = ALProxy("ALAudioRecorder", IP, 9559)
        # Four booleans: [left, right, front, rear] microphone enables.
        self.channels = activeChannels
        # Destination path on the robot's filesystem.
        self.pathToNaoAudio = pathToNao
        self.frequency = frequency

    def initChannels(self):
        # NOTE(review): never called; it would also append to the
        # caller-supplied activeChannels list instead of building a
        # fresh one.
        self.channels.append(False)  # Left
        self.channels.append(False)  # Right
        self.channels.append(True)   # Front
        self.channels.append(False)  # Rear

    def recordWav(self, recordTime=1):
        """Record `recordTime` seconds of audio to self.pathToNaoAudio."""
        # print "recordWav, path ", self.pathToNaoAudio
        self.ALAudioRecProxy.startMicrophonesRecording(self.pathToNaoAudio, "wav", self.frequency, self.channels);
        time.sleep(recordTime)
        self.ALAudioRecProxy.stopMicrophonesRecording()
if __name__ == '__main__':
    IP = "10.100.52.139"
    # Creation of a new Python Broker
    pythonBroker = ALBroker("pythonBroker", "0.0.0.0", 9999, IP, 9559)
    # The constructor takes (name, IP, frequency, activeChannels,
    # pathToNao); the previous two-argument call raised a TypeError
    # before anything was recorded. Record the front microphone only,
    # at 48 kHz, to a file on the robot.
    recordModule = RecordWavModule("MySoundProcessingModule", IP, 48000,
                                   [False, False, True, False],
                                   "/home/nao/record_demo.wav")
    recordModule.recordWav(5)
    pythonBroker.shutdown()
| Python |
import sys
from naoqi import ALBroker
from commandLineHandler import CommandLineHandler
from recordWav import RecordWavModule
from audioSoundProcessing import SoundProcessingModule
from controlNao import ControlNaoModule
from commandParser import CommandParser
from aaltoASRInterface import AaltoASRInterface
import ConfigParser
# Load all settings; the path is relative to the working directory the
# script is launched from.
config = ConfigParser.ConfigParser()
config.read('../configuration files/recognition_config.ini')
# These constants could be later read from config file
NAO_PASSWORD = config.get("NAO", "NAO_PASSWORD")  # ssh password used by scp
PATH_TO_NAO_RECORDS = config.get("NAO", "PATH_TO_NAO_RECORDS")
AUDIO_FILENAME = config.get("NAO", "AUDIO_FILENAME")
CONVERTED_AUDIO_FILE = config.get("PC", "CONVERTED_AUDIO_FILE")
PATH_TO_COMPUTER_AUDIO = config.get("PC", "PATH_TO_COMPUTER_AUDIO")
PATH_TO_COMMAND_CONFIG = config.get("PC", "PATH_TO_COMMAND_CONFIG")
PATH_TO_AALTOASR_DECODER = config.get("PC", "PATH_TO_AALTOASR_DECODER")
COMMAND_TIME = config.getint("AUDIO", "COMMAND_TIME")  # recording time for command
AUDIO_FREQUENCY = config.getint("AUDIO", "AUDIO_FREQUENCY")
# Whitespace-separated "True"/"False" flags, one per microphone channel.
ACTIVE_CHANNELS = [item=="True" for item in config.get("AUDIO", "ACTIVE_CHANNELS").split()]
THRESHOLD_FOR_RMS = config.getint("AUDIO", "THRESHOLD_FOR_RMS")
CONFIDENCE_THRESHOLD = config.getint("AALTO_ASR", "CONFIDENCE_THRESHOLD")
def createModules():
    """
    Instantiate the NAOqi helper modules and supporting objects as
    module-level globals. Relies on the module-level IP set in
    __main__ before this is called.
    """
    global soundProcessor
    global naoController
    global recorder
    global commandLineHandler
    global commandParser
    global aaltoASRInterface
    soundProcessor = SoundProcessingModule("soundProcessor", IP, THRESHOLD_FOR_RMS)
    naoController = ControlNaoModule("naoController", IP)
    recorder = RecordWavModule("recorder", IP, AUDIO_FREQUENCY, ACTIVE_CHANNELS, PATH_TO_NAO_RECORDS+AUDIO_FILENAME)
    commandLineHandler = CommandLineHandler(IP, NAO_PASSWORD)
    commandParser = CommandParser(CONFIDENCE_THRESHOLD)
    # Load the spoken-command -> Nao-function mapping from disk.
    commandParser.readCommandConfig(PATH_TO_COMMAND_CONFIG)
    # print("Commands " + str(commandParser.commands))
    # Spawns the AaltoASR decoder subprocess (slow: ~4 s init).
    aaltoASRInterface = AaltoASRInterface(PATH_TO_AALTOASR_DECODER)
def startRecognition():
    """
    Main recognition loop: wait for a loud sound, record a command on
    the robot, transfer and resample the file, run AaltoASR on it, and
    execute the parsed command on Nao. Returns on keyboard interrupt.
    """
    print "Recognition started!"
    while(True):
        print "Ready for next command!"
        # Wait for loud sound before starting to record
        if not soundProcessor.startProcessing():  # will return false in case of keyboard interrupt aka ctrl+c
            return
        # Nao blinks as a signal for starting to record command
        naoController.blinkEyes(0.5)
        # Record .wav file on NAO
        print "Starting record"
        recorder.recordWav(COMMAND_TIME)
        # Transfer .wav file to computer over scp
        if not commandLineHandler.transfer_file(PATH_TO_NAO_RECORDS + AUDIO_FILENAME, PATH_TO_COMPUTER_AUDIO):
            print "ERROR WHILE TRANSFERRING FILE TO PC"
            continue
        # Downmix/resample to 16 kHz mono for the recogniser.
        if not commandLineHandler.convert_file_to_16kHz(PATH_TO_COMPUTER_AUDIO + "/" + AUDIO_FILENAME, PATH_TO_COMPUTER_AUDIO + "/" + CONVERTED_AUDIO_FILE):
            continue
        # Get recognition result from AaltoASR
        resultFromAaltoASR = aaltoASRInterface.analyzeFile(PATH_TO_COMPUTER_AUDIO + "/" + CONVERTED_AUDIO_FILE)
        # Parse result from AaltoASR into (function, params) or False
        command = commandParser.parseStringFromAaltoASR(resultFromAaltoASR)
        # Send command to Nao
        if(command):
            naoController.executeControl(command[0], command[1])
        else:
            print("Parser did not find corresponding Nao command!")
            naoController.blinkEyesErrorResponse()
if __name__ == '__main__':
#IP= "10.100.45.87"
if len(sys.argv) < 2:
print "Usage <robot ip>"
else:
IP = sys.argv[1]
try:
# Create new Python Broker
pythonBroker = ALBroker("pythonBroker","0.0.0.0",9999, IP,9559)
# Create needed modules
createModules()
# Ready start recognition, let's go!
startRecognition()
# Recognition stopped, shutdown AaltoASR interface
aaltoASRInterface.closeInterface()
pythonBroker.shutdown()
except Exception as e:
print "Error occured ", e
pythonBroker.shutdown()
'''
Time requirements:
~ 4s for initialization
~ 1s for transferring audio file to PC (48 kHz 2 seconds .wav-file)
~ 3.5s for getting result from AaltoASR (2 seconds audio file)
'''
| Python |
import subprocess
class CommandLineHandler(object):
    """
    Thin wrapper around shell tools: sshpass/scp to fetch audio files
    from the robot, and sox to resample them on the PC.
    """

    def __init__(self, IP, password):
        # Robot address and SSH password used by the scp transfer.
        self.IP = IP
        self.password = password

    def transfer_file(self, original_filepath, new_filepath):
        """Copy a file from the robot via scp; True on success."""
        remote = "nao@{ip}:{path}".format(ip=self.IP, path=original_filepath)
        cmd = ["sshpass", "-p", self.password, "scp", remote, new_filepath]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print("Something went horribly wrong!")
            return False
        return True

    def convert_file_to_16kHz(self, file_path, converted_file_path):
        """Downmix to mono and resample to 16 kHz with sox; True on success."""
        cmd = ["sox", file_path, "-c", "1", "-r", "16000", converted_file_path]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            print("Could not convert audio to 16kHz")
            return False
        return True
if __name__ == "__main__":
CLH = CommandLineHandler("192.168.1.33", "nao")
#CLH.transfer_file("/home/nao/speed_test.wav", "/home/pussinenkumara")
CLH.convert_file_to_16kHz("/home/jerry/audio_files/audio_test.wav", "/home/jerry/audio_files/audio_converted.wav")
| Python |
import sys
import ConfigParser
class CommParser(ConfigParser.ConfigParser):
    """
    ConfigParser specialised for the command configuration file: the
    [COMMANDS] section is exposed as a dict mapping each spoken command
    to a (function_name, parameter_list) pair.
    """

    def as_dict(self):
        """Return the [COMMANDS] section as {command: (func, params)}."""
        sections = dict(self._sections)
        for name in sections:
            # Merge defaults in and drop the internal '__name__' entry.
            merged = dict(self._defaults, **sections[name])
            merged.pop('__name__', None)
            sections[name] = merged
        commands = sections["COMMANDS"]
        for command, raw in commands.items():
            # First token is the controlNao function name, the rest are
            # its arguments (possibly none).
            parts = raw.split()
            commands[command] = (parts[0], parts[1:])
        return commands
class CommandParser():
    """
    Translates AaltoASR recognition output into Nao control commands,
    as configured by an ini file (see readCommandConfig).
    """

    def __init__(self, confidenceThreshold=10):
        # Recognition results whose CONFIDENCE falls below this value
        # are rejected.
        self.confidenceThreshold = confidenceThreshold
        # {finnish_command: (function_name, [params])}, populated by
        # readCommandConfig().
        self.commands = {}

    def readCommandConfig(self, fileName):
        '''
        Reads command configuration from ini file.
        Stores the information to a dictionary.
        Returns True and also stores the dictionary as a class
        attribute; in error situation returns False
        '''
        # Configuration will be read into a dictionary:
        # KEY is the finnish command
        # VALUE is a pair containing corresponding function name in
        # controlNao.py and its parameters
        configuration = {}
        try:
            comm_parser = CommParser()
            comm_parser.read(fileName)
            configuration = comm_parser.as_dict()
            self.commands = configuration
            return True
        except IOError:
            print ("Error with config file " + fileName)
            return False

    def parseStringFromAaltoASR(self, stringFromAaltoASR):
        '''
        Parses the AaltoASR recognition result.
        Returns a pair which contains <function_name> and
        <list_of_function_params>.
        See controlNao.py for available functions.
        If the command is not found, or the recognition confidence is
        too low, the function will return False.
        stringFromAaltoASR looks something like this:
        RESULT: <s> istu </s>
        CONFIDENCE: 772.157
        '''
        stringParts = stringFromAaltoASR.split(':')
        # Error handling: need at least a label and a payload
        if len(stringParts) < 2:
            return False
        commandIdx = 0
        for idx in range(len(stringParts)):
            if "RESULT" in stringParts[idx]:
                commandIdx = idx + 1
        if commandIdx == 0:
            # Some error in stringFromAaltoASR because "RESULT" not found
            return False
        # The CONFIDENCE value is in the part after the recognised text.
        confidence = float(stringParts[commandIdx + 1].strip())
        # BUGFIX: reject LOW-confidence results. The original comparison
        # was inverted (confidence > threshold -> reject), which threw
        # away every confidently-recognised command; the module's own
        # __main__ demo crashed because of it.
        if confidence < self.confidenceThreshold:
            return False
        # remove unwanted part
        commandToParse = stringParts[commandIdx].replace('CONFIDENCE', '')
        # parse actual command
        finnishCommand = self.parseFinnishCommand(commandToParse)
        if finnishCommand:
            return self.commands[finnishCommand]
        else:
            return False

    def parseFinnishCommand(self, commandToParse):
        """Return the first known command word in the text, or ''."""
        # remove <s> and </s> sentence tags
        commandToParse = commandToParse.replace('<s>', '')
        commandToParse = commandToParse.replace('</s>', '')
        for word in commandToParse.split():
            if word in self.commands:
                return word
        return ""  # NOT FOUND
if __name__ == '__main__':
    # Main for unit testing
    if len(sys.argv) < 2:
        print ("Usage: <path to config file>")
    else:
        fileName = sys.argv[1]
        # The original built a throwaway CommandParser before this one;
        # a single instance suffices.
        parser = CommandParser()
        returnValue = parser.readCommandConfig(fileName)
        print("PARSER COMMANDS: ")
        print(parser.commands)
        print ("Parser returned after reading " + fileName + ":\n" + str(returnValue))
        print("---------\nTest command parsing")
        stringFromAaltoASR = "RESULT: <s> istu </s> <s> seiso </s>\nCONFIDENCE: 879.144043\n"
        print("AaltoASR string to parse: " + stringFromAaltoASR)
        parsed = parser.parseStringFromAaltoASR(stringFromAaltoASR)
        print("command to send to Nao: " + str(parsed))
        print("len of command params: " + str(len(parsed[1])))
        print ("first command param: " + parsed[1][0])
| Python |
# -*- encoding: UTF-8 -*-
"""
This script gets the signal from the front microphone of Nao and calculates the rms power on it
It requires numpy
"""
from naoqi import *
import time
import numpy as np
class SoundProcessingModule(ALModule):
    """
    NAOqi module that streams the front microphone and blocks until the
    RMS level exceeds a threshold; used as a loudness trigger before
    recording a spoken command.
    """

    def __init__(self, strName, robotIP, threshold):
        # self.pythonBroker = ALBroker("pythonBrokerSoundProcessing","0.0.0.0",9999, robotIP,9559)
        ALModule.__init__(self, strName);
        self.robotIP = robotIP
        # RMS level (dB) above which processing is considered done.
        self.thresholdForRMS = threshold
        # Expose processRemote() as the remote audio callback.
        self.BIND_PYTHON(strName, "processRemote")
        self.ALAudioDevice = ALProxy("ALAudioDevice", robotIP, 9559)
        # Set True by processRemote() once a loud-enough buffer arrives.
        self.isProcessingDone = False
        self.micFront = []
        # for writing raw data to file
        # self.outputFile = open("output_testi.txt", "wb")

    def closeBroker(self):
        # NOTE(review): self.pythonBroker is never assigned (the line
        # in __init__ is commented out), so calling this raises
        # AttributeError.
        self.pythonBroker.shutdown()

    def startProcessing(self):
        """
        Subscribe to the front microphone and block until a loud sound
        is detected. Returns True on success, False on interrupt/error.
        """
        # ask for the front microphone signal sampled at 16kHz
        # if you want the 4 channels call setClientPreferences(self.getName(), 48000, 0, 0)
        self.isProcessingDone = False
        try:
            self.ALAudioDevice.setClientPreferences(self.getName(), 16000, 3, 0)
            self.ALAudioDevice.subscribe(self.getName())
            # Poll until the audio callback flips the flag.
            while self.isProcessingDone == False:
                time.sleep(0.1)
            self.ALAudioDevice.unsubscribe(self.getName())
            return True
        except KeyboardInterrupt:
            print "Interrupted by user, shutting down"
            return False
        except Exception as e:
            print "Audio sound processing error ", e
            return False
        # close output file (unreachable; kept from the original)
        # self.outputFile.close()

    def processRemote(self, nbOfChannels, nbOfSamplesByChannel, timeStamp, inputBuffer):
        # Audio callback: convert the raw buffer and flag completion
        # once the front-mic RMS exceeds the threshold.
        self.micFront = self.convertStr2SignedInt(inputBuffer)
        # compute the rms level on front mic
        rmsMicFront = self.calcRMSLevel(self.micFront)
        # print "rms level mic front = " + str(rmsMicFront)
        if rmsMicFront > self.thresholdForRMS:
            self.isProcessingDone = True

    def calcRMSLevel(self, data):
        # RMS in dB: 20*log10(sqrt(mean(x^2))).
        rms = 20 * np.log10( np.sqrt( np.sum( np.power(data,2) / len(data) )))
        return rms

    def convertStr2SignedInt(self, data):
        """This function takes a string containing 16 bits little endian sound samples as input and returns a vector containing the 16 bits sound samples values converted between -1 and 1"""
        signedData = []
        ind = 0;
        # Combine byte pairs (low byte first) into unsigned 16-bit ints.
        for i in range(0, len(data)/2):
            signedData.append(ord(data[ind])+ord(data[ind+1])*256)
            ind = ind+2
        # Re-interpret values >= 2^15 as negative (two's complement).
        for i in range(0, len(signedData)):
            if signedData[i] >= 32768:
                signedData[i] = signedData[i]-65536
        # Normalise to [-1, 1).
        for i in range(0, len(signedData)):
            signedData[i] = signedData[i]/32768.0
        return signedData
if __name__ == '__main__':
    IP = "10.100.45.87"
    # Creation of a new Python Broker
    pythonBroker = ALBroker("pythonBroker", "0.0.0.0", 9999, IP, 9559)
    # -20 dB RMS threshold on the front microphone.
    MySoundProcessingModule = SoundProcessingModule("MySoundProcessingModule", IP, -20)
    MySoundProcessingModule.startProcessing()
    pythonBroker.shutdown()
| Python |
import sys
import time
from naoqi import ALProxy
from naoqi import ALModule
from naoqi import ALBroker
import motion
class ControlNaoModule(ALModule):
    '''
    Class for sending control commands to NAO
    '''

    def __init__(self, moduleName, robotIP):
        ALModule.__init__(self, moduleName)
        # Init proxies, there should be more of these if we want to do
        # more than just go to posture
        self.postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
        self.motionProxy = ALProxy("ALMotion", robotIP, 9559)
        self.ledProxy = ALProxy("ALLeds", robotIP, 9559)
        self.textToSpeechProxy = ALProxy("ALTextToSpeech", robotIP, 9559)

    def executeControl(self, functionName, functionParams):
        '''
        Method for sending the actual control command to NAO
        @param functionName name of the function to be called
        @param functionParams list of parameters to be passed for the function
        @return value indicating whether the control was successful
        (NOTE: implicitly returns None for an unknown functionName)
        '''
        if(functionName == "postureCommand"):
            return self.postureCommand(functionParams)
        elif(functionName == "sayHello"):
            return self.sayHello()
        elif(functionName == "sayOwnName"):
            return self.sayOwnName()
        elif(functionName == "ledShow"):
            return self.ledShow()

    def postureCommand(self, params):
        # Go to the named posture at full speed, then relax the motors.
        if len(params) != 1:
            print "postureCommand, bad arguments!"
            return False
        successfulPosture = self.postureProxy.goToPosture(params[0], 1.0)
        self.motionProxy.rest();
        return successfulPosture

    def blinkEyesErrorResponse(self):
        # Flash the face LEDs red briefly to signal an error.
        self.ledProxy.fadeRGB("FaceLeds", 1, 0, 0, 0.1)
        time.sleep(0.5)
        self.ledProxy.fadeRGB("FaceLeds", 1, 1, 1, 0.1)

    def blinkEyes(self, waitBeforeBlink=0):
        # Optionally wait, then blink the face LEDs once (0.2 s off).
        if waitBeforeBlink:
            time.sleep(waitBeforeBlink)
        self.ledProxy.off("FaceLeds")
        time.sleep(0.2)
        self.ledProxy.on("FaceLeds")

    def ledShow(self, duration=5):
        # Run the built-in 'rasta' LED animation for `duration` seconds.
        self.ledProxy.rasta(duration)

    def sayHello(self):
        # Have to speak English because no Finnish language package
        # currently installed in Nao
        self.textToSpeechProxy.say("Hello")
        return True

    def sayOwnName(self):
        self.textToSpeechProxy.say("My favourite movie is Terminator")
        return True
if __name__ == "__main__":
'''
Main for testing the usage from command line
'''
if len(sys.argv) < 4:
print "Usage <robot ip> <command> <proxyName>"
else:
robotIp = sys.argv[1]
command = sys.argv[2]
proxyName = sys.argv[3]
try:
pythonBroker = ALBroker("pythonBroker","0.0.0.0",9999, robotIp,9559)
controller = ControlNaoModule("NaoController", robotIp)
#controller.blinkEyesErrorResponse()
#controller.randomEyes(3)
#controller.sayHello()
#controller.sayOwnName()
controller.executeControl("postureCommand", ["StandZero"])
'''
succeedPosture = controller.executeControl(command, proxyName)
print "Control execution successfull?", succeedPosture
pythonBroker.shutdown()
'''
except Exception, e:
print "Error occured", e
| Python |
import subprocess
import time
class AaltoASRInterface(object):
    """
    This module is used to interface with the AaltoASR recognition library
    through terminal commands (a line-based pipe protocol with a C++
    decoder subprocess).
    """

    def __init__(self, exe_path="./decode-stream-wav"):
        """
        This is given a path to the location of the C++ executable that in turn
        uses the necessary AaltoASR functions. Blocks until the decoder
        prints a line containing "INIT DONE".
        """
        temp = ""
        self.proc = subprocess.Popen(exe_path,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE)
        # Wait for init done
        while "INIT DONE" not in temp:
            temp = self.proc.stdout.readline()

    def analyzeFile(self, file_path):
        """
        This analyzes the given audio file and returns the results of the
        analysis: all decoder output up to and including the CONFIDENCE
        line.
        """
        temp = ""
        returnThis = ""
        self.proc.stdin.write(file_path + "\n")
        while not "CONFIDENCE" in temp:
            temp = self.proc.stdout.readline()
            returnThis += temp
            print temp
        return returnThis

    def closeInterface(self):
        """
        This tells the C++ executable to end.
        """
        # NOTE(review): no trailing newline is written; if the decoder
        # reads its commands line-by-line, "quit" may never be
        # delivered -- confirm against the decoder's input handling.
        self.proc.stdin.write("quit")
| Python |
# -*- encoding: UTF-8 -*-
"""
This script gets the signal from the front microphone of Nao and calculates the rms power on it
It requires numpy
"""
from naoqi import *
import time
import numpy as np
class SoundProcessingModule(ALModule):
    """
    Demo NAOqi module that prints the RMS level of the front microphone
    for a fixed number of audio buffers, then stops.
    """

    def __init__(self, strName):
        ALModule.__init__(self, strName);
        # Expose processRemote() as the remote audio callback.
        self.BIND_PYTHON(strName, "processRemote")
        # NOTE(review): relies on the module-level global IP being set
        # before construction (see __main__), unlike the sibling module
        # that takes robotIP as a parameter.
        self.ALAudioDevice = ALProxy("ALAudioDevice", IP, 9559)
        self.isProcessingDone = False
        # Stop after this many audio callbacks.
        self.nbOfFramesToProcess = 20
        self.framesCount = 0
        self.micFront = []

    def startProcessing(self):
        # ask for the front microphone signal sampled at 16kHz
        # if you want the 4 channels call setClientPreferences(self.getName(), 48000, 0, 0)
        self.ALAudioDevice.setClientPreferences(self.getName(), 16000, 3, 0)
        self.ALAudioDevice.subscribe(self.getName())
        # Poll until processRemote() has handled enough buffers.
        while self.isProcessingDone == False:
            time.sleep(1)
        self.ALAudioDevice.unsubscribe(self.getName())

    def processRemote(self, nbOfChannels, nbOfSamplesByChannel, timeStamp, inputBuffer):
        # Audio callback: print the RMS of the first N buffers, then
        # signal completion.
        self.framesCount = self.framesCount + 1;
        if (self.framesCount <= self.nbOfFramesToProcess):
            # convert inputBuffer to signed integer as it is interpreted as a string by python
            self.micFront = self.convertStr2SignedInt(inputBuffer)
            # compute the rms level on front mic
            rmsMicFront = self.calcRMSLevel(self.micFront)
            print "rms level mic front = " + str(rmsMicFront)
        else:
            self.isProcessingDone = True

    def calcRMSLevel(self, data):
        # RMS in dB: 20*log10(sqrt(mean(x^2))).
        rms = 20 * np.log10( np.sqrt( np.sum( np.power(data,2) / len(data) )))
        return rms

    def convertStr2SignedInt(self, data):
        """This function takes a string containing 16 bits little endian sound samples as input and returns a vector containing the 16 bits sound samples values converted between -1 and 1"""
        signedData = []
        ind = 0;
        # Combine byte pairs (low byte first) into unsigned 16-bit ints.
        for i in range(0, len(data)/2):
            signedData.append(ord(data[ind])+ord(data[ind+1])*256)
            ind = ind+2
        # Re-interpret values >= 2^15 as negative (two's complement).
        for i in range(0, len(signedData)):
            if signedData[i] >= 32768:
                signedData[i] = signedData[i]-65536
        # Normalise to [-1, 1).
        for i in range(0, len(signedData)):
            signedData[i] = signedData[i]/32768.0
        return signedData
if __name__ == '__main__':
    # The module-level IP is read by SoundProcessingModule.__init__.
    IP = "10.100.44.229"
    # Creation of a new Python Broker
    pythonBroker = ALBroker("pythonBroker", "0.0.0.0", 9999, IP, 9559)
    MySoundProcessingModule = SoundProcessingModule("MySoundProcessingModule")
    MySoundProcessingModule.startProcessing()
    pythonBroker.shutdown()
| Python |
# -*- encoding: UTF-8 -*-
""" Say 'hello, you' each time a human face is detected
"""
import sys
import time
from naoqi import ALProxy
from naoqi import ALBroker
from naoqi import ALModule
from optparse import OptionParser
NAO_IP = "nao.local"
# Global variable to store the HumanGreeter module instance
HumanGreeter = None
memory = None
class HumanGreeterModule(ALModule):
    """ A simple module able to react
    to facedetection events
    """

    def __init__(self, name):
        ALModule.__init__(self, name)
        # No need for IP and port here because
        # we have our Python broker connected to NAOqi broker
        # Create a proxy to ALTextToSpeech for later use
        self.tts = ALProxy("ALTextToSpeech")
        # Subscribe to the FaceDetected event:
        # NOTE(review): the subscriber name "HumanGreeter" must match
        # the module-level variable that holds this instance (see the
        # warning in main()).
        global memory
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("FaceDetected",
            "HumanGreeter",
            "onFaceDetected")

    def onFaceDetected(self, *_args):
        """ This will be called each time a face is
        detected.
        """
        # Unsubscribe to the event when talking,
        # to avoid repetitions
        memory.unsubscribeToEvent("FaceDetected",
            "HumanGreeter")
        self.tts.say("Hello, you")
        # Subscribe again to the event
        memory.subscribeToEvent("FaceDetected",
            "HumanGreeter",
            "onFaceDetected")
def main():
""" Main entry point
"""
parser = OptionParser()
parser.add_option("--pip",
help="Parent broker port. The IP address or your robot",
dest="pip")
parser.add_option("--pport",
help="Parent broker port. The port NAOqi is listening to",
dest="pport",
type="int")
parser.set_defaults(
pip=NAO_IP,
pport=9559)
(opts, args_) = parser.parse_args()
pip = opts.pip
pport = opts.pport
# We need this broker to be able to construct
# NAOqi modules and subscribe to other modules
# The broker must stay alive until the program exists
myBroker = ALBroker("myBroker",
"0.0.0.0", # listen to anyone
0, # find a free port and use it
pip, # parent broker IP
pport) # parent broker port
# Warning: HumanGreeter must be a global variable
# The name given to the constructor must be the name of the
# variable
global HumanGreeter
HumanGreeter = HumanGreeterModule("HumanGreeter")
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print
print "Interrupted by user, shutting down"
myBroker.shutdown()
sys.exit(0)
if __name__ == "__main__":
main()
| Python |
from naoqi import *
import time
class RecordWavModule(ALModule):
    """NAOqi module that records a short WAV clip from the robot's microphones.

    By default only the front microphone channel is enabled.
    """

    def __init__(self, strName, IP):
        ALModule.__init__(self, strName)
        self.Ip = IP
        # Proxy to the on-robot audio recorder (NAOqi default port 9559).
        self.ALAudioRecProxy = ALProxy("ALAudioRecorder", IP, 9559)
        self.channels = []
        self.initChannels()

    def initChannels(self):
        """Enable only the front microphone channel."""
        self.channels.append(False)  # Left
        self.channels.append(False)  # Right
        self.channels.append(True)   # Front
        self.channels.append(False)  # Rear

    def recordWav(self, recordTime=1,
                  path="/home/nao/istu_seiso_48kHz_1kanava2.wav",
                  sample_rate=48000):
        """Record ``recordTime`` seconds of audio to ``path`` on the robot.

        The destination file and sample rate were previously hard-coded;
        they are now parameters whose defaults preserve the old behavior.
        """
        self.ALAudioRecProxy.startMicrophonesRecording(
            path, "wav", sample_rate, self.channels)
        time.sleep(recordTime)
        self.ALAudioRecProxy.stopMicrophonesRecording()
if __name__ == '__main__':
    # Standalone test: connect to the robot at the hard-coded address,
    # record five seconds of audio, then shut the broker down.
    IP= "10.100.52.139"
    # Creation of a new Python Broker
    pythonBroker = ALBroker("pythonBroker","0.0.0.0",9999, IP,9559)
    recordModule = RecordWavModule("MySoundProcessingModule", IP)
    recordModule.recordWav(5)
    pythonBroker.shutdown()
| Python |
# Run the decoder over the opened LNA file and read back the best
# language-model history as the recognition result.
# (Original comment, Finnish: "Python code that does the recognition itself.")
toolbox.set_generate_word_graph(0)
toolbox.set_keep_state_segmentation(0)
toolbox.lna_open(lna_path, 1024)
toolbox.reset(0)
toolbox.set_end(-1)
# Decode frame by frame until the toolbox reports completion.
while toolbox.run():
    pass
# The decoder requires the history file to be opened for writing first
# and then re-opened for reading, or the file will not be written.
with open(lmh_path, 'w') as lmh_file:
    toolbox.print_best_lm_history_to_file(lmh_file)
with open(lmh_path, 'r') as lmh_file:
    recognition = lmh_file.read()
# Strip the sentence-boundary tokens and surrounding whitespace.
recognition = recognition.replace('<s>', '')
recognition = recognition.replace('</s>', '')
recognition = recognition.strip()
| Python |
#!/usr/bin/python
import time
import string
import sys
import os
import re
# Set your decoder swig path in here!
sys.path.append("/home/jerry/AaltoASR/build/decoder/src/swig");
import Decoder
def runto(frame):
    """Advance the decoder until it reaches `frame`, or to the end when
    `frame` <= 0, stopping early if the decoder reports completion."""
    unbounded = frame <= 0
    while unbounded or t.frame() < frame:
        if not t.run():
            break
def rec(start, end):
    """Recognize frames [start, end) and report CPU time spent.

    Prints the combined user+system CPU time and a real-time factor.
    """
    st = os.times()
    t.reset(start)
    t.set_end(end)
    runto(0)
    et = os.times()
    duration = et[0] + et[1] - st[0] - st[1]  # User + system time
    frames = t.frame() - start
    # Fixed defect: the original divided by `frames` unconditionally and
    # raised ZeroDivisionError when no frames were decoded.
    if frames > 0:
        # 125 here presumably converts frames to seconds of audio (8 ms
        # frame shift) -- TODO confirm against the decoder configuration.
        sys.stdout.write('DUR: %.2fs (Real-time factor: %.2f)\n' %
                         (duration, duration * 125 / frames))
    else:
        sys.stdout.write('DUR: %.2fs (no frames decoded)\n' % duration)
##################################################
# Initialize
#
# Paths to the Aalto ASR acoustic models, lexicon and language model.
akupath = "/home/jerry/AaltoASR/build/aku"
akumodel = "mallit/speecon_all_multicondition_mmi_kld0.002_6"
hmms = akumodel+".ph"          # HMM definitions
dur = akumodel+".dur"          # duration model
temppath = "."
lexicon = "mallit/sanakirja"   # pronunciation dictionary
ngram = "mallit/kielimalli"    # n-gram language model
lm_scale = 28                  # language-model weight
global_beam = 250              # decoder pruning beam
##################################################
##################################################
# Generate LNA file for recognition
#
# Write a one-line recipe mapping the input audio (argv[1]) to temp.lna,
# then run phone_probs to produce the phoneme-probability (LNA) file.
f=open(temppath+"/temp.recipe", 'w')
f.write("audio="+sys.argv[1]+" lna="+temppath+"/temp.lna\n")
f.close()
sys.stderr.write("Generating LNA\n")
os.system(akupath + "/phone_probs -b "+akumodel+" -c "+akumodel+".cfg -r "+temppath+"/temp.recipe -C "+akumodel+".gcl --eval-ming 0.1")
##################################################
# Recognize
#
sys.stderr.write("loading models\n")
t = Decoder.Toolbox(0, hmms, dur)
# Decoder configuration flags (see the Aalto ASR decoder documentation).
t.set_silence_is_word(0)
t.set_optional_short_silence(1)
t.set_cross_word_triphones(1)
t.set_require_sentence_end(1)
t.set_verbose(1)
t.set_print_text_result(1)
#t.set_print_state_segmentation(1)
t.set_lm_lookahead(0)
sys.stderr.write("loading lexicon\n")
try:
    t.lex_read(lexicon)
except:
    # NOTE(review): bare except hides the real error; only the phone that
    # failed to load is reported before exiting.
    print("phone:", t.lex_phone())
    sys.exit(-1)
t.set_sentence_boundary("<s>", "</s>")
sys.stderr.write("loading ngram\n")
t.ngram_read(ngram, 0)
# Word-end beam is conventionally a fraction of the global beam.
word_end_beam = int(2*global_beam/3);
trans_scale = 1
dur_scale = 3
t.set_global_beam(global_beam)
t.set_word_end_beam(word_end_beam)
t.set_token_limit(30000)
t.set_prune_similar(3)
t.set_print_probs(0)
t.set_print_indices(0)
t.set_print_frames(0)
t.set_duration_scale(dur_scale)
t.set_transition_scale(trans_scale)
t.set_lm_scale(lm_scale)
# Echo the effective decoding parameters.
print("BEAM: %.1f" % global_beam)
print("WORD_END_BEAM: %.1f" % word_end_beam)
print("LMSCALE: %.1f" % lm_scale)
print("DURSCALE: %.1f" % dur_scale)
t.lna_open(temppath+"/temp.lna", 1024)
sys.stdout.write("REC: ")
# Decode the whole file (start frame 0, no end limit).
rec(0,-1)
| Python |
#!/usr/bin/python
import time
import string
import sys
import os
import re
# Set your decoder swig path in here!
sys.path.append("/home/jerry/AaltoASR/build/decoder/src/swig");
import Decoder
def runto(frame):
    """Advance the decoder until it reaches `frame`, or to the end when
    `frame` <= 0, stopping early if the decoder reports completion."""
    unbounded = frame <= 0
    while unbounded or t.frame() < frame:
        if not t.run():
            break
def rec(start, end):
    """Recognize frames [start, end) and report CPU time spent.

    Prints the combined user+system CPU time and a real-time factor.
    """
    st = os.times()
    t.reset(start)
    t.set_end(end)
    runto(0)
    et = os.times()
    duration = et[0] + et[1] - st[0] - st[1]  # User + system time
    frames = t.frame() - start
    # Fixed defect: the original divided by `frames` unconditionally and
    # raised ZeroDivisionError when no frames were decoded.
    if frames > 0:
        # 125 here presumably converts frames to seconds of audio (8 ms
        # frame shift) -- TODO confirm against the decoder configuration.
        sys.stdout.write('DUR: %.2fs (Real-time factor: %.2f)\n' %
                         (duration, duration * 125 / frames))
    else:
        sys.stdout.write('DUR: %.2fs (no frames decoded)\n' % duration)
##################################################
# Initialize
#
# Paths to the Aalto ASR acoustic models, lexicon and language model.
akupath = "/home/jerry/AaltoASR/build/aku"
akumodel = "mallit/speecon_all_multicondition_mmi_kld0.002_6"
hmms = akumodel+".ph"          # HMM definitions
dur = akumodel+".dur"          # duration model
temppath = "."
lexicon = "mallit/sanakirja"   # pronunciation dictionary
ngram = "mallit/kielimalli"    # n-gram language model
lm_scale = 28                  # language-model weight
global_beam = 250              # decoder pruning beam
##################################################
##################################################
# Generate LNA file for recognition
#
# Write a one-line recipe mapping the input audio (argv[1]) to temp.lna,
# then run phone_probs to produce the phoneme-probability (LNA) file.
f=open(temppath+"/temp.recipe", 'w')
f.write("audio="+sys.argv[1]+" lna="+temppath+"/temp.lna\n")
f.close()
sys.stderr.write("Generating LNA\n")
os.system(akupath + "/phone_probs -b "+akumodel+" -c "+akumodel+".cfg -r "+temppath+"/temp.recipe -C "+akumodel+".gcl --eval-ming 0.1")
##################################################
# Recognize
#
sys.stderr.write("loading models\n")
t = Decoder.Toolbox(0, hmms, dur)
# Decoder configuration flags (see the Aalto ASR decoder documentation).
t.set_silence_is_word(0)
t.set_optional_short_silence(1)
t.set_cross_word_triphones(1)
t.set_require_sentence_end(1)
t.set_verbose(1)
t.set_print_text_result(1)
#t.set_print_state_segmentation(1)
t.set_lm_lookahead(0)
sys.stderr.write("loading lexicon\n")
try:
    t.lex_read(lexicon)
except:
    # NOTE(review): bare except hides the real error; only the phone that
    # failed to load is reported before exiting.
    print("phone:", t.lex_phone())
    sys.exit(-1)
t.set_sentence_boundary("<s>", "</s>")
sys.stderr.write("loading ngram\n")
t.ngram_read(ngram, 0)
# Word-end beam is conventionally a fraction of the global beam.
word_end_beam = int(2*global_beam/3);
trans_scale = 1
dur_scale = 3
t.set_global_beam(global_beam)
t.set_word_end_beam(word_end_beam)
t.set_token_limit(30000)
t.set_prune_similar(3)
t.set_print_probs(0)
t.set_print_indices(0)
t.set_print_frames(0)
t.set_duration_scale(dur_scale)
t.set_transition_scale(trans_scale)
t.set_lm_scale(lm_scale)
# Echo the effective decoding parameters.
print("BEAM: %.1f" % global_beam)
print("WORD_END_BEAM: %.1f" % word_end_beam)
print("LMSCALE: %.1f" % lm_scale)
print("DURSCALE: %.1f" % dur_scale)
t.lna_open(temppath+"/temp.lna", 1024)
sys.stdout.write("REC: ")
# Decode the whole file (start frame 0, no end limit).
rec(0,-1)
| Python |
# -*- coding: UTF-8 -*-
import random
# Module-wide PRNG instance, seeded from the default entropy source.
rand = random.Random()
rand.seed()
def intRandom(a):
    """Return a uniformly random integer in the inclusive range [0, a]."""
    return rand.randint(0,a)
def Clamp(x, minval = None, maxval = None):
    """Clamp `x` into [minval, maxval].

    Either bound may be None, meaning "unbounded on that side".
    Returns x unchanged when it already lies within the bounds.
    """
    # Fixed idiom: compare against None with `is not`, not `!=`
    # (`!=` can misbehave for objects overriding __eq__).
    if minval is not None:
        x = max(minval, x)
    if maxval is not None:
        x = min(maxval, x)
    return x
| Python |
# -*- coding: UTF-8 -*-
class Movable(object):
    """Point-like object with a velocity that decays toward zero over time."""

    def __init__(self, pos = (0, 0), direction = (0,0), delta_dx = 0):
        self.x, self.y = pos
        self.direction = direction    # (dx, dy) velocity components
        self.delta_dx = delta_dx      # per-step decay applied to each component

    def _decay(self, v):
        # Pull one velocity component a single step toward zero, then
        # snap it to zero once it is within one decay step of zero.
        if v < 0:
            v += self.delta_dx
        if v > 0:
            v -= self.delta_dx
        if abs(v) <= self.delta_dx:
            v = 0
        return v

    def move(self, time_passed = 0):
        """Advance the position by velocity * time and apply friction."""
        vx, vy = self.direction
        self.x += vx * time_passed
        self.y += vy * time_passed
        # Normalize speed - everything loses inertia over time.
        self.direction = (self._decay(vx), self._decay(vy))
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import pygame
from tile import *
from pygame.locals import *
from camera import Camera
from guy import Guy
from background import *
# Custom pygame event ids, used with pygame.time.set_timer below.
CLOCK_TICK = 25
SECOND_TICK = 26
# Module-level handle to the running game; assigned in the __main__ block.
Game = None
class FireGame:
    """Top-level pygame application: window setup, main loop, input, drawing."""

    # Level size in pixels; replaced by the background size in __init__.
    size = None

    def __init__(self):
        # Initialize PyGame
        pygame.init()
        pygame.display.set_caption("The Game of Fire.")
        self.video_mode_flags = pygame.DOUBLEBUF|pygame.HWSURFACE#|pygame.FULLSCREEN#|pygame.NOFRAME
        # NOTE(review): assigned on the class, not the instance, so all
        # FireGame objects would share a single screen_size.
        FireGame.screen_size = (640*2, 480*2)
        self.screen = pygame.display.set_mode(self.screen_size, self.video_mode_flags)
        self.camera = Camera(self)
        self.background = Background("bg.png", self.screen_size)
        self.size = self.background.size
        maxx, maxy = self.background.size
        # Constrain the camera so the view never leaves the background.
        self.camera.set_limits(0, maxx - self.screen_size[0], 0, maxy - self.screen_size[1])
        self.framerate = 60
        self.clock = pygame.time.Clock()
        # Emit a SECOND_TICK event every 1000 ms (used as a crude FPS meter).
        self.second_timer = pygame.time.set_timer(SECOND_TICK, 1000)
        self.framenum = 0
        # Keyboard state cache, refreshed once per frame in mainLoop.
        self.key_mods = pygame.key.get_mods()
        self.keys = pygame.key.get_pressed()
        self.guy = Guy((0,self.size[1]/2), self)

    def move(self, time_passed):
        """Advance all game objects by time_passed milliseconds."""
        self.guy.move(time_passed)

    def mainLoop(self):
        """Run the frame loop until the window closes or ESC is pressed."""
        done = False
        while not done:
            # Handle input events
            if self.handle_input():
                return
            self.draw()
            self.key_mods = pygame.key.get_mods()
            self.keys = pygame.key.get_pressed()
            # tick_busy_loop caps the framerate and returns the elapsed ms.
            time_passed = self.clock.tick_busy_loop(self.framerate)
            self.move(time_passed)

    def handle_input(self):
        """Process queued pygame events; return True to quit the game."""
        done = False
        self.guy.handle_input()
        for event in pygame.event.get():
            if event.type == QUIT:
                done = True
            if event.type == CLOCK_TICK:
                pass
            elif event.type == SECOND_TICK:
                # Print frames rendered in the last second, then reset.
                print(self.framenum)
                self.framenum = 0
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    done = True
                elif event.key == K_t:
                    # "This function only works under the unix x11 video driver."
                    # Go to hell, pygame, you could've made it work on Win, too!
                    #pygame.display.toggle_fullscreen()
                    self.video_mode_flags |= pygame.FULLSCREEN
                    self.screen = pygame.display.set_mode(self.screen_size, self.video_mode_flags)
        return done

    def draw(self):
        """Render one frame: background, player, then flip the buffers."""
        self.camera.center(self.guy.x, self.guy.y)
        self.background.draw(self.camera)
        self.guy.draw(self.camera)
        pygame.display.flip()
        self.framenum += 1
if __name__ == '__main__':
    # mainLoop() returns None, so Game holds None after the loop exits.
    Game = FireGame().mainLoop()
    pygame.quit()
| Python |
import pygame
class Background(pygame.Surface):
    """Scrollable background surface built from a scaled image file."""

    def __init__(self, name, size):
        # Stretch the source image to the screen size, then scale it up
        # 5x horizontally / 3x vertically to form the level area.
        self.image = pygame.image.load(name)
        self.image = pygame.transform.scale(self.image, (size[0], size[1])).convert()
        x, y = self.image.get_size()
        self.image = pygame.transform.scale(self.image, (x*5, y*3)).convert()
        self.size = self.image.get_size()
        x, y = self.size
        # NOTE(review): the Surface is allocated twice as wide as the
        # image, but only (0,0) is ever blitted onto it -- confirm intent.
        pygame.Surface.__init__(self, (x*2, y))
        self.x = x
        self.y = y
        self.blit(self.image, (0, 0))

    def draw(self, camera):
        """Blit the whole background through the camera.

        Fixed: removed the unused gx/gy locals the original computed here.
        """
        bg_clip_rect = pygame.rect.Rect((0, 0), self.size)
        camera.blit(self, (0, 0), bg_clip_rect)
| Python |
from utils import *
class Camera(object):
    """Viewport that translates world coordinates into screen coordinates."""

    def __init__(self, game):
        self.x, self.y = 0, 0
        self.screen = game.screen
        self.size = game.screen_size

    def set_limits(self, minx, maxx, miny, maxy):
        """Record the rectangle within which the camera origin may move."""
        self.minx, self.maxx = minx, maxx
        self.miny, self.maxy = miny, maxy

    def center(self, x, y):
        """Center the view on (x, y), clamped to the configured limits."""
        width, height = self.size
        self.x = Clamp(x - width/2, self.minx, self.maxx)
        self.y = Clamp(y - height/2, self.miny, self.maxy)

    def blit(self, image, rect, area = None):
        """Draw `image` at a world-space position, offset by the camera origin."""
        world_x, world_y = rect[0], rect[1]
        self.screen.blit(image, (world_x - self.x, world_y - self.y), area)
| Python |
# -*- coding: UTF-8 -*-
import pygame
from utils import *
from movable import Movable
# NOTE(review): `global` at module scope is a no-op; Game is presumably
# expected to come from the main module -- confirm.
global Game
class BurningTile(pygame.sprite.Sprite, Movable):
    """A single flame particle: a colored square that drifts, cools and dies."""

    size = 20
    # energy level -> (pre-rendered Surface, Color); filled in by
    # precalculateEnergyLevelColors().
    energyColor = {}

    def __init__(self, pos, *args, **kwargs):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([self.size, self.size])
        self.rect = self.image.get_rect()
        self.rect.topleft = pos
        # Track the particle's center rather than its top-left corner.
        self.x = self.rect.topleft[0] + self.rect.w/2
        self.y = self.rect.topleft[1] + self.rect.h/2
        Movable.__init__(self, (self.x, self.y), *args)
        if "energy" in kwargs:
            self.energy = kwargs["energy"]
        else:
            self.energy = intRandom(100) + 200 #300

    def getNeighbours(self):
        """Return the eight positions surrounding this tile.

        Fixed defect: the original read ``self.pos``, which is never set
        (Movable stores the position as ``self.x``/``self.y``), so any
        call raised AttributeError.
        """
        x, y = self.x, self.y
        return [ (x-1,y-1), (x,y-1), (x+1,y-1),
                 (x-1,y),            (x+1,y),
                 (x-1,y+1), (x,y+1), (x+1,y+1), ]

    def move(self, time_passed):
        """Drift with random jitter, lose energy, and die when burnt out."""
        Movable.move(self, time_passed)
        # Random jitter; 0.0625 == 1/16, presumably normalizing the
        # millisecond time step -- TODO confirm.
        self.x -= (intRandom(18) - 9) * time_passed * 0.0625 / 2
        self.y -= (intRandom(20) - 6) * time_passed * 0.0625 / 2
        # Hot particles cool more slowly than cooler ones.
        if self.energy > 200:
            self.energy -= 2 * time_passed * 0.0625
        else:
            self.energy -= 3 * time_passed * 0.0625
        # Occasionally shed an extra random chunk of energy.
        if not intRandom(5):
            self.energy -= intRandom(5)
        self.energy = int(self.energy)
        # Remove the sprite from all groups once fully burnt out.
        if self.energy < -50:
            self.kill()
        return

    def draw(self, camera):
        """Blit the pre-rendered square matching the current energy level."""
        rect = self.image.get_rect()
        rect.topleft = (self.x - self.rect.w / 2, self.y - self.rect.h / 2)
        self.image, self.color = BurningTile.energyColor[Clamp(self.energy, 0, 300)]
        camera.blit(self.image, rect)
        return

    @staticmethod
    def precalculateEnergyLevelColors():
        """Pre-render one (Surface, Color) pair for every energy level 0..300.

        Colors are linearly interpolated between fixed milestone colors at
        the listed energy thresholds (white-hot down to black).
        """
        def computeLine(x0, x1, y0, y1, x):
            # Integer linear interpolation of y between (x0,y0) and (x1,y1).
            if x0 == x1:
                return (y0+y1)//2
            return y0-(x0-x)*(y1-y0)//(x1-x0)
        milestones = [[214, 232, 233],
                      [255, 253, 107],
                      [255, 165, 79],
                      [255,69,0],
                      [238, 64, 0],
                      [0,0,0]]
        energyThresholds=[300,290,240,150,90,0]
        for energy in range(0, 300 +1):
            for i in range(len(energyThresholds)-1):
                if (energyThresholds[i] >= energy >= energyThresholds[i+1]):
                    color = [computeLine(
                        energyThresholds[i],
                        energyThresholds[i+1],
                        milestones[i][j],
                        milestones[i+1][j],
                        energy) for j in range(0,3)]
                    col = pygame.color.Color(*color)
                    im = pygame.Surface([BurningTile.size, BurningTile.size])
                    im.fill(col)
                    BurningTile.energyColor[energy]= (im, col)
                    break
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import pygame
from tile import *
from pygame.locals import *
from camera import Camera
from guy import Guy
from background import *
# Custom pygame event ids, used with pygame.time.set_timer below.
CLOCK_TICK = 25
SECOND_TICK = 26
# Module-level handle to the running game; assigned in the __main__ block.
Game = None
class FireGame:
    """Top-level pygame application: window setup, main loop, input, drawing."""

    # Level size in pixels; replaced by the background size in __init__.
    size = None

    def __init__(self):
        # Initialize PyGame
        pygame.init()
        pygame.display.set_caption("The Game of Fire.")
        self.video_mode_flags = pygame.DOUBLEBUF|pygame.HWSURFACE#|pygame.FULLSCREEN#|pygame.NOFRAME
        # NOTE(review): assigned on the class, not the instance, so all
        # FireGame objects would share a single screen_size.
        FireGame.screen_size = (640*2, 480*2)
        self.screen = pygame.display.set_mode(self.screen_size, self.video_mode_flags)
        self.camera = Camera(self)
        self.background = Background("bg.png", self.screen_size)
        self.size = self.background.size
        maxx, maxy = self.background.size
        # Constrain the camera so the view never leaves the background.
        self.camera.set_limits(0, maxx - self.screen_size[0], 0, maxy - self.screen_size[1])
        self.framerate = 60
        self.clock = pygame.time.Clock()
        # Emit a SECOND_TICK event every 1000 ms (used as a crude FPS meter).
        self.second_timer = pygame.time.set_timer(SECOND_TICK, 1000)
        self.framenum = 0
        # Keyboard state cache, refreshed once per frame in mainLoop.
        self.key_mods = pygame.key.get_mods()
        self.keys = pygame.key.get_pressed()
        self.guy = Guy((0,self.size[1]/2), self)

    def move(self, time_passed):
        """Advance all game objects by time_passed milliseconds."""
        self.guy.move(time_passed)

    def mainLoop(self):
        """Run the frame loop until the window closes or ESC is pressed."""
        done = False
        while not done:
            # Handle input events
            if self.handle_input():
                return
            self.draw()
            self.key_mods = pygame.key.get_mods()
            self.keys = pygame.key.get_pressed()
            # tick_busy_loop caps the framerate and returns the elapsed ms.
            time_passed = self.clock.tick_busy_loop(self.framerate)
            self.move(time_passed)

    def handle_input(self):
        """Process queued pygame events; return True to quit the game."""
        done = False
        self.guy.handle_input()
        for event in pygame.event.get():
            if event.type == QUIT:
                done = True
            if event.type == CLOCK_TICK:
                pass
            elif event.type == SECOND_TICK:
                # Print frames rendered in the last second, then reset.
                print(self.framenum)
                self.framenum = 0
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    done = True
                elif event.key == K_t:
                    # "This function only works under the unix x11 video driver."
                    # Go to hell, pygame, you could've made it work on Win, too!
                    #pygame.display.toggle_fullscreen()
                    self.video_mode_flags |= pygame.FULLSCREEN
                    self.screen = pygame.display.set_mode(self.screen_size, self.video_mode_flags)
        return done

    def draw(self):
        """Render one frame: background, player, then flip the buffers."""
        self.camera.center(self.guy.x, self.guy.y)
        self.background.draw(self.camera)
        self.guy.draw(self.camera)
        pygame.display.flip()
        self.framenum += 1
if __name__ == '__main__':
    # mainLoop() returns None, so Game holds None after the loop exits.
    Game = FireGame().mainLoop()
    pygame.quit()
| Python |
# -*- coding: UTF-8 -*-
from pygame.locals import *
from utils import *
from tile import BurningTile
from movable import Movable
import pygame
class Guy(pygame.sprite.Sprite, Movable):
    """The player sprite: steered with the arrow keys, emits fire particles."""

    # Keys this sprite reacts to.
    events = [K_UP, K_DOWN, K_LEFT, K_RIGHT, K_SPACE]
    # Back-reference to the running FireGame (shared by all Guy instances).
    game = None

    def __init__(self, pos, game):
        pygame.sprite.Sprite.__init__(self)
        # Magical numbers FTW :)
        # Chicks dig magic.
        Movable.__init__(self, pos, (0, 0), 0.023)
        Guy.game = game
        self.tick = 0
        self.image = pygame.transform.scale2x(pygame.image.load("bcup_stand.png")).convert()
        self.rect = self.image.get_rect()
        # Energy budget shared across all live fire particles.
        self.MAXENERGY = 100
        BurningTile.precalculateEnergyLevelColors()
        # fires: passive trail particles; reserved_fires: charged but not
        # yet launched; active_fires: particles shot in a direction.
        self.fires = pygame.sprite.Group()
        self.reserved_fires = pygame.sprite.Group()
        self.active_fires = pygame.sprite.Group()
        self.image.set_colorkey((255,0,255)) # Bright Pink

    def get_used_energy(self):
        """Energy currently in use; reserved particles count at 2/3 weight."""
        return len(self.fires) + len(self.reserved_fires) / 1.5

    def has_energy(self):
        """True while there is budget left to spawn new particles."""
        return self.get_used_energy() < self.MAXENERGY

    def burn(self, time_passed):
        """Spawn a trail particle (budget permitting) and advance all particles."""
        if self.has_energy():
            self.fires.add(BurningTile((self.x,self.y)))
        for fire in self.fires:
            fire.move(time_passed)
        # Reserved particles stay frozen in place until launched.
#        for fire in self.reserved_fires:
#            fire.move(time_passed)
        for fire in self.active_fires:
            fire.move(time_passed)

    def draw(self, camera):
        """Draw trail particles, the player image, then the remaining groups."""
        rect = self.image.get_rect()
        rect.topleft = (self.x - self.rect.w / 2, self.y - self.rect.h / 2)
        for fire in self.fires:
            fire.draw(camera)
        camera.blit(self.image, rect)
        for fire in self.reserved_fires:
            fire.draw(camera)
        for fire in self.active_fires:
            fire.draw(camera)

    def move(self, time_passed):
        """Move as a Movable, emit fire, and stay inside the level bounds."""
        Movable.move(self, time_passed)
        self.burn(time_passed)
        size = self.game.size
        self.x = Clamp(self.x, self.rect.w/2, size[0] - self.rect.w/2)
        self.y = Clamp(self.y, self.rect.h/2, size[1] - self.rect.h/2)

    def handle_input(self):
        """Translate the cached keyboard state into movement and firing.

        CTRL charges a reserved particle; SHIFT+arrows launches the
        reserved particles in that direction; plain arrows steer; SPACE
        halts on the spot.
        """
        x,y = self.direction
        delta = 0.05
        mods = self.game.key_mods
        keys = self.game.keys
        if (mods & (KMOD_LCTRL | KMOD_RCTRL)):
            # Charge: reserve a fully-energized particle, stealing one
            # from the trail when the energy budget is exhausted.
            will_fire = False
            if self.has_energy():
                will_fire = True
            elif len(self.fires):
                will_fire = True
                for fire in self.fires:
                    fire.kill()
                    break
            if will_fire:
                self.reserved_fires.add(BurningTile((self.x,self.y), energy = 300))
        if (mods & (KMOD_LSHIFT | KMOD_RSHIFT)):
            shot_speed = 0.75
            dir_x = 0
            dir_y = 0
            if keys[K_UP]:
                dir_y -= shot_speed
            if keys[K_DOWN]:
                dir_y += shot_speed
            if keys[K_LEFT]:
                dir_x -= shot_speed
            if keys[K_RIGHT]:
                dir_x += shot_speed
            if dir_x != 0 and dir_y != 0:
                # So that the stream will be equally long in all directions
                # (Disregarding any other effects, that is - gravity, for one)
                dir_x /= 2**0.25
                dir_y /= 2**0.25
            if dir_x != 0 or dir_y != 0:
                # Launch every reserved particle in the chosen direction.
                for fire in self.reserved_fires:
                    fire.direction = (dir_x, dir_y)
                    fire.energy = intRandom(100) + 200 #300
                    self.active_fires.add(fire)
                self.reserved_fires.empty()
                if self.has_energy():
                    self.fires.add(BurningTile((self.x,self.y), (dir_x,dir_y), 0.023))
        else:
            # Normal steering with the arrow keys; SPACE stops the player.
            if keys[K_UP]:
                y -= delta
            if keys[K_DOWN]:
                y += delta
            if keys[K_LEFT]:
                x -= delta
            if keys[K_RIGHT]:
                x += delta
            if keys[K_SPACE]:
                x,y = 0, 0
        # Cap the player's speed on both axes.
        x = Clamp(x,-0.66,0.66)
        y = Clamp(y,-0.66,0.66)
        self.direction = (x,y)
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.4 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.0.1"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# Legacy debug switch; nonzero also enables chardet's debug output below.
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
    import rfc822
except ImportError:
    # Python 3: rfc822 is gone; email._parseaddr provides the pieces used here.
    from email import _parseaddr as rfc822
try:
    # Python 3.1 introduces bytes.maketrans and simultaneously
    # deprecates string.maketrans; use bytes.maketrans if possible
    _maketrans = bytes.maketrans
except (NameError, AttributeError):
    import string
    _maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
    import base64, binascii
    # Python 3.1 deprecates decodestring in favor of decodebytes
    _base64decode = getattr(base64, 'decodebytes', base64.decodestring)
except:
    # NOTE(review): bare except -- any failure silently disables base64 support.
    base64 = binascii = None
def _s2bytes(s):
# Convert a UTF-8 str to bytes if the interpreter is Python 3
try:
return bytes(s, 'utf8')
except (NameError, TypeError):
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same (TypeError)
return s
def _l2bytes(l):
# Convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is not str:
# In Python 2.6 and above, this call won't raise an exception
# but it will return bytes([65]) as '[65]' instead of 'A'
return bytes(l)
raise NameError
except NameError:
return ''.join(map(chr, l))
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# NOTE(review): 'mms' and 'svn' appear twice; harmless for `in` checks.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto',
    'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp',
    'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, types, cgi, urllib, urllib2, datetime
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
    from htmlentitydefs import name2codepoint, codepoint2name
except:
    # Older Pythons only ship entitydefs; build both mappings by hand.
    import htmlentitydefs
    name2codepoint={}
    codepoint2name={}
    for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
        # entitydefs values are either '&#NNN;' references or raw characters.
        if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
        name2codepoint[name]=ord(codepoint)
        codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except:
BeautifulSoup = None
# ---------- don't touch these ----------
# Internal exception hierarchy used to signal encoding/content conditions.
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Loosen sgmllib's tokenizer so it accepts the tag names, bare '<!'
# constructs and hex character references found in real-world feeds.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
# sgmllib's stock endbracket regex stops at the first '>', which breaks on
# angle brackets inside quoted attribute values; replace it when detected.
if sgmllib.endbracket.search(' <').start(0):
    class EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            match = self.endbracket.match(string,index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety.
                return EndBracketMatch(match)
            return None
    class EndBracketMatch:
        # Minimal match-object wrapper exposing start(), which is all sgmllib uses.
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)
    sgmllib.endbracket = EndBracketRegEx()
# Internal version code -> human-readable feed format name.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    # On any modern Python, FeedParserDict can subclass the builtin dict.
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # Minimal dict() replacement: build a mapping from (key, value) pairs.
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary whose entries are also reachable as attributes and whose
    legacy feed element names are aliased to their modern equivalents."""

    # Old element name -> canonical key; a list value means "first present wins".
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        """Look up `key`, resolving aliases and a few computed pseudo-keys."""
        # 'category' is shorthand for the first tag's term.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # Enclosures are the rel='enclosure' links, minus their 'rel' key.
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'license':
            for link in UserDict.__getitem__(self, 'links'):
                if link['rel']=='license' and link.has_key('href'):
                    return link['href']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        # A list of aliases: return the first one actually present.
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.__contains__(self, k):
                    return UserDict.__getitem__(self, k)
        # Prefer an exact match over the alias mapping.
        if UserDict.__contains__(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)

    def __setitem__(self, key, value):
        """Store under the canonical key (first alias for list-valued mappings)."""
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)

    def get(self, key, default=None):
        """dict.get with alias-aware key lookup."""
        if self.has_key(key):
            return self[key]
        else:
            return default

    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]

    def has_key(self, key):
        """True when `key` resolves to an attribute or a stored entry."""
        try:
            return hasattr(self, key) or UserDict.__contains__(self, key)
        except AttributeError:
            return False
    # This alias prevents the 2to3 tool from changing the semantics of the
    # __contains__ function below and exhausting the maximum recursion depth
    __has_key = has_key

    def __getattr__(self, key):
        # Real attributes first, then fall back to dictionary entries.
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key

    def __setattr__(self, key, value):
        # Underscore-prefixed names and 'data' stay real attributes;
        # everything else becomes a dictionary entry.
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)

    def __contains__(self, key):
        return self.__has_key(key)
def zopeCompatibilityHack():
    """Replace the FeedParserDict class with a plain-dict factory (for Zope)."""
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        # Plain-dict stand-in: copy the optional initial mapping and return it.
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Lazily-built EBCDIC -> ASCII translation table.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to ASCII."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point -> ASCII code point, one entry per byte value.
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        _ebcdic_to_ascii_map = _maketrans( \
            _l2bytes(range(256)), _l2bytes(emap))
    return s.translate(_ebcdic_to_ascii_map)
# Windows-1252 code points (0x80-0x9f) -> the Unicode characters they
# actually represent; used to repair feeds mislabeled as ISO-8859-1.
_cp1252 = {
    unichr(128): unichr(8364), # euro sign
    unichr(130): unichr(8218), # single low-9 quotation mark
    unichr(131): unichr( 402), # latin small letter f with hook
    unichr(132): unichr(8222), # double low-9 quotation mark
    unichr(133): unichr(8230), # horizontal ellipsis
    unichr(134): unichr(8224), # dagger
    unichr(135): unichr(8225), # double dagger
    unichr(136): unichr( 710), # modifier letter circumflex accent
    unichr(137): unichr(8240), # per mille sign
    unichr(138): unichr( 352), # latin capital letter s with caron
    unichr(139): unichr(8249), # single left-pointing angle quotation mark
    unichr(140): unichr( 338), # latin capital ligature oe
    unichr(142): unichr( 381), # latin capital letter z with caron
    unichr(145): unichr(8216), # left single quotation mark
    unichr(146): unichr(8217), # right single quotation mark
    unichr(147): unichr(8220), # left double quotation mark
    unichr(148): unichr(8221), # right double quotation mark
    unichr(149): unichr(8226), # bullet
    unichr(150): unichr(8211), # en dash
    unichr(151): unichr(8212), # em dash
    unichr(152): unichr( 732), # small tilde
    unichr(153): unichr(8482), # trade mark sign
    unichr(154): unichr( 353), # latin small letter s with caron
    unichr(155): unichr(8250), # single right-pointing angle quotation mark
    unichr(156): unichr( 339), # latin small ligature oe
    unichr(158): unichr( 382), # latin small letter z with caron
    unichr(159): unichr( 376)} # latin capital letter y with diaeresis
# Matches a scheme followed by '://' plus any extra slashes; the \1\3
# substitution below drops those superfluous slashes.
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join `uri` onto `base`, tolerating slightly malformed URIs."""
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        # NOTE(review): bare except; on failure, percent-encode each URI
        # component and retry the join.
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
    # Maps each known namespace URI to the internal prefix used when
    # dispatching elements to handler methods (e.g. the Dublin Core URI
    # maps to 'dc', so <dc:creator> is handled by _start_dc_creator).
    # An empty prefix marks the core RSS/Atom vocabularies.
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  #Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
                  'http://search.yahoo.com/mrss/': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/1999/xlink': 'xlink',
                  'http://www.w3.org/XML/1998/namespace': 'xml'
    }
    # lazily-populated {lowercased URI: prefix} lookup, shared by all
    # instances; filled in by __init__ on first instantiation
    _matchnamespaces = {}
    # elements whose text content may itself be a relative URI to resolve
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
    # elements whose embedded markup may contain relative URIs
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # elements whose embedded markup must be sanitized before use
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # MIME types treated as HTML (sanitized, relative URIs resolved)
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Set up per-document parser state.

        baseuri  -- base URI used to resolve relative links
        baselang -- default language inherited from the transport layer
        encoding -- character encoding the document was decoded with
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # build the shared lowercase-URI lookup once, on first use
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        self.svgOK = 0
        self.hasTitle = 0
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# the sgml parser doesn't handle entities in attributes, but
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
attrs = [(k, v.replace('&', '&')) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if type(baseuri) != type(u''):
try:
baseuri = unicode(baseuri, self.encoding)
except:
baseuri = unicode(baseuri, 'iso-8859-1')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javscript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg': self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the encosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its _end_* handler (if any) and
        unwind xml:base/xml:lang scoping."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK: self.svgOK -= 1
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # inside inline SVG, bypass handlers so markup passes through
            if self.svgOK: raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        """Buffer a named entity reference, e.g. 'copy' for &copy;."""
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # keep XML-special entities escaped
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # entity expanded to a character reference; recurse to decode
                return self.handle_entityref(text)
        else:
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->; ignored
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>; ignored
        pass
    def handle_decl(self, text):
        # called for each declaration, e.g. <!DOCTYPE ...>; ignored
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        # returns the index at which normal parsing should resume
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish; consume to end of buffer
                k = len(self.rawdata)
                return k
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # incomplete declaration (no closing '>'); propagate -1
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a declared XML namespace and infer the feed version
        from well-known namespace URIs."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: map the document's prefix onto our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        """Resolve *uri* against the current xml:base."""
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook overridden by strict/loose parser subclasses; default: no-op
        return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0: break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, basestring):
pieces[i] = v.decode('utf-8')
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
if self.lookslikehtml(output):
self.contentparams['type']='text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
try:
output = unicode(output.encode('iso-8859-1'), 'utf-8')
except:
pass
# map win-1252 extensions to the proper code points
if type(output) == type(u''):
output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and self.hasTitle:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Open a content-bearing element, recording its type, language
        and base URI in self.contentparams."""
        self.incontent += 1
        if self.lang: self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        """Close a content-bearing element and reset content state;
        returns the processed element text."""
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored. This is an attempt to detect
    # the most common cases. As false positives often result in silent
    # data loss, this function errs on the conservative side.
    def lookslikehtml(self, s):
        """Return 1 if *s* looks like escaped HTML, else None (falsy)."""
        if self.version.startswith('atom'): return
        if self.contentparams.get('type','text/html') != 'text/plain': return
        # must have a close tag or a entity reference to qualify
        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
            re.findall(r'</?(\w+)',s)): return
        # all entities must have been defined as valid HTML entities
        from htmlentitydefs import entitydefs
        if filter(lambda e: e not in entitydefs.keys(),
            re.findall(r'&(\w+);',s)): return
        return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
    def _start_rss(self, attrsD):
        """<rss>: determine the RSS version from the version attribute."""
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        #If we're here then this is an RSS feed.
        #If we don't have a version or have a version that starts with something
        #other than RSS then there's been a mistake. Correct it.
        if not self.version or not self.version.startswith('rss'):
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # mark the feed as the 'hotrss' variant
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        """<channel>: enter feed-level parsing state."""
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        """Handle CDF-style 'lastmod'/'href' attributes by simulating
        the equivalent modified/link child elements."""
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        """<feed>: enter feed state and determine the Atom version."""
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        # leave feed-level parsing state
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        """<image>: collect sub-elements into context['image']."""
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.hasTitle = 0
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        """<textinput>: collect sub-elements into context['textinput']."""
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.hasTitle = 0
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        """<author> and its aliases: begin collecting author fields."""
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        """<itunes:owner>: publisher name/email sub-elements follow."""
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        """<contributor>: begin collecting contributor fields."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries just a name, so buffer under 'name'
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever construct we are currently inside
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        # route the URI to the current author/contributor, if any
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # route the address to publisher/author/contributor as appropriate
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and self.feeddata.has_key('image'):
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        """Record one field of the current author (or publisher) in both
        the '<prefix>_detail' dict and the 'authors' list."""
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    # subtitle/tagline and rights/copyright are plain-text content elements
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        """<item>/<entry>: start collecting a new entry."""
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        self.hasTitle = 0
        # RDF-style feeds identify the item via rdf:about
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    # date elements: raw text is stored as-is and the parsed companion
    # value is saved under '<name>_parsed'
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    def _start_cc_license(self, attrsD):
        # cc:license is attribute-only; record it as a rel="license" link
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        # convert the buffered license URI into a rel="license" link
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
    def _start_category(self, attrsD):
        """<category>: record term/scheme/label; element text may
        later supply the term (see _end_category)."""
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _start_media_category(self, attrsD):
        attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list of terms
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            # element text fills in the term of the tag opened in _start_category
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    _end_media_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS 2.0 <cloud> element: store its attributes verbatim.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Default rel is 'alternate'; rel='self' links default to the feed's
        # own media type, everything else to text/html.
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        # Links inside an entry's <image> are not entry-level links.
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            # An explicit href means the element carries no text content;
            # an 'alternate' link to an HTML type also becomes the
            # canonical 'link' for the context.
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        # NOTE(review): both locals are unused; pop() is called for its side
        # effects on the element stack.
        value = self.pop('link')
        context = self._getContext()
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # guid doubles as the entry link unless isPermaLink="false".
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # Inside inline SVG, <title> is ordinary markup, not feed metadata.
        if self.svgOK: return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        if self.svgOK: return
        value = self.popContent('title')
        if not value: return
        # NOTE(review): 'context' is unused here; kept for byte-compat.
        context = self._getContext()
        self.hasTitle = 1
    _end_dc_title = _end_title
    def _end_media_title(self):
        # media:title must not mark the context as titled; preserve the
        # hasTitle flag around the shared _end_title handling.
        hasTitle = self.hasTitle
        self._end_title()
        self.hasTitle = hasTitle
    def _start_description(self, attrsD):
        context = self._getContext()
        # A description arriving after a summary is treated as full content
        # so the summary is not overwritten.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        # Like description, but abstracts are plain text.
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        # Mirror of _start_description: close whichever element was opened.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # Attributes (uri/url) become generator_detail; element text is
        # filled in later by _end_generator as the 'name'.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # The value lives in an rdf:resource attribute, not element text;
        # inject it into the element stack before popping so pop() sees it.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # Same rdf:resource injection trick as _start_admin_generatoragent.
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # A second summary is demoted to content so the first one survives.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        # Mirror of _start_summary: close whichever element was opened.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # Enclosures are stored as rel="enclosure" entries in 'links'.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel']='enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        self.hasTitle = 0
    def _end_source(self):
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        # Deep-copy so clearing sourcedata below doesn't mutate the stored value.
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        # Out-of-line content: remember the src URI alongside the params.
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text and HTML content also doubles as the entry summary.
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        if attrsD.get('href'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        # Normalize 'yes'/anything-else to 1/0.
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        # (index 2 when 'yes', 1 when 'clean', else 0 into the tuple below)
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_content(self, attrsD):
        # media:content can repeat; collect each one's raw attributes.
        context = self._getContext()
        context.setdefault('media_content', [])
        context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if not context['media_thumbnail'][-1].has_key('url'):
context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        # The element text becomes the player's 'content'.
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        self.push('newlocation', 1)
    def _end_newlocation(self):
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        # (newlocation is a feed-level redirect, not an entry property)
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        # SAX ContentHandler used when a real XML parser is available.
        # Translates namespaced SAX events into the 'prefix:localname' tag
        # names that _FeedParserMixin's element handlers expect.
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
            self.decls = {}
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
            if uri == 'http://www.w3.org/1999/xlink':
                # carry the xlink declaration into the next element's attrs
                self.decls['xmlns:'+prefix] = uri
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD, self.decls = self.decls, {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                # Expat gives no qname; reverse-map the namespace URI to a
                # declared prefix instead.
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            # Mirror of startElementNS' prefix resolution, minus attributes.
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # Non-fatal parse error: mark the feed as ill-formed ("bozo")
            # but keep going.
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that reconstructs (a possibly transformed copy of) the
    HTML it parses.

    Subclasses override the handle_*/unknown_* callbacks to filter or
    rewrite markup; output() returns the accumulated result.

    NOTE: several entity literals in this file had been mangled by an
    HTML-entity-decoding pass (e.g. '&#39;' had collapsed to a bare quote);
    they are restored here so the escaping logic works again.
    """
    special = re.compile('''[<>'"]''')
    # An '&' that does not begin a decimal, hex, or named entity reference.
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: written as <tag />, never given a closing tag.
    elements_no_end_tag = [
        'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
        'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
        'source', 'track', 'wbr'
    ]

    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # self.pieces accumulates output fragments; joined by output().
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # Expand XML-style <tag/> into either a void element or an
        # explicit open/close pair, depending on the tag.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def parse_starttag(self, i):
        j = sgmllib.SGMLParser.parse_starttag(self, i)
        if self._type == 'application/xhtml+xml':
            # In XHTML mode a self-closing tag also implies an end tag.
            if j > 2 and self.rawdata[j-2:j] == '/>':
                self.unknown_endtag(self.lasttag)
        return j

    def feed(self, data):
        # Escape declarations that aren't DOCTYPE, comments, or marked
        # sections; sgmllib would otherwise choke on them.
        # (restored: replacement must be '&lt;!\1', not a literal '<!')
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # restored: decode numeric references for quote characters
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            self.encoding = self.encoding + '_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and type(data) == type(u''):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)

    def normalize_attrs(self, attrs):
        if not attrs: return attrs
        # utility method to be called by descendants
        # lowercase keys (last duplicate wins), lowercase rel/type values,
        # and sort for deterministic output.
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs = ''
        if attrs:
            for key, value in attrs:
                # restored: re-escape markup characters inside attribute
                # values for output (they arrive decoded from sgmllib)
                value = value.replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:], 16))
        else:
            value = unichr(int(ref))
        # Rewrite cp1252 "smart" characters to their real code points.
        if value in _cp1252.keys():
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # restored: unknown entities get their ampersand escaped
            self.pieces.append('&amp;%(ref)s' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # More lenient declaration-name scanner than sgmllib's default.
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def convert_charref(self, name):
        return '&#%s;' % name

    def convert_entityref(self, name):
        return '&%s;' % name

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])

    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            # (restored: emit '&lt;', not a raw '<', so it is escaped)
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser built on sgmllib, used for ill-formed feeds
    when strict XML parsing is unavailable or fails."""
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities = entities

    def decodeEntities(self, element, data):
        # Normalize numeric character references for the five XML special
        # characters to their named equivalents, then — for non-XML content
        # types only — decode those named entities to literal characters.
        # (The original literals were destroyed by an entity-decoding pass
        # over this file; restored here.)
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

    def strattrs(self, attrs):
        # Serialize (name, value) pairs back to attribute syntax, escaping
        # embedded double quotes (restored '&quot;' literal).
        return ''.join([' %s="%s"' % (n, v.replace('"', '&quot;')) for n, v in attrs])
class _MicroformatsParser:
    # Extracts microformat data (rel-tag tags, enclosures, XFN links and
    # hCard vCards) from an HTML document using BeautifulSoup.
    # Property-type constants for getPropertyValue:
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5

    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']

    def __init__(self, data, baseuri, encoding):
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None

    def vcardEscape(self, s):
        # Escape the characters RFC 2426 requires escaping in vCard values.
        if type(s) in (type(''), type(u'')):
            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
        return s

    def vcardFold(self, s):
        # Fold long vCard lines at 75 octets (74 for continuations, which
        # start with a leading space), per RFC 2426.
        s = re.sub(';+$', '', s)
        sFolded = ''
        iMax = 75
        sPrefix = ''
        while len(s) > iMax:
            sFolded += sPrefix + s[:iMax] + '\n'
            s = s[iMax:]
            sPrefix = ' '
            iMax = 74
        sFolded += sPrefix + s
        return sFolded

    def normalize(self, s):
        # Collapse runs of whitespace to single spaces and trim.
        return re.sub(r'\s+', ' ', s).strip()

    def unique(self, aList):
        # Order-preserving de-duplication.
        results = []
        for element in aList:
            if element not in results:
                results.append(element)
        return results

    def toISO8601(self, dt):
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)

    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        # Core microformat property lookup: find element(s) whose class
        # contains sProperty under elmRoot and extract their value(s)
        # according to iPropertyType (STRING/DATE/URI/NODE/EMAIL).
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
        if bAllowMultiple and (iPropertyType != self.NODE):
            # Prefer <li> items inside a matching list container.
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            # 'value' may also come from a <pre> block (no normalization).
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
        if not bFound:
            # Fall back to the root element itself.
            snapResults = [elmRoot]
            bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # Exclude vcards nested inside other vcards.
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # Type-appropriate "not found" sentinel.
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return None
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return None
            else: return None
        arValues = []
        for elmResult in arResults:
            sValue = None
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # Value extraction fallbacks, in priority order:
            # mailto: href (EMAIL), abbr title, URI-bearing attribute,
            # img alt, then rendered text content.
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues

    def findVCards(self, elmRoot, bAgentParsing=0):
        # Serialize every hCard under elmRoot to RFC 2426 vCard text.
        # bAgentParsing=1 means elmRoot itself is the card (nested AGENT).
        sVCards = ''

        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]

        for elmCard in arCards:
            arLines = []

            def processSingleString(sProperty):
                # Emit PROPERTY:value for a single string-valued property.
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or u''

            def processSingleURI(sProperty):
                # Emit a URI-valued property, handling data: URIs as
                # inline base64 and recording content type when known.
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))

            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                # Emit PROPERTY;TYPE=...:value entries (tel/email/label).
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))

            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                # NOTE(review): elmAgent.get('class') may be None here,
                # which re.search would reject — confirm against callers.
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    # Completely remove the agent element from the parse tree
                    elmAgent.extract()
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))

            # FN (full name)
            sFN = processSingleString('fn')

            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                         sGivenName + ';' +
                                         ','.join(arAdditionalNames) + ';' +
                                         ','.join(arHonorificPrefixes) + ';' +
                                         ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))

            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))

            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))

            # PHOTO
            processSingleURI('photo')

            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))

            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                         sPostOfficeBox + ';' +
                                         sExtendedAddress + ';' +
                                         sStreetAddress + ';' +
                                         sLocality + ';' +
                                         sRegion + ';' +
                                         sPostalCode + ';' +
                                         sCountryName))

            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])

            # TEL (phone number)
            processTypeValue('tel', ['voice'])

            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])

            # MAILER
            processSingleString('mailer')

            # TZ (timezone)
            processSingleString('tz')

            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))

            # TITLE
            processSingleString('title')

            # ROLE
            processSingleString('role')

            # LOGO
            processSingleURI('logo')

            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))

            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))

            # NOTE
            processSingleString('note')

            # REV
            processSingleString('rev')

            # SOUND
            processSingleURI('sound')

            # UID
            processSingleString('uid')

            # URL
            processSingleURI('url')

            # CLASS
            processSingleString('class')

            # KEY
            processSingleURI('key')

            if arLines:
                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
                sVCards += u'\n'.join(arLines) + u'\n'

        return sVCards.strip()

    def isProbablyDownloadable(self, elm):
        # Heuristic: a link is a likely enclosure if its MIME type is
        # audio/video/non-XML-application, or its path ends in a known
        # binary file extension.
        attrsD = elm.attrMap
        if not attrsD.has_key('href'): return 0
        linktype = attrsD.get('type', '').strip()
        if linktype.startswith('audio/') or \
           linktype.startswith('video/') or \
           (linktype.startswith('application/') and not linktype.endswith('xml')):
            return 1
        path = urlparse.urlparse(attrsD['href'])[2]
        if path.find('.') == -1: return 0
        fileext = path.split('.').pop().lower()
        return fileext in self.known_binary_extensions

    def findTags(self):
        # rel-tag microformat: the tag term is the last non-empty path
        # segment; everything before it is the tag scheme.
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
            href = elm.get('href')
            if not href: continue
            urlscheme, domain, path, params, query, fragment = \
                       urlparse.urlparse(_urljoin(self.baseuri, href))
            segments = path.split('/')
            tag = segments.pop()
            if not tag:
                tag = segments.pop()
            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
            if not tagscheme.endswith('/'):
                tagscheme += '/'
            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))

    def findEnclosures(self):
        # Collect links marked rel="enclosure" or heuristically downloadable.
        all = lambda x: 1
        enclosure_match = re.compile(r'\benclosure\b')
        for elm in self.document(all, {'href': re.compile(r'.+')}):
            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
            if elm.attrMap not in self.enclosures:
                self.enclosures.append(elm.attrMap)
                if elm.string and not elm.get('title'):
                    self.enclosures[-1]['title'] = elm.string

    def findXFN(self):
        # Collect links whose rel values include known XFN relationships.
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
            rels = elm.get('rel', '').split()
            xfn_rels = []
            for rel in rels:
                if rel in self.known_xfn_relationships:
                    xfn_rels.append(rel)
            if xfn_rels:
                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Run the microformats parser over an HTML document.

    Returns a dict with keys 'tags', 'enclosures', 'xfn' and 'vcard', or
    None when BeautifulSoup is unavailable or the source cannot be parsed.
    """
    if not BeautifulSoup:
        return
    if _debug:
        sys.stderr.write('entering _parseMicroformats\n')
    try:
        parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    except UnicodeEncodeError:
        # sgmllib throws this exception when performing lookups of tags
        # with non-ASCII characters in them.
        return
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures,
            "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML rewriter that resolves relative URIs against a base URI."""

    # (tag, attribute) pairs whose values are URIs and therefore need
    # resolving against the document base.
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # Trim stray whitespace, join against the base, then vet the scheme.
        return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))

    def unknown_starttag(self, tag, attrs):
        if _debug:
            sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
        rewritten = []
        for name, value in self.normalize_attrs(attrs):
            if (tag, name) in self.relative_uris:
                # Keep the original value when resolution yields an empty
                # (i.e. rejected) URI, matching the and/or idiom upstream.
                value = self.resolveURI(value) or value
            rewritten.append((name, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, rewritten)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Return htmlSource with every relative URI rewritten against baseURI."""
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join base and rel, returning u'' unless the result uses a scheme in
    the ACCEPTABLE_URI_SCHEMES whitelist.

    An empty whitelist disables filtering entirely and the plain join is
    returned.
    """
    if not ACCEPTABLE_URI_SCHEMES:
        # bail if ACCEPTABLE_URI_SCHEMES is empty
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if not rel:
        # no relative part: vet the base URI's own scheme
        scheme = urlparse.urlparse(base)[0]
        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
            return base
        return u''
    joined = _urljoin(base, rel)
    scheme = joined.strip().split(':', 1)[0]
    if scheme in ACCEPTABLE_URI_SCHEMES:
        return joined
    return u''
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Whitelist-based markup sanitizer.

    Keeps only acceptable HTML/MathML/SVG elements and attributes, drops
    the entire contents of script/applet/style containers, and filters
    inline CSS down to a safe subset.  MathML and SVG vocabularies are
    only honored while inside a math/svg root, tracked by the
    mathmlOK/svgOK depth counters.
    """

    # HTML elements allowed to survive sanitization
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']

    # attributes allowed on acceptable HTML elements
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
        'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
        'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
        'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
        'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
        'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
        'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
        'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
        'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
        'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
        'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
        'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
        'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
        'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
        'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
        'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
        'xml:lang']

    # containers whose entire contents are suppressed, not just the tags
    unacceptable_elements_with_end_tag = ['script', 'applet', 'style']

    # CSS properties allowed through sanitize_style()
    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    # hex colors, rgb() triples, and simple lengths/percentages
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
        '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')

    mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
        'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
        'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none', 'semantics']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
        'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
        'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
        'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
        'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
        'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']

    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
        'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
        'svg', 'switch', 'text', 'title', 'tspan', 'use']

    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
        'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
        'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
        'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
        'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
        'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
        'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
        'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
        'min', 'name', 'offset', 'opacity', 'orient', 'origin',
        'overline-position', 'overline-thickness', 'panose-1', 'path',
        'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
        'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
        'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
        'stop-color', 'stop-opacity', 'strikethrough-position',
        'strikethrough-thickness', 'stroke', 'stroke-dasharray',
        'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
        'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
        'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
        'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
        'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
        'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
        'y2', 'zoomAndPan']

    # lazily-built maps from lowercased to original camel-case SVG names;
    # populated on first SVG element seen (see unknown_starttag)
    svg_attr_map = None
    svg_elem_map = None

    # CSS properties allowed inside SVG islands only
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    def reset(self):
        # reset parser state before each feed()
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0  # depth inside suppressed containers
        self.mathmlOK = 0           # depth inside <math> islands
        self.svgOK = 0              # depth inside <svg> islands

    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1

            # add implicit namespaces to html5 inline svg/mathml
            if self._type.endswith('html'):
                if not dict(attrs).get('xmlns'):
                    if tag=='svg':
                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
                    if tag=='math':
                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )

            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK += 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK += 1

            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])

                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            elif not tag in self.acceptable_elements:
                # neither whitelisted HTML nor active MathML/SVG: drop the tag
                return

        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))

        # keep only whitelisted attributes; style is filtered separately
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                # make sure the uri uses an acceptable uri scheme
                if key == u'href':
                    value = _makeSafeAbsoluteURI(value)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            # closing a math/svg root leaves the corresponding island
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg' and self.svgOK: self.svgOK -= 1
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # processing instructions are never emitted
        pass

    def handle_decl(self, text):
        # declarations (e.g. doctypes) are never emitted
        pass

    def handle_data(self, text):
        # suppress text while inside script/applet/style containers
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

    def sanitize_style(self, style):
        """Return a filtered copy of an inline style value, or '' if any
        part of it fails the whitelist checks."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''

        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand properties: every keyword must pass the whitelist
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)

    def parse_comment(self, i, report=1):
        ret = _BaseHTMLProcessor.parse_comment(self, i, report)
        if ret >= 0:
            return ret
        # if ret == -1, this may be a malicious attempt to circumvent
        # sanitization, or a page-destroying unclosed comment
        match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
        if match:
            return match.end()
        # unclosed comment; deliberately fail to handle_data()
        return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Sanitize untrusted HTML, optionally tidying the result.

    Runs htmlSource through _HTMLSanitizer, then (when TIDY_MARKUP is set)
    through the first available Tidy interface, keeping only the <body>
    contents of Tidy's output.  Returns the cleaned markup.
    """
    p = _HTMLSanitizer(encoding, _type)
    # Neutralize CDATA section openers so their contents cannot slip raw
    # markup past the sanitizer.  (Bug fix: this line previously replaced
    # the marker with itself, a no-op; the opener must be entity-escaped.)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # best-effort: this interface is unavailable, try the next
                pass
        if _tidy:
            # Tidy interfaces want byte strings; remember to decode back
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # Tidy emits a complete document; keep only the <body> contents
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler for feed fetching.

    Follows redirects while recording the original HTTP status code on the
    returned file-like object (as .status), and retries basic-auth failures
    with digest auth when the server demands it.
    """

    def http_error_default(self, req, fp, code, msg, headers):
        # any 3xx other than 304 is treated as a redirect; everything else
        # is returned as-is with .status attached for the caller to inspect
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        # follow the redirect only when a Location header is present;
        # either way, preserve the original status code on the result
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        # same handling as 302, delegating to the 301 base implementation
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    # remaining redirect-style statuses share the 302 logic
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure above means digest retry is impossible; fall back
            # to the default (non-retrying) error handling
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    if request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """
    # already a file-like object: hand it straight back
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL and carry them
                    # as a pre-encoded basic-auth token instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()

        # iri support: encode non-ASCII hostnames via IDNA; best-effort only
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8')
        except:
            pass

        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Assemble a urllib2.Request for *url* carrying conditional-GET
    (If-None-Match / If-Modified-Since), referrer, content-encoding,
    basic-auth, and caller-supplied headers."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    # normalize `modified` to a 9-tuple in GMT
    if type(modified) == type(''):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        weekday_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                       'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        stamp = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
            weekday_names[modified[6]], modified[2],
            month_names[modified[1] - 1], modified[0],
            modified[3], modified[4], modified[5])
        request.add_header('If-Modified-Since', stamp)
    if referrer:
        request.add_header('Referer', referrer)
    # advertise only the decompression schemes actually importable here
    if gzip and zlib:
        request.add_header('Accept-encoding', 'gzip, deflate')
    elif gzip:
        request.add_header('Accept-encoding', 'gzip')
    elif zlib:
        request.add_header('Accept-encoding', 'deflate')
    else:
        request.add_header('Accept-encoding', '')
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
_date_handlers = []    # date-parsing callables, most recently registered first

def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # prepend so later registrations take priority during parsing
    _date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
# expand each template into a full regex with named groups and an
# optional time-of-day / timezone suffix
_iso8601_re = [
    tmpl.replace(
        'YYYY', r'(?P<year>\d{4})').replace(
        'YY', r'(?P<year>\d\d)').replace(
        'MM', r'(?P<month>[01]\d)').replace(
        'DD', r'(?P<day>[0123]\d)').replace(
        'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
        'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
try:
    del tmpl
except NameError:
    pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass

def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for try_match in _iso8601_matches:
        m = try_match(dateString)
        if m:
            break
    if m is None or m.span() == (0, 0):
        # nothing matched, or only the all-optional empty template did
        return
    params = m.groupdict()

    # ordinal (day-of-year) component, 0 when absent
    ordinal = int(params.get('ordinal') or 0)

    year = params.get('year', '--')
    if not year or year == '--':
        # no year in this template: default to the current GMT year
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)

    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)

    day = params.get('day', 0)
    if day:
        day = int(day)
    elif ordinal:
        # see the month handling above
        day = ordinal
    elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
        day = 1
    else:
        day = time.gmtime()[2]

    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1

    # in ISO 8601 most fields are optional; default the missing ones to 0
    for field in ('hour', 'minute', 'second', 'tzhour', 'tzmin'):
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday (index 6) is normalized by mktime(); DST flag -1 = unknown
    tm = [year, month, day, hour, minute, second, 0,
          ordinal, -1]

    # ISO 8601 time zone adjustments: shift the fields back toward UTC
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)

# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# e.g. "2004[year] 05[month] 28[day]  01:31:17"
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
    (_korean_year, _korean_month, _korean_day))

# e.g. "2004-05-25  [am/pm]  2:21:17"
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
    (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # re-express as W3DTF with the fixed +09:00 (KST) offset and delegate
    parts = {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
             'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
             'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % parts
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
# make the OnBlog format available to the date-parsing machinery
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    # convert the 12-hour AM/PM clock to a zero-padded 24-hour value
    hour = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour += 12
    hour = str(hour).rjust(2, '0')
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# e.g. "2004-07-08 23:56:58" or "2004-07-08 23:56:58.0"
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m:
        return
    # NOTE(review): this applies the fixed +09:00 (KST) offset, same as the
    # Korean handlers above -- presumably these feeds originate from KST
    # servers; confirm before changing.
    parts = {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
             'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
             'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % parts
    if _debug:
        sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }

# Greek weekday abbreviations mapped to English ones
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# e.g. "[weekday], 28 [month] 2004 12:21:17 +0300" (RFC822-like layout)
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    # translate the Greek weekday/month names; bail on unknown names
    wday = _greek_wdays.get(m.group(1))
    month = _greek_months.get(m.group(3))
    if wday is None or month is None:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),
                  'zonediff': m.group(8)}
    if _debug:
        sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
# NOTE(review): 'februári' and 'máujus' look like upstream typos for
# 'február' and 'május' -- confirm against real-world feed data before
# changing, since these are exact-match lookup keys.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# e.g. "2004-[month name]-28T12:21+02:00"
_hungarian_date_format_re = \
    re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m:
        return
    try:
        # translate the month name and zero-pad day/hour
        month = _hungarian_months[m.group(2)]
        day = m.group(3).rjust(2, '0')
        hour = m.group(4).rjust(2, '0')
    except:
        # unrecognized month name
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': m.group(5),
                 'zonediff': m.group(6)}
    if _debug:
        sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    """Parse a W3C date-time format (a profile of ISO 8601) string.

    Returns a 9-tuple in GMT, or None when the string does not match the
    W3DTF grammar in its entirety or has an implausible year.
    """
    def __extract_date(m):
        # returns (year, month, day); (0, 0, 0) signals "invalid"
        year = int(m.group('year'))
        if year < 100:
            # two-digit years are placed in the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: start from a rough guess and
            # iterate until gmtime agrees on the day-of-year
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        # returns (hours, minutes, seconds), defaulting missing parts to 0
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # negated: a "+hh:mm" designator means the local time is AHEAD of
        # UTC, so we must subtract to get back to UTC
        if tzd[0] == '+':
            return -offset
        return offset

    # date part: year, optional month/day or ordinal day-of-year
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # require the pattern to consume the ENTIRE string
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets the tuple as local time; correct with time.timezone
    # and the parsed offset to land on GMT
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # Drop a leading day-of-week token ("Fri," / "Fri." / "fri").
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # Four tokens: the zone may be glued onto the last token with a
        # '+' (e.g. "...08:19:53+0000"); split it into its own token,
        # otherwise append an empty zone.
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
        # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    if len(data) < 5:
        # date only, no time/zone: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # normalize the zone-aware tuple to a 9-tuple in GMT
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format

    Returns a 9-tuple in GMT, or None when the string does not match.
    """
    # Example: Fri, 2006/09/15 08:19:53 EDT
    _my_date_pattern = re.compile( \
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    m = _my_date_pattern.search(aDateString)
    if m is None:
        # Previously this fell straight into .groups() and raised
        # AttributeError on non-matching input; return None like every
        # other registered date handler so _parse_date can move on.
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Rebuild the pieces as an RFC822 date and reuse the rfc822 parser.
    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/

    Returns (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type')))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16BE with BOM (the extra != 00 00 check rules out the
            # UTF-32 BOMs handled below)
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # xml_data is now ASCII-compatible, so the encoding attribute of
        # the XML declaration (if any) can be read directly.
        xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # When the declaration names a multi-byte family (UTF-16/UTF-32,
        # UCS-2/UCS-4 aliases), the BOM-sniffed byte order is more precise.
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # Apply the RFC 3023 precedence rules described in the docstring.
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))):
        # HTTP response without any Content-Type header
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    # some feeds claim to be gb2312 but are actually gb18030.
    # apparently MSIE and Firefox both do the following switch:
    if true_encoding.lower() == 'gb2312':
        true_encoding = 'gb18030'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns the document re-encoded as UTF-8 bytes with its XML
    declaration rewritten (or prepended) to say encoding='utf-8'.
    Raises whatever the codec raises if *data* is not valid in the
    chosen encoding.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    # NOTE: the UTF-16 checks require byte 3/4 != 00 00 so that the
    # 4-byte UTF-32 BOMs below are not mistaken for UTF-16 BOMs; when a
    # BOM contradicts the caller's encoding, the BOM wins.
    if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # decode (may raise) ...
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # ... then rewrite the XML declaration to claim utf-8, adding one
    # if the document had none.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE

    Also returns, as a third element, a dict of the 'safe' inline entity
    definitions that were kept (name -> value).
    '''
    # Split at the first element: 'head' is the prolog (where DOCTYPE and
    # ENTITY declarations live), 'data' is everything from the root on.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head,data = data[:start+1], data[start+1:]
    entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
    entity_results=entity_pattern.findall(head)
    head = entity_pattern.sub(_s2bytes(''), head)
    doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
    doctype_results = doctype_pattern.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if doctype.lower().count(_s2bytes('netscape')):
        # Netscape's DOCTYPE marks the nonstandard RSS 0.91 variant.
        version = 'rss091n'
    else:
        version = None
    # only allow in 'safe' inline entity definitions
    replacement=_s2bytes('')
    if len(doctype_results)==1 and entity_results:
        # a 'safe' entity is   name "value"   where value contains no
        # raw '&' or '"' (numeric character references are allowed)
        safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
        safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
        if safe_entities:
            replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
    data = doctype_pattern.sub(replacement, head) + data
    # NOTE: safe_pattern is only referenced when replacement is non-empty,
    # which is exactly when it was defined above.
    return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
# NOTE(review): the mutable default arguments (handlers=[], {}) are a
# classic hazard; they appear to be only read or rebound here, but the
# signature is kept as-is for API compatibility -- confirm before changing.
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    Returns a FeedParserDict with (at least) 'feed', 'entries' and, when
    anything went wrong along the way, 'bozo'/'bozo_exception'.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        # open the resource and slurp the raw bytes
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        # download failures are reported through bozo, not raised
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # if feed is gzip-compressed, decompress it
    if f and data and 'headers' in result:
        if gzip and result['headers'].get('content-encoding') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and result['headers'].get('content-encoding') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if 'headers' in result:
        if 'etag' in result['headers'] or 'ETag' in result['headers']:
            etag = result['headers'].get('etag', result['headers'].get('ETag'))
            if etag:
                result['etag'] = etag
        if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']:
            modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified'))
            if modified:
                result['modified'] = _parse_date(modified)
    if hasattr(f, 'url'):
        # the (possibly redirected) final URL; status defaults to 200
        # and is overridden below when the opener reports one
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        # served with a non-XML media type: flag it but keep parsing
        if http_headers.has_key('content-type') or http_headers.has_key('Content-type'):
            bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type'))
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    if data is not None:
        result['version'], data, entities = _stripDoctype(data)
    # ensure that baseuri is an absolute uri using an acceptable URI scheme
    contentloc = http_headers.get('content-location', http_headers.get('Content-Location', ''))
    href = result.get('href', '')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', http_headers.get('Content-Language', None))
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if data is None:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        # we parsed fine, just not with the declared encoding
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            # fall through to the loose parser below
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    """Base class for result serializers.

    Simply holds the parsed feed results; subclasses implement write().
    """

    def __init__(self, results):
        # keep a reference to the feedparser result dict for rendering
        self.results = results
class TextSerializer(Serializer):
    # Renders the results as flat 'dotted.path=value' lines, one per leaf.

    def write(self, stream=sys.stdout):
        self._writer(stream, self.results, '')

    def _writer(self, stream, node, prefix):
        # Recursively walk dicts and lists; emit each leaf as 'prefix=value'.
        # Falsy nodes (empty dicts/lists/strings, None) are skipped entirely.
        if not node: return
        if hasattr(node, 'keys'):
            keys = node.keys()
            keys.sort()
            for k in keys:
                # 'description' and 'link' are skipped, as is any key that
                # has a *_detail or *_parsed companion key.
                if k in ('description', 'link'): continue
                if node.has_key(k + '_detail'): continue
                if node.has_key(k + '_parsed'): continue
                self._writer(stream, node[k], prefix + k + '.')
        elif type(node) == types.ListType:
            # lists get an [index] segment spliced into the path
            index = 0
            for n in node:
                self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
                index += 1
        else:
            try:
                s = str(node).encode('utf-8')
                # escape so each leaf stays on a single output line
                s = s.replace('\\', '\\\\')
                s = s.replace('\r', '')
                s = s.replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(s)
                stream.write('\n')
            except:
                # values that cannot be stringified/encoded are dropped
                pass
class PprintSerializer(Serializer):
    """Pretty-print the parsed results to a stream with pprint."""

    def write(self, stream=sys.stdout):
        from pprint import pprint
        # lead with the resolved feed URL when one is present
        if self.results.has_key('href'):
            stream.write(self.results['href'] + '\n\n')
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line driver: parse each URL/filename argument and dump the
    # result with the chosen serializer (TextSerializer or PprintSerializer).
    try:
        from optparse import OptionParser
    except:
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        # optparse unavailable: fall back to bare sys.argv handling with
        # a stand-in options object carrying the defaults.
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        class _Options:
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    zopeCompatibilityHack()
    # pick the serializer class by capitalized format name, e.g.
    # 'pprint' -> PprintSerializer; unknown names fall back to Serializer
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
| Python |
import unittest
import mox
import clients
import client_model
from datetime import datetime
from google.appengine.ext import db
from google.appengine.api import channel
# Import the 'testbed' module.
from google.appengine.ext import testbed
class ClientsUnitTest(unittest.TestCase):
    """Unit tests for the clients module, run against App Engine stubs."""

    def setUp(self):
        self.mox = mox.Mox()
        # At first, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed which will prepare the usage of service stubs.
        self.testbed.activate()
        # Next, declare which service stubs you want to use.
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()

    def tearDown(self):
        # Restore any modules still stubbed out (harmless if a test already
        # did so) and deactivate the testbed so datastore/memcache state
        # does not leak between tests.
        self.mox.UnsetStubs()
        self.testbed.deactivate()

    def test_add_client(self):
        self.mox.StubOutWithMock(clients, 'channel')
        clients.channel.create_channel(mox.IsA(str))
        self.mox.ReplayAll()
        (id, token) = clients.add_client('http://example.com/feed')
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
        # exactly one Client entity should have been stored
        self.assertEqual(1, len(client_model.Client.all().fetch(2)))

    def test_add_client_no_feed(self):
        # The original version put self.fail() inside a bare try/except,
        # which swallowed the AssertionError and made the test pass even
        # if add_client() did NOT raise.  Using else: keeps the failure
        # path outside the except clause.
        try:
            clients.add_client()
        except Exception:
            """Expected behavior."""
        else:
            self.fail('add_client with no feed should fail.')

    def test_send_filtered_messages(self):
        self.mox.StubOutWithMock(clients, 'channel')
        self.mox.StubOutWithMock(clients, 'datetime')
        clients.channel.create_channel(mox.IsA(str))
        clients.channel.send_message(mox.IsA(str), '[{"id": "foo"}, {"id": "bar"}]')
        self.mox.ReplayAll()
        (id, token) = clients.add_client('http://example.com/feed')
        clients.send_filtered_messages(id, 'http://example.com/feed',
                                       [{'id': 'foo'}, {'id': 'bar'}])
        self.mox.UnsetStubs()
        self.mox.VerifyAll()

    def test_send_filtered_messages_with_dup(self):
        self.mox.StubOutWithMock(clients, 'channel')
        self.mox.StubOutWithMock(clients, 'datetime')
        clients.channel.create_channel(mox.IsA(str))
        # duplicate 'foo' in the second batch must be filtered out
        clients.channel.send_message(mox.IsA(str), '[{"id": "foo"}, {"id": "bar"}]')
        clients.channel.send_message(mox.IsA(str), '[{"id": "baz"}]')
        self.mox.ReplayAll()
        (id, token) = clients.add_client('http://example.com/feed')
        clients.send_filtered_messages(id, 'http://example.com/feed',
                                       [{'id': 'foo'}, {'id': 'bar'}])
        clients.send_filtered_messages(id, 'http://example.com/feed',
                                       [{'id': 'foo'}, {'id': 'baz'}])
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
import _markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing

interesting = re.compile('[&<]')                  # chars that start markup or refs
# matches a reference or tag prefix that may be cut off at end of buffer
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
                        '<([a-zA-Z][^<>]*|'
                        '/([a-zA-Z][^<>]*)?|'
                        '![^<>]*)?')

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')   # &name (plus terminator)
charref = re.compile('&#([0-9]+)[^0-9]')                          # &#ddd (plus terminator)

starttagopen = re.compile('<[>a-zA-Z]')           # '<' followed by tag name or '>'
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')   # SGML <tag/data/ shorthand opener
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')       # full <tag/data/ form
piclose = re.compile('>')                         # end of a processing instruction
endbracket = re.compile('[<>]')                   # rough end-of-tag scan (see parse_starttag)
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')   # tag name
# attribute: name, optional '=' and a quoted or unquoted value
attrfind = re.compile(
    r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
# Raised by SGMLParser.error() for unrecoverable input.
class SGMLParseError(RuntimeError):
    """Exception raised for all parse errors."""
    pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(_markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
    def __init__(self, verbose=0):
        """Initialize and reset this instance."""
        self.verbose = verbose  # kept but not consulted in this base class
        self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.__starttag_text = None   # raw text of the last start tag parsed
        self.rawdata = ''             # buffered, not-yet-processed input
        self.stack = []               # open-tag stack (managed by subclasses/finish_*)
        self.lasttag = '???'          # last start tag seen, for the <> shorthand
        self.nomoretags = 0           # 1 => treat all remaining input as data
        self.literal = 0              # 1 => literal (CDATA) mode
        _markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
    def setliteral(self, *args):
        """Enter literal mode (CDATA).

        Intended for derived classes only.  Extra positional arguments
        are accepted and ignored.
        """
        self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
    def close(self):
        """Handle the remaining data."""
        # end=1 forces goahead() to flush everything as if EOF followed.
        self.goahead(1)
    def error(self, message):
        # Central error hook: all parse errors funnel through here.
        raise SGMLParseError(message)
    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.nomoretags:
                # CDATA-until-EOF mode: everything left is plain data.
                self.handle_data(rawdata[i:n])
                i = n
                break
            # find the next '&' or '<'; everything before it is text
            match = interesting.search(rawdata, i)
            if match: j = match.start()
            else: j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        # in literal mode a start tag is ordinary data
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break          # incomplete: wait for more data
                    i = k
                    continue
                if rawdata.startswith("</", i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    i = k
                    self.literal = 0         # an end tag always leaves literal mode
                    continue
                if self.literal:
                    if n > (i + 1):
                        self.handle_data("<")
                        i = i+1
                    else:
                        # incomplete
                        break
                    continue
                if rawdata.startswith("<!--", i):
                    # Strictly speaking, a comment is --.*--
                    # within a declaration tag <!...>.
                    # This should be removed,
                    # and comments handled only in parse_declaration.
                    k = self.parse_comment(i)
                    if k < 0: break
                    i = k
                    continue
                if rawdata.startswith("<?", i):
                    k = self.parse_pi(i)
                    if k < 0: break
                    i = i+k                  # parse_pi returns a LENGTH, not an index
                    continue
                if rawdata.startswith("<!", i):
                    # This is some sort of declaration; in "HTML as
                    # deployed," this should only be the document type
                    # declaration ("<!DOCTYPE html...>").
                    k = self.parse_declaration(i)
                    if k < 0: break
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    self.handle_data(rawdata[i])
                    i = i+1
                    continue
                match = charref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_charref(name)
                    i = match.end(0)
                    # the ';' terminator is optional; if the ref ended on
                    # some other char, back up so that char is re-scanned
                    if rawdata[i-1] != ';': i = i-1
                    continue
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    i = match.end(0)
                    if rawdata[i-1] != ';': i = i-1
                    continue
            else:
                self.error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            match = incomplete.match(rawdata, i)
            if not match:
                self.handle_data(rawdata[i])
                i = i+1
                continue
            j = match.end(0)
            if j == n:
                break # Really incomplete
            self.handle_data(rawdata[i:j])
            i = j
        # end while
        if end and i < n:
            # forced EOF: flush whatever is left as plain data
            self.handle_data(rawdata[i:n])
            i = n
        # keep only the unconsumed tail for the next feed()/close()
        self.rawdata = rawdata[i:]
        # XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
    # Internal -- parse processing instr, return length or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<?':
            self.error('unexpected call to parse_pi()')
        match = piclose.search(rawdata, i+2)
        if not match:
            # no closing '>' buffered yet
            return -1
        j = match.start(0)
        # everything between '<?' and '>' is the PI payload
        self.handle_pi(rawdata[i+2: j])
        j = match.end(0)
        # NOTE: unlike parse_starttag/parse_endtag this returns a length
        # relative to i; the caller in goahead() does i = i + k.
        return j-i
    def get_starttag_text(self):
        # Return the raw text of the most recently parsed start tag
        # (set by parse_starttag; None before any tag has been parsed).
        return self.__starttag_text
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        start_pos = i
        rawdata = self.rawdata
        if shorttagopen.match(rawdata, i):
            # SGML shorthand: <tag/data/ == <tag>data</tag>
            # XXX Can data contain &... (entity or char refs)?
            # XXX Can data contain < or > (tag characters)?
            # XXX Can there be whitespace before the first /?
            match = shorttag.match(rawdata, i)
            if not match:
                return -1
            tag, data = match.group(1, 2)
            self.__starttag_text = '<%s/' % tag
            tag = tag.lower()
            k = match.end(0)
            self.finish_shorttag(tag, data)
            # record the raw '<tag/' text for get_starttag_text()
            self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
            return k
        # XXX The following should skip matching quotes (' or ")
        # As a shortcut way to exit, this isn't so bad, but shouldn't
        # be used to locate the actual end of the start tag since the
        # < or > characters may be embedded in an attribute value.
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        if rawdata[i:i+2] == '<>':
            # SGML shorthand: <> == <last open tag seen>
            k = j
            tag = self.lasttag
        else:
            match = tagfind.match(rawdata, i+1)
            if not match:
                self.error('unexpected call to parse_starttag')
            k = match.end(0)
            tag = rawdata[i+1:k].lower()
            self.lasttag = tag
        # collect name[=value] attribute pairs up to the closing bracket
        while k < j:
            match = attrfind.match(rawdata, k)
            if not match: break
            attrname, rest, attrvalue = match.group(1, 2, 3)
            if not rest:
                # attribute minimization: a bare name stands for name=name
                attrvalue = attrname
            else:
                if (attrvalue[:1] == "'" == attrvalue[-1:] or
                    attrvalue[:1] == '"' == attrvalue[-1:]):
                    # strip quotes
                    attrvalue = attrvalue[1:-1]
                # substitute entity/char refs inside the value
                attrvalue = self.entity_or_charref.sub(
                    self._convert_ref, attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = match.end(0)
        if rawdata[j] == '>':
            j = j+1
        self.__starttag_text = rawdata[start_pos:j]
        self.finish_starttag(tag, attrs)
        return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
    """Parse an end tag (</tag>) starting at index *i*.

    Dispatches to finish_endtag() and returns the index just past the
    tag, or -1 if the closing bracket has not arrived yet.
    """
    buf = self.rawdata
    match = endbracket.search(buf, i+1)
    if match is None:
        return -1
    j = match.start(0)
    tag = buf[i+2:j].strip().lower()
    if buf[j] == '>':
        j = j + 1
    self.finish_endtag(tag)
    return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
    """Emit the start tag, its character data, and the end tag, in order."""
    self.finish_starttag(tag, [])
    self.handle_data(data)
    self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
    """Dispatch a completed start tag to its start_*/do_* handler.

    start_<tag> marks a balanced element (pushed on the stack, returns 1);
    do_<tag> marks an open-only element (returns 0); anything else goes to
    unknown_starttag() and returns -1.
    """
    method = getattr(self, 'start_' + tag, None)
    if method is not None:
        self.stack.append(tag)
        self.handle_starttag(tag, method, attrs)
        return 1
    method = getattr(self, 'do_' + tag, None)
    if method is not None:
        self.handle_starttag(tag, method, attrs)
        return 0
    self.unknown_starttag(tag, attrs)
    return -1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
    """Close *tag*, implicitly closing any elements opened after it.

    An empty tag ("</>") closes the most recently opened element; a tag
    not on the open-element stack is reported as unbalanced (or unknown
    if it has no end_* handler).
    """
    if not tag:
        # </> shorthand: close the innermost open element.
        found = len(self.stack) - 1
        if found < 0:
            self.unknown_endtag(tag)
            return
    else:
        if tag not in self.stack:
            # Not open at all: unknown if no handler exists, otherwise
            # a genuinely unbalanced close tag.
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                self.unknown_endtag(tag)
            else:
                self.report_unbalanced(tag)
            return
        # Find the OUTERMOST occurrence of tag on the stack.
        found = len(self.stack)
        for i in range(found):
            if self.stack[i] == tag: found = i
    # Pop (and dispatch end handlers for) everything above `found`.
    while len(self.stack) > found:
        tag = self.stack[-1]
        try:
            method = getattr(self, 'end_' + tag)
        except AttributeError:
            method = None
        if method:
            self.handle_endtag(tag, method)
        else:
            self.unknown_endtag(tag)
        del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
    """Invoke the tag-specific start handler with the attribute list."""
    method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
    """Invoke the tag-specific end handler (takes no arguments)."""
    method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
    """Print a diagnostic for a close tag with no matching open tag."""
    if not self.verbose:
        return
    print('*** Unbalanced </' + tag + '>')
    print('*** Stack:', self.stack)
def convert_charref(self, name):
    """Convert character reference, may be overridden.

    Only decimal references in the ASCII range (0..127) are converted;
    anything else yields None so the caller can fall back.
    """
    try:
        codepoint = int(name)
    except ValueError:
        return None
    if codepoint < 0 or codepoint > 127:
        return None
    return self.convert_codepoint(codepoint)
def convert_codepoint(self, codepoint):
    """Map a numeric code point to its character; override to customize."""
    return chr(codepoint)
def handle_charref(self, name):
    """Handle character reference, no need to override."""
    replacement = self.convert_charref(name)
    if replacement is not None:
        self.handle_data(replacement)
    else:
        self.unknown_charref(name)
# Definition of entities -- derived classes may override
# Maps entity names (without '&'/';') to their replacement text.
entitydefs = \
    {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
    """Convert entity references.

    As an alternative to overriding this method; one can tailor the
    results by setting up the self.entitydefs mapping appropriately.
    Returns None when the entity is unknown.
    """
    table = self.entitydefs
    if name not in table:
        return None
    return table[name]
def handle_entityref(self, name):
    """Handle entity references, no need to override."""
    replacement = self.convert_entityref(name)
    if replacement is not None:
        self.handle_data(replacement)
    else:
        self.unknown_entityref(name)
# Example -- handle data, should be overridden
def handle_data(self, data):
    """Receive a run of character data; subclasses override to collect it."""
    pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
    """Receive the text of a comment (without the <!-- --> delimiters)."""
    pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
    """Receive the contents of a <!...> declaration."""
    pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
    """Receive the contents of a <?...> processing instruction."""
    pass
# To be overridden -- handlers for unknown objects
# Default no-op fallbacks invoked when no specific handler matches.
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
    """Diagnostic parser that prints every event it receives.

    Character data is buffered in self.testdata and flushed (printed)
    whenever its repr grows past ~70 characters or another event arrives.
    """
    def __init__(self, verbose=0):
        self.testdata = ""
        SGMLParser.__init__(self, verbose)
    def handle_data(self, data):
        self.testdata = self.testdata + data
        if len(repr(self.testdata)) >= 70:
            self.flush()
    def flush(self):
        # Print and reset any buffered character data.
        data = self.testdata
        if data:
            self.testdata = ""
            print('data:', repr(data))
    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        if len(r) > 68:
            # Abbreviate long comments: first 32 chars ... last 32 chars.
            r = r[:32] + '...' + r[-32:]
        print('comment:', r)
    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print('start tag: <' + tag + '>')
        else:
            print('start tag: <' + tag, end=' ')
            for name, value in attrs:
                print(name + '=' + '"' + value + '"', end=' ')
            print('>')
    def unknown_endtag(self, tag):
        self.flush()
        print('end tag: </' + tag + '>')
    def unknown_entityref(self, ref):
        self.flush()
        print('*** unknown entity ref: &' + ref + ';')
    def unknown_charref(self, ref):
        self.flush()
        print('*** unknown char ref: &#' + ref + ';')
    def unknown_decl(self, data):
        self.flush()
        print('*** unknown decl: [' + data + ']')
    def close(self):
        SGMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line driver: parse a file (or stdin) one character at a time.

    With '-s' as the first argument the silent SGMLParser is used instead
    of the printing TestSGMLParser.  Defaults to 'test.html'; '-' reads
    from stdin.  Feeding char-by-char stresses incremental parsing.
    """
    import sys
    if args is None:
        args = sys.argv[1:]
    klass = TestSGMLParser
    if args and args[0] == '-s':
        args = args[1:]
        klass = SGMLParser
    filename = args[0] if args else 'test.html'
    if filename == '-':
        fp = sys.stdin
    else:
        try:
            fp = open(filename, 'r')
        except IOError as msg:
            print(filename, ":", msg)
            sys.exit(1)
    data = fp.read()
    if fp is not sys.stdin:
        fp.close()
    parser = klass()
    for ch in data:
        parser.feed(ch)
    parser.close()
# Run the command-line test driver when executed as a script.
if __name__ == '__main__':
    test()
| Python |
#!/usr/bin/python
import optparse
import sys
# Note that you have to install the unittest2 package, first.
import unittest2
# optparse usage text; %prog is replaced with the script name at runtime.
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
    """Discover and run all *_test.py suites under *test_path*.

    Puts the App Engine SDK at the front of sys.path first so that
    dev_appserver (and the SDK's bundled libraries) can be imported.
    """
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    loader = unittest2.loader.TestLoader()
    suite = loader.discover(test_path, pattern='*_test.py')
    runner = unittest2.TextTestRunner(verbosity=2)
    runner.run(suite)
# Entry point: expects exactly two positional arguments (SDK path, test path).
if __name__ == '__main__':
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    if len(args) != 2:
        # Parenthesized print is valid under both Python 2 and Python 3
        # (the original bare print statement is a SyntaxError on Python 3).
        print('Error: Exactly 2 arguments required.')
        parser.print_help()
        sys.exit(1)
    SDK_PATH = args[0]
    TEST_PATH = args[1]
    main(SDK_PATH, TEST_PATH)
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.4 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.0.1"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# Module-wide debug flag; when true, parsing progress is written to stderr.
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
except:
base64 = binascii = None
def _s2bytes(s):
# Convert a UTF-8 str to bytes if the interpreter is Python 3
try:
return bytes(s, 'utf8')
except (NameError, TypeError):
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same (TypeError)
return s
def _l2bytes(l):
# Convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is not str:
# In Python 2.6 and above, this call won't raise an exception
# but it will return bytes([65]) as '[65]' instead of 'A'
return bytes(l)
raise NameError
except NameError:
return ''.join(map(chr, l))
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# NOTE(review): 'mms' and 'svn' appear in both groups below; harmless
# duplicates for the membership tests this tuple is used for.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto',
    'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp',
    'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
    )
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, types, cgi, urllib, urllib2, datetime
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except:
BeautifulSoup = None
# ---------- don't touch these ----------
# Internal exception hierarchy: the first three signal character-encoding
# conditions discovered while sniffing the document; UndeclaredNamespace
# aborts strict parsing when a prefix has no namespace declaration.
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Monkey-patch sgmllib's token regexes to accept XML-ish names, treat only
# '<!' as special, and recognize hex character references.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
# Older sgmllib versions choke on angle brackets inside attribute values;
# detect that (endbracket matching ' <' at a nonzero offset) and replace
# endbracket with a smarter, quote-aware implementation.
if sgmllib.endbracket.search(' <').start(0):
    class EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, string, index=0):
            match = self.endbracket.match(string, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety.
                return EndBracketMatch(match)
            return None
    class EndBracketMatch:
        # Minimal match-object wrapper: sgmllib only calls .start(), and the
        # useful position for this pattern is the END of the match.
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)
    sgmllib.endbracket = EndBracketRegEx()
# Maps internal version codes to human-readable feed format names;
# '' means the format could not be determined.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
# Compatibility shim: use the builtin dict as the FeedParserDict base where
# available; on Python 2.1 fall back to UserDict and provide a dict()
#替换 -- NOTE(review): this deliberately shadows the builtin name `dict`
# with a pairs->dict constructor on Python 2.1 only.
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dict subclass that aliases legacy feed keys to their modern names.

    Lookups consult `keymap` so e.g. d['channel'] resolves to d['feed'];
    some keys ('category', 'enclosures', 'license', 'categories') are
    synthesized from other entries.  Values are also reachable as
    attributes (d.feed == d['feed']).
    NOTE(review): this block uses Python-2-only constructs
    (`types.ListType`, `has_key`, `raise E, msg`).
    """
    # Legacy-name -> canonical-name aliases; list values are tried in order.
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # Synthesized keys first.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # Enclosures are the rel='enclosure' links, minus the 'rel' key.
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'license':
            for link in UserDict.__getitem__(self, 'links'):
                if link['rel']=='license' and link.has_key('href'):
                    return link['href']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        # Then alias resolution: prefer the first alias that exists, then the
        # literal key, then the (possibly aliased) real key.
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.__contains__(self, k):
                    return UserDict.__getitem__(self, k)
        if UserDict.__contains__(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # Writes through an alias store under the canonical name.
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # True if key is an attribute OR a stored (possibly aliased) entry.
        try:
            return hasattr(self, key) or UserDict.__contains__(self, key)
        except AttributeError:
            return False
    # This alias prevents the 2to3 tool from changing the semantics of the
    # __contains__ function below and exhausting the maximum recursion depth
    __has_key = has_key
    def __getattr__(self, key):
        # Attribute access falls back to item access for non-underscore names.
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.__has_key(key)
def zopeCompatibilityHack():
    """Replace the global FeedParserDict with a plain-dict factory.

    Zope cannot handle the FeedParserDict class; callers that need Zope
    compatibility invoke this once to swap in a simple function that
    builds an ordinary dict.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Lazily-built EBCDIC->ASCII translation table (256-entry bytes map).
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string *s* to ASCII.

    The 256-entry table is built on first use and cached in the module
    global _ebcdic_to_ascii_map.
    """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        _ebcdic_to_ascii_map = _maketrans( \
            _l2bytes(range(256)), _l2bytes(emap))
    return s.translate(_ebcdic_to_ascii_map)
# Windows-1252 "smart" characters that commonly leak into feeds declared as
# ISO-8859-1: maps each cp1252 code point (0x80-0x9f range) to the Unicode
# character it actually represents.  (Python 2 only: uses unichr.)
_cp1252 = {
    unichr(128): unichr(8364), # euro sign
    unichr(130): unichr(8218), # single low-9 quotation mark
    unichr(131): unichr( 402), # latin small letter f with hook
    unichr(132): unichr(8222), # double low-9 quotation mark
    unichr(133): unichr(8230), # horizontal ellipsis
    unichr(134): unichr(8224), # dagger
    unichr(135): unichr(8225), # double dagger
    unichr(136): unichr( 710), # modifier letter circumflex accent
    unichr(137): unichr(8240), # per mille sign
    unichr(138): unichr( 352), # latin capital letter s with caron
    unichr(139): unichr(8249), # single left-pointing angle quotation mark
    unichr(140): unichr( 338), # latin capital ligature oe
    unichr(142): unichr( 381), # latin capital letter z with caron
    unichr(145): unichr(8216), # left single quotation mark
    unichr(146): unichr(8217), # right single quotation mark
    unichr(147): unichr(8220), # left double quotation mark
    unichr(148): unichr(8221), # right double quotation mark
    unichr(149): unichr(8226), # bullet
    unichr(150): unichr(8211), # en dash
    unichr(151): unichr(8212), # em dash
    unichr(152): unichr( 732), # small tilde
    unichr(153): unichr(8482), # trade mark sign
    unichr(154): unichr( 353), # latin small letter s with caron
    unichr(155): unichr(8250), # single right-pointing angle quotation mark
    unichr(156): unichr( 339), # latin small ligature oe
    unichr(158): unichr( 382), # latin small letter z with caron
    unichr(159): unichr( 376)} # latin capital letter y with diaeresis
# Collapses spurious slashes right after the scheme (e.g. 'http:////a').
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* against *base*, tolerating malformed inputs.

    If the plain join raises, the URI is percent-quoted part-by-part and
    retried.  (Python 2 urlparse/urllib API.)
    """
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        # NOTE(review): bare except is deliberate best-effort here; it retries
        # with each URL component percent-quoted.
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
# Known namespace URI -> canonical element-name prefix.  An empty prefix
# means elements from that namespace are treated as core feed elements.
namespaces = {'': '',
              'http://backend.userland.com/rss': '',
              'http://blogs.law.harvard.edu/tech/rss': '',
              'http://purl.org/rss/1.0/': '',
              'http://my.netscape.com/rdf/simple/0.9/': '',
              'http://example.com/newformat#': '',
              'http://example.com/necho': '',
              'http://purl.org/echo/': '',
              'uri/of/echo/namespace#': '',
              'http://purl.org/pie/': '',
              'http://purl.org/atom/ns#': '',
              'http://www.w3.org/2005/Atom': '',
              'http://purl.org/rss/1.0/modules/rss091#': '',
              'http://webns.net/mvcb/': 'admin',
              'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
              'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
              'http://media.tangent.org/rss/1.0/': 'audio',
              'http://backend.userland.com/blogChannelModule': 'blogChannel',
              'http://web.resource.org/cc/': 'cc',
              'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
              'http://purl.org/rss/1.0/modules/company': 'co',
              'http://purl.org/rss/1.0/modules/content/': 'content',
              'http://my.theinfo.org/changed/1.0/rss/': 'cp',
              'http://purl.org/dc/elements/1.1/': 'dc',
              'http://purl.org/dc/terms/': 'dcterms',
              'http://purl.org/rss/1.0/modules/email/': 'email',
              'http://purl.org/rss/1.0/modules/event/': 'ev',
              'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
              'http://freshmeat.net/rss/fm/': 'fm',
              'http://xmlns.com/foaf/0.1/': 'foaf',
              'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
              'http://postneo.com/icbm/': 'icbm',
              'http://purl.org/rss/1.0/modules/image/': 'image',
              'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
              'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
              'http://purl.org/rss/1.0/modules/link/': 'l',
              'http://search.yahoo.com/mrss': 'media',
              #Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
              'http://search.yahoo.com/mrss/': 'media',
              'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
              'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
              'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
              'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
              'http://purl.org/rss/1.0/modules/reference/': 'ref',
              'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
              'http://purl.org/rss/1.0/modules/search/': 'search',
              'http://purl.org/rss/1.0/modules/slash/': 'slash',
              'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
              'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
              'http://hacks.benhammersley.com/rss/streaming/': 'str',
              'http://purl.org/rss/1.0/modules/subscription/': 'sub',
              'http://purl.org/rss/1.0/modules/syndication/': 'sy',
              'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
              'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
              'http://purl.org/rss/1.0/modules/threading/': 'thr',
              'http://purl.org/rss/1.0/modules/textinput/': 'ti',
              'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
              'http://wellformedweb.org/commentAPI/': 'wfw',
              'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
              'http://www.w3.org/1999/xhtml': 'xhtml',
              'http://www.w3.org/1999/xlink': 'xlink',
              'http://www.w3.org/XML/1998/namespace': 'xml'
              }
# Lower-cased view of `namespaces`, filled lazily in __init__.
_matchnamespaces = {}
# Elements whose value may be a relative URI (resolved against xml:base).
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
# Elements whose embedded markup may contain relative URIs.
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
# Elements whose embedded markup must be sanitized before use.
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
# MIME types treated as HTML for sanitizing/relative-URI resolution.
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
    """Initialize per-document parser state.

    baseuri  -- initial xml:base for resolving relative URIs
    baselang -- initial xml:lang; also recorded as the feed language
    encoding -- character encoding used to decode byte strings
    """
    if _debug: sys.stderr.write('initializing FeedParser\n')
    if not self._matchnamespaces:
        # Build the lower-cased namespace table once (shared class attr).
        for k, v in self.namespaces.items():
            self._matchnamespaces[k.lower()] = v
    self.feeddata = FeedParserDict() # feed-level data
    self.encoding = encoding # character encoding
    self.entries = [] # list of entry-level data
    self.version = '' # feed type/version, see SUPPORTED_VERSIONS
    self.namespacesInUse = {} # dictionary of namespaces defined by the feed
    # the following are used internally to track state;
    # this is really out of control and should be refactored
    self.infeed = 0
    self.inentry = 0
    self.incontent = 0
    self.intextinput = 0
    self.inimage = 0
    self.inauthor = 0
    self.incontributor = 0
    self.inpublisher = 0
    self.insource = 0
    self.sourcedata = FeedParserDict()
    self.contentparams = FeedParserDict()
    self._summaryKey = None
    self.namespacemap = {}
    self.elementstack = []
    self.basestack = []
    self.langstack = []
    self.baseuri = baseuri or ''
    self.lang = baselang or None
    self.svgOK = 0
    self.hasTitle = 0
    if baselang:
        self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
    """Generic start-tag handler: normalizes attrs, tracks xml:base /
    xml:lang / namespaces, passes inline XHTML content through verbatim,
    and dispatches to a _start_<prefix><tag> handler when one exists.

    Fixes: (1) the entity un-escaping of attribute values had been garbled
    into a no-op replace('&','&'); restored replace('&amp;','&') per the
    comment below.  (2) replaced the Python-2-only '<>' operator with '!='
    (identical semantics, also valid on Python 3).
    """
    if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
    # normalize attrs
    attrs = [(k.lower(), v) for k, v in attrs]
    attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
    # the sgml parser doesn't handle entities in attributes, but
    # strict xml parsers do -- account for this difference
    if isinstance(self, _LooseFeedParser):
        attrs = [(k, v.replace('&amp;', '&')) for k, v in attrs]
    # track xml:base and xml:lang
    attrsD = dict(attrs)
    baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
    if type(baseuri) != type(u''):
        try:
            baseuri = unicode(baseuri, self.encoding)
        except:
            baseuri = unicode(baseuri, 'iso-8859-1')
    # ensure that self.baseuri is always an absolute URI that
    # uses a whitelisted URI scheme (e.g. not `javscript:`)
    if self.baseuri:
        self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
    else:
        self.baseuri = _urljoin(self.baseuri, baseuri)
    lang = attrsD.get('xml:lang', attrsD.get('lang'))
    if lang == '':
        # xml:lang could be explicitly set to '', we need to capture that
        lang = None
    elif lang is None:
        # if no xml:lang is specified, use parent lang
        lang = self.lang
    if lang:
        if tag in ('feed', 'rss', 'rdf:RDF'):
            self.feeddata['language'] = lang.replace('_','-')
    self.lang = lang
    self.basestack.append(self.baseuri)
    self.langstack.append(lang)
    # track namespaces
    for prefix, uri in attrs:
        if prefix.startswith('xmlns:'):
            self.trackNamespace(prefix[6:], uri)
        elif prefix == 'xmlns':
            self.trackNamespace(None, uri)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
        # element declared itself as escaped markup, but it isn't really
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        if tag.find(':') != -1:
            prefix, tag = tag.split(':', 1)
            namespace = self.namespacesInUse.get(prefix, '')
            if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrs.append(('xmlns',namespace))
            if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrs.append(('xmlns',namespace))
        if tag == 'svg': self.svgOK += 1
        return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
    # match namespaces
    if tag.find(':') != -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    # special hack for better tracking of empty textinput/image elements in illformed feeds
    if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
        self.intextinput = 0
    if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
        self.inimage = 0
    # call special handler (if defined) or default handler
    methodname = '_start_' + prefix + suffix
    try:
        method = getattr(self, methodname)
        return method(attrsD)
    except AttributeError:
        # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
        unknown_tag = prefix + suffix
        if len(attrsD) == 0:
            # No attributes so merge it into the encosing dictionary
            return self.push(unknown_tag, 1)
        else:
            # Has attributes so create it in its own dictionary
            context = self._getContext()
            context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
    """Generic end-tag handler: dispatches to _end_<prefix><tag> (or pops
    the element stack), echoes closing tags for inline XHTML content, and
    restores the enclosing xml:base / xml:lang.

    Fix: replaced the Python-2-only '<>' operator with '!=' (identical
    semantics, also valid on Python 3).
    """
    if _debug: sys.stderr.write('end %s\n' % tag)
    # match namespaces
    if tag.find(':') != -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    if suffix == 'svg' and self.svgOK: self.svgOK -= 1
    # call special handler (if defined) or default handler
    methodname = '_end_' + prefix + suffix
    try:
        if self.svgOK: raise AttributeError()
        method = getattr(self, methodname)
        method()
    except AttributeError:
        self.pop(prefix + suffix)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        # element declared itself as escaped markup, but it isn't really
        if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        tag = tag.split(':')[-1]
        self.handle_data('</%s>' % tag, escape=0)
    # track xml:base and xml:lang going out of scope
    if self.basestack:
        self.basestack.pop()
        if self.basestack and self.basestack[-1]:
            self.baseuri = self.basestack[-1]
    if self.langstack:
        self.langstack.pop()
        if self.langstack: # and (self.langstack[-1] is not None):
            self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        """Handle a named entity reference (e.g. ref='copy' for &copy;).

        The five predefined XML entities stay escaped; DTD-declared
        entities are expanded (recursing if they map to a numeric
        reference); other names are resolved through HTML's
        name2codepoint table or passed through verbatim.
        """
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        """Ignore comments; they carry no feed data."""
        # called for each comment, e.g. <!-- insert message here -->
        pass
    def handle_pi(self, text):
        """Ignore processing instructions; they carry no feed data."""
        # called for each processing instruction, e.g. <?instruction>
        pass
    def handle_decl(self, text):
        """Ignore markup declarations (e.g. <!DOCTYPE ...>)."""
        pass
    def parse_declaration(self, i):
        """Parse a declaration starting at index i; returns the index to resume at.

        Overrides the internal declaration handler so that CDATA blocks
        are captured as escaped character data instead of being parsed.
        """
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # declaration is truncated; stop at end of buffer
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration and infer the feed version from it.

        Well-known namespace URIs (RSS 0.90/1.0, Atom 1.0) set
        self.version when it is not yet known; recognized namespaces are
        remapped to canonical prefixes via _matchnamespaces.
        """
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: map the document prefix to our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        """Hook for subclasses; the strict parser needs no entity decoding."""
        return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
    def push(self, element, expectingText):
        """Open an element: stack [name, expectingText flag, text pieces]."""
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close an element: join, decode, sanitize and store its text.

        Pops the matching entry off the element stack, assembles the
        accumulated text pieces, then applies (in order) xhtml <div>
        unwrapping, base64 decoding, relative-URI resolution, entity
        decoding, microformat extraction, HTML sanitization and charset
        repair, before storing the result in the entry or feed dict.
        Returns the final output string.
        """
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #    <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0: break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    pieces = pieces[1:-1]
        # Ensure each piece is a str for Python 3
        for (i, v) in enumerate(pieces):
            if not isinstance(v, basestring):
                pieces[i] = v.decode('utf-8')
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # nominally plain text that clearly contains markup is reclassified
        if self.lookslikehtml(output):
            self.contentparams['type']='text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard
        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass
        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # only the first title wins (see hasTitle bookkeeping)
        if element == 'title' and self.hasTitle:
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&amp;", '&', output)
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Open a content-bearing element and record its content parameters.

        Tracks nesting depth in self.incontent and captures type,
        language, xml:base and base64-ness for the element being opened.
        """
        self.incontent += 1
        if self.lang: self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        """Close a content-bearing element; returns the processed text."""
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored.  This is an attempt to detect
    # the most common cases.  As false positives often result in silent
    # data loss, this function errs on the conservative side.
    def lookslikehtml(self, s):
        """Heuristically decide whether nominally-plain text is really HTML.

        Returns 1 only when the string contains a close tag or entity
        reference AND every tag/entity is a known-valid HTML one.
        NOTE(review): under Python 3 filter() returns a lazy iterator
        (always truthy), which would invert these checks — verify if
        this code ever runs on Python 3.
        """
        if self.version.startswith('atom'): return
        if self.contentparams.get('type','text/html') != 'text/plain': return
        # must have a close tag or a entity reference to qualify
        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
            re.findall(r'</?(\w+)',s)): return
        # all entities must have been defined as valid HTML entities
        from htmlentitydefs import entitydefs
        if filter(lambda e: e not in entitydefs.keys(),
            re.findall(r'&(\w+);',s)): return
        return 1
    def _mapToStandardPrefix(self, name):
        """Rewrite 'prefix:suffix' names to use the canonical namespace prefix."""
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        """Handle <dlhottitles>: marks the feed as Netscape 'hot RSS'."""
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        """Handle <channel>: enter feed scope and process CDF attributes."""
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        """Translate CDF 'lastmod'/'href' attributes into synthetic
        modified/link element events."""
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
    def _end_channel(self):
        """Leave feed scope."""
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        """Handle <image>: enter image scope (feed-level images get their
        own sub-dict; an image's <title> must not shadow the feed title)."""
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.hasTitle = 0
        self.push('image', 0)
    def _end_image(self):
        """Leave image scope."""
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        """Handle <textinput>/<textInput>: enter textinput scope."""
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.hasTitle = 0
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        """Leave textinput scope."""
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        """Handle <author> (and Dublin Core / iTunes equivalents): enter
        author scope and start a fresh author record."""
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        """Leave author scope and reconcile author/author_detail fields."""
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        """Handle <itunes:owner>: treated as the feed's publisher."""
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        """Leave publisher scope and reconcile publisher detail fields."""
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
    def _end_contributor(self):
        """Leave contributor scope."""
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        """Handle <dc:contributor>: like _start_contributor, but the text
        content is the contributor's name."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        """Leave dc:contributor scope, storing the name via _end_name."""
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        """Handle <name> (also itunes:name)."""
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        """Close <name>, routing the value to whichever scope is active
        (publisher, author, contributor or textinput)."""
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        """Handle <width> (image width in pixels)."""
        self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
    def _start_height(self, attrsD):
        """Handle <height> (image height in pixels)."""
        self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        """Handle <url>/<homepage>/<uri>: all are stored as 'href'."""
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        """Close <url>/<homepage>/<uri>, attaching the href to the
        active author or contributor."""
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        """Handle <email> (also itunes:email)."""
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        """Close <email>, routing the address to the active publisher,
        author or contributor."""
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
    def _getContext(self):
        """Return the dict results should currently be written into:
        source data, image dict, textinput dict, current entry, or the
        top-level feed data — in that precedence order."""
        if self.insource:
            context = self.sourcedata
        elif self.inimage and self.feeddata.has_key('image'):
            context = self.feeddata['image']
        elif self.intextinput:
            context = self.feeddata['textinput']
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context
    def _save_author(self, key, value, prefix='author'):
        """Store one author/publisher detail field and mirror it into the
        last entry of the 'authors' list."""
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
    def _save_contributor(self, key, value):
        """Store one detail field on the most recent contributor record."""
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        """Handle <subtitle>/<tagline>/<itunes:subtitle> as plain-text content."""
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        """Close subtitle/tagline content."""
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        """Handle <rights>/<dc:rights>/<copyright> as plain-text content."""
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        """Close rights/copyright content."""
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.hasTitle = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
    def _end_item(self):
        """Leave entry scope."""
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        """Handle <dc:language>/<language>."""
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        """Close <dc:language>/<language>: its text becomes the current lang."""
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        """Handle <dc:publisher>/<webMaster>."""
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        """Close publisher and reconcile publisher detail fields."""
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        """Handle <published>/<dcterms:issued>/<issued>."""
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        """Close published date: store both raw and parsed forms."""
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        """Handle <updated> and the many equivalent modification-date elements."""
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        """Close updated date: store both raw and parsed forms."""
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        """Handle <created>/<dcterms:created>."""
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        """Close created date: store both raw and parsed forms."""
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        """Handle <expirationDate>."""
        self.push('expired', 1)
    def _end_expirationdate(self):
        """Close <expirationDate>: store the parsed expiry time."""
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    def _start_cc_license(self, attrsD):
        """Handle <cc:license>: record it as a rel='license' link."""
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        """Handle <creativeCommons:license>."""
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        """Close <creativeCommons:license>: convert the text URL into a
        rel='license' link and drop the temporary 'license' key."""
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
    def _start_category(self, attrsD):
        """Handle <category>/<dc:subject>/<keywords>: record the tag from
        attributes, then collect any element text for _end_category."""
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _start_media_category(self, attrsD):
        """Handle <media:category>: default to Yahoo's MRSS category scheme."""
        attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
    def _end_itunes_keywords(self):
        """Close <itunes:keywords>: each whitespace-separated word becomes a tag."""
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        """Handle <itunes:category>: the 'text' attribute is the tag term."""
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        """Close a category element: element text fills in the term of the
        last attribute-only tag, or becomes a new tag of its own."""
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    _end_media_category = _end_category
    def _start_cloud(self, attrsD):
        """Handle <cloud>: store its attributes verbatim."""
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Handle <link>: normalize rel/type/href, append to the 'links'
        list, and promote an alternate HTML link to the flat 'link' key.
        Element text is only expected when no href attribute is present."""
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
    def _start_guid(self, attrsD):
        """Handle <guid>: isPermaLink defaults to 'true' per RSS 2.0."""
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        """Close <guid>: store the id, and treat it as the entry link when
        it is a permalink and no explicit link exists yet."""
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        """Handle <title>: inside embedded SVG it is plain unknown markup,
        otherwise it opens plain-text content."""
        if self.svgOK: return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
def _end_title(self):
if self.svgOK: return
value = self.popContent('title')
if not value: return
context = self._getContext()
self.hasTitle = 1
_end_dc_title = _end_title
def _end_media_title(self):
hasTitle = self.hasTitle
self._end_title()
self.hasTitle = hasTitle
    def _start_description(self, attrsD):
        """Handle <description>: if a summary already exists, treat this
        as full content instead; otherwise open HTML content."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        """Handle <abstract> as a plain-text description."""
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        """Close <description>, routing to content when _start_description
        redirected it there."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        """Handle <info> (Atom 0.3) as plain-text content."""
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        """Close <info> content."""
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        """Handle <generator>: attributes become generator_detail, with
        any url/uri normalized to a resolved href."""
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        """Close <generator>: element text becomes the generator name."""
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        """Handle <admin:generatorAgent>: the rdf:resource URL is stored
        as both generator text and generator_detail href."""
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        """Handle <admin:errorReportsTo>: store its rdf:resource URL."""
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        """Handle <summary>: if a summary already exists, treat this one
        as full content; otherwise open plain-text summary content."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        """Close <summary>, routing to content when _start_summary
        redirected it there."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        """Handle <enclosure>: stored as a rel='enclosure' link."""
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel']='enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        """Handle <source>: enter source scope; an RSS 2.0 url attribute
        becomes the source href."""
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        self.hasTitle = 0
    def _end_source(self):
        """Leave source scope: element text is the source title; the
        accumulated source data is copied onto the current context."""
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        """Handle Atom <content>: plain text by default; a src attribute
        (out-of-line content) is recorded in contentparams."""
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        """Handle <prodLink> (hot RSS) as HTML content."""
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        """Handle <body>/<xhtml:body> as inline XHTML content."""
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        """Handle <content:encoded>/<fullitem> as HTML content."""
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        """Close content: textual content types are also mirrored into
        the summary field."""
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        """Handle <itunes:image>/<itunes:link>: the href attribute becomes
        the context's image."""
        self.push('itunes_image', 0)
        if attrsD.get('href'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        """Close <itunes:block>: 'yes' -> 1, anything else -> 0."""
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        """Close <itunes:explicit>: map 'yes'/'clean'/other to True/False/None."""
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        # ('yes' selects index 2, 'clean' index 1, anything else index 0.)
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_content(self, attrsD):
        """Handle <media:content>: append its attributes to media_content."""
        context = self._getContext()
        context.setdefault('media_content', [])
        context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        """Handle <media:thumbnail>: record its attributes; element text
        (if any) is collected as a url for _end_media_thumbnail."""
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
    def _end_media_thumbnail(self):
        """Close <media:thumbnail>: non-empty element text becomes the
        thumbnail url unless the url attribute already set it."""
        url = self.pop('url')
        context = self._getContext()
        if url != None and len(url.strip()) != 0:
            if not context['media_thumbnail'][-1].has_key('url'):
                context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        """Handle <media:player>: attributes become media_player."""
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        """Close <media:player>: element text becomes its 'content'."""
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        """Handle <newLocation> (feed has moved)."""
        self.push('newlocation', 1)
    def _end_newlocation(self):
        """Close <newLocation>: store the feed's new URL (feed level only),
        made absolute and safety-checked."""
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
if uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:'+prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
    """SAX callback for character data.

    Routes the text through the common data handler shared with the
    loose (SGML-based) parser.
    """
    self.handle_data(text)
def endElementNS(self, name, qname):
    """SAX callback for a closing tag.

    Reduces the (namespace, localname) pair to the lowercase, possibly
    prefix-qualified name that unknown_endtag expects.
    """
    namespace, localname = name
    lowernamespace = str(namespace or '').lower()
    givenprefix = ''
    if qname and qname.find(':') > 0:
        givenprefix = qname.split(':')[0]
    prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
    if prefix:
        localname = prefix + ':' + localname
    elif namespace and not qname:
        # Expat supplies no qname; recover a prefix from the namespace
        # declarations currently in scope.
        for inuseprefix, inuseuri in self.namespacesInUse.items():
            if inuseprefix and inuseuri == namespace:
                localname = inuseprefix + ':' + localname
                break
    self.unknown_endtag(str(localname).lower())
def error(self, exc):
    """SAX error handler: remember the exception and flag the feed as
    not well-formed ("bozo"), but keep parsing."""
    self.exc = exc
    self.bozo = 1
def fatalError(self, exc):
    """SAX fatal error handler: record it like a recoverable error,
    then abort parsing by re-raising the exception."""
    self.error(exc)
    raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that regenerates the markup it parses.

    Reconstructed fragments accumulate in self.pieces; output() joins
    them back into a single string.  Subclasses override the handler
    callbacks (unknown_starttag, handle_data, ...) to filter, sanitize,
    or rewrite markup on the way through.

    NOTE(review): several entity strings in this block had been mangled
    by an HTML-entity decoding pass over the source (e.g. '&lt;' had
    collapsed to '<', making some .replace() calls no-ops and one line
    a syntax error); they are restored here to the upstream feedparser
    spellings.
    """
    # characters that need special handling inside attribute values
    special = re.compile('''[<>'"]''')
    # an ampersand that does NOT begin a character or entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: serialized as <tag /> with no end tag
    elements_no_end_tag = [
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ]

    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # called by SGMLParser.__init__, so self.pieces always exists
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # expand an XML-style <tag/> into markup sgmllib can handle
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def parse_starttag(self, i):
        j = sgmllib.SGMLParser.parse_starttag(self, i)
        if self._type == 'application/xhtml+xml':
            # in XHTML a self-closing start tag also ends the element
            if j > 2 and self.rawdata[j-2:j] == '/>':
                self.unknown_endtag(self.lasttag)
        return j

    def feed(self, data):
        """Feed markup to the parser after pre-massaging it."""
        # escape bogus <! declarations (anything but DOCTYPE/comment/CDATA)
        # so sgmllib doesn't choke on them
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # normalize numeric quote references that sgmllib mishandles
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            # Python 3: poison the encoding so downstream decoding fails loudly
            self.encoding = self.encoding + '_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and type(data) == type(u''):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants: lowercase names,
        # dedupe, lowercase rel/type values, and sort for stable output
        if not attrs: return attrs
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs = ''
        if attrs:
            for key, value in attrs:
                # re-escape markup-significant characters in the value
                value = value.replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:], 16))
        else:
            value = unichr(int(ref))
        if value in _cp1252.keys():
            # remap Windows-1252 code points to the reference of the
            # character they actually denote
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # unknown entity: emit the bare '&ref' unterminated, as found
            self.pieces.append('&%(ref)s' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # replacement for sgmllib's name scanner that also accepts ':'
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            # unparseable name: emit the buffer as data and give up
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1

    def convert_charref(self, name):
        # keep character references as-is instead of resolving them
        return '&#%s;' % name

    def convert_entityref(self, name):
        # keep entity references as-is instead of resolving them
        return '&%s;' % name

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])

    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Feed parser used when the XML is not well-formed.

    Combines the feed-handling mixin with the forgiving sgmllib-based
    HTML processor.

    NOTE(review): the entity strings in decodeEntities/strattrs had
    been collapsed by an HTML-entity decoding pass over the source,
    turning every .replace() into a no-op; restored to the upstream
    feedparser spellings.
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities = entities

    def decodeEntities(self, element, data):
        """Normalize numeric character references for the five XML
        specials to their named entities, then fully decode them when
        the content type is not XML."""
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # non-XML content: decode to the literal characters
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

    def strattrs(self, attrs):
        # serialize (name, value) pairs, escaping embedded double quotes
        return ''.join([' %s="%s"' % (n, v.replace('"', '&quot;')) for n, v in attrs])
class _MicroformatsParser:
    """Extract microformat data (hCard, rel-tag, rel-enclosure, XFN)
    from an HTML document parsed with BeautifulSoup.

    Results accumulate in self.tags, self.enclosures, self.xfn and
    self.vcard (a folded vCard 3.0 string built by findVCards).
    """
    # symbolic property-value types consumed by getPropertyValue()
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5
    # rel values defined by the XFN (XHTML Friends Network) profile
    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
    # file extensions treated as "probably downloadable" by isProbablyDownloadable
    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
    def __init__(self, data, baseuri, encoding):
        """Parse *data* (HTML) with BeautifulSoup and reset result lists."""
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None
    def vcardEscape(self, s):
        """Backslash-escape the vCard special characters , ; and newline."""
        if type(s) in (type(''), type(u'')):
            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
        return s
    def vcardFold(self, s):
        """Fold a vCard content line at 75 octets (RFC 2426 line folding):
        continuation lines start with a single space and allow 74 chars."""
        s = re.sub(';+$', '', s)
        sFolded = ''
        iMax = 75
        sPrefix = ''
        while len(s) > iMax:
            sFolded += sPrefix + s[:iMax] + '\n'
            s = s[iMax:]
            sPrefix = ' '
            iMax = 74
        sFolded += sPrefix + s
        return sFolded
    def normalize(self, s):
        """Collapse runs of whitespace to single spaces and trim."""
        return re.sub(r'\s+', ' ', s).strip()
    def unique(self, aList):
        """Return aList with duplicates removed, preserving order."""
        results = []
        for element in aList:
            if element not in results:
                results.append(element)
        return results
    def toISO8601(self, dt):
        """Format a time tuple as an ISO 8601 UTC timestamp string."""
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        """Find microformat property *sProperty* under *elmRoot* and coerce
        it to *iPropertyType* (STRING/DATE/URI/NODE/EMAIL).

        With bAllowMultiple a list of values is returned; otherwise the
        first value (or a type-appropriate empty value).  bAutoEscape runs
        string results through vcardEscape.
        """
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        # match the property name as a whole word inside the class attribute
        propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
        if bAllowMultiple and (iPropertyType != self.NODE):
            # multiple values may be expressed as <li> items in a list
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            # 'value' may be implied by a <pre> block (whitespace preserved)
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
        if not bFound:
            # fall back to the root element itself
            snapResults = [elmRoot]
            bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # exclude vcards nested inside other vcards
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # type-appropriate empty result
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return None
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return None
            else: return None
        arValues = []
        for elmResult in arResults:
            sValue = None
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # EMAIL: prefer the mailto: target of an <a>, minus any query
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # <abbr title="..."> carries the machine-readable value
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # URI: pull from the element's natural URI-bearing attribute
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                # last resort: strip tags from the rendered contents
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        """Serialize every hCard under *elmRoot* as vCard 3.0 text.

        With bAgentParsing, elmRoot itself is treated as the (embedded
        AGENT) card instead of searching for class="vcard" nodes.
        """
        sVCards = ''
        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]
        for elmCard in arCards:
            arLines = []
            # helper: emit PROP:value for a single string property
            def processSingleString(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or u''
            # helper: emit a URI property, handling data: URIs and types
            def processSingleURI(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        # inline data: URI -> base64 payload with media type
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
            # helper: emit PROP;TYPE=...:value entries (tel, email, label)
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    # Completely remove the agent element from the parse tree
                    elmAgent.extract()
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
            # FN (full name)
            sFN = processSingleString('fn')
            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                             sGivenName + ';' +
                                             ','.join(arAdditionalNames) + ';' +
                                             ','.join(arHonorificPrefixes) + ';' +
                                             ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    # heuristics: trailing comma, single initial, or "X." suffix
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
            # PHOTO
            processSingleURI('photo')
            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                             sPostOfficeBox + ';' +
                                             sExtendedAddress + ';' +
                                             sStreetAddress + ';' +
                                             sLocality + ';' +
                                             sRegion + ';' +
                                             sPostalCode + ';' +
                                             sCountryName))
            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])
            # TEL (phone number)
            processTypeValue('tel', ['voice'])
            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])
            # MAILER
            processSingleString('mailer')
            # TZ (timezone)
            processSingleString('tz')
            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
            # TITLE
            processSingleString('title')
            # ROLE
            processSingleString('role')
            # LOGO
            processSingleURI('logo')
            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
            # NOTE
            processSingleString('note')
            # REV
            processSingleString('rev')
            # SOUND
            processSingleURI('sound')
            # UID
            processSingleString('uid')
            # URL
            processSingleURI('url')
            # CLASS
            processSingleString('class')
            # KEY
            processSingleURI('key')
            if arLines:
                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
                sVCards += u'\n'.join(arLines) + u'\n'
        return sVCards.strip()
    def isProbablyDownloadable(self, elm):
        """Heuristic: does this anchor look like a downloadable file
        (by declared media type, or by file extension in the path)?"""
        attrsD = elm.attrMap
        if not attrsD.has_key('href'): return 0
        linktype = attrsD.get('type', '').strip()
        if linktype.startswith('audio/') or \
           linktype.startswith('video/') or \
           (linktype.startswith('application/') and not linktype.endswith('xml')):
            return 1
        path = urlparse.urlparse(attrsD['href'])[2]
        if path.find('.') == -1: return 0
        fileext = path.split('.').pop().lower()
        return fileext in self.known_binary_extensions
    def findTags(self):
        """Collect rel="tag" links as FeedParserDict entries in self.tags."""
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
            href = elm.get('href')
            if not href: continue
            urlscheme, domain, path, params, query, fragment = \
                       urlparse.urlparse(_urljoin(self.baseuri, href))
            segments = path.split('/')
            # the tag term is the last non-empty path segment
            tag = segments.pop()
            if not tag:
                tag = segments.pop()
            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
            if not tagscheme.endswith('/'):
                tagscheme += '/'
            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
    def findEnclosures(self):
        """Collect rel="enclosure" (or probably-downloadable) links."""
        all = lambda x: 1
        enclosure_match = re.compile(r'\benclosure\b')
        for elm in self.document(all, {'href': re.compile(r'.+')}):
            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
            if elm.attrMap not in self.enclosures:
                self.enclosures.append(elm.attrMap)
                if elm.string and not elm.get('title'):
                    self.enclosures[-1]['title'] = elm.string
    def findXFN(self):
        """Collect links whose rel values include known XFN relationships."""
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
            rels = elm.get('rel', '').split()
            xfn_rels = []
            for rel in rels:
                if rel in self.known_xfn_relationships:
                    xfn_rels.append(rel)
            if xfn_rels:
                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Parse hCard/rel-tag/rel-enclosure/XFN microformats out of *htmlSource*.

    Returns a dict with 'tags', 'enclosures', 'xfn' and 'vcard' keys, or
    None when BeautifulSoup is unavailable or the source cannot be parsed.
    """
    if not BeautifulSoup:
        return
    if _debug:
        sys.stderr.write('entering _parseMicroformats\n')
    try:
        parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    except UnicodeEncodeError:
        # sgmllib throws this exception when performing lookups of tags
        # with non-ASCII characters in them.
        return
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {
        "tags": parser.tags,
        "enclosures": parser.enclosures,
        "xfn": parser.xfn,
        "vcard": parser.vcard,
    }
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites relative URIs against a base URI."""

    # (tag, attribute) pairs whose values may hold a relative URI
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # join against the document base, then vet the resulting scheme
        return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))

    def unknown_starttag(self, tag, attrs):
        if _debug:
            sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
        rewritten = []
        for key, value in self.normalize_attrs(attrs):
            if (tag, key) in self.relative_uris:
                # keep the original value when resolution yields an
                # empty (rejected) URI, matching the and/or idiom
                resolved = self.resolveURI(value)
                if resolved:
                    value = resolved
            rewritten.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, rewritten)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Run *htmlSource* through a _RelativeURIResolver and return the
    markup with relative URIs rewritten against *baseURI*."""
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join *base* and *rel* into an absolute URI, returning u'' unless
    the result uses a scheme in ACCEPTABLE_URI_SCHEMES.

    An empty whitelist disables filtering; with no *rel*, the base URI
    itself is vetted and returned (or rejected).
    """
    # empty whitelist: no filtering at all
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if not rel:
        # vet the base URI's scheme on its own; scheme-less URIs pass
        scheme = urlparse.urlparse(base)[0]
        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
            return base
        return u''
    joined = _urljoin(base, rel)
    if joined.strip().split(':', 1)[0] in ACCEPTABLE_URI_SCHEMES:
        return joined
    return u''
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang']
unacceptable_elements_with_end_tag = ['script', 'applet', 'style']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan']
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# chose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value: clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK: self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        """Return a sanitized copy of an inline CSS style attribute value.

        Strips url(...) values, rejects anything failing a conservative
        character gauntlet, then keeps only whitelisted properties and
        keywords.  Returns '' whenever the style cannot be preserved safely.
        """
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet: only permit characters/tokens that cannot break out of
        # a CSS declaration (quoted words, colors, simple parenthesized lists)
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        # if stripping all "prop: value;" declarations leaves residue, the
        # input is not pure CSS declarations -- reject it outright.
        # (This replaced a regexp that used re.match and was prone to
        # pathological back-tracking.)
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand families are allowed only when every keyword in
                # the value is whitelisted or matches the numeric-value regex
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                # SVG presentation properties only pass inside an <svg> subtree
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Run htmlSource through the HTML sanitizer and (optionally) Tidy.

    encoding/_type are forwarded to _HTMLSanitizer; returns the cleaned
    markup with any <body> wrapper stripped and CRLFs normalized.
    """
    p = _HTMLSanitizer(encoding, _type)
    # Escape CDATA section openers so their contents cannot smuggle raw
    # markup past the sanitizer.  (Bug fix: the previous code replaced the
    # marker with itself, which was a no-op.)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # this Tidy flavor is not installed; try the next one
                pass
        if _tidy:
            # Tidy wants bytes; round-trip unicode through UTF-8
            utf8 = isinstance(data, unicode)
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # strip the <body ...> wrapper Tidy adds around the fragment
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # Combined urllib2 handler: follows redirects, upgrades failed basic
    # auth to digest auth, and turns HTTP errors into file-like results
    # carrying a .status attribute instead of raising.
    def http_error_default(self, req, fp, code, msg, headers):
        # Treat any 3xx other than 304 as a redirect; for everything else
        # return the response body with the status code attached.
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # Only follow the redirect when a Location header is present;
        # otherwise hand the 3xx body back to the caller.
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # Same strategy as 302; the distinction matters only for caching.
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # other redirect-family codes reuse the 302 logic
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            # any failure below (no Authorization header, no realm, old
            # Python, missing base64) falls through to the default handler
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    if request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials from the URL; send them as a header
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()
        # iri support: punycode-encode non-ASCII host names
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8')
        except:
            # best-effort only; fall back to the raw URL on any failure
            pass
        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        # apply() is deprecated; unpack the handler list directly instead
        opener = urllib2.build_opener(*(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except:
        # not a readable file; fall through and treat the input as data
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Build a urllib2.Request with conditional-GET, auth, and compression headers.

    etag -> If-None-Match; modified (date string, datetime, or 9-tuple in
    GMT) -> If-Modified-Since; auth is a ready-made base64 basic-auth token.
    request_headers entries are added last and may override the above.
    """
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    # Bug fix: type(modified) == type('') missed unicode date strings,
    # which then fell through unparsed; accept any string type.
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
    if referrer:
        request.add_header('Referer', referrer)
    # advertise whichever decompression codecs are importable
    if gzip and zlib:
        request.add_header('Accept-encoding', 'gzip, deflate')
    elif gzip:
        request.add_header('Accept-encoding', 'gzip')
    elif zlib:
        request.add_header('Accept-encoding', 'deflate')
    else:
        request.add_header('Accept-encoding', '')
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a named-group regex; the optional time/zone
# suffix is appended to every date pattern.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# remove the comprehension loop variables from the module namespace
# (Python 2 list comprehensions leak them)
try:
    del tmpl
except NameError:
    pass
# pre-bind the compiled .match methods; tried in order in _parse_date_iso8601
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # try the templates greedily; first successful match wins
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    # missing year defaults to the current (GMT) year
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: fold the offset into hours/minutes
    # and let mktime normalize the result
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Unicode markers and regexes for Korean date formats; used by the
# OnBlog and Nate handlers below.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# 'YYYY<year> MM<month> DD<day> hh:mm:ss'
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
# 'YYYY-MM-DD <am|pm> h:m:s' (fields may be 1 or 2 digits)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    """Parse a string according to the OnBlog 8-bit date format."""
    match = _korean_onblog_date_re.match(dateString)
    if match is None:
        return
    # OnBlog timestamps are Korean local time (UTC+9); rebuild as W3DTF
    year, month, day, hour, minute, second = match.group(1, 2, 3, 4, 5, 6)
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % (year, month, day,
                                             hour, minute, second)
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    """Parse a string according to the Nate 8-bit date format."""
    match = _korean_nate_date_re.match(dateString)
    if match is None:
        return
    # convert the Korean AM/PM marker to zero-padded 24-hour time
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    hour = str(hour).zfill(2)
    # Nate timestamps are Korean local time (UTC+9); rebuild as W3DTF
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % (match.group(1), match.group(2),
                                             match.group(3), hour,
                                             match.group(6), match.group(7))
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# 'YYYY-MM-DD hh:mm:ss' with an optional fractional-seconds tail
_mssql_date_re = re.compile(
    '(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    """Parse a string according to the MS SQL date format."""
    match = _mssql_date_re.match(dateString)
    if match is None:
        return
    # NOTE(review): the hard-coded +09:00 offset treats these timestamps as
    # Korean local time -- presumably inherited from the Korean handlers;
    # confirm before changing.
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % match.group(1, 2, 3, 4, 5, 6)
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps Greek month/weekday abbreviations (unicode) to the English forms
# expected by the RFC822 parser; consumed by _parse_date_greek below.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# '<weekday>, DD <month> YYYY hh:mm:ss <zone>'
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # unrecognized Greek weekday/month name: not a Greek date after all.
        # (was a bare except, which also hid unrelated programming errors)
        return
    # rebuild as an English RFC822 date and delegate
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps Hungarian month names to two-digit month numbers; consumed by
# _parse_date_hungarian below.
# NOTE(review): several keys look misspelled ('febru\u00e1ri', 'm\u00e1ujus')
# -- verify against real-world feeds before "fixing" them.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }
# 'YYYY-<monthname>-D(D)Th(h):mm<+|->h(h):mm'
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except KeyError:
        # unrecognized Hungarian month name: not a Hungarian date after all.
        # (was a bare except, which also hid unrelated programming errors;
        # only the dict lookup can raise here)
        return
    # rebuild as W3DTF and delegate
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        # returns (year, month, day); (0, 0, 0) signals "unparseable"
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: start from a rough guess and
            # iteratively adjust until gmtime agrees on the day-of-year
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # returns (hours, minutes, seconds), defaulting to midnight
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset
    # date part: YYYY[-MM[-DD]] or YYYY-OOO (ordinal); separator must be
    # used consistently, enforced via the (?P=dsep) backreference
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # require the pattern to consume the whole string
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # convert local mktime back to GMT, then apply the declared offset
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    parts = dateString.split()
    # drop a leading day-of-week token ('Fri,' / 'Fri.' / 'friday')
    if parts[0][-1] in (',', '.') or parts[0].lower() in rfc822._daynames:
        del parts[0]
    if len(parts) == 4:
        zone = parts[3]
        plus = zone.find('+')
        if plus > 0:
            # split a fused 'hh:mm:ss+ZONE' tail into two tokens
            parts[3:] = [zone[:plus], zone[plus + 1:]]
        else:
            parts.append('')
        dateString = " ".join(parts)
    # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(parts) == 5 and parts[4].lower().startswith('etc/'):
        parts[4] = parts[4][4:]
        dateString = " ".join(parts)
    if len(parts) < 5:
        # date only: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
dow, year, month, day, hour, minute, second, tz = \
_my_date_pattern.search(aDateString).groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# make the Perforce format available to _parse_date()
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # Try each registered handler (most recently registered first) until one
    # returns a valid 9-tuple; handler exceptions are logged (when _debug)
    # and swallowed so a bad handler never breaks parsing.
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # every element must be int-coercible, else reject via exception
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    # no handler recognized the string
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    # accept either capitalization of the header key
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type')))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16BE with BOM (the != 00 00 guard excludes UTF-32 BOMs)
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # xml_data is now ASCII-compatible; look for the XML declaration
        xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # a declared multi-byte family encoding is less specific than what
        # we actually sniffed; trust the sniff
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # apply the RFC 3023 precedence rules described in the docstring
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    # some feeds claim to be gb2312 but are actually gb18030.
    # apparently MSIE and Firefox both do the following switch:
    if true_encoding.lower() == 'gb2312':
        true_encoding = 'gb18030'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); when the BOM contradicts the
    # declared encoding, trust the BOM and switch
    if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        # UTF-16BE BOM (the != 00 00 guard excludes UTF-32 BOMs)
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        # UTF-16LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
        # UTF-8 BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
        # UTF-32BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
        # UTF-32LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # decode with the (possibly corrected) encoding, then re-encode as UTF-8
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # rewrite (or insert) the XML declaration to claim utf-8
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    '''
    # Split the prolog (everything before the first element tag) from the
    # body; ENTITY/DOCTYPE declarations can only live in the prolog.
    # NOTE: the and/or idiom maps a match at offset 0 to -1, same as no
    # match -- preserved from the original.
    first_tag = re.search(_s2bytes('<\w'), data)
    split_at = (first_tag and first_tag.start() or -1) + 1
    head, data = data[:split_at], data[split_at:]
    # Collect and remove inline ENTITY declarations from the prolog.
    entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
    entity_results = entity_pattern.findall(head)
    head = entity_pattern.sub(_s2bytes(''), head)
    # Find the DOCTYPE; a Netscape DOCTYPE marks the feed as RSS 0.91.
    doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
    doctype_results = doctype_pattern.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if doctype.lower().count(_s2bytes('netscape')):
        version = 'rss091n'
    else:
        version = None
    # Re-emit only 'safe' inline entity definitions (simple name/value
    # pairs with no embedded entity references except numeric ones).
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        safe_pattern = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
        safe_entities = filter(lambda e: safe_pattern.match(e), entity_results)
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
    data = doctype_pattern.sub(replacement, head) + data
    # safe_pattern is only evaluated when replacement is non-empty, i.e.
    # exactly when the branch above defined it.
    return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}):
    '''Parse a feed from a URL, file, stream, or string.

    Returns a FeedParserDict with (at least) 'feed', 'entries', 'bozo' and
    'encoding' keys.  Errors never raise; they set result['bozo'] = 1 and
    store the exception in result['bozo_exception'].

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    NOTE(review): handlers/request_headers/response_headers use mutable
    defaults; they are only read (or rebound) in this function, never
    mutated in place, so the shared-default pitfall does not bite here.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    # fetch the raw document; any failure (network, I/O) is recorded as bozo
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # if feed is gzip-compressed, decompress it
    if f and data and 'headers' in result:
        if gzip and result['headers'].get('content-encoding') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage. Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and result['headers'].get('content-encoding') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if 'headers' in result:
        if 'etag' in result['headers'] or 'ETag' in result['headers']:
            etag = result['headers'].get('etag', result['headers'].get('ETag'))
            if etag:
                result['etag'] = etag
        if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']:
            modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified'))
            if modified:
                result['modified'] = _parse_date(modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    # flag (but do not reject) feeds served with a non-XML media type
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type') or http_headers.has_key('Content-type'):
            bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type'))
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    if data is not None:
        result['version'], data, entities = _stripDoctype(data)
    # ensure that baseuri is an absolute uri using an acceptable URI scheme
    contentloc = http_headers.get('content-location', http_headers.get('Content-Location', ''))
    href = result.get('href', '')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', http_headers.get('Content-Language', None))
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if data is None:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    # NOTE(review): the bare excepts in this cascade deliberately swallow
    # decode errors while cycling through candidate encodings.
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        # an encoding other than the declared one worked; record the override
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    # fall back to the regex-based loose parser on any strict-parse failure
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    """Base class for output serializers; stores the parse() results for
    subclasses, which render them via their write() method."""
    def __init__(self, results):
        # parse() result dict, read by subclass write() implementations
        self.results = results
class TextSerializer(Serializer):
    """Serializes parse() results as flat 'dotted.key=value' text lines."""
    def write(self, stream=sys.stdout):
        """Write the whole result tree to stream."""
        self._writer(stream, self.results, '')
    def _writer(self, stream, node, prefix):
        """Recursively emit node under the accumulated dotted prefix."""
        if not node: return
        if hasattr(node, 'keys'):
            # dict-like: recurse in sorted key order, skipping redundant keys
            for key in sorted(node.keys()):
                if key in ('description', 'link'): continue
                if node.has_key(key + '_detail'): continue
                if node.has_key(key + '_parsed'): continue
                self._writer(stream, node[key], prefix + key + '.')
        elif type(node) == types.ListType:
            # lists become prefix[index]. entries
            for position, element in enumerate(node):
                self._writer(stream, element, prefix[:-1] + '[' + str(position) + '].')
        else:
            try:
                text = str(node).encode('utf-8')
                text = text.replace('\\', '\\\\').replace('\r', '').replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(text)
                stream.write('\n')
            except:
                # values that cannot be stringified/encoded are skipped
                pass
class PprintSerializer(Serializer):
    """Pretty-prints the raw results dict via the stdlib pprint module."""
    def write(self, stream=sys.stdout):
        """Write the feed href (when present), then the pretty-printed results."""
        from pprint import pprint
        if self.results.has_key('href'):
            stream.write(self.results['href'] + '\n\n')
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line driver: parse each URL/file argument and dump the result.
    # optparse may be missing on very old Pythons; fall back to bare argv.
    try:
        from optparse import OptionParser
    except:
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        # no optparse: every argv entry is a URL; options keep their defaults
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        class _Options:
            # minimal stand-in mirroring the optparse attributes read below
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    zopeCompatibilityHack()
    # 'text' -> TextSerializer, 'pprint' -> PprintSerializer; any other
    # format name falls back to the Serializer base class
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
# | Python |  (file-concatenation marker: vendored feedparser ends above; application code follows)
import clients
import pshb_client
import feedparser
import logging
import urllib
import os
import zlib
from datetime import datetime
from datetime import timedelta
from django.utils import simplejson
from google.appengine.api import app_identity
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# PubSubHubbub topic: the public dailymile entries Atom feed this app
# subscribes to (see MainPage.get / BroadcastPage.post).
TOPIC_URL = 'http://www.dailymile.com/entries.atom'
class People():
    """Fetches dailymile person profiles, memcached for 24 hours."""
    def get_person(self, person_url):
        """Return the decoded person JSON for person_url, or None on HTTP error."""
        cached = memcache.get(person_url)
        if cached:
            return cached
        response = urlfetch.fetch(person_url + '.json')
        if response.status_code != 200:
            # failed fetches are not cached, so they are retried next time
            return None
        person = simplejson.loads(response.content)
        memcache.add(person_url, person, 24 * 60 * 60)
        return person
class Locations():
    """Geocodes free-form location strings via the Google Maps API, memcached."""
    MAP_URL_TEMPLATE = 'http://maps.googleapis.com/maps/api/geocode/json?%s'
    def get_latlong(self, location):
        """Return the {lat, lng} dict for location, or None on failure."""
        cached = memcache.get(location)
        if cached:
            return cached
        if isinstance(location, unicode):
            location = location.encode('utf-8')
        query = urllib.urlencode({'address': location,
                                  'sensor': 'false'})
        response = urlfetch.fetch(Locations.MAP_URL_TEMPLATE % query)
        if response.status_code != 200:
            return None
        geocode_data = simplejson.loads(response.content)
        latlong = None
        if geocode_data['status'] == 'OK':
            latlong = geocode_data['results'][0]['geometry']['location']
        # NOTE(review): a failed geocode stores None, which memcache.get
        # cannot distinguish from a miss, so it is re-attempted next call.
        memcache.add(location, latlong)
        return latlong
class Messages():
    """Builds the client-facing message payloads from feed entries."""
    def messages_from_entries(self, entries):
        """Convert workout entries into map messages and cache the batch."""
        messages = []
        for entry in entries:
            # only dailymile workout entries are turned into messages
            if entry['tags'][0]['term'] != 'http://schemas.dailymile.com/entry#workout':
                continue
            author_url = entry['author_detail']['href']
            person = People().get_person(author_url)
            latlong = None
            if person and 'location' in person:
                latlong = Locations().get_latlong(person['location'])
            messages.append({
                'entry': entry['title'],
                'item': {
                    'person_url': author_url,
                    'person_name': entry['author_detail']['name'],
                    'title': entry['title_detail']['value'],
                    'url': entry['links'][0]['href'],
                    'img': entry['links'][2]['href'],
                },
                'latlng': latlong,
                'id': entry['id'],
            })
        memcache.set('latest-unfiltered-messages', messages)
        return messages
    def get_initial_messages(self):
        """Return the most recently cached batch as JSON ('[]' when empty)."""
        return simplejson.dumps(memcache.get('latest-unfiltered-messages') or [])
    def get_mock_messages(self):
        """Parse the baked-in MOCK_FEED and return its messages as JSON."""
        return simplejson.dumps(self.messages_from_entries(feedparser.parse(MOCK_FEED)['entries']))
class SubCallbackPage(pshb_client.SubCallbackPage):
    """PubSubHubbub callback handler; trims entries to the fields we use."""
    def strip_entry(self, entry):
        """Keep only the entry fields consumed by Messages.messages_from_entries."""
        kept_fields = ('id', 'title', 'author_detail', 'title_detail',
                       'links', 'tags')
        return dict((field, entry[field]) for field in kept_fields)
class BroadcastPage(webapp.RequestHandler):
    # Internal endpoint ('/newdata'): receives a zlib-compressed JSON list of
    # stripped feed entries and pushes the derived messages to all clients.
    def post(self):
        """Decompress + decode the entry batch, broadcast it, and drop the
        hub subscription when no clients remain."""
        entries = simplejson.loads(zlib.decompress(self.request.body))
        messages = Messages().messages_from_entries(entries)
        if clients.update_clients(TOPIC_URL, messages) == 0:
            # nobody is listening any more -- unsubscribe from the hub
            hostname = app_identity.get_default_version_hostname()
            pshb_client.unsubscribe(TOPIC_URL, 'http://' + hostname + '/subcb',
                                    'http://www.pubsubhubbub.com',
                                    'tokentokentoken')
class MockPage(webapp.RequestHandler):
    # Dev-only helper ('/mockmockmock'): POSTs the baked-in MOCK_FEED to the
    # local subscription callback to simulate a hub delivery.
    def get(self):
        urlfetch.fetch(url='http://localhost:8080/subcb', payload=MOCK_FEED, method=urlfetch.POST)
class MainPage(webapp.RequestHandler):
    """Renders index.html and ensures the visitor has a channel token."""
    def get(self):
        hostname = app_identity.get_default_version_hostname()
        # (re)subscribe to the feed hub on every page view
        pshb_client.subscribe(TOPIC_URL, 'http://' + hostname + '/subcb',
                              'http://www.pubsubhubbub.com',
                              'tokentokentoken')
        # reuse the cookie token unless the 'nt' query parameter forces a new one
        reuse_cookie = (not self.request.get('nt')) and ('token' in self.request.cookies)
        if reuse_cookie:
            token = self.request.cookies['token']
        else:
            # provision a new client + channel token, persisted via cookies
            (cid, token) = clients.add_client(TOPIC_URL)
            logging.warning('Created client: %s' % cid)
            expiration = (datetime.utcnow() + clients.TOKEN_EXPIRATION).strftime("%a, %d %b %Y %H:%M:%S GMT")
            self.response.headers.add_header('Set-Cookie', 'token=%s; expires=%s' % (token, expiration))
            self.response.headers.add_header('Set-Cookie', 'cid=%s; expires=%s' % (cid, expiration))
            logging.warning('Created token: %s, expires %s' % (token, expiration))
        if self.request.get('mock'):
            initial_messages = Messages().get_mock_messages()
        else:
            initial_messages = Messages().get_initial_messages()
        initial_messages = initial_messages or '[]'
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(path, {'token': token, 'initial_messages': initial_messages}))
class ChannelConnectedPage(webapp.RequestHandler):
    """Handles App Engine channel-connect callbacks."""
    def post(self):
        client_id = self.request.get('from')
        logging.info('Channel connected: %s' % client_id)
        clients.connect_client(client_id)
class ChannelDisconnectedPage(webapp.RequestHandler):
    """Handles App Engine channel-disconnect callbacks."""
    def post(self):
        client_id = self.request.get('from')
        logging.info('Channel disconnected: %s' % client_id)
        clients.disconnect_client(client_id)
# URL routing table for the webapp application; debug=True sends tracebacks
# to the client on unhandled errors.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/_ah/channel/connected/', ChannelConnectedPage),
     ('/_ah/channel/disconnected/', ChannelDisconnectedPage),
     ('/mockmockmock', MockPage),
     ('/newdata', BroadcastPage),
     ('/subcb', SubCallbackPage)],
    debug=True)
def main():
    """Run the WSGI application (App Engine webapp entry point)."""
    run_wsgi_app(application)
MOCK_FEED = """
<?xml version="1.0" encoding="UTF-8"?>
<feed xml:lang="en-US" xmlns="http://www.w3.org/2005/Atom" xmlns:activity="http://activitystrea.ms/spec/1.0/" xmlns:media="http://example.com/to-be-confirmed" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>tag:www.dailymile.com,2005:/entries</id>
<link rel="alternate" type="text/html" href="http://www.dailymile.com"/>
<link rel="self" type="application/atom+xml" href="http://www.dailymile.com/entries.atom"/>
<title>dailymile Public Feed</title>
<updated>2011-02-25T20:29:57Z</updated>
<generator uri="http://www.dailymile.com/">dailymile</generator>
<icon>http://www.dailymile.com/favicon.ico</icon>
<link rel="hub" href="http://pubsubhubbub.appspot.com/"/>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578775</id>
<published>2011-02-25T12:29:57-08:00</published>
<updated>2011-02-25T12:29:57-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/NatalieA/entries/5578775"/>
<title type="text">Natalie posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/NatalieA/entries/5578775#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578775</id>
<title type="text">Natalie ran for 40 hours</title>
<published>2011-02-25T12:29:57-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling good">good</span>
<a href="/people/NatalieA/entries/5578775" class="workout-title">gym</a>
<span class="workout-time">40:00</span>
<div class="entry-description">
<p>20 min run, 6 x 1:00 sprints, 10 min run. treadmills are not as fun as running outside...</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span><span class="workout-feeling good">good</span>
<a href="/people/NatalieA/entries/5578775" class="workout-title">gym</a>
<span class="workout-time">40:00</span>
<div class="entry-description">
<p>20 min run, 6 x 1:00 sprints, 10 min run. treadmills are not as fun as running outside...</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Natalie</name>
<uri>http://www.dailymile.com/people/NatalieA</uri>
<link rel="photo" type="image/jpeg" href="http://s1.dmimg.com/pictures/users/51747/1266334372_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/NatalieA"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578774</id>
<published>2011-02-25T12:29:56-08:00</published>
<updated>2011-02-25T12:29:56-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/gillygirl/entries/5578774"/>
<title type="text">Leigh Ann posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/gillygirl/entries/5578774#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578774</id>
<title type="text">Leigh Ann did a fitness workout for 1 hour</title>
<published>2011-02-25T12:29:56-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling great">great</span>
<a href="/people/gillygirl/entries/5578774" class="workout-title">Ellipitical machine workout</a>
<span class="workout-time">01:00</span>
<div class="entry-description">
<p>At least I can rely on my ellipitical machine when I need a little escape. I started listening to music again and it is giving my the motivation I need to get through some workouts.</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Fitness</span><span class="workout-feeling great">great</span>
<a href="/people/gillygirl/entries/5578774" class="workout-title">Ellipitical machine workout</a>
<span class="workout-time">01:00</span>
<div class="entry-description">
<p>At least I can rely on my ellipitical machine when I need a little escape. I started listening to music again and it is giving my the motivation I need to get through some workouts.</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Leigh Ann G.</name>
<uri>http://www.dailymile.com/people/gillygirl</uri>
<link rel="photo" type="image/jpeg" href="http://s1.dmimg.com/pictures/users/111853/1287936213_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/gillygirl"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578773</id>
<published>2011-02-25T12:29:55-08:00</published>
<updated>2011-02-25T12:29:55-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/SharonG5/entries/5578773"/>
<title type="text">Sharon posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/SharonG5/entries/5578773#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578773</id>
<title type="text">Sharon walked 2 sec</title>
<published>2011-02-25T12:29:55-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<a href="/people/SharonG5/entries/5578773" class="workout-title">Golf Course Upper Route, Walk</a>
<div class="entry-description">
<p>Started Walk at 12:29 PM, <a href="http://j.mp/fLfumC" rel="nofollow" target="_blank">http://j.mp/fLfumC</a>, Walkmeter will speak your messages to me.</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Walking</span>
<a href="/people/SharonG5/entries/5578773" class="workout-title">Golf Course Upper Route, Walk</a>
<div class="entry-description">
<p>Started Walk at 12:29 PM, <a href="http://j.mp/fLfumC" rel="nofollow" target="_blank">http://j.mp/fLfumC</a>, Walkmeter will speak your messages to me.</p>
</div> </div>
</content>
<source>
<generator uri="http://www.abvio.com/walkmeter">Walkmeter</generator>
</source>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Sharon G.</name>
<uri>http://www.dailymile.com/people/SharonG5</uri>
<link rel="photo" type="image/jpeg" href="http://www.dailymile.com/images/defaults/user_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/SharonG5"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578772</id>
<published>2011-02-25T12:29:54-08:00</published>
<updated>2011-02-25T12:29:54-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/bojo/entries/5578772"/>
<title type="text">Bojo posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/bojo/entries/5578772#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578772</id>
<title type="text">Bojo ran 6.1 miles in 56 mins</title>
<published>2011-02-25T12:29:54-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling great">great</span>
<a href="/people/bojo/entries/5578772" class="workout-title">park loops</a>
<span class="workout-distance">6.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:57</span>
<span class="workout-pace">09:18 pace</span>
<div class="entry-description">
<p>thought i had a nice break in the rain then the skies just busted open along with hurricane winds at 1.7. stood under awning for a few til it was just drizzling. not a bad run, just interesting....</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span><span class="workout-feeling great">great</span>
<a href="/people/bojo/entries/5578772" class="workout-title">park loops</a>
<span class="workout-distance">6.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:57</span>
<span class="workout-pace">09:18 pace</span>
<div class="entry-description">
<p>thought i had a nice break in the rain then the skies just busted open along with hurricane winds at 1.7. stood under awning for a few til it was just drizzling. not a bad run, just interesting....</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Bojo</name>
<uri>http://www.dailymile.com/people/bojo</uri>
<link rel="photo" type="image/jpeg" href="http://s3.dmimg.com/pictures/users/87363/1273944150_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/bojo"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578770</id>
<published>2011-02-25T12:29:43-08:00</published>
<updated>2011-02-25T12:29:43-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/Run_Pablo_Run/entries/5578770"/>
<title type="text">Paul posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/Run_Pablo_Run/entries/5578770#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578770</id>
<title type="text">Paul ran 1 mile in 6 mins and 40 secs</title>
<published>2011-02-25T12:29:43-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling good">good</span>
<a href="/people/Run_Pablo_Run/entries/5578770" class="workout-title">Treadmill</a>
<span class="workout-distance">1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:06:40</span>
<span class="workout-pace">06:40 pace</span>
<div class="entry-description">
<p>Quick warmup run for working out at the gym.</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span><span class="workout-feeling good">good</span>
<a href="/people/Run_Pablo_Run/entries/5578770" class="workout-title">Treadmill</a>
<span class="workout-distance">1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:06:40</span>
<span class="workout-pace">06:40 pace</span>
<div class="entry-description">
<p>Quick warmup run for working out at the gym.</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Paul G.</name>
<uri>http://www.dailymile.com/people/Run_Pablo_Run</uri>
<link rel="photo" type="image/jpeg" href="http://s1.dmimg.com/pictures/users/195158/1297273873_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/Run_Pablo_Run"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578767</id>
<published>2011-02-25T12:29:33-08:00</published>
<updated>2011-02-25T12:29:33-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/KieraC/entries/5578767"/>
<title type="text">Kiera posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/KieraC/entries/5578767#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578767</id>
<title type="text">Kiera did a fitness workout for 20 mins</title>
<published>2011-02-25T12:29:33-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling good">good</span>
<a href="/people/KieraC/entries/5578767" class="workout-title">my stairway</a>
<span class="workout-time">00:20</span>
<div class="entry-description">
<div class="preview_text"><p>finally put our 'stair master' of a house to work for me...with 2 sick kids at home I knew I wasn't going to make it to the Y or even around the block...and with them in front of the tv a wii worko<span class="ellipsis">...</span> <a href="#" onclick="$(this).up('div.preview_text').hide().next().show(); return false;">read more</a></p></div><div class="full_text" style="display: none"><p>finally put our 'stair master' of a house to work for me...with 2 sick kids at home I knew I wasn't going to make it to the Y or even around the block...and with them in front of the tv a wii workout wasn't going to happen either...so I spent 15 minutes doing our stairs...kicked my bootah.</p></div>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Fitness</span><span class="workout-feeling good">good</span>
<a href="/people/KieraC/entries/5578767" class="workout-title">my stairway</a>
<span class="workout-time">00:20</span>
<div class="entry-description">
<div class="preview_text"><p>finally put our 'stair master' of a house to work for me...with 2 sick kids at home I knew I wasn't going to make it to the Y or even around the block...and with them in front of the tv a wii worko<span class="ellipsis">...</span> <a href="#" onclick="$(this).up('div.preview_text').hide().next().show(); return false;">read more</a></p></div><div class="full_text" style="display: none"><p>finally put our 'stair master' of a house to work for me...with 2 sick kids at home I knew I wasn't going to make it to the Y or even around the block...and with them in front of the tv a wii workout wasn't going to happen either...so I spent 15 minutes doing our stairs...kicked my bootah.</p></div>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Kiera C.</name>
<uri>http://www.dailymile.com/people/KieraC</uri>
<link rel="photo" type="image/jpeg" href="http://s1.dmimg.com/pictures/users/189831/1296256103_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/KieraC"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578766</id>
<published>2011-02-25T12:29:26-08:00</published>
<updated>2011-02-25T12:29:37-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/SharonG5/entries/5578766"/>
<title type="text">Sharon posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/SharonG5/entries/5578766#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578766</id>
<title type="text">Sharon walked 0.01 miles 11 sec</title>
<published>2011-02-25T12:29:26-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<a href="/people/SharonG5/entries/5578766" class="workout-title">Walk</a>
<span class="workout-distance">0.01
<span class="workout-distance-units">mi</span></span>
<span class="workout-pace">18:19 pace</span>
<div class="entry-description">
<p>Stopped Walk at 12:29 PM, <a href="http://j.mp/fLfumC" rel="nofollow" target="_blank">http://j.mp/fLfumC</a>.</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Walking</span>
<a href="/people/SharonG5/entries/5578766" class="workout-title">Walk</a>
<span class="workout-distance">0.01
<span class="workout-distance-units">mi</span></span>
<span class="workout-pace">18:19 pace</span>
<div class="entry-description">
<p>Stopped Walk at 12:29 PM, <a href="http://j.mp/fLfumC" rel="nofollow" target="_blank">http://j.mp/fLfumC</a>.</p>
</div> </div>
</content>
<source>
<generator uri="http://www.abvio.com/walkmeter">Walkmeter</generator>
</source>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Sharon G.</name>
<uri>http://www.dailymile.com/people/SharonG5</uri>
<link rel="photo" type="image/jpeg" href="http://www.dailymile.com/images/defaults/user_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/SharonG5"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578765</id>
<published>2011-02-25T12:29:22-08:00</published>
<updated>2011-02-25T12:29:22-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/ShaylaD/entries/5578765"/>
<title type="text">Shayla posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/ShaylaD/entries/5578765#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578765</id>
<title type="text">Shayla ran 5.1 miles in 41 mins</title>
<published>2011-02-25T12:29:22-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-distance">5.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:41</span>
<span class="workout-pace">08:05 pace</span>
<div class="entry-description">
<p>Felt pretty good out there today. It was very pleasant running out on the Capitol City Trail but a little more breezy on the way back in. Nonetheless, the sunshine was nice and there was no wicked ... <a href="/people/ShaylaD/entries/5578765">read more</a></p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span>
<span class="workout-distance">5.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:41</span>
<span class="workout-pace">08:05 pace</span>
<div class="entry-description">
<p>Felt pretty good out there today. It was very pleasant running out on the Capitol City Trail but a little more breezy on the way back in. Nonetheless, the sunshine was nice and there was no wicked ... <a href="/people/ShaylaD/entries/5578765">read more</a></p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Shayla D.</name>
<uri>http://www.dailymile.com/people/ShaylaD</uri>
<link rel="photo" type="image/jpeg" href="http://s2.dmimg.com/pictures/users/17701/1285814622_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/ShaylaD"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578763</id>
<published>2011-02-25T12:29:05-08:00</published>
<updated>2011-02-25T12:29:05-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/markleeman/entries/5578763"/>
<title type="text">Mark posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/markleeman/entries/5578763#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578763</id>
<title type="text">Mark ran 5 miles in 37 mins</title>
<published>2011-02-25T12:29:05-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-distance">5
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:37</span>
<span class="workout-pace">07:25 pace</span>
<div class="entry-description">
<p></p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span>
<span class="workout-distance">5
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:37</span>
<span class="workout-pace">07:25 pace</span>
<div class="entry-description">
<p></p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Mark L.</name>
<uri>http://www.dailymile.com/people/markleeman</uri>
<link rel="photo" type="image/jpeg" href="http://s2.dmimg.com/pictures/users/43971/1296338285_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/markleeman"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578762</id>
<published>2011-02-25T12:29:05-08:00</published>
<updated>2011-02-25T12:29:42-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/LindaB8/entries/5578762"/>
<title type="text">Linda posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/LindaB8/entries/5578762#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578762</id>
<title type="text">Linda ran 3.1 miles in 45 mins</title>
<published>2011-02-25T12:29:05-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<span class="workout-feeling good">good</span>
<span class="workout-distance">3.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:45</span>
<span class="workout-pace">14:30 pace</span>
<div class="entry-description">
<p></p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span><span class="workout-feeling good">good</span>
<span class="workout-distance">3.1
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">00:45</span>
<span class="workout-pace">14:30 pace</span>
<div class="entry-description">
<p></p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Linda B.</name>
<uri>http://www.dailymile.com/people/LindaB8</uri>
<link rel="photo" type="image/jpeg" href="http://s2.dmimg.com/pictures/users/173194/1297541798_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/LindaB8"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578761</id>
<published>2011-02-25T12:29:00-08:00</published>
<updated>2011-02-25T12:29:00-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/AndrewP3/entries/5578761"/>
<title type="text">Andrew posted a workout</title>
<category term="http://schemas.dailymile.com/entry#workout"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/AndrewP3/entries/5578761#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:workout/5578761</id>
<title type="text">Andrew ran 7.02 miles in 71 mins</title>
<published>2011-02-25T12:29:00-08:00</published>
<activity:object-type>http://activitystrea.ms/schema/1.0/workout</activity:object-type>
<content type="xhtml">
<a href="/people/AndrewP3/entries/5578761" class="workout-title">5 mi Tempo + 1 mi Warm Up + 1...</a>
<span class="workout-distance">7.02
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">01:12</span>
<span class="workout-pace">10:13 pace</span>
<div class="entry-description">
<p>5 mi Tempo + 1 mi Warm Up + 1 mi Cool Down (7 mi)</p>
</div> </content>
</activity:object>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<span class="workout-activity-type">Running</span>
<a href="/people/AndrewP3/entries/5578761" class="workout-title">5 mi Tempo + 1 mi Warm Up + 1...</a>
<span class="workout-distance">7.02
<span class="workout-distance-units">mi</span></span>
<span class="workout-time">01:12</span>
<span class="workout-pace">10:13 pace</span>
<div class="entry-description">
<p>5 mi Tempo + 1 mi Warm Up + 1 mi Cool Down (7 mi)</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Andrew P.</name>
<uri>http://www.dailymile.com/people/AndrewP3</uri>
<link rel="photo" type="image/jpeg" href="http://s3.dmimg.com/pictures/users/152533/1288127105_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/AndrewP3"/>
</author>
</entry>
<entry>
<id>tag:www.dailymile.com,2010:/entries/5578760</id>
<published>2011-02-25T12:28:46-08:00</published>
<updated>2011-02-25T12:28:46-08:00</updated>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/TheresaL3/entries/5578760"/>
<title type="text">Theresa posted an image</title>
<category term="http://schemas.dailymile.com/entry#image"/>
<link rel="replies" type="applicaton/xhtml+xml" thr:count="0" href="http://www.dailymile.com/people/TheresaL3/entries/5578760#comments"/>
<activity:verb>http://activitystrea.ms/schema/1.0/post/</activity:verb>
<activity:object>
<id>tag:www.dailymile.com,2010:image/5578760</id>
<title type="text">Theresa shared a photo</title>
<published>2011-02-25T12:28:46-08:00</published>
<caption>My new plates! :)</caption>
<activity:object-type>http://activitystrea.ms/schema/1.0/image</activity:object-type>
<content type="xhtml">
<div class="image-container">
<a href="/people/TheresaL3/entries/5578760"><img alt="Shared Photo" src="http://media.dailymile.com/photos/117283/e48586325164a31e72e4f5a7fa886942.jpg" /></a>
</div>
<div class="entry-description">
<p>My new plates! :)</p>
</div> </content>
</activity:object>
<link rel="enclosure" type="image/jpeg" href="http://media.dailymile.com/photos/117283/e48586325164a31e72e4f5a7fa886942.jpg" media:width="520" media:height="273" length="218171"/>
<link rel="preview" type="image/jpeg" href="http://media.dailymile.com/photos/117283/e48586325164a31e72e4f5a7fa886942.jpg" media:width="75" media:height="75"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<div class="image-container">
<a href="/people/TheresaL3/entries/5578760"><img alt="Shared Photo" src="http://media.dailymile.com/photos/117283/e48586325164a31e72e4f5a7fa886942.jpg" /></a>
</div>
<div class="entry-description">
<p>My new plates! :)</p>
</div> </div>
</content>
<author>
<activity:object-type>http://activitystrea.ms/schema/1.0/person</activity:object-type>
<name>Theresa L.</name>
<uri>http://www.dailymile.com/people/TheresaL3</uri>
<link rel="photo" type="image/jpeg" href="http://s1.dmimg.com/pictures/users/157089/1289227138_avatar.jpg"/>
<link rel="alternate" type="text/html" href="http://www.dailymile.com/people/TheresaL3"/>
</author>
</entry>
</feed>
"""
# Script entry point; main() is expected to be defined earlier in this
# module (above the embedded sample feed data, not visible here).
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
import optparse
import sys
# Note that you have to install the unittest2 package, first.
import unittest2
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
    """Run every *_test.py module under test_path against the given SDK.

    Puts the SDK on sys.path first so that dev_appserver (and the App
    Engine libraries it wires up) can be imported.
    """
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    loader = unittest2.loader.TestLoader()
    tests = loader.discover(test_path, pattern='*_test.py')
    runner = unittest2.TextTestRunner(verbosity=2)
    runner.run(tests)
if __name__ == '__main__':
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    # Both positional arguments (SDK path and test path) are mandatory.
    if len(args) != 2:
        print 'Error: Exactly 2 arguments required.'
        parser.print_help()
        sys.exit(1)
    SDK_PATH = args[0]
    TEST_PATH = args[1]
    main(SDK_PATH, TEST_PATH)
| Python |
import client_model
import feedparser
import logging
from datetime import datetime
from datetime import timedelta
from django.utils import simplejson
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.api import memcache
# Channel API tokens expire after two hours.
TOKEN_EXPIRATION = timedelta(hours = 2)
def add_client(feed):
    """Create and persist a new Client subscribed to `feed`.

    Returns a (client_id, channel_token) tuple, where client_id is the
    string form of the new entity's datastore key id.
    """
    new_client = client_model.Client()
    new_client.feeds = [feed]
    db.put(new_client)
    client_id = str(new_client.key().id())
    token = channel.create_channel(client_id)
    return (client_id, token)
def set_client_connect_state(cid, connect_state):
    """Persist the connected/disconnected flag for the client with id `cid`."""
    logging.info('Looking up client %s' % cid)
    entity = client_model.Client.get_by_id(int(cid))
    entity.connected = connect_state
    entity.put()
def connect_client(cid):
    # Channel API 'connected' callback: mark the client as reachable.
    set_client_connect_state(cid, True)
def disconnect_client(cid):
    # Channel API 'disconnected' callback: mark the client as unreachable.
    set_client_connect_state(cid, False)
def get_memcache_id(clientid, feed, message):
    """Build the per-client, per-feed memcache dedup key for `message`."""
    return '.'.join([clientid, feed, message['id']])
def send_filtered_messages(clientid, feed, messages):
    """Send messages to a client, doing a best-effort elimination of dupes.

    Memcache is used as a seen-set: a message whose dedup key is already
    cached is skipped. This is best-effort only -- memcache entries can be
    evicted, and the get/add pair is not atomic.
    """
    messages_to_send = []
    for message in messages:
        # Renamed from `id`, which shadowed the builtin of the same name.
        cache_key = get_memcache_id(clientid, feed, message)
        if memcache.get(cache_key):
            continue
        memcache.add(cache_key, 's')
        messages_to_send.append(message)
    if messages_to_send:
        payload = simplejson.dumps(messages_to_send)
        logging.debug("Sending (%s): %s" % (clientid, payload))
        channel.send_message(clientid, payload)
def broadcast_messages(feed, messages):
    """Broadcast the given message list to all known clients.

    Args:
      messages: A list of objects to be sent to the clients. These messages
        will be JSON-encoded before sending. Each message object must have an
        'id' field, used to eliminate duplicates.

    Returns:
      The number of clients still registered after expired ones are removed.
    """
    q = client_model.Client.all()
    connected_clients = 0
    total_clients = 0
    for client in q:
        total_clients += 1
        # Channel tokens only live for TOKEN_EXPIRATION (two hours); a client
        # older than that can no longer receive messages, so delete it.
        if datetime.utcnow() - client.created > TOKEN_EXPIRATION:
            logging.debug('Removing expired client: %s' % str(client.created))
            client.delete()
            total_clients -= 1
        elif client.connected:
            connected_clients += 1
            logging.debug('Sending message')
            send_filtered_messages(str(client.key().id()), feed, messages)
        else:
            logging.debug('Skipping disconnected client %s' % client.created)
    logging.debug('Connected clients: %d' % connected_clients)
    logging.debug('Total clients: %d' % total_clients)
    return total_clients
def update_clients(feed, messages, client=None):
    """Deliver `messages` to one client, or broadcast when no client given.

    Returns the number of clients targeted.
    """
    if not client:
        return broadcast_messages(feed, messages)
    send_filtered_messages(client, feed, messages)
    return 1
| Python |
import feedparser
import logging
import urllib
import zlib
from django.utils import simplejson
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
class SubCallbackPage(webapp.RequestHandler):
    """PubSubHubbub subscriber callback endpoint.

    GET answers the hub's subscription-verification challenge; POST receives
    pushed feed updates, dedupes them, and queues them for processing.
    """

    def get(self):
        # Hub verification: echo hub.challenge back as plain text.
        if self.request.get('hub.challenge'):
            logging.debug('cb %s' % self.request.get('hub.challenge'))
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.out.write(self.request.get('hub.challenge'))

    def strip_entry(self, entry):
        # Hook for trimming entries before forwarding; currently a no-op.
        return entry

    def get_payload(self):
        """Do a first-pass removal of messages we already know about."""
        feed = feedparser.parse(self.request.body)
        entries = feed['entries']
        entries_to_send = []
        for entry in entries:
            # memcache acts as a best-effort seen-set keyed by entry id.
            if not memcache.get(entry['id']):
                memcache.set(entry['id'], 1)
                entries_to_send.append(self.strip_entry(entry))
        return simplejson.dumps(entries_to_send)

    def post(self):
        # Compress the deduped JSON payload and hand it to a task-queue worker.
        taskqueue.add(url='/newdata', payload=zlib.compress(self.get_payload()))
        self.response.out.write('ok')
def set_subscribe_state(topic_url, callback_url, hub_url, secret, mode):
    """Send a subscribe/unsubscribe request for `topic_url` to a hub.

    Args:
      topic_url: The feed URL to (un)subscribe to.
      callback_url: Our subscriber callback; falls back to the historical
        hard-coded /subcb URL when empty.
      hub_url: The hub endpoint; falls back to the public reference hub.
      secret: Currently unused.  NOTE(review): the PubSubHubbub hub.secret
        parameter is never sent -- confirm whether that is intentional.
      mode: 'subscribe' or 'unsubscribe'.

    Fix: callback_url and hub_url were previously accepted but silently
    ignored in favor of hard-coded values; they are now honored when given.
    """
    if not callback_url:
        hostname = app_identity.get_default_version_hostname()
        callback_url = ('http://' + hostname +
                        '/subcb?url=http://www.dailymile.com/entries.atom')
    post_fields = {
        'hub.callback': callback_url,
        'hub.mode': mode,
        'hub.topic': topic_url,
        'hub.verify': 'async',
        'hub.verify_token': 'tokentokentoken'
    }
    url = hub_url or 'http://pubsubhubbub.appspot.com'
    response = urlfetch.fetch(url, method=urlfetch.POST,
                              payload=urllib.urlencode(post_fields))
    logging.debug('%s (%s): %d: %s' % (url, str(post_fields), response.status_code, response.content))
def subscribe(topic_url, callback_url, hub_url, secret):
    """Ask the hub to start pushing updates for topic_url to our callback."""
    logging.debug('Subscribing with callback %s' % callback_url)
    set_subscribe_state(topic_url, callback_url, hub_url, secret, 'subscribe')
def unsubscribe(topic_url, callback_url, hub_url, secret):
    """Ask the hub to stop pushing updates for topic_url."""
    logging.debug('Unsubscribing.')
    set_subscribe_state(topic_url, callback_url, hub_url, secret, 'unsubscribe')
| Python |
from google.appengine.ext import db
class Client(db.Model):
    """A record of a client connection.

    NOTE(review): elsewhere in this codebase the Channel API clientid is
    built as str(client.key().id()), i.e. the datastore key id -- not the
    string form of 'created' as this docstring previously claimed. Confirm
    against the channel module.
    """
    # When the client registered; also used to expire stale channel tokens.
    created = db.DateTimeProperty(required=True, auto_now_add=True)
    # Feed identifiers this client wants updates for.
    feeds = db.StringListProperty(required=True)
    # True while the Channel API reports the client as connected.
    connected = db.BooleanProperty(required=True, default=False)
| Python |
from setuptools import setup, find_packages
# Packaging metadata for the firepy distribution (FirePHP server-side
# library for Python, with logging and Django integration).
setup(
    name='firepy',
    version='0.1.5',
    description='FirePHP for Python',
    long_description=('This is a python server library for FirePHP '
                      'supporting python built-in logging facility '
                      'and Django.'),
    author='Sung-jin Hong',
    author_email='serialx@serialx.net',
    license='MIT',
    url='http://code.google.com/p/firepy/',
    download_url='http://code.google.com/p/firepy/downloads/list',
    packages=find_packages(),
    # Not zip-safe: keep the package unpacked on disk when installed.
    zip_safe=False,
)
| Python |
# The MIT License
#
# Copyright (c) 2009 Sung-jin Hong <serialx@serialx.net>
# Many code here derived from:
# http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
# Try import from json module in python 2.6>
import json
except ImportError:
try:
# Try import from django
from django.utils import simplejson as json
except ImportError:
import simplejson as json
# Max size of each header. Exists because Firefox has limits (5000)
HEADER_SIZE_MAX = 4000
def _extract_traceback(tb):
frames = []
while tb:
tb_frame = tb.tb_frame
f_locals = tb_frame.f_locals
f_code = tb_frame.f_code
frames.append({
'filename': f_code.co_filename,
'lineno': tb.tb_lineno,
'locals': f_locals,
'name': f_code.co_name,
'args': [
#f_code.co_varnames[i] for i in range(f_code.co_argcount)
f_locals.get(f_code.co_varnames[i]) for i in range(f_code.co_argcount)
],
'hide': tb_frame.f_locals.get('__traceback_hide__')
})
tb = tb.tb_next
return frames
def _filter_traceback(frames):
hidden = False
retval = []
for idx, frame in enumerate(frames):
hide = frame['hide']
if hide in ('before', 'before_and_this'):
del retval[:]
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hidden:
continue
if not hide:
retval.append(frame)
return retval
class FirePHP(object):
    """Core module that generates FirePHP JSON notation."""

    # Map Python logging level names to FirePHP console types.
    LEVELMAP = {'DEBUG': 'LOG', 'WARNING': 'WARN', 'CRITICAL': 'ERROR'}

    @classmethod
    def log_level(cls, python_log_level):
        """Translate python log level to FirePHP's."""
        # Levels with no explicit mapping (e.g. INFO, ERROR) pass through.
        return cls.LEVELMAP.get(python_log_level, python_log_level)

    @classmethod
    def exception(cls, record):
        """Translate exc_info into FirePHP JSON notation."""
        assert record.exc_info
        exc_info = record.exc_info
        exc_type, exc_value, exc_tb = exc_info
        frames = _filter_traceback(_extract_traceback(exc_tb))
        # Two-element structure: a metadata header dict, then the exception
        # body located at the innermost frame with a reversed
        # (innermost-first) trace list.
        return [{"Type": 'EXCEPTION',
                 "File": record.pathname,
                 "Line": record.lineno},
                {'Type': 'throw',
                 'Class': exc_type.__name__,
                 'Message': record.getMessage(),
                 'File': frames[-1]['filename'],
                 'Line': frames[-1]['lineno'],
                 'Function': frames[-1]['name'],
                 'Trace': list(reversed([{
                     'function': frame['name'],
                     'args': frame['args'],
                     'line': frame['lineno'],
                     'file': frame['filename']
                 } for frame in frames]))}]

    @classmethod
    def log(cls, record):
        """Translate log record into FirePHP JSON."""
        level = cls.log_level(record.levelname)
        return [
            {"Type": level,
             "File": record.pathname,
             "Line": record.lineno},
            record.getMessage()]

    @classmethod
    def table(cls, table_, label=None):
        """Generates FirePHP table JSON."""
        if not label:
            label = ""
        # NOTE(review): "a.py"/23 look like placeholder file/line values --
        # confirm whether real caller info should be supplied here.
        return [
            {"Type":"TABLE",
             "Label": label,
             "File":"a.py",
             "Line":23},
            table_,
        ]

    @classmethod
    def base_headers(cls):
        """Base FirePHP JSON protocol headers."""
        return [
            ('X-Wf-Protocol-1',
             'http://meta.wildfirehq.org/Protocol/JsonStream/0.2'),
            ('X-Wf-1-Plugin-1',
             'http://meta.firephp.org/Wildfire/Plugin/FirePHP/Library-FirePHPCore/0.2.0'),
            ('X-Wf-1-Structure-1',
             'http://meta.firephp.org/Wildfire/Structure/FirePHP/FirebugConsole/0.1'),
        ]

    @classmethod
    def generate_headers(cls, logs):
        """Yield numbered (header-name, value) pairs encoding `logs`.

        Each entry is JSON-encoded; entries of HEADER_SIZE_MAX or longer are
        split across consecutive headers using the continuation markers seen
        below (trailing '|\\' on all but the last chunk). A final
        X-Wf-1-Index header carries the total header count.
        """
        def encode_robust(obj):
            # Fallback for objects json cannot serialize natively.
            return repr(obj)
        index = 1
        for log in logs:
            code = json.dumps(log, default=encode_robust)
            if len(code) >= HEADER_SIZE_MAX: # Too large header for firefox, split it
                cut = code[:HEADER_SIZE_MAX]
                rest = code[HEADER_SIZE_MAX:]
                yield ('X-Wf-1-1-1-%d' % index, '%d|%s|\\' % (len(code), cut))
                index += 1
                while True:
                    cut = rest[:HEADER_SIZE_MAX]
                    rest = rest[HEADER_SIZE_MAX:]
                    if rest: # If it's not the end
                        yield ('X-Wf-1-1-1-%d' % index, '|%s|\\' % (cut))
                        index += 1
                    else: # If it's the end
                        yield ('X-Wf-1-1-1-%d' % index, '|%s|' % (cut))
                        index += 1
                        break
            else:
                yield ('X-Wf-1-1-1-%d' % index, '%d|%s|' % (len(code), code))
                index += 1
        yield ('X-Wf-1-Index', str(index - 1))
| Python |
# The MIT License
#
# Copyright (c) 2009 Sung-jin Hong <serialx@serialx.net>
# Many code here derived from:
# http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from firepy.firephp import FirePHP
class FirePHPHandler(logging.Handler):
    """Logging handler that buffers records as FirePHP JSON structures."""

    # Class-level buffer shared across instances; the middleware drains it
    # into response headers once per request.
    logs = []

    def emit(self, record):
        """
        Record all the logs that comes by converting them to JSON notation.
        """
        convert = FirePHP.exception if record.exc_info else FirePHP.log
        FirePHPHandler.logs.append(convert(record))
| Python |
# The MIT License
#
# Copyright (c) 2009 Sung-jin Hong <serialx@serialx.net>
# Many code here derived from:
# http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from django.conf import settings
from django.db import connection
from firepy.loghandler import FirePHPHandler
from firepy.firephp import FirePHP
class FirePHPMiddleware():
    """FirePHP middleware for Django.

    Buffers log records during a request (via FirePHPHandler) and emits
    them, plus a SQL-query timing table, as FirePHP/Wildfire response
    headers for the FirePHP browser extension to render.
    """

    def __init__(self):
        # Set the FirePHP log handler to the root logger.
        firephp = FirePHPHandler()
        firephp.setLevel(logging.DEBUG)
        logging.root.setLevel(logging.DEBUG)
        logging.root.addHandler(firephp)

    def process_request(self, request):
        # Reset the log buffer at the start of each request.
        FirePHPHandler.logs = []

    def process_response(self, request, response):
        # Ignore the static media file requests
        if settings.MEDIA_URL and request.META['PATH_INFO'].startswith(settings.MEDIA_URL):
            return response
        if not settings.MEDIA_URL:
            logging.warn('Please set MEDIA_URL to filter out unnecessary FirePHP logs.')
        # Total DB time; sum() over a generator replaces the manual loop,
        # and the name no longer shadows a potential `time` module import.
        total_time = sum(float(q['time']) for q in connection.queries)
        # Generate db times table
        query_info = [["SQL Statement", "Time"]]
        query_info += [[query['sql'], query['time']]
                       for query in connection.queries]
        label = "%d SQL queries took %f seconds" % \
            (len(connection.queries), total_time)
        FirePHPHandler.logs.append(FirePHP.table(query_info, label))
        # Set base FirePHP JSON protocol headers
        headers = FirePHP.base_headers()
        for key, value in headers:
            response[key] = value
        # Translate the JSON message to headers
        for key, value in FirePHP.generate_headers(FirePHPHandler.logs):
            response[key] = value
        return response
| Python |
# The MIT License
#
# Copyright (c) 2009 Sung-jin Hong <serialx@serialx.net>
# Many code here derived from:
# http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.db import connection
import logging
import cgi
import simplejson as json
LEVELMAP = {'DEBUG': 'LOG', 'WARNING': 'WARN', 'CRITICAL': 'ERROR'}
def _extract_traceback(tb):
frames = []
while tb:
tb_frame = tb.tb_frame
f_locals = tb_frame.f_locals
f_code = tb_frame.f_code
frames.append({
'filename': f_code.co_filename,
'lineno': tb.tb_lineno,
'locals': f_locals,
'name': f_code.co_name,
'args': [
#f_code.co_varnames[i] for i in range(f_code.co_argcount)
f_locals.get(f_code.co_varnames[i]) for i in range(f_code.co_argcount)
],
'hide': tb_frame.f_locals.get('__traceback_hide__')
})
tb = tb.tb_next
return frames
def _filter_traceback(frames):
hidden = False
retval = []
for idx, frame in enumerate(frames):
hide = frame['hide']
if hide in ('before', 'before_and_this'):
del retval[:]
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hidden:
continue
if not hide:
retval.append(frame)
return retval
class FirePHPHandler(logging.Handler):
    """Logging handler that buffers records, pre-encoded as FirePHP JSON."""
    # Class-level buffer shared across instances; FireMiddleware drains it.
    logs = []

    def emit(self, record):
        # Translate the Python level name to FirePHP's console type.
        level = LEVELMAP.get(record.levelname, record.levelname)
        if record.exc_info:
            # Exceptions carry their full (filtered) traceback as a trace
            # list, innermost frame first.
            exc_type, exc_value, exc_tb = record.exc_info
            frames = _filter_traceback(_extract_traceback(exc_tb))
            FirePHPHandler.logs.append([
                {"Type": 'EXCEPTION',
                 "File": record.pathname,
                 "Line": record.lineno},
                {'Type': 'throw',
                 'Class': exc_type.__name__,
                 'Message': record.getMessage(),
                 'File': frames[-1]['filename'],
                 'Line': frames[-1]['lineno'],
                 'Function': frames[-1]['name'],
                 'Trace': list(reversed([{
                     'function': frame['name'],
                     'args': frame['args'],
                     'line': frame['lineno'],
                     'file': frame['filename']
                 } for frame in frames]))}
            ])
        else:
            # Plain records: metadata header dict plus the message text.
            FirePHPHandler.logs.append([
                {"Type": level,
                 "File": record.pathname,
                 "Line": record.lineno},
                record.getMessage()])
class FireMiddleware():
    """Early FirePHP middleware for Django.

    Buffers log records per request and emits them, plus a SQL timing
    table, as Wildfire response headers.
    """

    def __init__(self):
        # Install the buffering handler on the root logger.
        firephp = FirePHPHandler()
        firephp.setLevel(logging.DEBUG)
        logging.root.setLevel(logging.DEBUG)
        logging.root.addHandler(firephp)

    def process_request(self, request):
        # Start each request with an empty log buffer.
        FirePHPHandler.logs = []

    def process_response(self, request, response):
        # Accumulate total DB time across all queries for this request.
        total_time = 0.0
        for q in connection.queries:
            total_time += float(q['time'])
        query_info = [["SQL Statement","Time"]]
        query_info += [[query['sql'],
                        query['time']] for query in connection.queries]
        FirePHPHandler.logs.append([
            {"Type":"TABLE",
             "Label":"%d SQL queries took %f seconds" % (len(connection.queries), total_time),
             "File":"a.py",
             "Line":23},
            query_info,
        ])
        response['X-Wf-Protocol-1'] = 'http://meta.wildfirehq.org/Protocol/JsonStream/0.2'
        response['X-Wf-1-Plugin-1'] = 'http://meta.firephp.org/Wildfire/Plugin/FirePHP/Library-FirePHPCore/0.2.0'
        response['X-Wf-1-Structure-1'] = 'http://meta.firephp.org/Wildfire/Structure/FirePHP/FirebugConsole/0.1'
        def encode_robust(obj):
            # Fallback for objects json cannot serialize natively.
            return repr(obj)
        # Fix: header numbering previously started at 0; the newer
        # firepy.firephp.generate_headers numbers X-Wf-1-1-1-N from 1, so
        # start the enumeration at 1 for consistency with the protocol
        # encoder used elsewhere in this package.
        for i, log in enumerate(FirePHPHandler.logs, 1):
            code = json.dumps(log, default=encode_robust)
            response['X-Wf-1-1-1-%d' % i] = '%d|%s|' % (len(code), code)
        #response['X-Wf-1-Index'] = '1'
        return response
| Python |
#!/usr/bin/python
# Standard Django (pre-1.4) management entry point: delegates to
# django.core.management using this project's settings module.
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    # Fail loudly with guidance rather than a bare traceback.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
import sys

# Demonstration of caller introspection via sys._getframe (Python 2 syntax).

def func():
    # Current frame, then the caller's frame one level up the stack.
    frame = sys._getframe()
    tb = frame.f_back  # NOTE(review): name is misleading -- this is a frame object, not a traceback
    f_code = tb.f_code
    # Report where func() was called from: file, enclosing function, and line.
    print f_code.co_filename, ':', f_code.co_name , tb.f_lineno
    # Dump the available introspection attributes for reference.
    print dir(frame)
    print dir(tb)
    print dir(f_code)

def func2():
    # Extra stack level so func() has a non-module caller to report.
    func()

func2()
| Python |
#!/usr/bin/python
# Standard Django (pre-1.4) management entry point: delegates to
# django.core.management using this project's settings module.
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    # Fail loudly with guidance rather than a bare traceback.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
# URLconf for the firephp demo project (old-style Django patterns()).
from django.conf.urls.defaults import *

# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

urlpatterns = patterns('',
    # Example:
    # (r'^fire/', include('firephp.fire.urls')),
    # Route /fire/ to the demo view that exercises FirePHP logging.
    (r'^fire/', 'firephp.fire.views.fire'),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/(.*)', admin.site.root),
)
| Python |
# Django settings for firephp project.

# NOTE(review): DEBUG is enabled -- development configuration only, do not
# deploy as-is.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASE_ENGINE = 'sqlite3'  # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'test.db'    # Or path to database file if using sqlite3.
DATABASE_USER = ''           # Not used with sqlite3.
DATABASE_PASSWORD = ''       # Not used with sqlite3.
DATABASE_HOST = ''           # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''           # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control -- rotate it before
# any real deployment.
SECRET_KEY = 'a9r!c_2#z2elf45+fl1hem3h4r72zi)2fm&2xrs^vl)gx5wrgj'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

# FireMiddleware is listed first so its process_response runs last and can
# observe the queries/log records produced by everything below it.
MIDDLEWARE_CLASSES = (
    'firephp.firemiddleware.FireMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'firephp.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
)
| Python |
from django.db import models
# Create your models here.
| Python |
# The MIT License
#
# Copyright (c) 2009 Sung-jin Hong <serialx@serialx.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from django.contrib.auth.models import User
from django.http import HttpResponse
def a(a, b, c):
    """Helper that always raises; used by fire() to exercise exception logging.

    The three arguments are ignored -- they exist only so the logged traceback
    shows a call with arguments.
    """
    raise ValueError("asexception!!")
def fire(request):
    """Demo view: emits log records at every level, runs some ORM queries,
    and logs a caught exception so FireMiddleware has data to ship back
    in the FirePHP response headers.
    """
    # Structured payloads get JSON-encoded by the handler.
    logging.debug([1, {"asdf":(123, 2, "3")}, "3"])
    logging.debug("debug")
    logging.info("info")
    logging.warn("warn %d", 2)
    logging.error("error")
    logging.critical("critical")
    # Trigger SQL queries so the middleware's query table is non-trivial.
    ab = User.objects.filter(username="serialx").all()
    for i in ab:
        print i
    ab = User.objects.all()
    for i in ab:
        print i
    try:
        a(1, 2, "3")
    except:
        # NOTE(review): bare except catches everything (including
        # KeyboardInterrupt); acceptable in a demo, not in production code.
        logging.exception("sdf!!!")
    response = HttpResponse("hello world!")
    return response
| Python |
from __future__ import division
import pylab as pl
import numpy as np
import pandas as pd
class tctGibbs:
    """Gibbs sampler for fire-growth parameters.

    Samples the growth coefficient (A), growth exponent (alpha), and a
    correction time (tcor) for fires in three spread bins (room, building,
    beyond), constrained so that predicted fire size at total response time
    falls inside each bin's [Qmin, Qmax) size window. Chains are written to
    CSV files under ../RawOutput/.
    """
    #initialization
    # inputs:
    def __init__(self,cro=76,cbu=20,cwo=4,talup=120,tallow=90,
                 tdisup=80,tdislow=40,tturup=100,tturlow=60,tarrup=420,
                 tarrlow=300,tsupup=180,tsuplow=60,sizemin=100,sizeroom=2000,
                 sizebldg=10000,sizemax=3000000,
                 upA=0.047,lowA=0.0029,upalph=2,lowalph=1):
        #observed bin values
        self.cro = cro #contained to room
        self.cbu = cbu #contained to building
        self.cwo = cwo #contained to world
        #specify constraints on the fixed parameters
        self.talup = talup # upper bound on t_alarm
        self.tallow = tallow # lower bound on t_alarm
        self.tdisup = tdisup # upper bound on t_dispatch
        self.tdislow = tdislow # lower bound on t_dispatch
        self.tturup = tturup # upper bound on t_turnout
        self.tturlow = tturlow # lower bound on t_turnout
        self.tarrup = tarrup # upper bound on t_arrival
        self.tarrlow = tarrlow # lower bound on t_arrival
        self.tsupup = tsupup # upper bound on t_suppression
        self.tsuplow = tsuplow # lower bound on t_suppression
        #specify expected fire sizes for spread behavior (kW)
        self.sizemin = sizemin #natively set to 0.01 to avoid numerical singularities on lower bound
        self.sizeroom = sizeroom #threshold for binning a fire into contained to room
        self.sizebldg = sizebldg #threshold on binning a fire into contained to building
        self.sizemax = sizemax #reasonable physical threshold for a structure fire
        #Note: value of 3,000,000 taken from FDS prediction
        #of peak HRR for WTC 1 fire on 9/11, from NCSTAR 1-5
        #Figure 6-30
        #specify original bounds on A and alpha
        self.upA = upA
        self.lowA = lowA
        self.upalph = upalph
        self.lowalph = lowalph
        #calculate total number of fires
        self.n_fires=cro+cbu+cwo
        #instantiate initial draws for A and alpha
        self.ARoom=np.random.uniform(lowA,upA)
        self.ABldg=np.random.uniform(lowA,upA)
        self.ABeyond=np.random.uniform(lowA,upA)
        self.alphRoom=np.random.uniform(lowalph,upalph)
        self.alphBldg=np.random.uniform(lowalph,upalph)
        self.alphBeyond=np.random.uniform(lowalph,upalph)
        #instantiate variables for tcor fires
        self.tcorRoom = 0
        self.tcorBldg = 0
        self.tcorBeyond = 0
        #create initial containers for all of the task time variables
        self.tal = np.random.uniform(tallow,talup) # upper bound on t_alarm
        self.tdis = np.random.uniform(tdislow,tdisup) # upper bound on t_dispatch
        self.ttur = np.random.uniform(tturlow,tturup) # upper bound on t_turnout
        self.tarr = np.random.uniform(tarrlow,tarrup)# upper bound on t_arrival
        self.tsup = np.random.uniform(tsuplow,tsupup) # upper bound on t_suppression
        self.tfiretasks = self.tal+self.tdis+self.ttur+self.tarr+self.tsup
    #Create draw functions for the Gibbs sampler
    #Draw new values for fire department timing
    def draw_tfiretasks(self):
        # Redraw each task time uniformly within its bounds and refresh the
        # total fire-department response time.
        self.tal = np.random.uniform(self.tallow,self.talup) # upper bound on t_alarm
        self.tdis = np.random.uniform(self.tdislow,self.tdisup) # upper bound on t_dispatch
        self.ttur = np.random.uniform(self.tturlow,self.tturup) # upper bound on t_turnout
        self.tarr = np.random.uniform(self.tarrlow,self.tarrup)# upper bound on t_arrival
        self.tsup = np.random.uniform(self.tsuplow,self.tsupup) # upper bound on t_suppression
        self.tfiretasks = self.tal+self.tdis+self.ttur+self.tarr+self.tsup
    #Draw the tcor values for relevant fires
    #Inputs: relevant Qmin and Qmax thresholds and current A and alph values
    def draw_tcor(self,Qmin,Qmax,A,alph):
        # Invert Q = A * t**alph at both size thresholds to get the tcor
        # window consistent with the current A and alph, then draw uniformly.
        lowtcor = (Qmin/A)**(1/alph)-self.tfiretasks
        uptcor = (Qmax/A)**(1/alph)-self.tfiretasks
        return np.random.uniform(lowtcor,uptcor)
    #Draw the A values for relevant fires
    #Inputs: relevant Qmin and Qmax thresholds and current tcor and alph values
    def draw_A(self,Qmin,Qmax,tcor,alph):
        # max(...,0.0001) guards against a non-positive total time base.
        lowA = (Qmin)/(max(tcor+self.tfiretasks,0.0001)**(alph))
        upA = min((Qmax)/(max(tcor+self.tfiretasks,0.0001)**(alph)),
                  Qmin/self.tfiretasks**2)
        #return np.random.uniform(max(lowA,self.lowA),min(upA,self.upA))
        return np.random.uniform(lowA,upA)
    #Draw the tcor values for room fires
    #Inputs: relevant Qmin and Qmax thresholds and current tcor and A values
    def draw_alph(self,Qmin,Qmax,tcor,A):
        lowalph = (pl.log(Qmin)-pl.log(A))/pl.log(max(tcor+self.tfiretasks,0.0001))
        upalph = (pl.log(Qmax)-pl.log(A))/pl.log(max(tcor+self.tfiretasks,0.0001))
        if(upalph < self.lowalph):
            upalph = self.lowalph
        #return np.random.uniform(max(self.lowalph,lowalph),min(self.upalph,upalph))
        #return np.random.uniform(self.lowalph,self.upalph)
        # NOTE(review): the stochastic draws above are commented out, so alpha
        # is pinned at its upper bound and lowalph/upalph are computed but
        # unused -- confirm this is intentional and not leftover debugging.
        return self.upalph
    #Gibbs sampling function
    def fireGibbs(self,n_iter,burn,thin,Qmin,Qmax,tcor,A,alph):
        print 'fireGibbs called'
        # NOTE(review): np.ceil wraps only (n_iter-burn); ceil((n_iter-burn)/thin)
        # was probably intended.  When thin does not divide (n_iter-burn) the
        # stored-sample index s can run past n_store -- verify.
        n_store = int(np.ceil((n_iter-burn))/thin+0.00001)
        gibbstcor = np.full(n_store,-1)
        gibbsA = np.full(n_store,-1)
        gibbsalph = np.full(n_store,-1)
        s = 0
        for i in range(0,n_iter):
            # One Gibbs sweep: task times, then A | alph, tcor | A, alph | tcor.
            self.draw_tfiretasks()
            A = self.draw_A(Qmin,Qmax,tcor,alph)
            tcor = self.draw_tcor(Qmin,Qmax,A,alph)
            alph = self.draw_alph(Qmin,Qmax,tcor,A)
            # Record every thin-th iteration after burn-in.
            if(i >= burn and i%thin==0):
                gibbstcor[s] = tcor
                gibbsA[s] = A
                gibbsalph[s] = alph
                s = s+1
        return(gibbstcor,gibbsA,gibbsalph)
    #output storage function
    def gibbs_store(self,gibbsoutputlist,filenameoutputlist):
        # Write each chain as one CSV per name under ../RawOutput/.
        for i in range(0,len(gibbsoutputlist)):
            f=open('../RawOutput/'+filenameoutputlist[i],'wb')
            np.savetxt(f,gibbsoutputlist[i],delimiter=',')
            f.close()
    #Main class running function
    def runGibbs(self,n_iter=1000,burn=500,thin=5):
        #Run room fires first and output
        gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizemin,
                                                    self.sizeroom,self.tcorRoom,
                                                    self.ARoom,self.alphRoom)
        #store output
        self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorRoom.csv',
                         'ARoom.csv','alphRoom.csv'])
        #Run building fires next and output
        gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizeroom,
                                                    self.sizebldg,self.tcorBldg,
                                                    self.ABldg,self.alphBldg)
        #store output
        self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorBldg.csv',
                         'ABldg.csv','alphBldg.csv'])
        #Run beyond building fires last and output
        gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizebldg,
                                                    self.sizemax,self.tcorBeyond,
                                                    self.ABeyond,self.alphBeyond)
        #store output
        self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorBeyond.csv',
                         'ABeyond.csv','alphBeyond.csv'])
# Script entry: run the sampler with 100000 iterations, 100 burn-in, thin by 10.
test = tctGibbs()
test.runGibbs(100000,100,10)
| Python |
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import numpy as np
from pylab import *
import pandas as pd
# Optimization class for finding tct
class tctOptim:
    """Monte-Carlo optimizer for the fire-timeline correction time (tct).

    Given a total fire count and the observed counts in three damage bins
    (room of origin, floor of origin, structure loss), draw random growth
    parameters and fire-department task times, then search for the tct that
    minimizes the squared error between predicted and observed bins.
    """

    # Initialization
    # Inputs: total number of fires, true number of fires in each bin (list)
    def __init__(self, total_fires, true_bins):
        """Store the total fire count and the observed per-bin counts."""
        self.total_fires = total_fires
        self.true_bins = true_bins

    # Sampler function for model parameters
    def sampler(self):
        """Draw one Monte-Carlo sample of growth parameters and task times.

        Populates self.alpha, self.exp, and self.running_time (the summed
        fire-department response time per fire, in seconds).
        """
        # Draw n samples from each distribution where n is the total number of fires
        self.alpha = np.random.uniform(0.0029, 0.047, size=self.total_fires)
        self.exp = np.random.uniform(1, 2, size=self.total_fires)
        time_to_alarm = np.random.uniform(30, 60, size=self.total_fires)
        time_to_dispatch = np.random.uniform(40, 80, size=self.total_fires)
        time_to_turnout = np.random.uniform(60, 100, size=self.total_fires)
        time_to_arrival = np.random.uniform(300, 420, size=self.total_fires)
        time_to_suppress = np.random.uniform(60, 180, size=self.total_fires)
        self.running_time = (time_to_alarm + time_to_dispatch + time_to_turnout
                             + time_to_arrival + time_to_suppress)

    # Binning function for fires
    def binFun(self, tct=0.0):
        """Bin every sampled fire by its predicted final size.

        Args:
            tct (float): correction time (s) added to each running time.
        Returns:
            list: [room_of_origin, floor_of_origin, structure_loss] counts.
        """
        # Bin counters
        room_of_origin = 0
        floor_of_origin = 0
        structure_loss = 0
        # BUG FIX: was np.zeros(total_fires), silently reading the
        # module-level global instead of this instance's count.
        fire_size = np.zeros(self.total_fires)
        # Loop through all fires and assign to a bin
        for num in range(0, self.total_fires):
            running_time_cor = self.running_time[num] + tct
            # Power-law fire growth: Q = alpha * t**exp (kW).
            fire_size[num] = self.alpha[num]*(running_time_cor)**self.exp[num]
            # Assessing damage for a typical residential structure.
            if fire_size[num] < 2000:
                room_of_origin = room_of_origin + 1
            elif fire_size[num] < 10000:
                # BUG FIX: the old test (> 2000 and < 10000) dropped a fire of
                # exactly 2000 kW into structure_loss; the bins now partition
                # [0, inf) with no gaps.
                floor_of_origin = floor_of_origin + 1
            else:
                structure_loss = structure_loss + 1
        return [room_of_origin, floor_of_origin, structure_loss]

    # Error function
    def eCalc(self, tct):
        """Return the sum of squared errors between true and predicted bins
        for correction time *tct* (s)."""
        # Get bins with corrected time
        pred_bins = self.binFun(tct=tct)
        eTot = 0.0
        # Calculate the sum of squared errors
        for i in range(len(self.true_bins)):
            eTot += (self.true_bins[i]-pred_bins[i])**2
        return eTot

    # Golden section search for tct
    def goldenSection(self, verb=False):
        """Search [-500, 1000] s for the tct minimizing eCalc.

        Args:
            verb (bool): if True, print per-iteration diagnostics.
        Returns:
            float: estimated optimal correction time (s).
        """
        # Set low and high bounds of search
        it_L = -500.
        it_H = 1000.
        f_calls = 0
        count = 1
        # Low and high error calculations
        eTotal_L = self.eCalc(it_L)
        eTotal_H = self.eCalc(it_H)
        f_calls = f_calls + 2
        if verb:
            print("Initial Error Calculations for tct")
            print('Low Guess: '+str(eTotal_L))
            print('High Guess: '+str(eTotal_H))
            print("#\tvalue\terror")
        # Golden Ratio
        gold = (1.0 + 5**.5) / 2.0
        # Calculate first golden point
        it_A = (gold * it_L + it_H)/(gold + 1.0)
        eTotal_A = self.eCalc(it_A)
        f_calls = f_calls + 1
        if verb:
            print("%i\t%.2f\t%.2f" % (count, it_A, eTotal_A))
        count += 1
        # Iterate until the bracket is narrower than 1 s (or 50 evaluations).
        while abs(it_L-it_H) > 1.0 and count < 50:
            # Calculate next golden point for comparison
            it_B = it_L + it_H - it_A
            eTotal_B = self.eCalc(it_B)
            f_calls = f_calls + 1
            if verb:
                print("%i\t%.2f\t%.6f" % (count, it_B, eTotal_B))
            count += 1
            # Decide new point assignment based on whether A or B is greater
            if it_A < it_B:
                if eTotal_B > eTotal_A:
                    it_H = it_B
                    eTotal_H = eTotal_B
                elif eTotal_B <= eTotal_A:
                    it_L = it_A
                    eTotal_L = eTotal_A
                    it_A = it_B
                    eTotal_A = eTotal_B
            elif it_A > it_B:
                if eTotal_B > eTotal_A:
                    it_L = it_B
                    eTotal_L = eTotal_B
                elif eTotal_B <= eTotal_A:
                    it_H = it_A
                    eTotal_H = eTotal_A
                    it_A = it_B
                    eTotal_A = eTotal_B
        return it_A
# Read in data and get total fires
data = pd.read_csv('../Data/ArlingtonCensusFire.csv', header=0)
total_fires = len(data['inc_type'])
room_origin = 0
floor_origin = 0
structure = 0
# Bin observed fires: spread codes 1-2 count as room of origin, 3-4 as floor
# of origin, anything else as full-structure loss.
for i in data['fire_sprd']:
    if i == 1 or i == 2:
        room_origin = room_origin + 1
    elif i == 3 or i == 4:
        floor_origin = floor_origin + 1
    else:
        structure = structure + 1
print 'Total fires:',total_fires
# True bins
bAct = [room_origin,floor_origin,structure]
print 'Actual bins:', bAct
# Call optimizer class
tOpt = tctOptim(total_fires,bAct)
# Sample set of times and alphas
tOpt.sampler()
# Print bins of sample
print 'Single Bin Sample:',tOpt.binFun()
# Single optimization run
tct = tOpt.goldenSection()
print 'Single correction time:',tct
print 'Single corrected bin:',tOpt.binFun(tct=tct)
# Array of correction times
n = 1000
tct = np.zeros(n)
# Resample and optimize for tct n times to build a distribution of the
# correction time under parameter uncertainty.
for i in range(n):
    if i%20 == 0:
        print 'Sample',i
    tOpt.sampler()
    tct[i] = tOpt.goldenSection()
# Plot a histogram of tct
plt.figure()
plt.hist(tct,bins=20)
plt.xlabel('t correction (s)',size=18)
plt.ylabel('Count',size=18)
savefig('../Figures/t_correct_histogram.pdf',format='pdf')
# Save tct array
np.savetxt('../Figures/t_correct_raw.csv',tct,delimiter=',')
| Python |
import psycopg2
import random
class DISTImport(object):
    """Contains the import methods for the DIST model.
    The DistImport class contains the import methods for the DIST model.
    Additionally, it stores the relevant input parameters used by other aspects
    of the model in attributes.
    Attributes:
        extent_list (list): ordered list of the extents of fire spread.
        firespread_count (dict): dictionary of fire spread attributes.
    """
    def __init__(self):
        # Canonical ordering of spread extents; counts start at zero.
        self.extent_list = ['object','room','floor','bldg','beyond']
        self.firespread_count = {x: 0 for x in self.extent_list}
    def get_firespread_count(self):
        """Return the firespread_count attribute
        >>> test.get_firespread_count()
        {'bldg': 0, 'object': 0, 'beyond': 0, 'room': 0, 'floor': 0}
        """
        return self.firespread_count
    def get_extent_list(self):
        """Return the extent_list attribute
        >>> test.get_extent_list()
        ['object', 'room', 'floor', 'bldg', 'beyond']
        """
        return self.extent_list
    # Fire spread import methods
    def flatfile_import(self,flatfiles):
        """Parse a set of NFIRS incident flat files for structure fires.
        Args:
            flatfiles (list): a list of file pathnames for files to be parsed.
        Returns:
            changes the values of the firespread_count attributes to calculated
            values
        """
        raise NotImplementedError
    def pgdb_import(self,dbname,user,host,password,tablename):
        """Parse a relevant NFIRS database table for structure fire spread count.
        Returns:
            changes the relevant values of the firespread_count
        >>> test.pgdb_import('nfirs2','postgres','localhost','network345','marginalstrucfires')
        >>> test.get_firespread_count()
        {'bldg': 411340L, 'object': 72775L, 'beyond': 324971L, 'room': 353886L, 'floor': 88510L}
        """
        try:
            conn = psycopg2.connect("dbname='{}' user='{}' host='{}' \
                                    password='{}'".format(dbname,user,
                                                          host,password))
        except:
            # NOTE(review): bare except, and execution continues after the
            # failure message -- cur = conn.cursor() below will then raise
            # NameError because conn was never bound.  Consider re-raising.
            print "I am unable to connect to the database."
        cur = conn.cursor()
        cur.execute("""SELECT fire_sprd, count(*) from {0} where fire_sprd IN \
                    ('1','2','3','4','5') group by fire_sprd \
                    order by fire_sprd desc""".format(tablename))
        rows = cur.fetchall()
        # NOTE(review): rows arrive in descending fire_sprd order ('5'..'1')
        # while extent_list runs object -> beyond; verify the intended pairing
        # of spread codes to extents.
        for row, extent in zip(rows, self.extent_list):
            self.firespread_count[extent]=row[1]
    def set_firespread_count(self,list_of_five_values):
        """Manually set the values of firespread_count
        >>> test.set_firespread_count([300,200,50,100,10])
        >>> test.get_firespread_count()
        {'bldg': 100, 'object': 300, 'beyond': 10, 'room': 200, 'floor': 50}
        """
        # Values pair positionally with extent_list (object, room, floor,
        # bldg, beyond).
        for value,extent in zip(list_of_five_values,self.extent_list):
            self.firespread_count[extent]=value
    # Room area distribution import methods
    def room_area_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the uniform room area distribution.
        >>> test.room_area_set_uniform_limits(20,30)
        """
        self.room_area_limits = (lowerbound,upperbound)
    def room_area_get_uniform_limits(self):
        """Return the uniform distribution limits of the room areas
        >>> test.room_area_get_uniform_limits()
        self.room_area_limits is undefined!
        >>> test.room_area_set_uniform_limits(20,30)
        >>> test.room_area_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.room_area_limits
        except AttributeError:
            # Limits are only defined after the matching setter has been called.
            print "self.room_area_limits is undefined!"
    def room_area_data_import(self,databasevariables):
        """Import set of room areas from an external source.
        Args:
            Insert database variables here
        Returns:
            generate a room area values attribute that can be manipulated.
        """
        raise NotImplementedError
    #Floor Area distribution import methods
    #Note that floor area is optional in the model. it will largely pertain
    #to high-rise buildings where the floor area is more likely to be reported
    #and sensible
    def floor_area_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the uniform floor area distribution.
        >>> test.floor_area_set_uniform_limits(20,30)
        """
        self.floor_area_limits = (lowerbound,upperbound)
    def floor_area_get_uniform_limits(self):
        """Return the uniform distribution limits of the floor areas
        >>> test.floor_area_get_uniform_limits()
        self.floor_area_limits is undefined!
        >>> test.floor_area_set_uniform_limits(20,30)
        >>> test.floor_area_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.floor_area_limits
        except AttributeError:
            print "self.floor_area_limits is undefined!"
    def floor_area_data_import(self,databasevariables):
        """Import set of floor areas from an external source.
        Args:
            Insert database variables here
        Returns:
            generate a floor area values attribute that can be manipulated.
        """
        raise NotImplementedError
    #Building Area distribution import methods
    def bldg_area_set_uniform_limits(self, lowerbound, upperbound):
        """Set the limits of the uniform room area distribution.
        >>> test.bldg_area_set_uniform_limits(20,30)
        """
        self.bldg_area_limits = (lowerbound,upperbound)
    def bldg_area_get_uniform_limits(self):
        """Return the uniform distribution limits of the bldg areas
        >>> test.bldg_area_get_uniform_limits()
        self.bldg_area_limits is undefined!
        >>> test.bldg_area_set_uniform_limits(20,30)
        >>> test.bldg_area_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.bldg_area_limits
        except AttributeError:
            print "self.bldg_area_limits is undefined!"
    def bldg_area_data_import(self,databasevariables):
        """Import set of building areas from external source.
        Args:
            Insert relevant variables here
        Returns:
            generate a bldg area values attribute that can be manipulated.
        """
        raise NotImplementedError
    def bldg_area_res_census_data_import(self,options):
        """Import AHS estimates of housing stock building size
        This might either query some census API or, if necessary,
        take a manual .csv. Whichever implementation works best.
        Note that unit sizes are only available in select geographic areas
        and nationally. See census tables C-02-AH and C-02-AH-M on
        factfinder.census.gov
        Args:
            Up for debate.
        Returns:
            list_of_two_equal_sized_lists (list of lists):
                first list contains the square footage of unit category
                second list contains the building area extent counts (
        """
        raise NotImplementedError
    #Time input methods
    def alarm_time_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the alarm time uniform distribution
        >>> test.alarm_time_set_uniform_limits(20,30)
        """
        self.alarm_time_limits = (lowerbound,upperbound)
    def alarm_time_get_uniform_limits(self):
        """Return the uniform distribution limits of the alarm time
        >>> test.alarm_time_get_uniform_limits()
        self.alarm_time_limits is undefined!
        >>> test.alarm_time_set_uniform_limits(20,30)
        >>> test.alarm_time_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.alarm_time_limits
        except AttributeError:
            print "self.alarm_time_limits is undefined!"
    def dispatch_time_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the dispatch time uniform distribution
        >>> test.dispatch_time_set_uniform_limits(20,30)
        """
        self.dispatch_time_limits = (lowerbound,upperbound)
    def dispatch_time_get_uniform_limits(self):
        """Return the uniform distribution limits of the dispatch time
        >>> test.dispatch_time_get_uniform_limits()
        self.dispatch_time_limits is undefined!
        >>> test.dispatch_time_set_uniform_limits(20,30)
        >>> test.dispatch_time_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.dispatch_time_limits
        except AttributeError:
            print "self.dispatch_time_limits is undefined!"
    def turnout_time_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the turnout time uniform distribution
        >>> test.turnout_time_set_uniform_limits(20,30)
        """
        self.turnout_time_limits = (lowerbound,upperbound)
    def turnout_time_get_uniform_limits(self):
        """Return the uniform distribution limits of the turnout time
        >>> test.turnout_time_get_uniform_limits()
        self.turnout_time_limits is undefined!
        >>> test.turnout_time_set_uniform_limits(20,30)
        >>> test.turnout_time_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.turnout_time_limits
        except AttributeError:
            print "self.turnout_time_limits is undefined!"
    def arrival_time_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the arrival time uniform distribution
        >>> test.arrival_time_set_uniform_limits(20,30)
        """
        self.arrival_time_limits = (lowerbound,upperbound)
    def arrival_time_get_uniform_limits(self):
        """Return the uniform distribution limits of the arrival time
        >>> test.arrival_time_get_uniform_limits()
        self.arrival_time_limits is undefined!
        >>> test.arrival_time_set_uniform_limits(20,30)
        >>> test.arrival_time_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.arrival_time_limits
        except AttributeError:
            print "self.arrival_time_limits is undefined!"
    def suppression_time_set_uniform_limits(self,lowerbound,upperbound):
        """Set the limits of the suppression time uniform distribution
        >>> test.suppression_time_set_uniform_limits(20,30)
        """
        self.suppression_time_limits = (lowerbound,upperbound)
    def suppression_time_get_uniform_limits(self):
        """Return the uniform distribution limits of the suppression time
        >>> test.suppression_time_get_uniform_limits()
        self.suppression_time_limits is undefined!
        >>> test.suppression_time_set_uniform_limits(20,30)
        >>> test.suppression_time_get_uniform_limits()
        (20, 30)
        """
        try:
            return self.suppression_time_limits
        except AttributeError:
            print "self.suppression_time_limits is undefined!"
if __name__=="__main__":
    # Run the embedded doctests against a fresh DISTImport instance.
    import doctest
    doctest.testmod(extraglobs={'test': DISTImport()})
| Python |
import numpy as np
import DIST_import
import DIST_calculations
import DIST_output

# Driver script: wires the DIST import/calculate/output classes together,
# Gibbs-samples the uncertain inputs, and prints the resulting DIST score.

floor_extent=False
#import all the values
Dimport = DIST_import.DISTImport()
#Dimport.pgdb_import('nfirs2','postgres','localhost','password','table(or_view?)ofstructurefires')
Dimport.set_firespread_count([93,190,39,64,9])
Dimport.room_area_set_uniform_limits(72,380)
Dimport.bldg_area_set_uniform_limits(1088,9004)
Dimport.alarm_time_set_uniform_limits(90,120)
Dimport.dispatch_time_set_uniform_limits(40,80)
Dimport.turnout_time_set_uniform_limits(60,100)
Dimport.arrival_time_set_uniform_limits(300,420)
Dimport.suppression_time_set_uniform_limits(60,180)
Dcalc = DIST_calculations.DISTCalculate(floor_extent=floor_extent)
Dout = DIST_output.DISTOutput(
    Dimport.get_extent_list(),Dimport.get_firespread_count(), floor_extent
)
#set Gibbs chain settings
n_iter = 10000
burn_in = 500
thin = 1
iter_total = n_iter+burn_in
#determine size of the chains necessary to hold data
n_store = int(n_iter/thin+0.0001)
#obtain a list of attributes to be recorded
#Note that you must always include DIST_room, DIST_bldg, and DIST_beyond
#in that order
record_list = ['DIST_room','DIST_bldg','DIST_beyond','room_area']
#initialize space for the chains
chain_record = np.full((n_store,len(record_list)),-1000)
#begin gibbs sampling
call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
             'suppression_time','room_area','bldg_area']
for i in range(iter_total):
    # Pair each 'draw_uniform_<name>' method on the calculator with the
    # matching '<name>_get_uniform_limits' getter on the importer.
    # NOTE(review): these two lists are loop-invariant and could be hoisted
    # out of the sampling loop.
    drawfunctions = ['draw_uniform_{}'.format(x) for x in call_list]
    getfunctions = ['{}_get_uniform_limits'.format(x) for x in call_list]
    for x,y in zip(drawfunctions,getfunctions):
        draws = getattr(Dcalc, x)
        gets = getattr(Dimport,y)
        draws(*gets())
    Dcalc.draw_DIST_room()
    Dcalc.draw_DIST_bldg()
    Dcalc.draw_DIST_beyond()
    # Record the tracked attributes once burn-in is complete.
    if(i >= burn_in):
        for z,label in zip(range(chain_record.shape[1]),record_list):
            chain_record[i-burn_in,z] = getattr(Dcalc, label)
#output the DIST score. For 'default' values (present values in fields)
#the value returned should be 13.0, I don't know how to doctest a script
print round(Dout.DIST_score(chain_record[...,0],chain_record[...,1],
            chain_record[...,2]))
| Python |
from __future__ import division
import numpy as np
import copy
import matplotlib.pyplot as plt
class DISTOutput(object):
    """Contains the output methods for the DIST model.
    The DistOutput class contains the output methods for the DIST model.
    Note that this class does not contain the raw output values, which are
    tracked within the wrapper class. Instead, it contains the methods for
    manipulating the raw output into presentable formats, including diagnostic
    graphs, summaries, and the final DIST score.
    Attributes:
        extent_list(list): list of firespread extents.
        firespread_count(dict): relevant counts of firespread extents.
            'relevant' refers to the fact that this will contain either
            room, building (bldg), and spread beyond (beyond) extents,
            or could optionally include floor if floor_extent is True.
        floor_extent(boolean): flag designating whether floor extent is
            to be included in the output calculations. This might become
            relevant when doing occupancy calculations where floor is a
            usefully delimiting extent.
    """
    def __init__(self,extent_list,firespread_count,floor_extent=False):
        """initialize attributes of the DISTOutput class.
        Args:
            extent_list(list): list of firespread extents, see DISTImport class.
            firespread_count(dict): dictionary of firespread counts by extent.
                see DISTImport class.
        >>> extent_list = ['object','room','floor','bldg','beyond']
        >>> firespread_count = {x: 30 for x in extent_list}
        >>> Dout = DISTOutput(extent_list,firespread_count,True)
        >>> print Dout.firespread_count
        {'floor': 30, 'beyond': 30, 'bldg': 30, 'room': 60}
        >>> extent_list = ['object','room','floor','bldg','beyond']
        >>> firespread_count = {x: 30 for x in extent_list}
        >>> Dout = DISTOutput(extent_list,firespread_count)
        >>> print Dout.firespread_count
        {'beyond': 30, 'bldg': 60, 'room': 60}
        """
        self.floor_extent = floor_extent
        # Deep copy so the importer's dictionary is never mutated.
        self.firespread_count = copy.deepcopy(firespread_count)
        # 'object' fires always fold into 'room'.
        self.firespread_count['room'] = (self.firespread_count['object']+
                                         self.firespread_count['room'])
        self.firespread_count.pop('object', None)
        if not self.floor_extent:
            # Without a separate floor extent, 'floor' fires fold into 'bldg'.
            self.firespread_count['bldg'] = (self.firespread_count['floor']+
                                             self.firespread_count['bldg'])
            self.firespread_count.pop('floor', None)
    #diagnostic methods
    def traceplot(self,vector_of_drawn_values):
        """generate a traceplot of a vector of drawn values.
        """
        raise NotImplementedError
    def densityplot(self,vector_of_drawn_values):
        """Plot the estimated probability density function of the drawn values.
        """
        raise NotImplementedError
    def summarystats(self,vector_of_drawn_values,
                     list_of_quantiles=[0.025,0.25,0.5,0.75,0.975]):
        """Calculate and print summary statistics of given raw output.
        """
        raise NotImplementedError
    def save_raw_output(self,vector_of_drawn_values):
        """Save raw output to a temporary .csv file
        """
        raise NotImplementedError
    #Aggregate Raw output methods
    def raw_DIST_compute(self,DIST_room,DIST_bldg,DIST_beyond,DIST_floor=None):
        """Compute the raw DIST value from the raw constituent chains
        Note that inputs should be in the form of numpy vectors.
        Inputs should also, by design, be of equal length.
        Returns:
            Numpy vector of raw DIST values compiled from extent chains.
        """
        # Each extent chain is weighted by its share of observed fires.
        total_fires = sum(self.firespread_count.values())
        roomweight = self.firespread_count['room']/total_fires
        bldgweight = self.firespread_count['bldg']/total_fires
        beyondweight = self.firespread_count['beyond']/total_fires
        raw_DIST = (roomweight*DIST_room+bldgweight*DIST_bldg+
                    beyondweight*DIST_beyond)
        # BUG FIX: floorweight was previously referenced whenever DIST_floor
        # was supplied, even with floor_extent False (where 'floor' has been
        # folded into 'bldg'), raising NameError. The floor term now applies
        # only when the floor extent is tracked AND a floor chain is given.
        if self.floor_extent and DIST_floor is not None:
            floorweight = self.firespread_count['floor']/total_fires
            raw_DIST = raw_DIST + floorweight*DIST_floor
        return raw_DIST
    def DIST_score(self,DIST_room,DIST_bldg,DIST_beyond,DIST_floor=None):
        """Compute the single value DIST score from the raw constituent chains
        Note that inputs should be in the form of numpy vectors.
        """
        raw_DIST = self.raw_DIST_compute(DIST_room,DIST_bldg,DIST_beyond,DIST_floor)
        # Negative raw values are clipped to zero before averaging.
        raw_DIST[raw_DIST < 0] = 0
        DIST_score = np.average(raw_DIST)
        return DIST_score
    #Output to file methods to be added
    #Output to file methods to be added
if __name__=="__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| Python |
from __future__ import division
import random
from math import log
class LowerBoundgreaterthanUpperBoundexception(Exception):
    """Raised when a DIST draw's lower bound exceeds its upper bound.

    BUG FIX: the draw_DIST_* methods raised this name without it ever being
    defined anywhere, so reaching that branch raised NameError instead of
    an intended, catchable model error.
    """
    pass
class DISTCalculate(object):
    """Contains the calculation methods for the DIST model.
    The DISTCalculate class contains the calculation methods for the DIST model.
    Additionally, it possesses attributes tracking present values of random
    draws for Gibbs sampling.
    The idea behind the class is that it will store the calculation methods
    for the distributions, as well as the random draw methods, and the present
    state of the variables being used in Gibbs sampling.
    Attributes:
        alarm_time(float): current value of alarm response time (s)
        dispatch_time(float): current value of dispatch time (s)
        turnout_time(float): current value of turnout time (s)
        arrival_time(float): current value of arrival time (s)
        suppression_time(float): current value of suppression time (s)
        room_area(float): current value of room_area (sq. ft.)
        bldg_area(float): current value of building area (sq. ft.)
        theta(float): value of theta as used by the model
        ao(float): value of ao as used by the model. This defaults to 1 sq. ft.
        floor_extent(boolean): indicate whether the floor firespread extent is
        included in the analysis
        DIST_room/DIST_bldg/DIST_beyond(float): current DIST draws for each
        firespread extent; DIST_floor and floor_area exist only when
        floor_extent is True.
    Additional attributes are invoked as necessary in class methods
    """
    def __init__(self, theta=7.08e-3, ao=1,floor_extent=False):
        """Initialize the DISTCalculate class
        Note that the variables set to None in the function need to be drawn
        before they assume their proper float value as described in the class
        docstring.
        """
        self.alarm_time = None
        self.dispatch_time = None
        self.turnout_time = None
        self.arrival_time = None
        self.suppression_time = None
        self.room_area = None
        self.bldg_area = None
        self.ao = ao
        self.theta = theta
        self.floor_extent = floor_extent
        self.DIST_room = None
        self.DIST_bldg = None
        self.DIST_beyond = None
        if floor_extent:
            # floor-extent attributes exist only when that extent is modeled
            self.DIST_floor = None
            self.floor_area = None
    #Time drawing methods
    def draw_uniform_alarm_time(self,lowerbound,upperbound):
        """Draw a new value of alarm time from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_alarm_time(20,30)
        >>> print round(test.alarm_time,2)
        29.66
        """
        self.alarm_time = random.uniform(lowerbound,upperbound)
    def draw_uniform_dispatch_time(self,lowerbound,upperbound):
        """Draw a new value of dispatch time from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_dispatch_time(20,30)
        >>> print round(test.dispatch_time,2)
        29.66
        """
        self.dispatch_time = random.uniform(lowerbound,upperbound)
    def draw_uniform_turnout_time(self,lowerbound,upperbound):
        """Draw a new value of turnout time from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_turnout_time(20,30)
        >>> print round(test.turnout_time,2)
        29.66
        """
        self.turnout_time = random.uniform(lowerbound,upperbound)
    def draw_uniform_arrival_time(self,lowerbound,upperbound):
        """Draw a new value of arrival time from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_arrival_time(20,30)
        >>> print round(test.arrival_time,2)
        29.66
        """
        self.arrival_time = random.uniform(lowerbound,upperbound)
    def draw_uniform_suppression_time(self,lowerbound,upperbound):
        """Draw a new value of suppression time from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_suppression_time(20,30)
        >>> print round(test.suppression_time,2)
        29.66
        """
        self.suppression_time = random.uniform(lowerbound,upperbound)
    #Room area drawing methods
    def draw_uniform_room_area(self,lowerbound,upperbound):
        """Draw a new value of room area from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_room_area(20,30)
        >>> print round(test.room_area,2)
        29.66
        """
        self.room_area = random.uniform(lowerbound,upperbound)
    def draw_sampled_room_area(self,list_of_room_areas):
        """Draw a new value of room area from a supplied list of room areas
        Note one should have upwards of 50-100 room areas before this starts
        to become valid.
        >>> random.seed(1234)
        >>> test.draw_sampled_room_area(range(1,10))
        >>> print test.room_area
        9
        """
        self.room_area = random.sample(list_of_room_areas,1)[0]
    #Building area drawing methods
    def draw_uniform_bldg_area(self,lowerbound,upperbound):
        """Draw a new value of building area from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_bldg_area(20,30)
        >>> print round(test.bldg_area,2)
        29.66
        """
        self.bldg_area = random.uniform(lowerbound,upperbound)
    def draw_res_census_bldg_area(
            self,list_of_building_size_categories,
            list_of_number_of_units_in_each_category):
        """Draw a new value of residential building area from AHS data
        Data will only be available for "metropolitan statistical areas" and
        nationally.

        Raises:
            NotImplementedError: stub awaiting AHS data support.
        """
        raise NotImplementedError
    #Floor area drawing methods
    def draw_uniform_floor_area(self,lowerbound,upperbound):
        """Draw a new value of floor area from its uniform distribution
        >>> random.seed(1234)
        >>> test.draw_uniform_floor_area(20,30)
        >>> print round(test.floor_area,2)
        29.66
        """
        self.floor_area = random.uniform(lowerbound,upperbound)
    #Private helpers shared by the draw_DIST_* methods
    def _total_task_time(self):
        """Return the sum of the five currently-drawn task times (s)."""
        return (self.alarm_time+self.dispatch_time+self.turnout_time+
                self.arrival_time+self.suppression_time)
    def _bounded_uniform(self,lowerbound,upperbound):
        """Validate bounds, then draw uniformly on [lowerbound, upperbound]."""
        if lowerbound > upperbound:
            raise LowerBoundgreaterthanUpperBoundexception
        return random.uniform(lowerbound,upperbound)
    #Define Method for drawing the values of DIST for different spread extents
    def draw_DIST_room(self):
        """Draw a new value of DIST corresponding to confined to Room extent
        Note that prior to using this method one must draw all of the time
        attributes and area attributes. See the doctest.
        >>> random.seed(1234)
        >>> call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
        ...              'suppression_time','room_area','bldg_area']
        >>> names = ['draw_uniform_{}'.format(x) for x in call_list]
        >>> for x in names:
        ...     callname = getattr(test, x)
        ...     callname(20,30)
        >>> test.draw_DIST_room()
        >>> print round(test.DIST_room,2)
        -94.1
        """
        tasktime = self._total_task_time()
        lowerbound = ((log(self.ao)-log(self.ao))/self.theta)-tasktime
        upperbound = ((log(self.room_area)-log(self.ao))/self.theta)-tasktime
        self.DIST_room = self._bounded_uniform(lowerbound,upperbound)
    def draw_DIST_floor(self):
        """Draw a new value of DIST corresponding to confined to floor extent
        Note that prior to using this method one must draw all of the time
        attributes and area attributes. See the doctest.
        >>> random.seed(1234)
        >>> call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
        ...              'suppression_time','room_area','floor_area','bldg_area']
        >>> names = ['draw_uniform_{}'.format(x) for x in call_list]
        >>> for x in names:
        ...     callname = getattr(test, x)
        ...     callname(30,30)
        >>> test.draw_DIST_floor()
        >>> print round(test.DIST_floor,2)
        330.4
        """
        tasktime = self._total_task_time()
        lowerbound = ((log(self.room_area)-log(self.ao))/self.theta)-tasktime
        upperbound = ((log(self.floor_area)-log(self.ao))/self.theta)-tasktime
        self.DIST_floor = self._bounded_uniform(lowerbound,upperbound)
    def draw_DIST_bldg(self):
        """Draw a new value of DIST corresponding to confined to building extent
        Note that prior to using this method one must draw all of the time
        attributes and area attributes. See the doctest.
        >>> random.seed(1234)
        >>> call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
        ...              'suppression_time','room_area','bldg_area']
        >>> names = ['draw_uniform_{}'.format(x) for x in call_list]
        >>> for x in names:
        ...     callname = getattr(test, x)
        ...     callname(30,30)
        >>> test.draw_DIST_bldg()
        >>> print round(test.DIST_bldg,2)
        330.4
        >>> random.seed(1234)
        >>> Dcalc = DISTCalculate(floor_extent=True)
        >>> call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
        ...              'suppression_time','floor_area','room_area','bldg_area']
        >>> names = ['draw_uniform_{}'.format(x) for x in call_list]
        >>> for x in names:
        ...     callname = getattr(Dcalc, x)
        ...     callname(20,20)
        >>> Dcalc.draw_DIST_bldg()
        >>> print round(Dcalc.DIST_bldg,2)
        323.13
        """
        tasktime = self._total_task_time()
        # The lower bound starts where the previous extent ends: the floor
        # area when floors are modeled, the room area otherwise.
        if self.floor_extent:
            lowerbound = ((log(self.floor_area)-log(self.ao))/self.theta)-tasktime
        else:
            lowerbound = ((log(self.room_area)-log(self.ao))/self.theta)-tasktime
        upperbound = ((log(self.bldg_area)-log(self.ao))/self.theta)-tasktime
        self.DIST_bldg = self._bounded_uniform(lowerbound,upperbound)
    def draw_DIST_beyond(self):
        """Draw a new value of DIST corresponding to beyond-building extent
        Note that prior to using this method one must draw all of the time
        attributes and area attributes. See the doctest.
        Note that the DIST_beyond extent functions differently. Rather than
        drawing from a uniform distribution, its value is deterministically
        calculated from the present values of the time attributes and the
        building area.
        >>> random.seed(1234)
        >>> call_list = ['alarm_time','dispatch_time','turnout_time','arrival_time',
        ...              'suppression_time','room_area','bldg_area']
        >>> names = ['draw_uniform_{}'.format(x) for x in call_list]
        >>> for x in names:
        ...     callname = getattr(test, x)
        ...     callname(40,40)
        >>> test.draw_DIST_beyond()
        >>> print round(test.DIST_beyond,2)
        321.03
        """
        tasktime = self._total_task_time()
        self.DIST_beyond = ((log(self.bldg_area)-log(self.ao))/self.theta)-tasktime
if __name__=="__main__":
    # Run the doctests with a shared DISTCalculate instance bound to `test`.
    import doctest
    doctest.testmod(extraglobs={'test': DISTCalculate()})
| Python |
from __future__ import division
import pylab as pl
import numpy as np
import pandas as pd
class tctGibbs:
    """Gibbs sampler for the fire time-to-containment (tcor) model.

    Draws fire-department task times uniformly within fixed bounds, then
    samples tcor / Ao / theta chains for fires contained to the room, to
    the building, and beyond the building.  runGibbs() drives all three
    extents and writes each chain to a CSV under ../RawOutput/.
    """
    #initialization
    # inputs: observed containment counts, task-time bounds (s), area
    # ranges (sq. ft.), and prior bounds on Ao and theta
    def __init__(self,cro=76,cbu=20,cwo=4,talup=120,tallow=90,
                 tdisup=80,tdislow=40,tturup=100,tturlow=60,tarrup=420,
                 tarrlow=300,tsupup=180,tsuplow=60,arearoomlow=72,
                 arearoomhigh=380,areabldglow=1088,areabldghigh=9004,
                 upAo=1,lowAo=0.1,uptheta=0.009900,lowtheta=0.003564):
        #observed bin values
        self.cro = cro #contained to room
        self.cbu = cbu #contained to building
        self.cwo = cwo #contained to world
        #specify constraints on the fixed parameters (seconds)
        self.talup = talup # upper bound on t_alarm
        self.tallow = tallow # lower bound on t_alarm
        self.tdisup = tdisup # upper bound on t_dispatch
        self.tdislow = tdislow # lower bound on t_dispatch
        self.tturup = tturup # upper bound on t_turnout
        self.tturlow = tturlow # lower bound on t_turnout
        self.tarrup = tarrup # upper bound on t_arrival
        self.tarrlow = tarrlow # lower bound on t_arrival
        self.tsupup = tsupup # upper bound on t_suppression
        self.tsuplow = tsuplow # lower bound on t_suppression
        #specify floor area ranges(sq. ft)
        self.arearoomhigh = arearoomhigh
        self.arearoomlow = arearoomlow
        self.areabldglow = areabldglow
        self.areabldghigh = areabldghigh
        #specify original bounds on Ao and theta
        self.upAo = upAo
        self.lowAo = lowAo
        self.uptheta = uptheta
        self.lowtheta = lowtheta
        #calculate total number of fires
        self.n_fires = cro+cbu+cwo
        #initial values for Ao and theta; currently held deterministic
        #(see draw_Ao / draw_theta, where the stochastic draws are disabled)
        self.AoRoom = self.upAo
        self.AoBldg = self.upAo
        self.AoBeyond = self.upAo
        self.thetaRoom = np.mean([self.uptheta,self.lowtheta])
        self.thetaBldg = np.mean([self.uptheta,self.lowtheta])
        self.thetaBeyond = np.mean([self.uptheta,self.lowtheta])
        #instantiate variables for tcor fires
        self.tcorRoom = 0
        self.tcorBldg = 0
        self.tcorBeyond = 0
        #initial draws for all of the task-time variables and their total
        self.draw_tfiretasks()
    #Create draw functions for the Gibbs sampler
    def draw_tfiretasks(self):
        """Draw the five fire-department task times and refresh their sum."""
        self.tal = np.random.uniform(self.tallow,self.talup)    # t_alarm
        self.tdis = np.random.uniform(self.tdislow,self.tdisup) # t_dispatch
        self.ttur = np.random.uniform(self.tturlow,self.tturup) # t_turnout
        self.tarr = np.random.uniform(self.tarrlow,self.tarrup) # t_arrival
        self.tsup = np.random.uniform(self.tsuplow,self.tsupup) # t_suppression
        self.tfiretasks = self.tal+self.tdis+self.ttur+self.tarr+self.tsup
    def draw_tcor(self,Amin,Amax,Ao,theta):
        """Draw tcor uniformly between the times implied by Amin and Amax
        under exponential growth with rate theta from initial area Ao."""
        lowtcor = (np.log(Amin)-np.log(Ao))/theta-self.tfiretasks
        uptcor = (np.log(Amax)-np.log(Ao))/theta-self.tfiretasks
        return np.random.uniform(lowtcor,uptcor)
    def draw_Ao(self,Amin,Amax,tcor,theta):
        """Return the current Ao value.

        NOTE: deliberately deterministic for now — the uniform draw over
        [lowAo, upAo] was disabled in the original code.
        """
        return self.upAo
    def draw_theta(self,Amin,Amax,tcor,Ao):
        """Return the current theta value (midpoint of its prior bounds).

        NOTE: deliberately deterministic for now — the uniform draw over
        [lowtheta, uptheta] was disabled in the original code.
        """
        return np.mean([self.uptheta,self.lowtheta])
    def draw_Abounds(self,Aboundmin,Aboundmax):
        """Draw an area threshold uniformly from [Aboundmin, Aboundmax]."""
        return np.random.uniform(Aboundmin,Aboundmax)
    #Gibbs sampling function
    def fireGibbs(self,n_iter,burn,thin,Aminboundmin,Aminboundmax,Amaxboundmin,Amaxboundmax,tcor,Ao,theta):
        """Run the sampler and return the (tcor, Ao, theta) chains.

        Iterates n_iter times, discarding the first `burn` iterations and
        retaining every `thin`-th iteration thereafter.  Trailing unused
        slots (when burn is not aligned with thin) keep the -1.0 sentinel.
        """
        print('fireGibbs called')
        #BUG FIX: ceil must apply to the quotient (n_iter-burn)/thin, not
        #to the numerator alone — the old formula under-allocated the
        #arrays for some (n_iter, burn, thin) combinations -> IndexError.
        n_store = int(np.ceil((n_iter-burn)/thin))
        #BUG FIX: fill with -1.0 so the arrays are float; np.full(n,-1)
        #infers an integer dtype that silently truncated tcor and theta.
        gibbstcor = np.full(n_store,-1.0)
        gibbsAo = np.full(n_store,-1.0)
        gibbstheta = np.full(n_store,-1.0)
        s = 0
        for i in range(0,n_iter):
            self.draw_tfiretasks()
            if Aminboundmin == Amaxboundmin and Aminboundmax == Amaxboundmax:
                #degenerate case: both thresholds share one range -> one draw
                Aminbound = self.draw_Abounds(Aminboundmin,Aminboundmax)
                Amaxbound = Aminbound
            else:
                Aminbound = self.draw_Abounds(Aminboundmin,Aminboundmax)
                Amaxbound = self.draw_Abounds(Amaxboundmin,Amaxboundmax)
            Ao = self.draw_Ao(Aminbound,Amaxbound,tcor,theta)
            theta = self.draw_theta(Aminbound,Amaxbound,tcor,Ao)
            tcor = self.draw_tcor(Aminbound,Amaxbound,Ao,theta)
            if i >= burn and i % thin == 0:
                gibbstcor[s] = tcor
                gibbsAo[s] = Ao
                gibbstheta[s] = theta
                s = s+1
        return (gibbstcor,gibbsAo,gibbstheta)
    #output storage function
    def gibbs_store(self,gibbsoutputlist,filenameoutputlist):
        """Write each chain to ../RawOutput/<matching filename> as CSV."""
        for chain, fname in zip(gibbsoutputlist, filenameoutputlist):
            with open('../RawOutput/'+fname,'wb') as f:
                np.savetxt(f,chain,delimiter=',')
    #Main class running function
    def runGibbs(self,n_iter=1000,burn=500,thin=5):
        """Sample and store chains for all three containment extents."""
        #Room fires first: the lower threshold range collapses to Ao
        gibbstcor,gibbsAo,gibbstheta = self.fireGibbs(n_iter,burn,thin,self.upAo,
                self.upAo,self.arearoomlow,self.arearoomhigh,
                self.tcorRoom,self.AoRoom,self.thetaRoom)
        self.gibbs_store([gibbstcor,gibbsAo,gibbstheta],['tcorRoom.csv',
                'AoRoom.csv','thetaRoom.csv'])
        #Building fires next
        gibbstcor,gibbsAo,gibbstheta = self.fireGibbs(n_iter,burn,thin,self.arearoomlow,
                self.arearoomhigh,self.areabldglow,
                self.areabldghigh,self.tcorBldg,
                self.AoBldg,self.thetaBldg)
        self.gibbs_store([gibbstcor,gibbsAo,gibbstheta],['tcorBldg.csv',
                'AoBldg.csv','thetaBldg.csv'])
        #Beyond-building fires last: min and max share the building range
        gibbstcor,gibbsAo,gibbstheta = self.fireGibbs(n_iter,burn,thin,self.areabldglow,
                self.areabldghigh,self.areabldglow,
                self.areabldghigh,self.tcorBeyond,
                self.AoBeyond,self.thetaBeyond)
        self.gibbs_store([gibbstcor,gibbsAo,gibbstheta],['tcorBeyond.csv',
                'AoBeyond.csv','thetaBeyond.csv'])
#Module-level driver: build the sampler and run a long, unthinned chain.
#NOTE(review): this executes on import and writes CSVs into ../RawOutput/;
#consider guarding it with `if __name__ == "__main__":`.
test = tctGibbs()
test.runGibbs(10000,0,1)
| Python |
#Anderson
#2-19
#Script: draw a census tract weighted by its historical residential fire
#count, and derive a normal approximation of the yearly fire count for use
#as a stopping rule.
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
import random
#import the data
incidents = pd.read_csv('../Data/ArlingtonCensusFireDataYearly.csv')
#aggregate the yearly number of residential structure fires that ACFD responded to
yeardist = incidents.groupby('year').aggregate('sum')['COUNT']
#aggregate the total number of residential fires that occurred in each census tract
tractincidents = incidents.groupby('GEO.id2').aggregate('sum')['COUNT']
#delete the incidents object to save memory (someday this may be huge)
del incidents
#add 1 to all tract incidents to allow zero incident tracts a small probability of selection
tractincidents = tractincidents + 1
#NOTE(review): Series.sort() is an in-place sort in old pandas and was
#removed in later versions — modern equivalent is sort_values(inplace=True).
tractincidents.sort()
#build the cumulative distribution for selecting tracts
tractshare = tractincidents/tractincidents.sum()
tractcum = tractshare.cumsum()
###figure out how to draw from the cumulative distribution
randdraw = 0.01 #pretend this comes from an rng
#index of the first tract whose cumulative share covers the draw
tractind = np.where(randdraw <= tractcum)[0].min()
#slice the distribution and retrieve the tract (index) corresponding to drawn value.
tractdraw = tractcum[tractind:tractind+1].index.tolist()
print tractdraw
###derive the normal distribution approximation to use as a stopping rule
yrmean = yeardist.mean()
print yrmean
print yeardist
yrvar=yeardist.var()
####perform normal draws
yrdraw = round(np.random.normal(yrmean,sqrt(yrvar)))
print yrdraw
| Python |
#Weinschenk
#12-14
#Script: bootstrap a year's worth of ignitions from historical incident
#data, grow each fire with a quadratic (t-squared) model, and bin the
#resulting fire sizes into damage-extent categories.
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import random
incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)
total_incidents = len(incident['incident_class_code'])
total_fires = 0
#class code 1 marks a fire incident
for i in incident['incident_class_code']:
    if i == 1:
        total_fires = total_fires + 1
years_of_data = 6
#random pull off historical data for ignition
fire_call_year = int(total_incidents/years_of_data)
ignition = zeros(fire_call_year, dtype=bool)
#NOTE(review): randrange starts at 1, so row 0 can never be sampled —
#confirm whether that is intentional.
for num in range(0,fire_call_year):
    rand = random.randrange(1,len(incident['incident_class_code']),1)
    if incident['incident_class_code'][rand] == 1:
        ignition[num] = True
print sum(ignition), 'projected fires' #prints number of fires per year
#determine location of fire and structure type
#firegrowth model
fire_size = zeros(sum(ignition))
room_of_origin = 0
floor_of_origin = 0
structure_loss = 0
for num in range(0,sum(ignition)):
    #alpha: fire growth coefficient; times in seconds, all uniform draws
    alpha = np.random.uniform(0.0029,0.047)
    time_to_alarm = np.random.uniform(30,60)
    time_to_dispatch = np.random.uniform(40,80)
    time_to_turnout = np.random.uniform(60,100)
    time_to_arrival = np.random.uniform(300,420)
    time_to_suppress = np.random.uniform(60,180)
    running_time = time_to_alarm+time_to_dispatch+time_to_turnout+time_to_arrival+time_to_suppress
    fire_size[num] = alpha*(running_time)**2
    #assessing damage typical resident structure
    #NOTE(review): a fire_size of exactly 2000 falls through to the total
    #structure-loss branch — confirm the intended boundary handling.
    if fire_size[num] < 2000:
        room_of_origin = room_of_origin + 1
    elif fire_size[num] > 2000 and fire_size[num] < 10000:
        floor_of_origin =floor_of_origin + 1
    else:
        structure_loss =structure_loss + 1
print room_of_origin, 'fire(s) room of origin |', floor_of_origin, ' fire(s) floor of origin |', structure_loss, 'fire(s) with total structure loss'
#firefighter response model
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
Utility script to patch a cmake project and convert a target to a browser plugin compatible bundle
Original Author(s): Richard Bateman
Created: 15 January 2010
License: Dual license model; choose one of two:
New BSD License
http://www.opensource.org/licenses/bsd-license.php
- or -
GNU Lesser General Public License, version 2.1
http://www.gnu.org/licenses/lgpl-2.1.html
Copyright 2009 Firebreath development team
"""
import re, sys
from optparse import OptionParser
class XCodeProject:
    """In-memory editor for an XCode .pbxproj file.

    Parses the project's target list on construction; patchTarget()
    rewrites a named target so it builds as a browser-plugin bundle
    (wrapper.cfbundle / com.apple.product-type.bundle); save() writes the
    patched text back out.
    """
    # Regular expressions (immutable class-level constants, compiled once).
    targetsBlock = re.compile(r"(\s*)(targets = \(([^)]*)\);)", re.S)
    targetsList = re.compile(r"(\s*)([A-F0-9]*) /\* ([^ ]+) \*/")
    targetBlockRegEx = r"(\s*)(%s[^{\n]*{[^}]*};)"
    prodRef = re.compile(r"productReference\s*=\s*(\S*)")
    prodType = re.compile(r'productType\s*=\s*"([^"]*)"')
    refBlockRegEx = r"%s /\* \S* \*/ = {[^}]+}"
    def __init__(self, fName):
        """Read the project file and index its targets by name.

        BUG FIX: the mutable state below (notably the targets dict) used
        to live on the class, so every instance shared one dict; it is
        now per-instance.
        """
        self.projectFname = fName
        with open(fName) as f:
            self.projectString = f.read()
        self.targets = dict()
        self.targetsBlockStr = self.targetsBlock.search(self.projectString).group(0)
        for entry in self.targetsList.findall(self.targetsBlockStr):
            # entry = (leading whitespace, hex object id, target name)
            self.targets[entry[2]] = entry[1]
        # State populated by patchTarget()
        self.targetBlockStr = ""
        self.newTargetBlockStr = ""
        self.prodRefId = None
        self.prodTypeStr = None
        self.refBlockStr = ""
        self.newRefBlockStr = ""
    def patchTarget(self, tName):
        """Rewrite target *tName* in place so it produces a cfbundle."""
        tId = self.targets[tName]
        projectString = self.projectString
        # Locate the target's definition block, its product reference id
        # and its current product type.
        self.targetBlockStr = re.search(self.targetBlockRegEx % tId, projectString).group(0)
        self.prodRefId = self.prodRef.search(self.targetBlockStr).group(1)
        self.prodTypeStr = self.prodType.search(self.targetBlockStr).group(1)
        self.refBlockStr = re.search(self.refBlockRegEx % self.prodRefId, projectString).group(0)
        # Swap the executable product for a bundle in both blocks.
        self.newRefBlockStr = self.refBlockStr.replace("compiled.mach-o.executable", "wrapper.cfbundle")
        self.newTargetBlockStr = self.targetBlockStr.replace(self.prodTypeStr, "com.apple.product-type.bundle")
        self.projectString = self.projectString.replace(self.targetBlockStr, self.newTargetBlockStr)
        self.projectString = self.projectString.replace(self.refBlockStr, self.newRefBlockStr)
    def save(self, fName):
        """Write the (possibly patched) project text to fName.

        Passing None falls back to the path the project was loaded from.
        """
        if fName is None:
            fName = self.projectFname
        with open(fName, "w") as f:
            f.write(self.projectString)
def Main():
    """Command-line entry point: parse options and patch the project.

    Requires both --filename and --target; otherwise prints usage help
    and exits with status 1.
    """
    # Define the command-line interface via OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-f", "--filename", dest = "fName", help="Filename of the xcode project to patch")
    parser.add_option("-t", "--target", dest = "target", help="Target to patch")
    options, args = parser.parse_args()
    if options.fName and options.target:
        # print(...) with a single pre-formatted argument is valid in both
        # Python 2 and Python 3 (the original print statement was 2-only).
        print("Patching XCode project %s with target %s" % (options.fName, options.target))
        proj = XCodeProject(options.fName)
        proj.patchTarget(options.target)
        proj.save(options.fName)
    else:
        parser.print_help()
        sys.exit(1)
if __name__ == "__main__":
    # Script entry point.
    Main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
Utility script to generate/modify Firebreath plug-in projects.
Original Author(s): Ben Loveridge, Richard Bateman
Created: 14 December 2009
License: Dual license model; choose one of two:
New BSD License
http://www.opensource.org/licenses/bsd-license.php
- or -
GNU Lesser General Public License, version 2.1
http://www.gnu.org/licenses/lgpl-2.1.html
Copyright 2009 Packet Pass, Inc. and the Firebreath development team
"""
import os, re, sys, time, uuid
from fbgen.gen_templates import *
from optparse import OptionParser
from ConfigParser import SafeConfigParser
def getTemplateFiles(basePath, origPath=None):
    """
    Recursively collect the template files under a directory tree.
    @param basePath location from which to start searching for files.
    @param origPath used to strip path information from the returned values. Defaults to None.
    @returns array of strings, each entry a single file path relative to origPath.
    """
    if origPath is None:
        origPath = basePath
    prefixLen = len(origPath) + len(os.path.sep)
    found = []
    for entry in os.listdir(basePath):
        fullPath = os.path.join(basePath, entry)
        if entry in (".", "..") or fullPath is None:
            continue
        if os.path.isdir(fullPath):
            # Recurse, keeping origPath so returned paths stay relative.
            found.extend(getTemplateFiles(fullPath, origPath))
        else:
            found.append(fullPath[prefixLen:])
    return found
def createDir(dirName):
    """
    Creates a directory, even if it has to create parent directories to do so.

    Recurses upward until an existing ancestor is found, then creates each
    missing level on the way back down.  (os.makedirs would do the same,
    but the recursive form is kept for its progress output.)
    """
    parentDir = os.path.dirname(dirName)
    # print(...) with one pre-formatted argument works in Python 2 and 3
    # (the original print statements were Python-2-only syntax).
    print("Parent of %s is %s" % (dirName, parentDir))
    if os.path.isdir(parentDir):
        print("Creating dir %s" % dirName)
        os.mkdir(dirName)
    else:
        createDir(parentDir)
        createDir(dirName)
def Main():
    """
    Parse the commandline and execute the appropriate actions.
    """
    # Define the command-line interface via OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-p", "--plugin-name", dest = "pluginName")
    parser.add_option("-i", "--plugin-identifier", dest = "pluginIdent",
        help = "3 or more alphanumeric characters (underscores allowed after first position)")
    parser.add_option("-c", "--company-name", dest = "companyName")
    parser.add_option("-d", "--company-domain", dest = "companyDomain")
    parser.add_option("-g", "--disable-gui", dest = "disableGUI")
    options, args = parser.parse_args()
    # Fall back to interactive prompting unless every option was supplied.
    if options.pluginName and options.pluginIdent and options.companyName and options.companyDomain:
        options.interactive = False
    else:
        options.interactive = True
    # Cached answers from previous runs live next to this script.
    scriptDir = os.path.dirname(os.path.abspath(__file__) )
    cfgFilename = os.path.join(scriptDir, ".fbgen.cfg")
    cfgFile = SafeConfigParser()
    cfgFile.read(cfgFilename)
    # Instantiate the appropriate classes
    plugin = Plugin(name = options.pluginName, ident = options.pluginIdent, disable_gui = options.disableGUI)
    plugin.readCfg(cfgFile)
    company = Company(name = options.companyName)
    company.readCfg(cfgFile)
    if options.interactive:
        try:
            plugin.promptValues()
            company.promptValues()
        except KeyboardInterrupt:
            print "" # get off of the line where the KeyboardInterrupt happened
            sys.exit(0) # terminate gracefully
    plugin.updateCfg(cfgFile)
    company.updateCfg(cfgFile)
    guid = GUID(ident = plugin.ident, domain = company.domain)
    # Generate the guids needed by the templates
    generatedGuids = AttrDictSimple()
    generatedGuids.GUIDS_TYPELIB = guid.generate("TYPELIB")
    generatedGuids.GUIDS_CONTROLIF = guid.generate("CONTROLIF")
    generatedGuids.GUIDS_CONTROL = guid.generate("CONTROL")
    generatedGuids.GUIDS_JSIF = guid.generate("JSIF")
    generatedGuids.GUIDS_JSOBJ = guid.generate("JSOBJ")
    generatedGuids.GUIDS_EVTSRC = guid.generate("EVTSRC")
    generatedGuids.GUIDS_INSTPROD = guid.generate("INSTPROD")
    generatedGuids.GUIDS_INSTUPGR = guid.generate("INSTUPGR")
    generatedGuids.GUIDS_INSTUPGR64 = guid.generate("INSTUPGR64")
    generatedGuids.GUIDS_companydircomp = guid.generate("companydircomp")
    generatedGuids.GUIDS_installdircomp = guid.generate("installdircomp")
    # Time-related values used in templates
    templateTime = AttrDictSimple(YEAR = time.strftime("%Y"))
    # Save configuration for another go
    # NOTE(review): the file handle passed to cfgFile.write is never closed;
    # a `with` block would be safer.
    cfgFile.write(open(cfgFilename, "wb") )
    # Make sure we can get into the projects directory
    basePath = os.path.join(scriptDir, "projects")
    if not os.path.isdir(basePath):
        try:
            os.mkdir(basePath)
        except:
            print "Unable to create directory", basePath
            sys.exit(1)
    # Try to create a directory for this project
    projPath = os.path.abspath(os.path.join(basePath, "%s" % plugin.ident))
    if os.path.isdir(projPath):
        try:
            overwrite = raw_input("\nDirectory already exists. Continue anyway? [y/N] ")
        except KeyboardInterrupt:
            print "" # get off of the line where the KeyboardInterrupt happened
            sys.exit(0) # terminate gracefully
        if len(overwrite) == 0 or overwrite[0] not in ("Y", "y"):
            print "\nAborting"
            sys.exit(1)
    else:
        try:
            os.mkdir(projPath)
        except:
            print "Failed to create project directory", projPath
            sys.exit(1)
    print "\nProcessing templates"
    srcDir = os.path.join(scriptDir, "fbgen", "src")
    srcDirLen = len(srcDir) + len(os.path.sep)
    templateFiles = getTemplateFiles(srcDir)
    for tpl in templateFiles:
        try:
            tplPath, tplFilename = os.path.split(tpl)
            # Files named Template* are renamed after the plugin identifier.
            if tplFilename.startswith("Template"):
                tplFilename = tplFilename.replace("Template", plugin.ident, 1)
            if tplPath:
                filename = os.path.join(projPath, tplPath, tplFilename)
            else:
                filename = os.path.join(projPath, tplFilename)
            dirname = os.path.dirname(filename)
            if not os.path.isdir(dirname):
                createDir(dirname)
            tplFile = os.path.join("fbgen", "src", tpl)
            print tplFile
            template = Template(tplFile)
            #Special case for binary files
            # NOTE(review): `input` shadows the builtin, and these handles
            # are never explicitly closed — consider `with` blocks.
            if(tplFilename == "background.png"):
                input = open(tplFile, "rb")
                output = open(filename, "wb")
                output.write(input.read())
            else:
                f = open(filename, "wb")
                f.write(template.process(plugin, company, guid, generatedGuids, templateTime))
            print " Processed", tpl
        except:
            print " Error processing", tpl
            raise
    print "Done. Files placed in", projPath
if __name__ == "__main__":
    # Script entry point.
    Main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
Utility script to import doxygen docs into confluence
Original Author(s): Richard Bateman
Created: 18 October 2009
License: Dual license model; choose one of two:
New BSD License
http://www.opensource.org/licenses/bsd-license.php
- or -
GNU Lesser General Public License, version 2.1
http://www.gnu.org/licenses/lgpl-2.1.html
Copyright 2009 the Firebreath development team
"""
import os, sys, SOAPpy
from xml.dom import minidom
from itertools import izip
nameCount = {}
class Doxygen2Confluence:
    """Pushes doxygen-generated HTML documentation into Confluence via its
    SOAP RPC API, mapping doxygen refids to wiki page names.
    """
    # NOTE(review): these mutable class-level attributes (dicts/lists) are
    # shared by all instances; with a single instance this is harmless.
    nameCount = {}
    inputHtmlPath = os.path.join("docs", "html")
    outputHtmlPath = os.path.join("docs", "patched")
    inputList = {}
    pathMap = {}
    # %s is the Confluence page title
    baseUrl = "/display/documentation/%s"
    classDocsUrl = "http://classdocs.firebreath.org/"
    url = "http://www.firebreath.org/rpc/soap-axis/confluenceservice-v2?wsdl"
    server = SOAPpy.SOAPProxy(url)
    rpc = SOAPpy.WSDL.Proxy(url)
    token = ""
    space = "documentation"
    # Confluence page ids that act as parents for each doxygen entity kind
    topPages = {
        "class" : "1280302",
        "struct" : "1280302",
        "namespace" : "1280313",
        "file" : "1280310",
        "typedef": "1282223",
        "function": "1282221",
        #"enum": "1280313",
    }
    parents = {}
    createdPages = []
    username = ""
    password = ""
    def login(self):
        """Authenticate against Confluence and cache the session token."""
        self.token = self.rpc.login(self.username, self.password)
    def __init__(self, username, password):
        """Store credentials, install the custom SOAP parser, and log in."""
        # Replace SOAPpy's parser hook with confluence_soap_parser
        # (presumably defined later in this class — not visible here).
        SOAPpy.Parser._parseSOAP = self.confluence_soap_parser
        self.username = username
        self.password = password
        self.login()
def getName(self, name):
count = 1
retVal = name.replace("::", " ")
if name in self.nameCount:
count = self.nameCount[name]
count = count + 1
retVal = "%s (%s)" % (name, count)
self.nameCount[name] = count
return retVal.replace("<", "(").replace(">", ")").replace("/", " ")
    def makeFirstPageInConfluence(self, pageId, targetPageId):
        """Move targetPageId above pageId's current first child so it
        becomes the first page, unless it already is."""
        children = self.rpc.getChildren(self.token, SOAPpy.Types.longType(long(pageId)))
        if len(children) and children[0]["id"] != targetPageId:
            print "Moving %s to before %s" % (targetPageId, children[0]["id"])
            self.rpc.movePage(self.token, SOAPpy.Types.longType(long(targetPageId)), SOAPpy.Types.longType(long(children[0]["id"])), "above")
def exportToConfluence(self, refId, pageName, kind):
try:
page = self.rpc.getPage(self.token, self.space, pageName)
except:
try:
self.login()
page = self.rpc.getPage(self.token, self.space, pageName)
except:
page = {"space": self.space, "title": pageName}
if kind == "file":
filename = "%s_source.html" % refId
else:
filename = "%s.html" % refId
npage = {
"content": "{doxygen_init}{html-include:url=http://classdocs.firebreath.org/patched/%s}" % filename,
"space": page["space"],
"title": page["title"],
}
if hasattr(page, 'id'):
npage["id"] = SOAPpy.Types.longType(long(page["id"]))
npage["parentId"] = SOAPpy.Types.longType(long(self.parents[refId]))
npage["version"] = SOAPpy.Types.intType(int(page["version"]))
n = 0
while n < 10:
try:
npage["content"] = self.rpc.convertWikiToStorageFormat(self.token, npage['content'])
npage = self.rpc.storePage(self.token, npage)
self.createdPages.append(npage["id"])
self.rpc.setContentPermissions(self.token, SOAPpy.Types.longType(long(npage["id"])), "Edit", [ {'groupName': 'confluence-administrators', 'type': 'Edit'} ])
break;
except Exception as ex:
self.login()
print type(ex)
print ex.args
print ex
pass
return npage["id"]
    def cleanConfluence(self):
        """Remove pages under each top-level kind page that were not
        (re)created during this run and are not top pages themselves."""
        for kind, id in self.topPages.items():
            print "Scanning pages for %s (id %s)" % (kind, id)
            pages = self.rpc.getDescendents(self.token, SOAPpy.Types.longType(long(id)))
            for page in pages:
                # A page is defunct if this run did not create/update it.
                if (page["id"] not in self.createdPages) and (page["id"] not in self.topPages.values()):
                    print "Removing defunct page: %s (%s)" % (page["title"], page["id"])
                    self.rpc.removePage(self.token, SOAPpy.Types.longType(long(page["id"])))
def processDirectory(self, path):
    """Parse the doxygen index and build the export worklist.

    Populates:
      self.inputList -- {refid: {kind, name, members: {memRefId: {kind, name}}}}
      self.pathMap   -- {generated html filename: confluence page URL}

    `path` is the directory containing the generated HTML files; note the
    index itself is read from the hard-coded "docs/xml/index.xml".
    """
    xml = minidom.parse("docs/xml/index.xml")
    compounds = xml.documentElement.getElementsByTagName("compound")
    refidMap = {}
    Info = {}
    for com in compounds:
        refid = com.getAttribute("refid")
        kind = com.getAttribute("kind")
        compoundName = com.getElementsByTagName("name")[0].firstChild.wholeText
        # Page title: e.g. "class FB PluginCore" -> whatever getName maps it to.
        realName = self.getName("%s %s" % (kind, compoundName.replace("::", " ")))
        if os.path.exists(os.path.join(path, "%s-members.html" % refid)):
            refidMap["%s-members.html" % refid] = self.baseUrl % (realName + " Members")
        filename = "%s.html" % refid
        if kind == "file":
            # File pages link to the annotated source listing instead.
            filename = "%s_source.html" % refid
        if os.path.exists(os.path.join(path, filename)):
            Info[refid] = {}
            Info[refid]["kind"] = kind
            Info[refid]["name"] = realName
            Info[refid]["members"] = {}
            refidMap[filename] = self.baseUrl % realName
        if kind == "file":
            # File compounds have no member pages worth exporting.
            print "%s => %s" % (filename, self.baseUrl % realName)
            continue
        for mem in com.getElementsByTagName("member"):
            memName = mem.getElementsByTagName("name")[0].firstChild.wholeText
            memRefId = mem.getAttribute("refid")
            # Trim the trailing anchor suffix ("..._1abcdef" -> container id).
            memRefId = memRefId[0:memRefId.rindex("_")]
            memKind = mem.getAttribute("kind")
            if memKind == "enumvalue":
                continue
            if (os.path.exists(os.path.join(path, memRefId + ".html"))):
                if kind == "namespace":
                    localName = self.getName("%s %s %s" % (memKind, compoundName, memName))
                else:
                    localName = self.getName(Info[refid]["name"] + " " + memName)
                refidMap["%s.html" % memRefId] = self.baseUrl % localName
                Info[refid]["members"][memRefId] = {}
                Info[refid]["members"][memRefId]["kind"] = memKind
                Info[refid]["members"][memRefId]["name"] = localName
    self.inputList = Info
    self.pathMap = refidMap
def processFile(self, filename, inPath, outPath):
    """Rewrite one generated HTML file, replacing doxygen-relative links
    with Confluence page URLs and absolutizing image paths, then write the
    result to outPath.
    """
    infile = open(os.path.join(inPath, filename), "r")
    try:
        fileText = infile.read()
    finally:
        # Close handles even if the read fails (they previously leaked).
        infile.close()
    # FIX: iterate the mapping's pairs directly with items() instead of
    # zipping keys() with values(); also avoid shadowing the id() builtin.
    for refName, url in self.pathMap.items():
        try:
            fileText = fileText.replace(refName, url)
        except UnicodeDecodeError:
            # Mixed-encoding content: retry with both sides as utf-8 bytes.
            fileText = fileText.replace(refName.encode('utf8'), url.encode('utf8'))
    # Doxygen emits relative image paths; point them at the docs host.
    fileText = fileText.replace(r'img src="', r'img src="http://classdocs.firebreath.org/')
    outfile = open(os.path.join(outPath, filename), "w")
    try:
        outfile.write(fileText)
    finally:
        outfile.close()
def writeNewFiles(self, inPath, outPath):
    """Run every generated HTML page through processFile into outPath,
    creating the output directory if needed."""
    if not os.path.exists(outPath):
        os.mkdir(outPath)
    # Fixed top-level index pages first.
    for indexPage in ("annotated.html", "hierarchy.html", "files.html",
                      "namespaces.html", "classes.html"):
        self.processFile(indexPage, inPath, outPath)
    # Then each compound page, its member list (if present), and every
    # individual member page.
    for refid, item in self.inputList.items():
        pageFile = ("%s_source.html" if item["kind"] == "file" else "%s.html") % refid
        memberListFile = "%s-members.html" % refid
        if os.path.exists(os.path.join(inPath, memberListFile)):
            self.processFile(memberListFile, inPath, outPath)
        self.processFile(pageFile, inPath, outPath)
        for memid in item["members"]:
            self.processFile("%s.html" % memid, inPath, outPath)
def begin(self):
    """Top-level driver: scan the doxygen output, rewrite the HTML, export
    every page to Confluence, and prune pages that no longer exist."""
    self.processDirectory(self.inputHtmlPath)
    self.writeNewFiles(self.inputHtmlPath, self.outputHtmlPath)
    for refid, item in self.inputList.items():
        parentId = None
        # Each compound hangs under the fixed top-level page for its kind.
        if item["kind"] in self.topPages:
            parentId = self.topPages[item["kind"]]
        else:
            print "Could not find %s in " % item["kind"], self.topPages
            continue
        self.parents[refid] = parentId
        print "Exporting %s to confluence..." % item["name"]
        pageId = self.exportToConfluence(refid, item["name"], item["kind"])
        for memid, mem in item["members"].items():
            #print "Exporting %s to confluence..." % mem["name"]
            # Namespace members of a "top page" kind attach to that top page;
            # everything else nests under its compound's page.
            if item["kind"] == "namespace" and mem["kind"] in self.topPages:
                self.parents[memid] = self.topPages[mem["kind"]]
            else:
                self.parents[memid] = pageId
            self.exportToConfluence(memid, mem["name"], mem["kind"])
        if os.path.exists(os.path.join(self.inputHtmlPath, "%s-members.html" % refid)):
            # The member-index page goes first among the compound's children.
            self.parents["%s-members" % refid] = pageId
            membersPageId = self.exportToConfluence("%s-members" % refid, "%s Members" % item["name"], "members")
            self.makeFirstPageInConfluence(pageId, membersPageId)
    self.cleanConfluence()
# This parser is due to this bug https://jira.atlassian.com/browse/CONF-6720
# once that bug is fixed this parser can be retired
def confluence_soap_parser(self, xml_str, rules=None, parser=SOAPpy.Parser._parseSOAP):
    """Work around CONF-6720: Confluence doubles the soapenc:Array type
    attribute; collapse the duplicate before handing off to the real
    SOAPpy parser (bound once as the `parser` default)."""
    attribute = 'xsi:type="soapenc:Array"'
    xml_str = xml_str.replace('%s %s' % (attribute, attribute), attribute)
    return parser(xml_str, rules=rules)
def Main():
    """
    Parse the commandline and execute the appropriate actions.

    argv[1]: input HTML directory; argv[2]: output (patched) HTML directory.
    """
    a = Doxygen2Confluence(sys.argv[1], sys.argv[2])
    a.begin()

if __name__ == "__main__":
    Main()
| Python |
# ############################################################
# Original Author: Georg Fritzsche
#
# Created: November 6, 2009
# License: Dual license model; choose one of two:
# New BSD License
# http://www.opensource.org/licenses/bsd-license.php
# - or -
# GNU Lesser General Public License, version 2.1
# http://www.gnu.org/licenses/lgpl-2.1.html
#
# Copyright 2009 Georg Fritzsche, Firebreath development team
# ############################################################
#!/usr/bin/python
import time
from datetime import date
def tern(a, b, c):
    """Ternary helper: return c when a is truthy, otherwise b.

    Note the (historical) argument order: the truthy branch is the THIRD
    argument, and both b and c are always evaluated by the caller.
    """
    if a:
        return c
    return b
# ######################################
# Generator configuration and indentation bookkeeping.
max_args = 10
tab = "\t"
endl = "\n"
headerFileName = "MethodConverter.h"
includeGuardName = "METHOD_CONVERTER_H"
methodWrapStructName = "method_wrapper"
makeWrapperFunctionName = "make_method"
callMethodFunctorName = "FB::CallMethodFunctor"
indent = 0

def ind_in():
    """Increase the current output indentation by one level."""
    global indent
    indent += 1

def ind_out():
    """Decrease the current output indentation by one level."""
    global indent
    indent -= 1

def ind():
    """Return the whitespace prefix for the current indentation level."""
    return indent * tab
# ######################################
# Output file and line writer; every wl() call below emits exactly one
# line of the generated header at the current indentation.
f = open(headerFileName, "w")
def wl(s):
    # Prefix with the current indentation and terminate the line.
    f.write(ind()+s+endl)
# legal: license header of the generated file
wl("/**********************************************************\\")
wl("Original Author: Georg Fritzsche")
wl("Generated on: " + date.today().isoformat())
wl("License: Dual license model; choose one of two:")
wl(" New BSD License")
wl(" http://www.opensource.org/licenses/bsd-license.php")
wl(" - or -")
wl(" GNU Lesser General Public License, version 2.1")
wl(" http://www.gnu.org/licenses/lgpl-2.1.html")
wl("")
wl("Copyright 2009 Georg Fritzsche, Firebreath development team")
wl("\\**********************************************************/")
wl("")
# start: include guard
wl("")
wl("#if !defined("+includeGuardName+")")
wl("#define "+includeGuardName)
wl("")
wl("#if defined(_MSC_VER)")
wl("# pragma once")
wl("#endif")
wl("")
# includes
wl("#include <boost/function.hpp>")
wl("#include <boost/bind.hpp>")
wl("#include \"ConverterUtils.h\"")
wl("")
# prologue: open namespace FB
wl("namespace FB")
wl("{")
ind_in()
# wrapper: open namespace detail::methods
wl("namespace detail { namespace methods")
wl("{")
ind_in()
wl("using FB::detail::plain_type;")
wl("using FB::convertArgument;")
wl("")
# One wrapper struct per arity 0..max_args, in two flavors each: the
# general template and a partial specialization for void returns (which
# must not 'return' the member-call expression).
for i in range(max_args+1):
    for voidReturn in [False, True]:
        # Template parameter list; the void specialization drops 'class R'.
        s = "template<class C"
        if not voidReturn:
            s += ", class R"
        for i2 in range(i):
            s += ", class T"+str(i2)
        wl(s + ", class F>")
        s = "struct "+methodWrapStructName+str(i)
        if voidReturn: # specializing on void return type
            s += "<C, void"
            for i2 in range(i):
                s += ", T"+str(i2)
            s += ", F>"
        wl(s)
        wl("{")
        ind_in()
        wl("typedef FB::variant result_type;")
        wl("F f;")
        wl(methodWrapStructName+str(i)+"(F f) : f(f) {}")
        wl("FB::variant operator()(C* instance, const FB::VariantList& in)")
        wl("{")
        ind_in()
        # Argument-count guard: exact match for nullary methods, the
        # checkArgumentCount helper (aware of the last arg's type) otherwise.
        if i<1:
            wl("if(in.size() != 0)")
        else:
            wl("typedef typename plain_type<T"+str(i-1)+">::type TLast;")
            wl("if(!checkArgumentCount<TLast>(in, "+str(i)+"))")
        wl(tab+"return FB::variant();")
        if voidReturn: # specializing on void return type
            s = "(instance->*f)("
        else:
            s = "return (instance->*f)(";
        # if i>0:
        # s+="in[0].convert_cast<typename FB::detail::plain_type<T0>::type>()"
        # for i2 in range(1,i):
        # s+= ", in["+str(i2)+"].convert_cast<typename FB::detail::plain_type<T"+str(i2)+">::type>()"
        if i<1:
            wl(s+");")
        else:
            # Arguments 0..i-2 go through convertArgument one by one; the
            # final argument uses convertLastArgument.
            wl(s)
            ind_in()
            s = "convertArgument<typename plain_type<T0>::type>(in[0], 1)"
            if i>1:
                for i2 in range(1,i-1):
                    wl(s+",")
                    s = "convertArgument<typename plain_type<T"+str(i2)+">::type>(in["+str(i2)+"], "+str(i2+1)+")"
                wl(s+",")
            wl("convertLastArgument<TLast>(in, "+str(i)+"));")
            ind_out()
        if voidReturn: # specializing on void return type
            wl("return FB::variant();")
        ind_out()
        wl("}")
        ind_out()
        wl("};")
# close namespace detail::methods
ind_out()
wl("} } // namespace detail::methods")
wl("")
# make_wrapper: factory overloads that bind a member-function pointer into
# a CallMethodFunctor -- one const and one non-const overload per arity.
for i in range(max_args+1):
    for constness in ['', ' const']:
        # "class T0, class T1, ..." and "T0, T1, ..." fragments.
        typenames = ""
        if i>0:
            typenames += "class T0"
            for i2 in range(1,i):
                typenames += ", class T"+str(i2)
        typenames_app = ""
        if i>0:
            typenames_app = ", "+typenames
        types = ""
        if i>0:
            types += "T0"
            for i2 in range(1,i):
                types += ", T"+str(i2)
        # NOTE(review): stray debug print -- goes to stdout, not the header.
        print " * "+types
        types_app = ""
        if i>0:
            types_app = ", "+types
        wl("template<class C, class R"+typenames_app+">")
        wl("inline "+callMethodFunctorName)
        wl(makeWrapperFunctionName+"(C* instance, R (C::*function)("+types+")"+constness+")")
        wl("{")
        ind_in()
        wl("return boost::bind(FB::detail::methods::"+methodWrapStructName+str(i)+"<C, R"+types_app+", R (C::*)("+types+")"+constness+">(function), instance, _1);")
        ind_out()
        wl("}")
        wl("")
# epilogue: close namespace FB and the include guard
ind_out()
wl("} // namespace FB")
wl("")
wl("#endif //"+includeGuardName)
wl("")
f.close()
| Python |
#!/usr/bin/env python
import os, re, string, sys, uuid
class AttrDictSimple(dict):
    """A dict whose entries can also be read/written/deleted as attributes.

    BUG FIX: a missing attribute now raises AttributeError (the data-model
    contract for __getattr__) instead of leaking KeyError, so hasattr(),
    getattr(..., default), and copy/pickle machinery behave correctly.
    """
    def __getattr__(self, attr):
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        self[attr] = value
    def __delattr__(self, attr):
        del self[attr]
class Template(string.Template):
    """string.Template variant that uses '@' as the substitution delimiter
    and loads its body from a file on disk."""
    delimiter = "@"
    def __init__(self, filename):
        """Read the template body from `filename`.

        Raises ValueError when the file does not exist.
        """
        if not os.path.isfile(filename):
            raise ValueError('Unable to read file with name %s' % filename)
        # FIX: close the handle instead of leaking it, and name the base
        # class explicitly -- super(self.__class__, ...) recurses forever
        # if this class is ever subclassed.
        template_file = open(filename)
        try:
            text = template_file.read()
        finally:
            template_file.close()
        super(Template, self).__init__(text)
    def process(self, *args):
        """Merge replacement dicts generated from each arg (dicts pass
        through; FBGen Base objects are expanded) and substitute them."""
        params = AttrDictSimple()
        for arg in args:
            params.update(self.generateReplacementDict(arg))
        return self.substitute(params)
    def generateReplacementDict(self, obj):
        """Build {CLASSNAME_field: value} replacements from an FBGen object.

        Plain dicts are returned unchanged.  The PLUGIN_disable_gui flag is
        rewritten into the cmake fragment (and its Mac counterpart) that the
        templates expect.
        """
        if isinstance(obj, dict):
            return obj
        assert isinstance(obj, Base), "Must provide a base FBGen class"
        retdict = AttrDictSimple([("%s_%s" % (obj.__class__.__name__.upper(), k), obj[k]) for k in obj.keys.keys() if hasattr(obj, k)])
        # FIX: idiomatic None test (was "!= None"); stray semicolons dropped.
        if retdict.get('PLUGIN_disable_gui') is not None:
            if retdict.get('PLUGIN_disable_gui') == "true":
                retdict.update(PLUGIN_disable_gui='set (FB_GUI_DISABLED')
                retdict.update(PLUGIN_disable_gui_mac='0')
            else:
                retdict.update(PLUGIN_disable_gui='#set (FB_GUI_DISABLED')
                retdict.update(PLUGIN_disable_gui_mac='1')
        return retdict
class Base(object):
    """Common behavior for FBGen config objects (Plugin, Company, ...).

    Subclasses declare their fields as class attributes; `self.keys` maps
    each field name to (prompt label, validation regex, error message).
    """
    # Allow "%(field)s" % obj formatting by routing item access to attributes.
    def __getitem__(self, item): return getattr(self, item)
    def __init__(self, **kwargs):
        # Accept only keyword args that match declared fields.
        for k, v in kwargs.items():
            if hasattr(self, k): setattr(self, k, v)
        self.keys = AttrDictSimple(
            name = ("Name", re.compile(r"^.+$"), "Name must be at least one character, and may not contain carriage returns."),
            ident = ("Identifier", re.compile(r"^[a-zA-Z][a-zA-Z\d_]{2,}$"), "Identifier must be 3 or more alphanumeric characters (underscore allowed)."),
            desc = ("Description", re.compile(r"^.+$"), "Description must be one or more characters long!"),
            prefix = ("Prefix", re.compile(r"^[a-zA-Z][a-zA-Z\d_]{2,4}$"), "Prefix must be 3 to 5 alphanumeric characters (underscores allowed)."),
            domain = ("Domain", re.compile(r"^([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*\.)*[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*\.[a-zA-Z]{2,4}$"), "Domain must be a valid domain name."),
            mimetype = ("MIME type", re.compile(r"^[a-zA-Z0-9]+\/[a-zA-Z0-9\-]+$"), "Please use alphanumeric characters and dashes in the format: application/x-firebreath"),
            disable_gui = ("has no UI", re.compile(r"^true$|false$"), "Please enter valid input: true or false"),
        )
    def getValue(self, key, default):
        """Prompt for `key` (showing `default`) until the input matches the
        field's validation regex; empty input accepts the default."""
        desc, regex, error = self.keys[key]
        if default is None:
            default = ""
        value = raw_input("%s %s [%s]: " % (self.__class__.__name__, desc, default)) or default
        if regex.match(value):
            return value
        else:
            print "Invalid syntax: %s" % error
            # Re-prompt recursively until valid input is supplied.
            return self.getValue(key, default)
    def promptValues(self):
        """
        Override in sub-classes. Prompts for necessary configuration information.
        """
        pass
    def readCfg(self, cfg):
        """
        Override in sub-classes. Reads an existing configuration out of a file for anything not already defined by other means.
        """
        pass
    def updateCfg(self, cfg):
        """
        Override in sub-classes. Updates a configuration object with current values.
        """
        pass
class JSAPI_Member(Base):
    """
    Used to define a JSAPI Member. This may go away in a future version as I haven't quite decided how to deal with these yet.
    """
    # Identifier and declared FBGen type name (a key into self.types).
    ident = None
    type = None
    def __init__(self):
        # NOTE(review): Base.__init__ is not invoked here, so self.keys is
        # never set on JSAPI members -- confirm this is intentional.
        print "Initializing JSAPI_Member"
        # Maps FBGen type names to the C++ types emitted into templates.
        self.types = AttrDictSimple(
            string = "std::string",
            int = "long", # changed int to long since IE will pass it as a long anyway and we want to avoid issues.
            long = "long",
            double = "double",
            bool = "bool",
            variant = "FB::variant",
            dynamic = "FB::VariantList",
            JSOBJ = "FB::JSAPIPtr",
            API = "FB::JSObject",
        )
    def translateType(self, type):
        """Return the C++ type for the FBGen type name `type`."""
        return self.types[type]
    def isValidType(self, type):
        """True when `type` is a known FBGen type name."""
        return self.types.has_key(type)
    def setType(self, type):
        self.type = type
    def getRealType(self):
        """C++ type corresponding to this member's declared type."""
        return self.translateType(self.type)
class JSAPI_Property(JSAPI_Member):
    """
    Used to define a JSAPI Property. This may go away in a future version as I haven't quite decided how to deal with these yet.
    """
    def __init__(self, ident, type):
        """Create a property with identifier `ident` of FBGen type `type`.

        Raises Exception when `type` is not a known FBGen type name.
        """
        super(JSAPI_Property, self).__init__()
        if not self.isValidType(type):
            # BUG FIX: the format arguments sat outside the parentheses
            # ("... % type, join(...)"), so building this message raised a
            # TypeError ("not enough arguments for format string") instead
            # of the intended Exception.  Now matches JSAPI_Method's form.
            raise Exception("Invalid type %s. Valid types are: %s" % (type, ', '.join(self.types)))
        self.type = type
        self.ident = ident
class JSAPI_Method(JSAPI_Member):
    """
    Describes a JSAPI method: an identifier, a return type name, and the
    list of argument type names.  May go away in a future version.
    """
    argTypes = ["string"]
    def __init__(self, ident, type, argTypes):
        """Create a method description, validating every argument type.

        Raises Exception on the first argument type that is not a known
        FBGen type name (after the fields have been assigned, as before).
        """
        super(JSAPI_Method, self).__init__()
        self.type = type
        self.ident = ident
        self.argTypes = argTypes
        for argName in argTypes:
            if not self.isValidType(argName):
                raise Exception("Invalid type %s. Valid types are: %s" % (argName, ', '.join(self.types)))
    def getRealArgTypes(self):
        """Return the C++ type for each declared argument type name."""
        return [self.translateType(argName) for argName in self.argTypes]
class Plugin(Base):
    """Interactive/config-backed description of the plugin being generated.

    Fields: name, ident, prefix, desc, mimetype, disable_gui.
    """
    name = None
    ident = None
    prefix = None
    desc = None
    mimetype = None
    disable_gui = None
    def makeDefaultPrefix(self, startName, delim = " "):
        """Derive a 3-5 character uppercase prefix from a CamelCase name.

        Splits `startName` at word boundaries (including acronym seams such
        as "XMLFoo"); if the initials alone are too few, letters from the
        trailing word(s) pad the prefix out to the minimum length.
        Returns None when `startName` is None.
        """
        MIN_LEN_PREFIX = 3
        MAX_LEN_PREFIX = 5
        # Boundary between an acronym and a word, or lowercase-to-uppercase.
        pattern = re.compile(r"([A-Z][A-Z][a-z])|([a-z][A-Z])")
        if startName is None:
            return None
        if MIN_LEN_PREFIX <= len(startName) <= MAX_LEN_PREFIX:
            # Already prefix-sized: just uppercase it.
            return startName.upper()
        # FIX: dropped the no-op `normalize = lambda s: s` indirection and
        # fixed the local "seperated" typo; behavior is unchanged.
        # NOTE(review): words are split on whitespace even when a custom
        # delim is passed -- confirm a non-whitespace delim is ever used.
        separated = pattern.sub(lambda m: m.group()[:1] + delim + m.group()[1:], startName)
        words = separated.split()
        if MIN_LEN_PREFIX <= len(words) <= MAX_LEN_PREFIX:
            return "".join([lett[0] for lett in words]).upper()[:MAX_LEN_PREFIX]
        # Too few words: pad with leading letters taken from the last word(s).
        postfix = ""
        word = len(words) - 1
        needed = MIN_LEN_PREFIX - len(words) + 1
        while len(postfix) < needed:
            stillNeeded = needed - len(postfix)
            postfix = words[word][:stillNeeded] + postfix
            if len(postfix) < needed:
                needed += 1
                word -= 1
        return "".join([lett[0] for lett in words[:word]]).upper() + postfix.upper()
    def __init__(self, **kwargs):
        super(Plugin, self).__init__(**kwargs)
        # MIME types are case-insensitive; store them lowercased.
        if self.mimetype:
            self.mimetype = self.mimetype.lower()
    def promptValues(self):
        """Prompt for every plugin field, deriving sensible defaults."""
        name = self.name
        ident = self.ident
        self.name = self.getValue("name", self.name)
        self.ident = self.getValue("ident", re.sub(r"[^a-zA-Z\d\-_]", "", self.ident or self.name))
        # Re-derive prefix/mimetype defaults only when name/ident changed.
        self.prefix = self.getValue("prefix", self.prefix if name == self.name else self.makeDefaultPrefix(self.name))
        self.mimetype = self.getValue("mimetype", self.mimetype if ident == self.ident else "application/x-%s" % self.ident.lower()).lower()
        self.desc = self.getValue("desc", self.desc)
        self.disable_gui = self.getValue("disable_gui", self.disable_gui or "false").lower()
    def readCfg(self, cfg):
        """Fill any unset fields from the [plugin] section of `cfg`."""
        if not cfg.has_section("plugin"):
            return
        self.name = self.name or cfg.get("plugin", "name")
        self.ident = self.ident or cfg.get("plugin", "ident")
        self.prefix = self.prefix or cfg.get("plugin", "prefix")
        self.mimetype = self.mimetype or cfg.get("plugin", "mimetype").lower()
        self.desc = self.desc or cfg.get("plugin", "description")
        # FIX: idiomatic None test (was "== None").
        if self.disable_gui is None:
            self.disable_gui = cfg.get("plugin", "disable_gui") or False
    def updateCfg(self, cfg):
        """Write the current field values into the [plugin] section of `cfg`."""
        if not cfg.has_section("plugin"):
            cfg.add_section("plugin")
        cfg.set("plugin", "name", self.name)
        cfg.set("plugin", "ident", self.ident)
        cfg.set("plugin", "prefix", self.prefix)
        cfg.set("plugin", "mimetype", self.mimetype)
        cfg.set("plugin", "description", self.desc)
        cfg.set("plugin", "disable_gui", self.disable_gui)
    def __str__(self):
        return "\nPlugin Details:\n--------------\nName: %(name)s\nIdentifier: %(ident)s\nPrefix: %(prefix)s\nMIME type: %(mimetype)s\nDescription: %(desc)s\nGUI: %(disable_gui)s" % self
class Company(Base):
    """Interactive/config-backed description of the plugin vendor."""
    name = None
    ident = None
    domain = None
    def __init__(self, **kwargs):
        super(Company, self).__init__(**kwargs)
    def promptValues(self):
        """Prompt for company fields, deriving defaults from earlier answers."""
        self.name = self.getValue("name", self.name)
        self.ident = self.getValue("ident", self.ident or re.sub(r"[^a-zA-Z\d\-_]", "", self.name))
        self.domain = self.getValue("domain", self.domain or "%s.com" % self.ident.lower())
    def readCfg(self, cfg):
        """Fill any unset fields from the [company] section of cfg."""
        if not cfg.has_section("company"):
            return
        for field in ("name", "ident", "domain"):
            setattr(self, field, getattr(self, field) or cfg.get("company", field))
    def updateCfg(self, cfg):
        """Write the current field values into the [company] section of cfg."""
        if not cfg.has_section("company"):
            cfg.add_section("company")
        for field in ("name", "ident", "domain"):
            cfg.set("company", field, getattr(self, field))
    def __str__(self):
        return "\nCompany Details\n---------------\nName: %(name)s\nIdentifier: %(ident)s\nDomain: %(domain)s" % self
class GUID(Base):
    """
    Deterministic GUID factory.  The master GUID is derived (uuid5) from the
    plugin identifier and company domain, so GUIDs generated with/for the
    same intent are always the same.
    """
    master = None
    domain = None
    ident = None
    def __init__(self, **kwargs):
        super(GUID, self).__init__(**kwargs)
        # Seed the namespace deterministically from "<ident>.<domain>".
        seedName = "%s.%s" % (self.ident, self.domain)
        self.master = uuid.uuid5(uuid.NAMESPACE_DNS, seedName)
    def generate(self, string):
        """Return the GUID deterministically derived from `string`."""
        return uuid.uuid5(self.master, string)
| Python |
#!/usr/bin/python
"""
Runs javac -Xlint on all files in all subdirectories.
Collects results into JavaLint.txt
"""
import os
# Per-directory (and final combined) lint report file name.
outputfile = "JavaLint.txt"
# Collect every directory that contains at least one .java file.
javadirs = []
for path, dirs, files in os.walk('.'):
    for file in files:
        if file.endswith(".java"):
            javadirs.append(path)
            break
start = os.getcwd()
# Run javac -Xlint in each directory, capturing diagnostics to outputfile
# (-Xstdout redirects javac's messages into that file).
for jd in javadirs:
    os.chdir(jd)
    print jd
    os.system("javac -source 1.5 -Xlint -Xlint:-serial *.java -Xstdout " + outputfile)
    os.chdir(start)
# Concatenate the non-empty per-directory reports into one combined file.
results = open(start + os.sep + outputfile, 'w')
for jd in javadirs:
    messages = open(jd + os.sep + outputfile).read()
    if len(messages):
        print >>results, '='*40 + "\n" + jd + "\n" + '='*40 + "\n" + messages
| Python |
"""FindBugsExcluder.py
Creates a filter file from the xml and text output of FindBugs
To prepare, you must run
findbugs -textui . > findbugs.txt
findbugs -textui -xml . > findbugs.xml
Once you've run this program you can then run
findbugs -textui -exclude FindBugsFilter-auto.xml .
To exclude the bugs that have been discovered.
The program includes the suggested changes with each exclusion,
so you can go through FindBugsFilter-auto.xml and decide
to fix things and remove their "Match" nodes.
"""
from xml.dom.minidom import parse
import xml.dom
import os, sys, re, pprint
# Inputs: FindBugs XML and text reports; output: the generated exclusion filter.
xml_buglist = 'findbugs.xml' #'D:\\aaa-TIJ4\\code\\findbugs.xml'
text_buglist = 'findbugs.txt' # 'D:\\aaa-TIJ4\\code\\findbugs.txt'
findbugs_filter = 'FindBugsFilter-auto.xml' # 'D:\\aaa-TIJ4\\code\\FindBugsFilter-auto.xml'
def main():
    """Build FindBugsFilter-auto.xml with one <Match> per (bug code, class)
    found in the reports, attaching the human-readable message as a comment
    so each exclusion can be reviewed later."""
    # Keep only Medium/High priority lines ("M ..."/"H ...") from the text report.
    textbugs = [bug.split(':', 1) for bug in file(text_buglist)
                if bug.startswith("M ") or bug.startswith("H ")]
    # -> (bug code, message); the code is the third whitespace-separated field.
    textbugs = [(bug[0].split()[2], bug[1].strip()) for bug in textbugs]
    dom1 = parse(xml_buglist)
    dom2 = xml.dom.getDOMImplementation().createDocument(
        None, "FindBugsFilter", None)
    bugsDone = []
    for bugNode in [bug for bug in dom1.firstChild.childNodes
                    if bug.nodeName == "BugInstance"]:
        for child in bugNode.childNodes:
            if child.nodeName == "Class":
                # NOTE(review): relies on attribute POSITIONS (item(0)/item(2));
                # getAttribute("classname")/getAttribute("type") would be robust.
                # Confirm against the FindBugs XML schema before changing.
                classname = child.attributes.item(0).value
                bugtype = bugNode.attributes.item(2).value
                # Emit each (bug code, class) pair only once.
                if (bugtype, classname) in bugsDone:
                    continue
                else:
                    bugsDone.append((bugtype, classname))
                match = dom2.createElement("Match")
                match.setAttribute("class", classname)
                bugCode = dom2.createElement("BugCode")
                bugCode.setAttribute("name", bugtype)
                match.appendChild(bugCode)
                # Attach the matching text-report message(s) as XML comments.
                for textbug in textbugs:
                    if textbug[0] == bugtype and classname in textbug[1]:
                        match.appendChild(dom2.createComment(textbug[1]))
                dom2.documentElement.appendChild(match)
                break # out of inner for loop
    file(findbugs_filter, 'w').write(dom2.toprettyxml(' ', '\n'))
if __name__ == "__main__": main()
| Python |
#!/usr/bin/python
"""
To do:
3) command-line argument (to test a single file)
- What about exceptions and aborts?
-If ...is embedded anywhere in a line, that portion becomes a .*? regexp
---------------
Find files with
/* Output:
Run the programs and capture the output, compare with anticipated output.
/* Output: (80% match)
For files that vary from run to run
Complete punt:
/* Output: (Sample)
(More elaborate design in SimpleTest1.py)
"""
import os, re, glob, sys, string, codecs
from difflib import SequenceMatcher

# Marker line for embedded command-line arguments: "// {Args: ...}"
argTag = '// {Args: '
# Matches the embedded expected-output block:
#   /* Output: <modifier>
#   ...expected text...
#   *///:~
# Group 1 is the modifier text, group 2 the expected output.
# BUG FIX: the leading "/*" was unescaped, so the "*" quantified "/" and the
# match silently began at " Output:" rather than "/* Output:".  The captured
# groups are unchanged by escaping the star.
targetOutput = re.compile(r"/\* Output:(.*?)\n(.*)\n\*///:~", re.DOTALL)
class SimpleTest:
    """Compile-and-run harness for one .java example file.

    Parses directives embedded in the source ({Args:}, {main:}, {Exec:},
    {RunByHand}), runs the program, and diffs its output against the
    expected-output block captured by the module-level targetOutput regex.
    """
    def __init__(self, fileName, text, referencePath, reportFile):
        self.fileName = fileName
        self.normalOutput = self.fileName + "-output.txt"
        self.errorOutput = self.fileName + "-erroroutput.txt"
        self.text = text
        self.referencePath = referencePath
        self.reportFile = reportFile
        self.package = ""
        self.args = ""
        self.runTest = True
        self.insertOutput = True
        self.EmbeddedComparisonOutput = False
        self.comparisonFile = None
        self.lines = self.text.split("\n")
        # Scan the source for embedded test directives.
        for line in self.lines:
            # Skip GUI programs and other files that cannot run unattended.
            if "{RunByHand}" in line or \
               line.startswith("import javax.swing.*;") or \
               "c12:ZipCompress.java" in line or \
               "/* (Execute to see output) *///:~" in line:
                self.runTest = False
            if line.startswith("package"):
                # "package a.b.c;" -> "a.b.c." prefix for the java command.
                self.package = line.split()[1][:-1] + "."
            if line.startswith(argTag):
                # "// {Args: foo bar}" -> command-line arguments.
                self.args = line[len(argTag):].strip()
                assert self.args.rfind('}') != -1, "%s, %s" % (self.args, referencePath)
                self.args = self.args[:self.args.rfind('}')]
            if line.startswith("// {main:"):
                # Run a different class than the file's own name.
                self.fileName = line.split()[-1][:-1]
            if line.startswith("// {Exec:"):
                # Fully custom command line (trailing "}" stripped).
                self.command = line.split(':', 1)[1].strip()[:-1]
            if "/* Output:" in line:
                self.EmbeddedComparisonOutput = True
            if line.startswith("} /*"):
                break # Out of for loop
        #if "} ///:~" in line: # Extra space
        #    self.insertOutput = False
    def run(self):
        """Execute the program and compare its stdout (or, if stdout was
        empty, stderr) against the embedded expected output."""
        if not self.runTest: return
        if not hasattr(self, "command"):
            self.command = "java " + self.package + self.fileName + " " + self.args
        # Capture standard output into a local file.
        self.command = self.command + " > " + self.normalOutput
        print self.command
        os.system(self.command)
        if os.stat(self.normalOutput).st_size:
            return self.compareResults(self.normalOutput)
        # Capture error output into a local file.
        # The '2>' requires cygwin under Windows, or *nix:
        self.command = self.command + " 2> " + self.errorOutput
        print self.command
        os.system(self.command)
        return self.compareResults(self.errorOutput)
    def compareResults(self, fileName):
        """Diff the captured output in `fileName` against the control sample,
        honoring "(NN% match)" and "(Sample)" modifiers, and append any
        mismatch details to the report file."""
        # Read output file that was just generated:
        results = makePrintable(file(fileName).read())
        results = results.replace('\t', ' ')
        results = results.strip()
        file("Generated.txt",'w').write(results)
        # Strip off trailing spaces on each line:
        results = "\n".join([line.rstrip() for line in results.split("\n")])
        controlSample = self.getControlSample()
        ratio = 1.0
        if controlSample:
            controlOutput = controlSample.group(2).rstrip()
            # A trailing "..." line means: compare only the leading lines.
            if "\n..." in controlOutput:
                controlLines = controlOutput.split("\n")[:-1]
                resultLines = results.split("\n")[:len(controlLines)]
                controlOutput = "\n".join(controlLines)
                results = "\n".join(resultLines)
            file("controlOutput.txt",'w').write(controlOutput)
            modifier = controlSample.group(1)
            if "match" in modifier:
                # "(80% match)" -> accept a 0.80 similarity ratio.
                ratio = float(re.findall("\d+", modifier)[0]) / 100
                print "Looking for", ratio, "match"
            if "Sample" in modifier:
                # "(Sample)": output varies per run; accept anything.
                ratio = 0.0
            actualRatio = SequenceMatcher(None, controlOutput, results).ratio()
            if actualRatio < ratio:
                # Record the mismatch plus a cmp diff in the report.
                self.reportFile.write("mismatch in " + self.referencePath + "\n")
                self.reportFile.write("Actual ratio " + str(actualRatio) + "\n")
                self.reportFile.write("expected:\n")
                self.reportFile.write(controlOutput + "\n")
                self.reportFile.write("----------actual:----------\n")
                self.reportFile.write(results + "\n")
                file(self.fileName + "-control.txt", 'w').write(controlOutput)
                file(self.fileName + "-results.txt", 'w').write(results)
                self.reportFile.write("---------------------------\n")
                os.system("cmp " + self.fileName + "-control.txt "
                          + self.fileName + "-results.txt"
                          + " > cmp-out.txt")
                self.reportFile.write(file("cmp-out.txt").read())
                self.reportFile.write("=" * 40 + "\n")
        else:
            pass #!!! No control sample, create initial one here
    def appendOutput(self):
        """Rewrite the source file's tail, embedding the captured output as
        a fresh "/* Output: ... *///:~" block."""
        if self.insertOutput:
            # Rewrite the tail of the source file if the result is nonzero
            self.lines[-2] = '}'
            self.lines[-1] = "/* Output:"
            for tline in file(self.fileName + "-output.txt"):
                self.lines.append(tline.rstrip())
            self.lines.append("*///:~")
            self.lines.append("")
            file(self.fileName + ".java", 'w').write("\n".join(self.lines))
    def getControlSample(self):
        """Finds the control sample, returns an re group
        First element is the arguments, second is the actual data"""
        if self.EmbeddedComparisonOutput:
            self.sourceOutput = targetOutput.search(self.text)
        else:
            return None
        return self.sourceOutput
def makePrintable(s):
for c in s:
if c not in string.printable: return _makePrintable(s)
return s
def _makePrintable(s):
result = ''
for c in s:
if c not in string.printable: result += ' '
else: result += c
return result
class ReportFile:
def __init__(self, filePath):
self.filePath = filePath
self.file = None
def write(self, line):
if not self.file:
self.file = file(self.filePath, 'w')
self.file.write(line)
print line
def close(self):
if self.file:
self.file.close()
if __name__ == "__main__":
    # Single-file mode: compile and test just the named source file,
    # reporting straight to stdout.
    if len(sys.argv) > 1:
        javaSource = sys.argv[1]
        if javaSource.endswith("."): javaSource = javaSource[:-1]
        if not javaSource.endswith(".java"): javaSource += ".java"
        os.system("javac " + javaSource)
        SimpleTest(javaSource.split('.')[0], file(javaSource).read(), javaSource, sys.stdout).run()
        sys.exit()
    # Tree mode: walk every directory and test each .java file that embeds
    # an expected-output block, collecting mismatches in OutputErrors.txt.
    start = os.getcwd()
    reportFile = ReportFile(start + os.sep + "OutputErrors.txt")
    for root, dirs, files in os.walk('.'):
        print root
        os.chdir(root)
        for f in [name.split('.')[0] for name in files if name.endswith(".java")]:
            text = file(f + ".java").read()
            # Only perform verification if there is an output tag:
            if text.find("/* Output:") != -1:
                referencePath = os.path.join(root, f + ".java")
                SimpleTest(f, text, referencePath, reportFile).run()
        os.chdir(start)
    reportFile.close()
    # The lazily-created file only exists if at least one mismatch occurred.
    if reportFile.file:
        print "Errors in OutputErrors.txt"
| Python |
"""RedundantImportDetector.py
Discover redundant java imports using brute force.
Requires Python 2.3"""
import os, sys, re
from glob import glob
# Report of removable imports; opened eagerly at module import time.
reportFile = file("RedundantImports.txt", 'w')
startDir = 'D:\\aaa-TIJ4\\code'  # NOTE(review): unused below -- leftover from an earlier revision?
# Regular expression to find the block of import statements:
findImports = re.compile("\n(?:import .*?\n)+")
baseDir = os.path.abspath(".")
print "basDir:", baseDir  # NOTE(review): "basDir" typo in the output -- cosmetic only
def main():
    """Check every reachable .java file for redundant imports."""
    # NOTE(review): glob's "**" is NOT recursive here (pre-Python-3.5
    # semantics); this covers only the current directory and its direct
    # children -- confirm that matches the intended tree layout.
    for javaFile in glob("*.java") + glob("**/*.java"):
        print javaFile
        checkImports(os.path.join(baseDir, javaFile))
def checkImports(javaFile):
    """Find and permanently remove the first redundant import in javaFile.

    Writes each mutation (one import commented out) over the file and tries
    to compile it; on the first success the commented import is deleted for
    good and the function returns.  If no mutation compiles, the original
    text is restored.
    """
    java = file(javaFile).read()
    imports = findImports.search(java)
    if imports:
        # Individual import lines, and the file split around the import block.
        imports = [f for f in imports.group(0).split('\n') if f != '']
        fileParts = findImports.split(java)
        assert len(fileParts) == 2
        for mutated in mutateImports(imports):
            file(javaFile, 'w').write(fileParts[0] + mutated + fileParts[1])
            print "changing to", os.path.dirname(javaFile)
            os.chdir(os.path.dirname(javaFile))
            # Exit status 0 => still compiles without that import.
            if os.system("javac " + os.path.basename(javaFile)) == 0:
                print >>reportFile, javaFile + "\n" + mutated
                # Drop the commented-out line entirely.
                redundantRemoved = "\n".join(
                    [m for m in mutated.split("\n")
                     if not m.startswith("//")])
                print >>reportFile, redundantRemoved
                file(javaFile, 'w').write(fileParts[0] +
                                          redundantRemoved + fileParts[1])
                return # No further attempts
        file(javaFile, 'w').write(java) # Restore original file
def mutateImports(imports):
    """Yield one variant of the import block per import line, each with a
    different line commented out.  Every variant is wrapped in the blank
    lines that the surrounding regex split consumed."""
    for position, line in enumerate(imports):
        variant = list(imports)
        variant[position] = '//' + line
        yield "\n".join([''] + variant + [''])

if __name__ == "__main__": main()
| Python |
#!/usr/bin/python
"""
Eclipse.py by Bruce Eckel, for Thinking in Java 4e
Modify or insert package statments so that Eclipse is happy with the code tree.
Run this with no arguments from the root of the code tree.
The Ant build will not work once you run this program!
You may also want to modify the dotproject and dotclasspath text below.
You must have Python 2.3 installed to run this program. See www.python.org.
"""
import os
# Lisa.java deliberately conflicts when packaged for Eclipse; drop it.
os.remove("reusing/Lisa.java");
# Give every .java file a package statement derived from its "//: dir/File.java"
# tag line so Eclipse's per-directory source folders resolve.
for path, dirs, files in os.walk('.'):
    for file in files:
        if file.endswith(".java"):
            filepath = path + os.sep + file
            firstLine = open(filepath).readline().strip()
            # Second field of the tag is the relative path; dots join its dirs.
            tagPath = firstLine.split()[1]
            tagPath = ".".join(tagPath.split('/')[:-1])
            packageStatement = "package " + tagPath + ";"
            code = open(filepath).readlines()
            found = False
            for line in code:
                if line.startswith("package "):
                    found = True
            if not found:
                # Insert after the tag line; the marker comment lets
                # DEclipse.py undo this later.
                code.insert(1, packageStatement + " /* Added by Eclipse.py */\n")
                open(filepath, 'w').writelines(code)
# Absolute project path, normalized for cygwin ("/cygdrive/c/..." -> "c:/...").
here = os.path.abspath('.').replace("\\", "/")
if here.startswith("/cygdrive/"): # If using cygwin
    here = here.replace("/cygdrive/", "", 1)
    here = here[0] + ":" + here[1:]
print "here", here
# Generate the Eclipse project metadata: .classpath, .project, and the
# JDT compiler preferences (Java 5 compliance).
open(".classpath", 'w').write(\
"""<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry excluding="polymorphism/|holding/|flow/|exceptions/|concurrency/|typeinfo/|innerclasses/|arrays/|interfaces/|reusing/|initialization/|cloning/|io/|containers/|generics/|xml/|hiding/|io/xfiles/|passing/|gui/|annotations/|enumerated/|discovering/|object/|strings/|swt/" kind="src" path=""/>
<classpathentry kind="src" path="annotations"/>
<classpathentry kind="src" path="arrays"/>
<classpathentry kind="src" path="cloning"/>
<classpathentry kind="src" path="concurrency"/>
<classpathentry kind="src" path="containers"/>
<classpathentry kind="src" path="discovering"/>
<classpathentry kind="src" path="enumerated"/>
<classpathentry kind="src" path="exceptions"/>
<classpathentry kind="src" path="flow"/>
<classpathentry kind="src" path="generics"/>
<classpathentry kind="src" path="gui"/>
<classpathentry kind="src" path="hiding"/>
<classpathentry kind="src" path="holding"/>
<classpathentry kind="src" path="initialization"/>
<classpathentry kind="src" path="innerclasses"/>
<classpathentry kind="src" path="interfaces"/>
<classpathentry excluding="xfiles/" kind="src" path="io"/>
<classpathentry kind="src" path="io/xfiles"/>
<classpathentry kind="src" path="object"/>
<classpathentry kind="src" path="passing"/>
<classpathentry kind="src" path="polymorphism"/>
<classpathentry kind="src" path="reusing"/>
<classpathentry kind="src" path="strings"/>
<classpathentry kind="src" path="swt"/>
<classpathentry kind="src" path="typeinfo"/>
<classpathentry kind="src" path="xml"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/jre1.5.0_01"/>
<classpathentry kind="output" path="bin"/>
</classpath>
""") # % (here, here))
open(".project", 'w').write(\
"""<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>TIJ4</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
""")
# JDT compiler settings live under .settings/ in the project root.
if not os.path.exists(".settings"):
    os.mkdir(".settings")
os.chdir(".settings")
open("org.eclipse.jdt.core.prefs", 'w').write(\
"""#Fri Jan 14 11:03:37 MST 2005
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.compliance=1.5
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.source=1.5
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
""")
print """Project ready to be opened with Eclipse (see www.Eclipse.org)
Use DEclipse.py if you want to go back to building with Ant."""
| Python |
#!/usr/bin/python
"""
DEclipse.py by Bruce Eckel, for Thinking in Java 4e
Undoes the effect of Eclipse.py, so that Ant can be used
again to build the code tree.
You must have Python 2.3 installed to run this program. See www.python.org.
"""
import os

def remove_eclipse_lines(code):
    """Return `code` (a list of source lines) with every line that
    Eclipse.py added filtered out."""
    # The original deleted items while enumerating the same list, which
    # shifts the remaining elements and skips the line following each
    # deletion; building a filtered copy avoids that bug.
    return [line for line in code
            if line.find(" /* Added by Eclipse.py */") == -1]

for path, dirs, files in os.walk('.'):
    for fname in files:  # renamed: `file` shadowed the builtin
        if fname.endswith(".java"):
            filepath = path + os.sep + fname
            code = open(filepath).readlines()
            open(filepath, 'w').writelines(remove_eclipse_lines(code))
print("Project ready to be built with Ant.")
| Python |
#!/usr/bin/python
"""
To do:
3) command-line argument (to test a single file)
- What about exceptions and aborts?
- If "..." is embedded anywhere in a line, that portion becomes a .*? regexp
---------------
Find files with
/* Output:
Run the programs and capture the output, compare with anticipated output.
/* Output: (80% match)
For files that vary from run to run
Complete punt:
/* Output: (Sample)
(More elaborate design in SimpleTest1.py)
"""
import os, re, glob, sys, string, codecs
from difflib import SequenceMatcher
argTag = '// {Args: '
# Pattern for the embedded expected-output trailer, e.g.
#   /* Output: (80% match)
#   ...lines...
#   *///:~
# Fixed: the original spelled "/*" unescaped, which regex reads as
# "zero or more slashes" and only matched by accident (the match began at the
# space before "Output:").  Raw string + escaped "/\*" anchors at the real
# comment opener; group(1) is the modifier, group(2) the expected output.
targetOutput = re.compile(r"/\* Output:(.*?)\n(.*)\n\*///:~", re.DOTALL)
class SimpleTest:
    """Runs one Java example program and compares its captured output
    against the expected output embedded in the source as a
    "/* Output: ... *///:~" trailer comment, reporting mismatches
    through reportFile."""
    def __init__(self, fileName, text, referencePath, reportFile):
        # fileName: base name (no extension) of the program to run.
        # text: full source text of the .java file.
        # referencePath: path used in mismatch reports.
        # reportFile: object with write(); receives mismatch details.
        self.fileName = fileName
        # Captured stdout/stderr land in per-program text files:
        self.normalOutput = self.fileName + "-output.txt"
        self.errorOutput = self.fileName + "-erroroutput.txt"
        self.text = text
        self.referencePath = referencePath
        self.reportFile = reportFile
        self.package = ""
        self.args = ""
        self.runTest = True
        self.insertOutput = True
        self.EmbeddedComparisonOutput = False
        self.comparisonFile = None
        self.lines = self.text.split("\n")
        # Scan the source for directive tags that control how (or whether)
        # this program is run:
        for line in self.lines:
            # Skip programs that need a human, a GUI, or are special cases:
            if "{RunByHand}" in line or \
               line.startswith("import javax.swing.*;") or \
               "c12:ZipCompress.java" in line or \
               "/* (Execute to see output) *///:~" in line:
                self.runTest = False
            if line.startswith("package"):
                # Qualify the class name, e.g. "holding." (strip the ';'):
                self.package = line.split()[1][:-1] + "."
            if line.startswith(argTag):
                # "// {Args: ...}" supplies command-line arguments:
                self.args = line[len(argTag):].strip()
                assert self.args.rfind('}') != -1, "%s, %s" % (self.args, referencePath)
                self.args = self.args[:self.args.rfind('}')]
            if line.startswith("// {main:"):
                # main() lives in a different class than the file name:
                self.fileName = line.split()[-1][:-1]
            if line.startswith("// {Exec:"):
                # An explicit command replaces the default "java ..." one:
                self.command = line.split(':', 1)[1].strip()[:-1]
            if "/* Output:" in line:
                self.EmbeddedComparisonOutput = True
            if line.startswith("} /*"):
                break # Out of for loop
        #if "} ///:~" in line: # Extra space
        #    self.insertOutput = False
    def run(self):
        """Execute the program (stdout first, then stderr if stdout was
        empty) and compare whichever capture is non-empty."""
        if not self.runTest: return
        if not hasattr(self, "command"):
            # No {Exec:} tag: default invocation of the (possibly packaged) class.
            self.command = "java " + self.package + self.fileName + " " + self.args
        # Capture standard output into a local file.
        self.command = self.command + " > " + self.normalOutput
        print self.command
        os.system(self.command)
        if os.stat(self.normalOutput).st_size:
            return self.compareResults(self.normalOutput)
        # Capture error output into a local file.
        # The '2>' requires cygwin under Windows, or *nix:
        self.command = self.command + " 2> " + self.errorOutput
        print self.command
        os.system(self.command)
        return self.compareResults(self.errorOutput)
    def compareResults(self, fileName):
        """Compare the captured output in fileName against the embedded
        control sample, honoring "(n% match)" and "(Sample)" modifiers;
        write details to reportFile on mismatch."""
        # Read output file that was just generated:
        results = makePrintable(file(fileName).read())
        results = results.replace('\t', ' ')
        results = results.strip()
        file("Generated.txt",'w').write(results)
        # Strip off trailing spaces on each line:
        results = "\n".join([line.rstrip() for line in results.split("\n")])
        controlSample = self.getControlSample()
        ratio = 1.0  # default: require an exact SequenceMatcher match
        if controlSample:
            controlOutput = controlSample.group(2).rstrip()
            if "\n..." in controlOutput:
                # A trailing "..." truncates comparison to the sample's length:
                controlLines = controlOutput.split("\n")[:-1]
                resultLines = results.split("\n")[:len(controlLines)]
                controlOutput = "\n".join(controlLines)
                results = "\n".join(resultLines)
            file("controlOutput.txt",'w').write(controlOutput)
            modifier = controlSample.group(1)
            if "match" in modifier:
                # e.g. "(80% match)" relaxes the required similarity ratio:
                ratio = float(re.findall("\d+", modifier)[0]) / 100
                print "Looking for", ratio, "match"
            if "Sample" in modifier:
                ratio = 0.0  # "(Sample)": output varies per run; accept anything
            actualRatio = SequenceMatcher(None, controlOutput, results).ratio()
            if actualRatio < ratio:
                # Record the mismatch plus both texts, and a byte-level cmp diff:
                self.reportFile.write("mismatch in " + self.referencePath + "\n")
                self.reportFile.write("Actual ratio " + str(actualRatio) + "\n")
                self.reportFile.write("expected:\n")
                self.reportFile.write(controlOutput + "\n")
                self.reportFile.write("----------actual:----------\n")
                self.reportFile.write(results + "\n")
                file(self.fileName + "-control.txt", 'w').write(controlOutput)
                file(self.fileName + "-results.txt", 'w').write(results)
                self.reportFile.write("---------------------------\n")
                os.system("cmp " + self.fileName + "-control.txt "
                          + self.fileName + "-results.txt"
                          + " > cmp-out.txt")
                self.reportFile.write(file("cmp-out.txt").read())
                self.reportFile.write("=" * 40 + "\n")
        else:
            pass #!!! No control sample, create initial one here
    def appendOutput(self):
        """Rewrite the source file, appending the captured output as a
        fresh "/* Output: ... *///:~" trailer."""
        if self.insertOutput:
            # Rewrite the tail of the source file if the result is nonzero
            self.lines[-2] = '}'
            self.lines[-1] = "/* Output:"
            for tline in file(self.fileName + "-output.txt"):
                self.lines.append(tline.rstrip())
            self.lines.append("*///:~")
            self.lines.append("")
            file(self.fileName + ".java", 'w').write("\n".join(self.lines))
    def getControlSample(self):
        """Finds the control sample, returns an re group.
        First element is the arguments, second is the actual data;
        returns None when the source has no "/* Output:" tag."""
        if self.EmbeddedComparisonOutput:
            self.sourceOutput = targetOutput.search(self.text)
        else:
            return None
        return self.sourceOutput
def makePrintable(s):
    """Return s itself when every character is printable; otherwise return
    a copy in which each non-printable character becomes a space."""
    if all(ch in string.printable for ch in s):
        return s
    # Scrub: substitute a space for each character outside string.printable.
    return ''.join(ch if ch in string.printable else ' ' for ch in s)
def _makePrintable(s):
    """Build a copy of s with every character outside string.printable
    replaced by a single space."""
    return ''.join(c if c in string.printable else ' ' for c in s)
class ReportFile:
    """Report sink whose backing file is created lazily on the first
    write(), so a clean run leaves no empty report behind.  Every line
    written is also echoed to standard output."""
    def __init__(self, filePath):
        # filePath: where the report is created when first written to.
        self.filePath = filePath
        self.file = None  # opened lazily by write()
    def write(self, line):
        # open() replaces the file() builtin (a Python-2-only alias that
        # Python 3 removed); behavior is identical.
        if not self.file:
            self.file = open(self.filePath, 'w')
        self.file.write(line)
        print(line)
    def close(self):
        # Safe to call even if nothing was ever written.
        if self.file:
            self.file.close()
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # Single-file mode: compile and test just the named source file,
        # reporting straight to stdout.
        javaSource = sys.argv[1]
        if javaSource.endswith("."): javaSource = javaSource[:-1]
        if not javaSource.endswith(".java"): javaSource += ".java"
        os.system("javac " + javaSource)
        SimpleTest(javaSource.split('.')[0], file(javaSource).read(), javaSource, sys.stdout).run()
        sys.exit()
    # Tree mode: walk every directory and test each file carrying an
    # embedded "/* Output:" sample; mismatches accumulate in OutputErrors.txt.
    start = os.getcwd()
    reportFile = ReportFile(start + os.sep + "OutputErrors.txt")
    for root, dirs, files in os.walk('.'):
        print root
        # Run each program from its own directory, then hop back so the
        # next relative `root` from os.walk resolves correctly:
        os.chdir(root)
        for f in [name.split('.')[0] for name in files if name.endswith(".java")]:
            text = file(f + ".java").read()
            # Only perform verification if there is an output tag:
            if text.find("/* Output:") != -1:
                referencePath = os.path.join(root, f + ".java")
                SimpleTest(f, text, referencePath, reportFile).run()
        os.chdir(start)
    reportFile.close()
    # ReportFile only creates its file when something was written:
    if reportFile.file:
        print "Errors in OutputErrors.txt"
| Python |
#!/usr/bin/python
"""
Eclipse.py by Bruce Eckel, for Thinking in Java 4e
Modify or insert package statements so that Eclipse is happy with the code tree.
Run this with no arguments from the root of the code tree.
The Ant build will not work once you run this program!
You may also want to modify the dotproject and dotclasspath text below.
You must have Python 2.3 installed to run this program. See www.python.org.
"""
import os
# NOTE(review): raises OSError if the file was already removed on a re-run.
os.remove("reusing/Lisa.java");
# Give every .java file a package statement derived from the "//: dir/File.java"
# tag carried on the first line of the book's sources:
for path, dirs, files in os.walk('.'):
    for file in files:
        if file.endswith(".java"):
            filepath = path + os.sep + file
            firstLine = open(filepath).readline().strip()
            tagPath = firstLine.split()[1]  # e.g. "holding/SetExample.java"
            tagPath = ".".join(tagPath.split('/')[:-1])  # -> "holding"
            packageStatement = "package " + tagPath + ";"
            code = open(filepath).readlines()
            found = False
            for line in code:
                if line.startswith("package "):
                    found = True
            if not found:
                # Insert after the first (tag) line, marked so DEclipse.py
                # can strip it out again; rewrite only when modified:
                code.insert(1, packageStatement + " /* Added by Eclipse.py */\n")
                open(filepath, 'w').writelines(code)
# Compute the project root path; under cygwin translate "/cygdrive/c/..."
# into the Windows-style "c:/..." form:
here = os.path.abspath('.').replace("\\", "/")
if here.startswith("/cygdrive/"): # If using cygwin
    here = here.replace("/cygdrive/", "", 1)
    here = here[0] + ":" + here[1:]
print "here", here
# Write the .classpath: one source entry per chapter directory, with the
# project root's chapter dirs excluded from the "" entry so files compile
# only through their own entries.
# NOTE(review): `here` is computed above but no longer substituted into the
# template — see the commented-out "% (here, here)" after the string.
open(".classpath", 'w').write(\
"""<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry excluding="polymorphism/|holding/|flow/|exceptions/|concurrency/|typeinfo/|innerclasses/|arrays/|interfaces/|reusing/|initialization/|cloning/|io/|containers/|generics/|xml/|hiding/|io/xfiles/|passing/|gui/|annotations/|enumerated/|discovering/|object/|strings/|swt/" kind="src" path=""/>
<classpathentry kind="src" path="annotations"/>
<classpathentry kind="src" path="arrays"/>
<classpathentry kind="src" path="cloning"/>
<classpathentry kind="src" path="concurrency"/>
<classpathentry kind="src" path="containers"/>
<classpathentry kind="src" path="discovering"/>
<classpathentry kind="src" path="enumerated"/>
<classpathentry kind="src" path="exceptions"/>
<classpathentry kind="src" path="flow"/>
<classpathentry kind="src" path="generics"/>
<classpathentry kind="src" path="gui"/>
<classpathentry kind="src" path="hiding"/>
<classpathentry kind="src" path="holding"/>
<classpathentry kind="src" path="initialization"/>
<classpathentry kind="src" path="innerclasses"/>
<classpathentry kind="src" path="interfaces"/>
<classpathentry excluding="xfiles/" kind="src" path="io"/>
<classpathentry kind="src" path="io/xfiles"/>
<classpathentry kind="src" path="object"/>
<classpathentry kind="src" path="passing"/>
<classpathentry kind="src" path="polymorphism"/>
<classpathentry kind="src" path="reusing"/>
<classpathentry kind="src" path="strings"/>
<classpathentry kind="src" path="swt"/>
<classpathentry kind="src" path="typeinfo"/>
<classpathentry kind="src" path="xml"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/jre1.5.0_01"/>
<classpathentry kind="output" path="bin"/>
</classpath>
""") # % (here, here))
# Write the .project descriptor naming the project "TIJ4" and enabling
# the standard Java builder and Java nature:
open(".project", 'w').write(\
"""<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>TIJ4</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
""")
# Write JDT compiler preferences pinning source/compliance/target to Java 1.5:
if not os.path.exists(".settings"):
    os.mkdir(".settings")
os.chdir(".settings")
open("org.eclipse.jdt.core.prefs", 'w').write(\
"""#Fri Jan 14 11:03:37 MST 2005
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.compliance=1.5
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.source=1.5
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
""")
# Python 2 print statement; tells the user how to revert the tree:
print """Project ready to be opened with Eclipse (see www.Eclipse.org)
Use DEclipse.py if you want to go back to building with Ant."""
| Python |
#!/usr/bin/python
"""
DEclipse.py by Bruce Eckel, for Thinking in Java 4e
Undoes the effect of Eclipse.py, so that Ant can be used
again to build the code tree.
You must have Python 2.3 installed to run this program. See www.python.org.
"""
import os

def remove_eclipse_lines(code):
    """Return `code` (a list of source lines) with every line that
    Eclipse.py added filtered out."""
    # The original deleted items while enumerating the same list, which
    # shifts the remaining elements and skips the line following each
    # deletion; building a filtered copy avoids that bug.
    return [line for line in code
            if line.find(" /* Added by Eclipse.py */") == -1]

for path, dirs, files in os.walk('.'):
    for fname in files:  # renamed: `file` shadowed the builtin
        if fname.endswith(".java"):
            filepath = path + os.sep + fname
            code = open(filepath).readlines()
            open(filepath, 'w').writelines(remove_eclipse_lines(code))
print("Project ready to be built with Ant.")
| Python |
#!/usr/bin/python
"""
Runs javac -Xlint on all files in all subdirectories.
Collects results into JavaLint.txt
"""
import os
outputfile = "JavaLint.txt"
# Collect every directory containing at least one .java file:
javadirs = []
for path, dirs, files in os.walk('.'):
    for file in files:
        if file.endswith(".java"):
            javadirs.append(path)
            break  # one hit per directory is enough
start = os.getcwd()
# Lint each directory; -Xstdout sends javac's messages into a
# per-directory JavaLint.txt instead of the console:
for jd in javadirs:
    os.chdir(jd)
    print jd
    os.system("javac -source 1.5 -Xlint -Xlint:-serial *.java -Xstdout " + outputfile)
    os.chdir(start)
# Concatenate all non-empty per-directory reports into one master report
# at the tree root, each prefixed with a banner naming its directory:
results = open(start + os.sep + outputfile, 'w')
for jd in javadirs:
    messages = open(jd + os.sep + outputfile).read()
    if len(messages):
        print >>results, '='*40 + "\n" + jd + "\n" + '='*40 + "\n" + messages
| Python |
#!/usr/bin/python
"""
Runs a Java program, appends output if it's not there
-force as first argument when doing batch files forces overwrite
"""
import os, re, sys
argTag = '// {Args: '
# Pattern detecting an existing "/* Output: ... *///:~" trailer.
# Fixed: the original spelled "/*" unescaped ("zero or more slashes" to the
# regex engine, matching only by accident) and placed "(?s)" at the END of
# the pattern — a position Python 3.11+ rejects.  An escaped "/\*" plus the
# re.DOTALL flag expresses the same intent correctly.
oldOutput = re.compile(r"/\* Output:.*?\n(.*)\n\*///:~", re.DOTALL)
def makeOutputIncludedFile(path, fileName, changeReport, force = False):
    """Run the Java program in path/fileName, capture its stdout (or, when
    stdout is empty, its stderr), and splice the capture into the source
    file as a "/* Output: ... *///:~" trailer unless one already exists
    (or `force` is set, which always rewrites).

    changeReport: None, or an object with write() recording modified files.
    """
    oldDir = os.getcwd()
    os.chdir(path)
    base = fileName.split('.')[0]  # class to run defaults to the file name
    package = ''
    args = ''
    command = None
    # Scan the header for directive tags controlling how the program is run:
    for line in file(fileName):
        if line.startswith("} /*"):
            break # Out of for loop
        if line.startswith("package"):
            words = line.strip().split()
            package = words[1][:-1] + '.' # remove ';'
        if line.startswith(argTag):
            # "// {Args: ...}" supplies command-line arguments:
            args = line[len(argTag):].strip()
            assert args.rfind('}') != -1, "%s, %s" % (args, fileName)
            args = " " +args[:args.rfind('}')]
        if line.startswith("// {main:"):
            # main() lives in a different class than the file name:
            base = line.split()[-1]
            base = base[:-1]
        if line.startswith("// {Exec:"):
            # An explicit command replaces the default "java ..." invocation:
            command = line.split(':', 1)[1].strip()[:-1]
    if not command:
        command = "java " + package + base + args
    command += " > " + base + "-output.txt"
    print command
    result = os.system(command)
    if(result != 0):
        raise Exception, "Command returned nonzero value: " + str(result)
    # Read output file that was just generated:
    results = file(base + "-output.txt").read().strip()
    # Strip off trailing spaces on each line:
    results = "\n".join([line.rstrip() for line in results.split("\n")])
    results = results.replace('\t', ' ')
    if results:
        if force or not oldOutput.findall(file(fileName).read()):
            processedText = createProcessedJavaText(results, fileName)
            open(fileName, 'w').write(processedText + "\n")
            if changeReport:
                changeReport.write(os.path.join(path, fileName) + "\n")
        # NOTE(review): this early return skips the os.chdir(oldDir) restore
        # below, leaving the process in `path`; callers pass absolute paths,
        # so subsequent calls still chdir successfully.
        return # Don't need to try for error output
    ##### Duplicate for standard error output:
    command += " 2> " + base + "-erroroutput.txt"
    print command
    result = os.system(command)
    if(result != 0):
        raise Exception, "Command returned nonzero value: " + str(result)
    # Read error file that was just generated:
    results = file(base + "-erroroutput.txt").read().strip()
    # Strip off trailing spaces on each line:
    results = "\n".join([line.rstrip() for line in results.split("\n")])
    results = results.replace('\t', ' ')
    if results:
        if force or not oldOutput.findall(file(fileName).read()):
            processedText = createProcessedJavaText(results, fileName)
            open(fileName, 'w').write(processedText + "\n")
            if changeReport:
                changeReport.write(os.path.join(path, fileName) + "\n")
    os.chdir(oldDir)
def createProcessedJavaText(results, fileName):
    """Return the full text of fileName with `results` spliced in as the
    trailing "/* Output: ... *///:~" comment.

    Handles both end-of-file markers: a bare "} ///:~" (no previous output)
    and an existing "} /* Output:" trailer, whose modifier line is kept.
    Raises Exception when neither marker is present.
    """
    processedJava = ''
    for line in [line.rstrip() for line in open(fileName)]:
        if line.startswith("} ///:~"):
            processedJava += "} /* Output:\n" + results + "\n*///:~"
            return processedJava
        if line.startswith("} /* Output:"):
            processedJava += line + "\n" + results + "\n*///:~" # Preserve modifiers
            return processedJava
        processedJava += line + "\n"
    # Bug fix: the original message concatenated an undefined name `path`,
    # which turned this error into a NameError in single-file mode.
    raise Exception("No marker found at end of file " + fileName)
class ReportFile:
    """Report sink whose backing file is created lazily on the first
    write(), so a clean run leaves no empty report behind.  Every line
    written is also echoed to standard output."""
    def __init__(self, filePath):
        # filePath: where the report is created when first written to.
        self.filePath = filePath
        self.file = None  # opened lazily by write()
    def write(self, line):
        # open() replaces the file() builtin (a Python-2-only alias that
        # Python 3 removed); behavior is identical.
        if not self.file:
            self.file = open(self.filePath, 'w')
        self.file.write(line)
        print(line)
    def close(self):
        # Safe to call even if nothing was ever written.
        if self.file:
            self.file.close()
if __name__ == "__main__":
    start = os.getcwd()
    args = sys.argv[1:]
    # "-force" regenerates output trailers even where one already exists:
    forceFlag = False
    if len(args):
        if args[0] == "-force":
            forceFlag = True
            print "forceFlag = ", forceFlag
            del args[0]
    if len(args) > 0:
        # Explicit file arguments: compile and process just those files.
        for javaSource in args:
            if javaSource.endswith("."): javaSource = javaSource[:-1]
            if not javaSource.endswith(".java"): javaSource += ".java"
            os.system("javac " + javaSource)
            makeOutputIncludedFile(os.getcwd(), javaSource, None, force = True)
    else:
        # No arguments: walk the whole tree, processing every example that
        # can run unattended; modified files are listed in Changes.txt.
        changeReport = ReportFile(os.path.join(start, "Changes.txt"))
        for root, dirs, files in os.walk('.'):
            if (os.sep + "gui") in root: continue  # GUI examples need a human
            path = os.path.normpath(os.path.join(start,root))
            print path
            for name in [name for name in files if name.endswith(".java")]:
                java = file(os.path.join(path, name)).read()
                # Only runnable, non-interactive, non-throwing programs:
                if "public static void main(String" in java and \
                   not "{RunByHand}" in java and \
                   not "{ThrowsException}" in java and \
                   not "/* (Execute to see output) *///:~" in java and \
                   not "} /* Same output as" in java:
                    if forceFlag or not "} /* Output:" in java:
                        print "\t", name
                        makeOutputIncludedFile(path, name, changeReport, force = forceFlag)
        changeReport.close()
        # Open the change list in UltraEdit (Windows-specific convenience):
        os.system("uedit32 /f Changes.txt &")
| Python |
#!/usr/bin/python
"""
Runs a Java program, appends output if it's not there
-force as first argument when doing batch files forces overwrite
"""
import os, re, sys
argTag = '// {Args: '
# Pattern detecting an existing "/* Output: ... *///:~" trailer.
# Fixed: the original spelled "/*" unescaped ("zero or more slashes" to the
# regex engine, matching only by accident) and placed "(?s)" at the END of
# the pattern — a position Python 3.11+ rejects.  An escaped "/\*" plus the
# re.DOTALL flag expresses the same intent correctly.
oldOutput = re.compile(r"/\* Output:.*?\n(.*)\n\*///:~", re.DOTALL)
def makeOutputIncludedFile(path, fileName, changeReport, force = False):
    """Run the Java program in path/fileName, capture its stdout (or, when
    stdout is empty, its stderr), and splice the capture into the source
    file as a "/* Output: ... *///:~" trailer unless one already exists
    (or `force` is set, which always rewrites).

    changeReport: None, or an object with write() recording modified files.
    """
    oldDir = os.getcwd()
    os.chdir(path)
    base = fileName.split('.')[0]  # class to run defaults to the file name
    package = ''
    args = ''
    command = None
    # Scan the header for directive tags controlling how the program is run:
    for line in file(fileName):
        if line.startswith("} /*"):
            break # Out of for loop
        if line.startswith("package"):
            words = line.strip().split()
            package = words[1][:-1] + '.' # remove ';'
        if line.startswith(argTag):
            # "// {Args: ...}" supplies command-line arguments:
            args = line[len(argTag):].strip()
            assert args.rfind('}') != -1, "%s, %s" % (args, fileName)
            args = " " +args[:args.rfind('}')]
        if line.startswith("// {main:"):
            # main() lives in a different class than the file name:
            base = line.split()[-1]
            base = base[:-1]
        if line.startswith("// {Exec:"):
            # An explicit command replaces the default "java ..." invocation:
            command = line.split(':', 1)[1].strip()[:-1]
    if not command:
        command = "java " + package + base + args
    command += " > " + base + "-output.txt"
    print command
    result = os.system(command)
    if(result != 0):
        raise Exception, "Command returned nonzero value: " + str(result)
    # Read output file that was just generated:
    results = file(base + "-output.txt").read().strip()
    # Strip off trailing spaces on each line:
    results = "\n".join([line.rstrip() for line in results.split("\n")])
    results = results.replace('\t', ' ')
    if results:
        if force or not oldOutput.findall(file(fileName).read()):
            processedText = createProcessedJavaText(results, fileName)
            open(fileName, 'w').write(processedText + "\n")
            if changeReport:
                changeReport.write(os.path.join(path, fileName) + "\n")
        # NOTE(review): this early return skips the os.chdir(oldDir) restore
        # below, leaving the process in `path`; callers pass absolute paths,
        # so subsequent calls still chdir successfully.
        return # Don't need to try for error output
    ##### Duplicate for standard error output:
    command += " 2> " + base + "-erroroutput.txt"
    print command
    result = os.system(command)
    if(result != 0):
        raise Exception, "Command returned nonzero value: " + str(result)
    # Read error file that was just generated:
    results = file(base + "-erroroutput.txt").read().strip()
    # Strip off trailing spaces on each line:
    results = "\n".join([line.rstrip() for line in results.split("\n")])
    results = results.replace('\t', ' ')
    if results:
        if force or not oldOutput.findall(file(fileName).read()):
            processedText = createProcessedJavaText(results, fileName)
            open(fileName, 'w').write(processedText + "\n")
            if changeReport:
                changeReport.write(os.path.join(path, fileName) + "\n")
    os.chdir(oldDir)
def createProcessedJavaText(results, fileName):
    """Return the full text of fileName with `results` spliced in as the
    trailing "/* Output: ... *///:~" comment.

    Handles both end-of-file markers: a bare "} ///:~" (no previous output)
    and an existing "} /* Output:" trailer, whose modifier line is kept.
    Raises Exception when neither marker is present.
    """
    processedJava = ''
    for line in [line.rstrip() for line in open(fileName)]:
        if line.startswith("} ///:~"):
            processedJava += "} /* Output:\n" + results + "\n*///:~"
            return processedJava
        if line.startswith("} /* Output:"):
            processedJava += line + "\n" + results + "\n*///:~" # Preserve modifiers
            return processedJava
        processedJava += line + "\n"
    # Bug fix: the original message concatenated an undefined name `path`,
    # which turned this error into a NameError in single-file mode.
    raise Exception("No marker found at end of file " + fileName)
class ReportFile:
    """Report sink whose backing file is created lazily on the first
    write(), so a clean run leaves no empty report behind.  Every line
    written is also echoed to standard output."""
    def __init__(self, filePath):
        # filePath: where the report is created when first written to.
        self.filePath = filePath
        self.file = None  # opened lazily by write()
    def write(self, line):
        # open() replaces the file() builtin (a Python-2-only alias that
        # Python 3 removed); behavior is identical.
        if not self.file:
            self.file = open(self.filePath, 'w')
        self.file.write(line)
        print(line)
    def close(self):
        # Safe to call even if nothing was ever written.
        if self.file:
            self.file.close()
if __name__ == "__main__":
    start = os.getcwd()
    args = sys.argv[1:]
    # "-force" regenerates output trailers even where one already exists:
    forceFlag = False
    if len(args):
        if args[0] == "-force":
            forceFlag = True
            print "forceFlag = ", forceFlag
            del args[0]
    if len(args) > 0:
        # Explicit file arguments: compile and process just those files.
        for javaSource in args:
            if javaSource.endswith("."): javaSource = javaSource[:-1]
            if not javaSource.endswith(".java"): javaSource += ".java"
            os.system("javac " + javaSource)
            makeOutputIncludedFile(os.getcwd(), javaSource, None, force = True)
    else:
        # No arguments: walk the whole tree, processing every example that
        # can run unattended; modified files are listed in Changes.txt.
        changeReport = ReportFile(os.path.join(start, "Changes.txt"))
        for root, dirs, files in os.walk('.'):
            if (os.sep + "gui") in root: continue  # GUI examples need a human
            path = os.path.normpath(os.path.join(start,root))
            print path
            for name in [name for name in files if name.endswith(".java")]:
                java = file(os.path.join(path, name)).read()
                # Only runnable, non-interactive, non-throwing programs:
                if "public static void main(String" in java and \
                   not "{RunByHand}" in java and \
                   not "{ThrowsException}" in java and \
                   not "/* (Execute to see output) *///:~" in java and \
                   not "} /* Same output as" in java:
                    if forceFlag or not "} /* Output:" in java:
                        print "\t", name
                        makeOutputIncludedFile(path, name, changeReport, force = forceFlag)
        changeReport.close()
        # Open the change list in UltraEdit (Windows-specific convenience):
        os.system("uedit32 /f Changes.txt &")
| Python |
'''
Created on 2012-7-27
@author: root
'''
import os
import glob
import time
# Change the current working directory (translated from the Chinese original)
os.chdir('/usr/eclipse/pythonjellybean/src/cn/taylor/jellybean/pythonDataType')
# Find all current files, using a wildcard (translated from the Chinese original)
filelist = glob.glob('*.py')
print(filelist)
# Expand each matched name to an absolute path:
fullFileList = [os.path.realpath(elem) for elem in filelist]
print(fullFileList)
# Demonstrate os.stat metadata: modification time and size of tuple.py
metadata = os.stat('tuple.py')
print(metadata.st_mtime)
print(time.localtime(metadata.st_mtime))
print(metadata.st_size);
print(os.path.realpath('set.py'))
def change_path():
    """Make the pythonDataType source folder the current working directory."""
    target = '/usr/eclipse/pythonjellybean/src/cn/taylor/jellybean/pythonDataType'
    os.chdir(target)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.