code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# gozerbot/monitor.py
#
#
""" monitors .. call callback on bot output. """
## gozerlib import s
from gozerlib.monitor import Monitor
## gozerlib.socket.irc imports
from ircevent import Ircevent
## classes
class Outmonitor(Monitor):

    """ monitor for bot output (bot.send). """

    def handle(self, bot, txt):
        """ parse txt into an Ircevent and fire outmonitor callbacks. """
        import logging
        ievent = Ircevent().parse(bot, txt)
        if not ievent:
            # the original called rlog(), which is not defined/imported in
            # this module .. use the stdlib logger instead
            logging.warn("monitor - can't make ircevent: %s" % txt)
            return
        ievent.nick = bot.nick
        try:
            ievent.userhost = bot.userhosts.data[bot.nick]
        except (KeyError, AttributeError):
            # no userhost known for our own nick .. use a placeholder
            ievent.userhost = "bot@bot"
        Monitor.handle(self, bot, ievent)
# bot.send() monitor .. fires callbacks registered on raw outgoing lines
outmonitor = Outmonitor('outmonitor')
# bot.say() monitor .. fires callbacks registered on bot.say() output
saymonitor = Monitor('saymonitor')
| Python |
# gozerlib/socket/irc/irc.py
#
#
"""
an Irc object handles the connection to the irc server .. receiving,
sending, connect and reconnect code.
"""
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.socket.utils.generic import getrandomnick, toenc, fromenc, strippedtxt
from gozerlib.socket.utils.generic import fix_format, splittxt, waitforqueue, uniqlist
from gozerlib.utils.locking import lockdec
from gozerlib.config import cfg as config
from gozerlib.datadir import datadir
from gozerlib.botbase import BotBase
from gozerlib.threads import start_new_thread, threaded
from gozerlib.utils.pdod import Pdod
from gozerlib.channelbase import ChannelBase
from gozerlib.morphs import inputmorphs, outputmorphs
## gozerlib.irc imports
from ircevent import Ircevent
from monitor import saymonitor
from wait import Wait
## basic imports
import time
import thread
import socket
import threading
import os
import Queue
import random
import logging
## locks

# module wide lock used to serialize output
outlock = thread.allocate_lock()
# decorator that wraps a function in outlock acquire/release
outlocked = lockdec(outlock)
## exceptions
class AlreadyConnected(Exception):

    """ raised by _connect() when the bot is already connected. """

    pass

class AlreadyConnecting(Exception):

    """ raised by _connect() when a connect is already in progress. """

    pass
class Irc(BotBase):
""" the irc class, provides interface to irc related stuff. """
    def __init__(self, cfg=None, users=None, plugs=None, *args, **kwargs):
        """ initialise the irc bot .. cfg/users/plugs are passed to BotBase. """
        BotBase.__init__(self, cfg, users, plugs, *args, **kwargs)
        BotBase.setstate(self)
        self.type = 'irc'
        self.wait = Wait()
        # serializes writes to the irc server (see send())
        self.outputlock = thread.allocate_lock()
        self.fsock = None      # file object wrapped around the socket
        self.oldsock = None    # plain socket (before optional ssl wrap)
        self.sock = None       # socket actually used for writing (ssl or plain)
        if self.cfg:
            if not self.cfg.has_key('nolimiter'):
                self.nolimiter = 0
            else:
                self.nolimiter = self.cfg['nolimiter']
        self.reconnectcount = 0
        self.pongcheck = 0
        self.nickchanged = 0
        # when set, handle_433 will not pick an alternative nick (see donick)
        self.noauto433 = 0
        if self.state:
            if not self.state.has_key('alternick'):
                self.state['alternick'] = self.cfg['alternick']
            if not self.state.has_key('no-op'):
                self.state['no-op'] = []
        self.nrevents = 0
        self.gcevents = 0
        # ten priority output queues .. lower index is served first
        self.outqueues = [Queue.Queue() for i in range(10)]
        # one 'go' token arrives here per queued item (see putonqueue)
        self.tickqueue = Queue.Queue()
        # nicks that returned 401 (no such nick) .. output to them is dropped
        self.nicks401 = []
        self.stopreadloop = False
        self.stopoutloop = False
        if self.port == 0:
            self.port = 6667
        self.connectlock = thread.allocate_lock()
        # set once the server acknowledges the logon (handle_001)
        self.connectok = threading.Event()
        self.encoding = 'utf-8'
        self.blocking = 1
    def __del__(self):
        """ shut the bot down when the object is garbage collected. """
        self.exit()
    def _raw(self, txt):
        """ send raw text to the server. """
        if not txt:
            return
        logging.debug("irc - sending %s" % txt)
        try:
            # remember send time for the flood protection in send()
            self.lastoutput = time.time()
            itxt = toenc(outputmorphs.do(txt), self.encoding)
            if self.cfg.has_key('ssl') and self.cfg['ssl']:
                self.sock.write(itxt + '\n')
            else:
                # plain sends are truncated to 500 bytes (irc line limit)
                self.sock.send(itxt[:500] + '\n')
        except Exception, ex:
            # check for broken pipe error .. if so ignore
            # used for nonblocking sockets
            try:
                # py2 socket errors unpack to (errno, errstr)
                (errno, errstr) = ex
                if errno != 32 and errno != 9:
                    raise
                else:
                    # broken pipe (32) / bad fd (9) .. back off briefly
                    time.sleep(0.5)
            except:
                logging.warn("irc - ERROR: can't send %s" % str(ex))
                self.reconnect()
    def _connect(self):
        """ connect to server/port using nick. """
        if self.connecting:
            raise AlreadyConnecting()
        if self.connected:
            raise AlreadyConnected()
        self.stopped = 0
        self.connecting = True
        self.connectok.clear()
        # held until connect() releases it after the logon finished
        self.connectlock.acquire()
        # create socket
        if self.ipv6:
            self.oldsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.ipv6 = 1
        else:
            self.oldsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        assert(self.oldsock)
        # optional bind
        server = self.server
        elite = self.cfg['bindhost'] or config['bindhost']
        if elite:
            try:
                self.oldsock.bind((elite, 0))
            except socket.gaierror:
                logging.info("irc - %s - can't bind to %s" % (self.name, elite))
        # resolve the IRC server and pick a random server
        # NOTE(review): server was preset to self.server above, so the
        # resolve chain below only runs when the configured server is empty
        if not server:
            # valid IPv6 ip?
            try: socket.inet_pton(socket.AF_INET6, self.server)
            except socket.error: pass
            else: server = self.server
        if not server:
            # valid IPv4 ip?
            try: socket.inet_pton(socket.AF_INET, self.server)
            except socket.error: pass
            else: server = self.server
        if not server:
            # valid hostname?
            ips = []
            try:
                for item in socket.getaddrinfo(self.server, None):
                    if item[0] in [socket.AF_INET, socket.AF_INET6] and item[1] == socket.SOCK_STREAM:
                        ip = item[4][0]
                        if ip not in ips: ips.append(ip)
            except socket.error: pass
            else: server = random.choice(ips)
        # do the connect .. set timeout to 30 sec upon connecting
        logging.warn('irc - connecting to %s (%s)' % (server, self.server))
        self.oldsock.settimeout(15)
        self.oldsock.connect((server, int(self.port)))
        # we are connected
        logging.warn('irc - connection ok')
        time.sleep(1)
        self.connected = True
        # make file socket
        self.fsock = self.oldsock.makefile("r")
        # set blocking
        self.oldsock.setblocking(self.blocking)
        self.fsock._sock.setblocking(self.blocking)
        # set socket time out
        if self.blocking:
            socktimeout = self.cfg['socktimeout']
            if not socktimeout:
                socktimeout = 301.0
            else:
                socktimeout = float(socktimeout)
            self.oldsock.settimeout(socktimeout)
            self.fsock._sock.settimeout(socktimeout)
        # enable ssl if set
        if self.cfg.has_key('ssl') and self.cfg['ssl']:
            logging.info('irc - ssl enabled')
            self.sock = socket.ssl(self.oldsock)
        else:
            self.sock = self.oldsock
        # try to release the outputlock
        try:
            self.outputlock.release()
        except thread.error:
            pass
        return 1
    def start(self):
        """ connect, start the read/write loops and log on. """
        self._connect()
        # start input and output loops
        start_new_thread(self._readloop, ())
        start_new_thread(self._outloop, ())
        # logon and start monitor
        self._logon()
        self.nickchanged = 0
        self.reconnectcount = 0
        saymonitor.start()
        # wait for the 001 welcome reply (handle_001 sets connectok)
        self.connectok.wait()
        logging.warn("irc - logged on!")
        return 1
def _readloop(self):
""" loop on the socketfile. """
self.stopreadloop = 0
self.stopped = 0
doreconnect = 0
timeout = 1
logging.debug('irc - starting readloop')
prevtxt = ""
while not self.stopped and not self.stopreadloop:
try:
time.sleep(0.01)
if self.cfg.has_key('ssl') and self.cfg['ssl']:
intxt = inputmorphs.do(self.sock.read()).split('\n')
else:
intxt = inputmorphs.do(self.fsock.readline()).split('\n')
# if intxt == "" the other side has disconnected
if self.stopreadloop or self.stopped:
doreconnect = 0
break
if not intxt or not intxt[0]:
doreconnect = 1
break
if prevtxt:
intxt[0] = prevtxt + intxt[0]
prevtxt = ""
if intxt[-1] != '':
prevtxt = intxt[-1]
intxt = intxt[:-1]
for r in intxt:
r = r.rstrip()
rr = fromenc(r, self.encoding)
if not rr:
continue
res = strippedtxt(rr)
res = rr
logging.debug(u"irc - %s" % res)
# parse txt read into an ircevent
try:
ievent = Ircevent().parse(self, res)
except Exception, ex:
handle_exception()
continue
# call handle_ievent
if ievent:
self.handle_ievent(ievent)
timeout = 1
except UnicodeError:
handle_exception()
continue
except socket.timeout:
# timeout occured .. first time send ping .. reconnect if
# second timeout follows
if self.stopped:
break
timeout += 1
if timeout > 2:
doreconnect = 1
logging('irc - no pong received')
break
logging.debug("irc - socket timeout")
pingsend = self.ping()
if not pingsend:
doreconnect = 1
break
continue
except socket.sslerror, ex:
# timeout occured .. first time send ping .. reconnect if
# second timeout follows
if self.stopped or self.stopreadloop:
break
if not 'timed out' in str(ex):
handle_exception()
doreconnect = 1
break
timeout += 1
if timeout > 2:
doreconnect = 1
logging.info('irc - no pong received')
break
logging.error("irc - socket timeout")
pingsend = self.ping()
if not pingsend:
doreconnect = 1
break
continue
except IOError, ex:
if self.blocking and 'temporarily' in str(ex):
time.sleep(0.5)
continue
handle_exception()
reconnect = 1
break
except Exception, ex:
if self.stopped or self.stopreadloop:
break
err = ex
try:
(errno, msg) = ex
except:
errno = -1
msg = err
# check for temp. unavailable error .. raised when using
# nonblocking socket .. 35 is FreeBSD 11 is Linux
if errno == 35 or errno == 11:
time.sleep(0.5)
continue
logging.error("irc - error in readloop: %s" % msg)
doreconnect = 1
break
logging.error('irc - readloop stopped')
self.connectok.clear()
self.connected = False
# see if we need to reconnect
if doreconnect:
time.sleep(2)
self.reconnect()
def _getqueue(self):
""" get one of the outqueues. """
go = self.tickqueue.get()
for index in range(len(self.outqueues)):
if not self.outqueues[index].empty():
return self.outqueues[index]
def putonqueue(self, nr, *args):
""" put output onto one of the output queues. """
self.outqueues[nr].put_nowait(*args)
self.tickqueue.put_nowait('go')
    def _outloop(self):
        """ output loop .. drains the priority queues and hands items to out(). """
        logging.debug('irc - starting output loop')
        self.stopoutloop = 0
        while not self.stopped and not self.stopoutloop:
            queue = self._getqueue()
            if queue:
                try:
                    res = queue.get_nowait()
                except Queue.Empty:
                    continue
                if not res:
                    continue
                try:
                    (printto, what, who, how, fromm, speed) = res
                except ValueError:
                    # not a say() tuple .. treat it as raw text
                    self.send(res)
                    continue
                # drop output to nicks that returned 401 (no such nick)
                if not self.stopped and not self.stopoutloop and printto \
                    not in self.nicks401:
                    self.out(printto, what, who, how, fromm, speed)
            else:
                time.sleep(0.1)
        logging.debug('irc - stopping output loop')
    def _logon(self):
        """ log on to the network. """
        # if password is provided send it
        if self.password:
            logging.warn('irc - sending password')
            self._raw("PASS %s" % self.password)
        # register with irc server
        logging.warn('irc - registering with %s using nick %s' % (self.server, self.nick))
        logging.warn('irc - this may take a while')
        # check for username and realname
        username = self.nick or self.cfg['username']
        realname = self.cfg['realname'] or username
        # first send nick
        time.sleep(1)
        self._raw("NICK %s" % self.nick)
        time.sleep(1)
        # send USER
        self._raw("USER %s localhost localhost :%s" % (username, \
            realname))
        # wait on login .. handle_001 sets connectok
        self.connectok.wait()
    def _onconnect(self):
        """ overload this to run after connect. """
        # hook for subclasses .. intentionally a no-op here
        pass
    def _resume(self, data, reto=None):
        """ resume an existing connection from resume data (see _resumedata). """
        try:
            if data['ssl']:
                # ssl connections can't be resumed from a fd .. reconnect
                self.connectwithjoin()
                return 1
        except KeyError:
            pass
        try:
            fd = int(data['fd'])
        except (TypeError, ValueError):
            fd = None
        self.connecting = False # we're already connected
        self.nick = data['nick']
        self.orignick = self.nick
        self.server = str(data['server'])
        self.port = int(data['port'])
        self.password = data['password']
        self.ipv6 = data['ipv6']
        self.ssl = data['ssl']
        # create socket .. reuse the inherited fd when one was handed over
        if self.ipv6:
            if fd:
                self.sock = socket.fromfd(fd , socket.AF_INET6, socket.SOCK_STREAM)
            else:
                self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.ipv6 = 1
        else:
            if fd:
                self.sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            else:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # do the connect .. set timeout to 30 sec upon connecting
        logging.info('irc - resuming to ' + self.server)
        self.sock.settimeout(30)
        self.stopped = 0
        # make file socket
        self.fsock = self.sock.makefile("r")
        # set blocking
        self.sock.setblocking(self.blocking)
        # set socket time out
        if self.blocking:
            socktimeout = self.cfg['socktimeout']
            if not socktimeout:
                socktimeout = 301.0
            else:
                socktimeout = float(socktimeout)
            self.sock.settimeout(socktimeout)
        # start readloop
        logging.debug('resuming readloop')
        start_new_thread(self._readloop, ())
        start_new_thread(self._outloop, ())
        # init
        self.reconnectcount = 0
        self.nickchanged = 0
        self.connecting = False
        # still there server?
        self._raw('PING :RESUME %s' % str(time.time()))
        self.connectok.set()
        self.connected = True
        self.reconnectcount = 0
        if reto:
            self.say(reto, 'rebooting done')
        saymonitor.start()
        return 1
    def _resumedata(self):
        """ return data used for resume, keyed by bot name. """
        try:
            fd = self.sock.fileno()
        except AttributeError, ex:
            fd = None
        # shut this instance down .. the resumed process takes over the fd
        self.exit()
        return {self.name: {
            'nick': self.nick,
            'server': self.server,
            'port': self.port,
            'password': self.password,
            'ipv6': self.ipv6,
            'ssl': self.ssl,
            'fd': fd
        }}
def outputsizes(self):
""" return sizes of output queues. """
result = []
for q in self.outqueues:
result.append(q.qsize())
return result
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']:
self.say(i, txt, speed=-1)
def save(self):
""" save state data. """
if self.state:
self.state.save()
    def connect(self, reconnect=True):
        """ connect to server/port using nick .. connect can timeout so catch
            exception .. reconnect if enabled.
        """
        res = 0
        try:
            res = self._connect()
            if res:
                # wait for the server welcome (handle_001 sets connectok)
                self.connectok.wait()
                self._onconnect()
                self.connected = True
                logging.warn('logged on !')
                self.connecting = False
        except AlreadyConnecting:
            return 0
        except AlreadyConnected:
            return 0
        except Exception, ex:
            # _connect acquired connectlock .. release it before bailing out
            self.connectlock.release()
            if self.stopped:
                return 0
            logging.error('connecting error: %s' % str(ex))
            if reconnect:
                self.reconnect()
                return
            raise
        # add bot to the fleet
        #if not fleet.byname(self.name):
        #    fleet.addbot(self)
        self.connectlock.release()
        return res
def shutdown(self):
""" shutdown the bot. """
logging.warn('irc - shutdown')
self.stopoutputloop = 1
self.stopped = 1
time.sleep(1)
self.tickqueue.put_nowait('go')
self.close()
self.connecting = False
self.connected = False
self.connectok.clear()
    def close(self):
        """ close the connection, best effort. """
        try:
            if self.cfg.has_key('ssl') and self.cfg['ssl']:
                # the ssl wrapper has no shutdown .. use the raw socket
                self.oldsock.shutdown(2)
            else:
                self.sock.shutdown(2)
        except:
            pass
        try:
            if self.cfg.has_key('ssl') and self.cfg['ssl']:
                self.oldsock.close()
            else:
                self.sock.close()
            self.fsock.close()
        except:
            pass
def exit(self):
""" exit the bot. """
self.stopreadloop = 1
self.stopped = 1
self.connected = 0
self.shutdown()
    def reconnect(self):
        """ reconnect to the irc server, backing off 15s per failed attempt. """
        try:
            if self.stopped:
                return 0
            # determine how many seconds to sleep
            if self.reconnectcount > 0:
                reconsleep = self.reconnectcount*15
                logging.warn('irc - sleeping %s seconds for reconnect' % reconsleep)
                time.sleep(reconsleep)
            if self.stopped:
                logging.warn('irc - stopped.. not reconnecting')
                return 1
            if self.connected:
                logging.warn('irc - already connected .. not reconnecting')
                return 1
            self.reconnectcount += 1
            # tear down the old connection completely before reconnecting
            self.exit()
            logging.warn('reconnecting')
            result = self.connect()
            return result
        except Exception, ex:
            handle_exception()
def handle_pong(self, ievent):
""" set pongcheck on received pong. """
logging.debug('received server pong')
self.pongcheck = 1
def sendraw(self, txt):
""" send raw text to the server. """
if self.stopped:
return
logging.debug(u'irc - sending %s' % txt)
self._raw(txt)
def fakein(self, txt):
""" do a fake ircevent. """
if not txt:
return
logging.warn('irc - fakein - %s' % txt)
self.handle_ievent(Ircevent().parse(self, txt))
    def say(self, printto, what, who=None, how='msg', fromm=None, speed=0, groupchat=False):
        """ say what to printto .. printto is either a target name or a
            socket object (direct send).
        """
        if not printto or not what or printto in self.nicks401:
            return
        # if who is set add "who: " to txt
        if not 'socket' in repr(printto):
            if who:
                what = "%s: %s" % (who, what)
            if speed > 9:
                speed = 9
            # higher speed maps to a lower queue index, served first
            self.putonqueue(9-speed, (printto, what, who, how, fromm, speed))
            return
        # do the sending .. printto is a socket object here
        try:
            printto.send(what + '\n')
            time.sleep(0.001)
        except Exception, ex:
            # peer went away .. nothing to report
            if "Broken pipe" in str(ex) or "Bad file descriptor" in str(ex):
                return
            handle_exception()
    def out(self, printto, what, who=None, how='msg', fromm=None, speed=5):
        """ output the first 375 chars .. put the rest into cache. """
        # convert the data to the encoding
        try:
            what = toenc(what.rstrip())
        except Exception, ex:
            logging.error("can't output: %s" % str(ex))
            return
        if not what:
            return
        # split up in parts of 375 chars overflowing on word boundaries
        txtlist = splittxt(what)
        size = 0
        # send first block
        self.output(printto, txtlist[0], how, who, fromm)
        # see if we need to store output in less cache
        result = ""
        if len(txtlist) > 2:
            if not fromm:
                self.less(printto, txtlist[1:])
            else:
                self.less(fromm, txtlist[1:])
            size = len(txtlist) - 2
            # second chunk gets a "(+n)" marker for the cached remainder
            result = txtlist[1:2][0]
            if size:
                result += " (+%s)" % size
        else:
            if len(txtlist) == 2:
                result = txtlist[1]
        # send second block
        if result:
            self.output(printto, result, how, who, fromm)
    def output(self, printto, what, how='msg' , who=None, fromm=None):
        """ send output to the server and feed it to the saymonitor. """
        self.outputnolog(printto, what, how, who, fromm)
        saymonitor.put(self.name, printto, what, who, how, fromm)
def outputnolog(self, printto, what, how, who=None, fromm=None):
""" do output to irc server .. rate limit to 3 sec. """
try:
what = fix_format(what)
if what:
if how == 'msg':
self.privmsg(printto, what)
elif how == 'notice':
self.notice(printto, what)
elif how == 'ctcp':
self.ctcp(printto, what)
except Exception, ex:
handle_exception()
    def donick(self, nick, setorig=0, save=0, whois=0):
        """ change nick .. optionally set original nick and/or save to config. """
        if not nick:
            return
        # disable auto 433 nick changing
        self.noauto433 = 1
        # set up wait for NICK command and issue NICK
        queue = Queue.Queue()
        # nicks are capped at 16 characters
        nick = nick[:16]
        self.wait.register('NICK', self.nick[:16], queue, 12)
        self._raw('NICK %s\n' % nick)
        # wait up to 5 seconds for the server to confirm the change
        result = waitforqueue(queue, 5)
        # reenable 433 auto nick changing
        self.noauto433 = 0
        if not result:
            return 0
        self.nick = nick
        # send whois
        if whois:
            self.whois(nick)
        # set original
        if setorig:
            self.orignick = nick
        # save nick to state and config file
        if save:
            self.state['nick'] = nick
            self.state.save()
            self.cfg.set('nick', nick)
            self.cfg.save()
        return 1
    def join(self, channel, password=None):
        """ join channel with optional password. """
        if not channel:
            return
        # do join with password
        if password:
            self._raw('JOIN %s %s' % (channel, password))
            # remember the key so later rejoins can use it
            chan = ChannelBase(self.datadir + os.sep + 'channels' + os.sep + channel)
            if chan:
                chan.setpass('IRC', password)
        else:
            # do pure join
            self._raw('JOIN %s' % channel)
        if self.state:
            if channel not in self.state.data.joinedchannels:
                self.state.data.joinedchannels.append(channel)
                self.state.save()
def part(self, channel):
""" leave channel. """
if not channel:
return
self._raw('PART %s' % channel)
try:
self.state.data['joinedchannels'].remove(channel)
self.state.save()
except (KeyError, ValueError):
pass
def who(self, who):
""" send who query. """
if not who:
return
self.putonqueue(6, 'WHO %s' % who.strip())
def names(self, channel):
""" send names query. """
if not channel:
return
self.putonqueue(6, 'NAMES %s' % channel)
def whois(self, who):
""" send whois query. """
if not who:
return
self.putonqueue(6, 'WHOIS %s' % who)
def privmsg(self, printto, what):
""" send privmsg to irc server. """
if not printto or not what:
return
self.send('PRIVMSG %s :%s' % (printto, what))
    def send(self, txt):
        """ send text to irc server, serialized and flood protected. """
        if not txt:
            return
        if self.stopped:
            return
        try:
            self.outputlock.acquire()
            now = time.time()
            # flood protection .. allow at most one line per 4 seconds
            timetosleep = 4 - (now - self.lastoutput)
            if timetosleep > 0 and not self.nolimiter:
                logging.warn('irc - flood protect')
                time.sleep(timetosleep)
            txt = toenc(strippedtxt(txt))
            txt = txt.rstrip()
            self._raw(txt)
            try:
                self.outputlock.release()
            except:
                pass
        except Exception, ex:
            # always release the lock, even on failure
            try:
                self.outputlock.release()
            except:
                pass
            if not self.blocking and 'broken pipe' in str(ex).lower():
                # harmless on nonblocking sockets .. just wait a bit
                time.sleep(0.5)
            else:
                logging.error('irc - send error: %s' % str(ex))
                self.reconnect()
                return
def voice(self, channel, who):
""" give voice. """
if not channel or not who:
return
self.putonqueue(9, 'MODE %s +v %s' % (channel, who))
def doop(self, channel, who):
""" give ops. """
if not channel or not who:
return
self._raw('MODE %s +o %s' % (channel, who))
def delop(self, channel, who):
""" de-op user. """
if not channel or not who:
return
self._raw('MODE %s -o %s' % (channel, who))
def quit(self, reason='http://feedprovider.googlecode.com'):
""" send quit message. """
logging.debug('irc - sending quit')
try:
self._raw('QUIT :%s' % reason)
except IOError:
pass
def notice(self, printto, what):
""" send notice. """
if not printto or not what:
return
self.send('NOTICE %s :%s' % (printto, what))
def ctcp(self, printto, what):
""" send ctcp privmsg. """
if not printto or not what:
return
self.send("PRIVMSG %s :\001%s\001" % (printto, what))
def ctcpreply(self, printto, what):
""" send ctcp notice. """
if not printto or not what:
return
self.putonqueue(2, "NOTICE %s :\001%s\001" % (printto, what))
def action(self, printto, what):
""" do action. """
if not printto or not what:
return
self.putonqueue(9, "PRIVMSG %s :\001ACTION %s\001" % (printto, what))
    def handle_ievent(self, ievent):
        """ handle ircevent .. dispatch to the matching 'handle_<cmnd>' method. """
        try:
            # a 401-ignored nick that shows activity again gets unignored
            if ievent.cmnd == 'JOIN' or ievent.msg:
                if ievent.nick in self.nicks401:
                    self.nicks401.remove(ievent.nick)
                    logging.warn('irc - %s joined .. unignoring' % ievent.nick)
            # see if the irc object has a method to handle the ievent
            method = getattr(self,'handle_' + ievent.cmnd.lower())
            # try to call method
            if method:
                try:
                    method(ievent)
                except:
                    handle_exception()
        except AttributeError:
            # no command method to handle event
            pass
        try:
            # see if there are wait callbacks
            self.wait.check(ievent)
        except:
            handle_exception()
def handle_432(self, ievent):
""" erroneous nick. """
self.handle_433(ievent)
    def handle_433(self, ievent):
        """ handle nick already taken. """
        if self.noauto433:
            # donick() is changing the nick itself .. don't interfere
            return
        nick = ievent.arguments[1]
        # check for alternick
        alternick = self.state['alternick']
        if alternick and not self.nickchanged:
            logging.warn('irc - using alternick %s' % alternick)
            self.donick(alternick)
            self.nickchanged = 1
            return
        # use random nick
        randomnick = getrandomnick()
        self._raw("NICK %s" % randomnick)
        self.nick = randomnick
        logging.warn('irc - ALERT: nick %s already in use/unavailable .. using randomnick %s' % (nick, randomnick))
        self.nickchanged = 1
def handle_ping(self, ievent):
""" send pong response. """
if not ievent.txt:
return
self._raw('PONG :%s' % ievent.txt)
def handle_001(self, ievent):
""" we are connected. """
self.connectok.set()
self.connected = True
self.whois(self.nick)
def handle_privmsg(self, ievent):
""" check if msg is ctcp or not .. return 1 on handling. """
if ievent.txt and ievent.txt[0] == '\001':
self.handle_ctcp(ievent)
return 1
def handle_notice(self, ievent):
""" handle notice event .. check for version request. """
if ievent.txt and ievent.txt.find('VERSION') != -1:
self.say(ievent.nick, self.cfg['version'], None, 'notice')
return 1
    def handle_ctcp(self, ievent):
        """ handle client to client request .. version and ping. """
        if ievent.txt.find('VERSION') != -1:
            self.ctcpreply(ievent.nick, 'VERSION %s' % self.cfg['version'])
        if ievent.txt.find('PING') != -1:
            try:
                # echo both ping arguments back so the peer can compute lag
                pingtime = ievent.txt.split()[1]
                pingtime2 = ievent.txt.split()[2]
                if pingtime:
                    self.ctcpreply(ievent.nick, 'PING ' + pingtime + ' ' + \
                        pingtime2)
            except IndexError:
                # malformed ping without both arguments .. ignore
                pass
def handle_error(self, ievent):
""" show error. """
logging.error(str(ievent))
if ievent.cmnd == "422":
return
if ievent.txt.startswith('Closing'):
logging.error("irc - %s" % ievent.txt)
else:
logging.error("irc - %s - %s" % (ievent.arguments, ievent.txt))
def ping(self):
""" ping the irc server. """
logging.debug('irc - sending ping')
try:
self.putonqueue(1, 'PING :%s' % self.server)
return 1
except Exception, ex:
logging.debug("irc - can't send ping: %s" % str(ex))
return 0
    def handle_401(self, ievent):
        """ handle 401 (no such nick) .. stop sending output to that nick. """
        try:
            nick = ievent.arguments[1]
            if nick not in self.nicks401:
                logging.warn('irc - 401 on %s .. ignoring' % nick)
                self.nicks401.append(nick)
        except:
            # best effort .. a malformed 401 is simply ignored
            pass
    def handle_700(self, ievent):
        """ handle 700 .. encoding request of the server. """
        try:
            # switch the connection encoding to what the server asks for
            self.encoding = ievent.arguments[1]
            logging.warn('irc - 700 encoding now is %s' % self.encoding)
        except:
            # malformed 700 .. keep the current encoding
            pass
| Python |
# gozerbot/channels.py
#
#
"""
channel related data. implemented with a persisted dict of dicts.
:example:
key = channels[event.channel]['key']
"""
## gozerlib imports
from gozerlib.utils.pdod import Pdod
class Channels(Pdod):

    """
    channels class .. per channel data.

    :param fname: filename to persist the data to
    :type fname: string
    """

    def __init__(self, fname):
        # call base constructor .. loads persisted data into self.data
        Pdod.__init__(self, fname)
        # make sure attributes are initialised
        for j in self.data.values():
            if not j.has_key('perms'):
                j['perms'] = []
            if not j.has_key('autovoice'):
                j['autovoice'] = 0

    def __setitem__(self, a, b):
        """ assign channel data .. b replaces any existing entry for a. """
        # the old code first set self.data[a] = {} and then immediately
        # overwrote it with b .. the pre-initialisation was dead code
        self.data[a] = b

    def getchannels(self):
        """ return list of known channel names (stripped, unique). """
        result = []
        for channel in self.data.keys():
            channel = channel.strip()
            if channel not in result:
                result.append(channel)
        return result

    def getchannelswithkeys(self):
        """ return channels, each suffixed with its key when one is set. """
        result = []
        # loop over channels gathering channel name and key
        for channel in self.data.keys():
            channel = channel.strip()
            try:
                key = self.data[channel]['key']
                if not channel + ' ' + key in result:
                    result.append(channel + ' ' + key)
            except KeyError:
                # no key set for this channel .. list the bare name
                if channel not in result:
                    result.append(channel)
        return result

    def getkey(self, channel):
        """
        return key of channel if set, else None.

        :param channel: channel to get key from
        :type channel: string
        """
        try:
            key = self.data[channel]['key']
        except:
            key = None
        return key

    def getnick(self, channel):
        """
        return bot nick of channel if set, else None.

        :param channel: channel to get nick from
        :type channel: string
        """
        try:
            nick = self.data[channel]['nick']
        except:
            nick = None
        return nick
| Python |
# gozerlib/utils/exception.py
#
#
""" exception related functions. """
## basic imports
import sys
import traceback
import logging
import thread
import os
import logging
## define
exceptionlist = []
exceptionevents = []

## functions

def exceptionmsg():
    """ format the current exception and its traceback as a single-line string. """
    exctype, excvalue, tb = sys.exc_info()
    trace = traceback.extract_tb(tb)
    result = ""
    for entry in trace:
        fname, linenr, func = entry[0], entry[1], entry[2]
        # turn the file path into a dotted module-ish name
        plugfile = fname[:-3].split(os.sep)
        mod = []
        for part in plugfile[::-1]:
            # path segments above these markers are not interesting
            if part in ['gaeupload', 'feedprovider']:
                break
            mod.append(part)
            if part in ['gozerlib', 'waveapi', 'google']:
                break
        ownname = '.'.join(mod[::-1])
        result += "%s:%s %s | " % (ownname, linenr, func)
    del trace
    res = "%s%s: %s" % (result, exctype, excvalue)
    # remember each distinct exception message once
    if res not in exceptionlist:
        exceptionlist.append(res)
    return res
def handle_exception(event=None, log=True, txt=""):
    """ handle exception.. for now only print it. """
    errormsg = exceptionmsg()
    if txt:
        errormsg = "%s - %s" % (txt, errormsg)
    if log:
        logging.error(errormsg)
    if event:
        # remember the failing event and report the error back on it
        exceptionevents.append((event, errormsg))
        if event.bot:
            event.bot.error = errormsg
        event.reply(errormsg)
| Python |
# gozerlib/utils/lockmanager.py
#
#
""" manages locks """
## basic imports
import logging
import thread
import threading
## classes
class LockManager(object):

    """ place to hold named locks. """

    def __init__(self):
        # mapping of name -> lock object
        self.locks = {}

    def allocate(self, name):
        """ allocate a new lock """
        self.locks[name] = thread.allocate_lock()
        # NOTE(review): this module does not import logging itself ..
        # assumes the file-level imports provide it
        logging.debug('lockmanager - allocated %s' % name)

    def get(self, name):
        """ get lock, allocating it on first use """
        if not self.locks.has_key(name):
            self.allocate(name)
        return self.locks[name]

    def delete(self, name):
        """ delete lock """
        if self.locks.has_key(name):
            del self.locks[name]

    def acquire(self, name):
        """ acquire lock, allocating it on first use """
        if not self.locks.has_key(name):
            self.allocate(name)
        logging.debug('lockmanager - acquire %s' % name)
        self.locks[name].acquire()

    def release(self, name):
        """ release lock """
        logging.debug('lockmanager - releasing %s' % name)
        self.locks[name].release()
class RlockManager(LockManager):

    """ lock manager that hands out re-entrant locks. """

    def allocate(self, name):
        """ allocate a new RLock """
        self.locks[name] = threading.RLock()
        logging.debug('lockmanager - allocated RLock %s' % name)
| Python |
# lib/utils/generic.py
#
#
""" generic functions. """
## lib imports
from exception import handle_exception
from trace import calledfrom
from lazydict import LazyDict
from gozerlib.datadir import datadir
## simplejson import
from simplejson import dumps
## generic imports
import time
import sys
import re
import getopt
import types
import os
import random
import Queue
import logging
## functions
def jsonstring(s):
    """ serialize s to a json string .. tuples are encoded as lists. """
    # isinstance instead of py2-only types.TupleType .. same behavior
    if isinstance(s, tuple):
        s = list(s)
    return dumps(s)
def getwho(bot, nick):
    """ return userhost for nick. NOT IMPLEMENTED YET .. return the nick. """
    # placeholder .. bot is accepted for interface compatibility but unused
    return nick
def getversion(txt=""):
    """ return a version string, optionally suffixed with txt. """
    # imported lazily to avoid a circular import with gozerlib.config
    from gozerlib.config import cfg
    return u"%s" % (cfg.get('version') + u' ' + txt)
def splittxt(what, l=375):
    """ split what into chunks of roughly l characters, overflowing each
        chunk to the next word boundary (space). returns a list of chunks.
    """
    txtlist = []
    start = 0
    end = l
    length = len(what)
    # explicit floor division .. identical on py2, keeps the count an int
    for i in range(length // l + 1):
        # extend the chunk to the next space after position end
        endword = what.find(' ', end)
        if endword == -1:
            endword = length
        res = what[start:endword]
        if res:
            txtlist.append(res)
        start = endword
        end = start + l
    return txtlist
def getrandomnick():
    """ return a random nick of the form gbot2-<0..100>. """
    suffix = random.randint(0, 100)
    return "gbot2-%s" % suffix
def decodeperchar(txt, encoding='utf-8', what=""):
    """ decode a string char by char, silently dropping characters that
        can't be decoded (they are logged once each).
    """
    decoded = []
    bad = []
    for char in txt:
        try:
            decoded.append(char.decode(encoding))
        except UnicodeDecodeError:
            if char not in bad:
                bad.append(char)
    if bad:
        if what:
            logging.debug("%s: can't decode %s characters to %s" % (what, bad, encoding))
        else:
            logging.debug("can't decode %s characters to %s" % (bad, encoding))
    return u"".join(decoded)
def toenc(what, encoding='utf-8'):
    """ convert to encoding .. returns an encoded byte string, or u"" when
        the value cannot be encoded.
    """
    if not what:
        # None/empty input encodes to the empty string
        what = u""
    try:
        w = unicode(what)
        return w.encode(encoding)
    except UnicodeEncodeError:
        logging.debug("can't encode %s to %s" % (what, encoding))
        return u""
def fromenc(txt, encoding='utf-8', what=""):
    """ convert txt from encoding to unicode. byte strings are decoded,
        unicode is returned as is. falls back to per-character decoding
        on decode errors.
    """
    if not txt:
        txt = u""
    try:
        # unicode input needs no decoding
        if type(txt) == types.UnicodeType:
            return txt
        # byte string input .. the original version had no return on this
        # path and handed None back for every byte string
        return unicode(txt.decode(encoding))
    except UnicodeDecodeError:
        return decodeperchar(txt, encoding, what)
def toascii(what):
    """ convert to ascii, replacing unmappable characters with '?'. """
    return what.encode('ascii', 'replace')
def tolatin1(what):
    """ convert to latin-1, replacing unmappable characters with '?'. """
    return what.encode('latin-1', 'replace')
def strippedtxt(what, allowed=[]):
    """ strip control characters from txt, keeping a whitelist of
        formatting codes (bold/color/underline markers and tab).
    """
    keep = allowed + ['\001', '\002', '\003', '\t']
    return ''.join([ch for ch in what if ord(ch) > 31 or (keep and ch in keep)])
def uniqlist(l):
    """ return unique elements of l as a list, preserving first-seen order. """
    result = []
    # the original looped over i but tested/appended an undefined name j,
    # raising NameError on any non-empty input
    for item in l:
        if item not in result:
            result.append(item)
    return result
def jabberstrip(text, allowed=[]):
    """ strip control characters for jabber transmission, keeping
        newline and tab.
    """
    keep = allowed + ['\n', '\t']
    return ''.join([ch for ch in text if ord(ch) > 31 or (keep and ch in keep)])
def filesize(path):
    """ return the size in bytes of the file at path. """
    # st_size is the named equivalent of stat tuple index 6
    return os.stat(path).st_size
def touch(fname):
    """ create fname if it does not exist yet (contents untouched). """
    fd = os.open(fname, os.O_CREAT | os.O_WRONLY)
    os.close(fd)
def stringinlist(s, l):
    """ return 1 when s is a substring of any element of l, else None. """
    if any(s in candidate for candidate in l):
        return 1
def stripped(userhost):
    """ return a stripped userhost (everything before the '/'). """
    return userhost.partition('/')[0]
def gethighest(ddir, ffile):
    """ return ffile with the next sequence-number extension appended,
        based on the highest '<name>.<nr>' directory found in ddir.
    """
    highest = 0
    for entry in os.listdir(ddir):
        # only directories whose name contains ffile count
        if ffile not in entry or not os.path.isdir(ddir + os.sep + entry):
            continue
        tail = entry.split('.')[-1]
        try:
            seqnr = int(tail)
        except ValueError:
            continue
        if seqnr > highest:
            highest = seqnr
    return ffile + '.' + str(highest + 1)
def waitforqueue(queue, timeout=10, maxitems=None):
    """ wait for results to arrive in a queue .. return the list of results.

        stops on a falsy item, when maxitems results are collected, or when
        nothing arrives within timeout seconds.
    """
    result = []
    while 1:
        try:
            res = queue.get(1, timeout)
        except Queue.Empty:
            # nothing arrived within the timeout .. give up instead of
            # looping forever (the original 'continue' made the timeout
            # argument meaningless and could hang callers like donick)
            break
        if not res:
            break
        result.append(res)
        if maxitems and len(result) == maxitems:
            break
    return result
def checkqueues(self, queues, resultlist):
    """ send resultlist to all the queues .. return True when at least one
        queue was filled, False when queues is empty.
    """
    filled = False
    for queue in queues:
        for item in resultlist:
            queue.put_nowait(item)
        # the original returned True here, inside the loop, so only the
        # first queue ever received the results
        filled = True
    return filled
def dosed(filename, sedstring):
    """ apply an 's/from/to/' style replacement to filename in place.

    :param filename: file to edit
    :param sedstring: sed-like expression, e.g. "s/old/new/"

    fixes two defects of the original: the input file handle was never
    closed, and the rename fallback caught WindowsError, a name that
    does not exist on posix (NameError).
    """
    try:
        f = open(filename, 'r')
    except IOError:
        return
    tmp = filename + '.tmp'
    seds = sedstring.split('/')
    fr = seds[1].replace('\\', '')
    to = seds[2].replace('\\', '')
    try:
        fout = open(tmp, 'w')
        try:
            for line in f:
                fout.write(line.replace(fr, to))
        finally:
            fout.flush()
            fout.close()
    finally:
        # the original leaked this handle
        f.close()
    try:
        os.rename(tmp, filename)
    except OSError:
        # windows can't atomically rename onto an existing file; the
        # original caught WindowsError, which is undefined on posix
        os.remove(filename)
        os.rename(tmp, filename)
| Python |
# gozerlib/utils/trace.py
#
#
""" trace related functions """
## basic imports
import sys
import os
## define
stopmarkers = ['gozerlib', 'commonplugs', 'waveplugs', 'socketplugs', 'waveapi', 'feedprovider']
## functions
def calledfrom(frame):
    """ return the plugin name deduced from the caller's source filename.

    walks the path components of frame.f_back's source file from the
    right until one of the ``stopmarkers`` package names is hit, and
    joins the collected components into a dotted module path.

    :param frame: frame object to inspect (its f_back is used)
    :rtype: string or None
    """
    try:
        # filename of the code object one frame up
        filename = frame.f_back.f_code.co_filename
        plugfile = filename.split(os.sep)
        if plugfile:
            mod = []
            # collect path components right-to-left up to a marker package
            for i in plugfile[::-1]:
                mod.append(i)
                if i in stopmarkers:
                    break
            # [:-3] drops the trailing '.py' extension
            modstr = '.'.join(mod[::-1])[:-3]
            # event handler modules are reduced to their bare name
            if 'handler_' in modstr:
                modstr = modstr.split('.')[-1]
    except AttributeError:
        # f_back was None .. no caller frame
        modstr = None
    # NOTE(review): str.split never returns an empty list, so the
    # 'if plugfile' branch always runs and modstr is always bound here
    del frame
    return modstr
def callstack(frame):
    """ return the call stack of frame as a list of 'module:lineno' strings.

    the original passed two arguments to str.join, raising a TypeError
    that the bare except silently swallowed, so it always returned [].
    the except is also narrowed to AttributeError (f_back exhausted).
    """
    result = []
    loopframe = frame
    while 1:
        try:
            filename = loopframe.f_back.f_code.co_filename
            result.append("%s:%s" % ('.'.join(filename[:-3].split(os.sep)), loopframe.f_back.f_lineno))
            loopframe = loopframe.f_back
        except AttributeError:
            # f_back is None .. top of the stack reached
            break
    del frame
    return result
def whichmodule(depth=1):
    """ return 'module:lineno' of the caller <depth> frames up the stack.

    :param depth: number of frames to skip before inspecting
    :rtype: string or None
    """
    try:
        frame = sys._getframe(depth)
        # path components of the caller's source file, '.py' dropped
        plugfile = frame.f_back.f_code.co_filename[:-3].split('/')
        lineno = frame.f_back.f_lineno
        mod = []
        # collect components right-to-left until a marker package is found
        for i in plugfile[::-1]:
            mod.append(i)
            if i in stopmarkers:
                break
        modstr = '.'.join(mod[::-1]) + ':' + str(lineno)
        # event handler modules are reduced to their bare name
        if 'handler_' in modstr:
            modstr = modstr.split('.')[-1]
    except AttributeError:
        # frame.f_back was None .. no caller frame available
        modstr = None
    del frame
    return modstr
def whichplugin(depth=1):
    """ return the dotted plugin module name <depth> frames up the stack.

    same as whichmodule() but without the ':lineno' suffix.

    :param depth: number of frames to skip before inspecting
    :rtype: string or None
    """
    try:
        frame = sys._getframe(depth)
        # path components of the caller's source file, '.py' dropped
        plugfile = frame.f_back.f_code.co_filename[:-3].split('/')
        lineno = frame.f_back.f_lineno
        mod = []
        # collect components right-to-left until a marker package is found
        for i in plugfile[::-1]:
            mod.append(i)
            if i in stopmarkers:
                break
        modstr = '.'.join(mod[::-1])
        # event handler modules are reduced to their bare name
        if 'handler_' in modstr:
            modstr = modstr.split('.')[-1]
    except AttributeError:
        # frame.f_back was None .. no caller frame available
        modstr = None
    del frame
    return modstr
| Python |
# gozerlib/utils/lazydict.py
#
# thnx to maze
""" a lazydict allows dotted access to a dict .. dict.key. """
## simplejson imports
from simplejson import loads, dumps
## basic imports
from xml.sax.saxutils import unescape
import copy
import logging
## defines
cpy = copy.deepcopy
## classes
class LazyDict(dict):
    """ lazy dict allows dotted access to a dict: d.key == d['key'] """

    def __getattr__(self, attr, default=None):
        """ get attribute, inserting <default> when the key is missing.

        NOTE(review): this mutates the dict on read -- a mere lookup of
        an unknown attribute stores default (None) under that key.
        """
        if not self.has_key(attr):
            self[attr] = default
        return self[attr]

    def __setattr__(self, attr, value):
        """ set attribute as a dict key. """
        self[attr] = value

    def dostring(self):
        """ return a 'key=value ' string representation of the dict. """
        res = ""
        cp = dict(self)
        for item, value in cp.iteritems():
            res += "%r=%r " % (item, value)
        return res

    def dump(self):
        """ serialize this dict to a json string.

        values that simplejson cannot encode are silently skipped.
        """
        new = {}
        for name in self:
            try:
                prop = getattr(self, name)
                # probe serializability before keeping the value
                dumps(prop)
                new[name] = prop
            except TypeError:
                pass
        logging.debug('lazydict - tojson - %s' % str(new))
        return dumps(new)

    def load(self, input):
        """ update this dict from a json string .. returns self. """
        # undo xml escaping before decoding
        instr = unescape(input)
        try:
            temp = loads(instr)
        except ValueError:
            logging.error("lazydict - can't decode %s" % input)
            return self
        if type(temp) != dict:
            logging.error("lazydict - %s is not a dict" % str(temp))
            return self
        self.update(temp)
        return self
| Python |
# gozerlib/utils/xmpp.py
#
#
""" XMPP related helper functions. """
def stripped(userhost):
    """ strip the resource part from a jabber userhost (after '/'). """
    head, _sep, _rest = userhost.partition('/')
    return head
def resource(userhost):
    """ return the resource part of a jabber userhost (after the '/').

    the original caught ValueError, but indexing the too-short result of
    split() raises IndexError, so 'user@host' without a resource crashed.

    :rtype: string .. "" when there is no resource
    """
    try:
        return userhost.split('/')[1]
    except IndexError:
        return ""
| Python |
# gozerlib/utils/limlist.py
#
#
""" limited list """
class Limlist(list):
    """ list capped at a fixed number of items .. old entries fall out. """

    def __init__(self, limit):
        # remember the cap; enforced by insert() and append()
        self.limit = limit
        super(Limlist, self).__init__()

    def insert(self, index, item):
        """ insert item at index, dropping the newest (last) entry when full.

        returns -1 when index is beyond the end of the list.
        """
        if index > len(self):
            return -1
        if len(self) >= self.limit:
            del self[-1]
        super(Limlist, self).insert(index, item)

    def append(self, item):
        """ append item, dropping the oldest (first) entry when full. """
        if len(self) >= self.limit:
            del self[0]
        super(Limlist, self).append(item)
| Python |
# lib/utils/timeutils.py
#
#
""" time related helper functions. """
## lib imports
from exception import handle_exception
## basic imports
import time
import re
import calendar
## vars
leapfactor = float(6*60*60)/float(365*24*60*60)  # fraction of a year contributed by leap days (~6h per 365d)
timere = re.compile('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)')  # matches time.ctime() output: wday month day hh:mm:ss year
bdmonths = ['Bo', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']  # 1-based month names ('Bo' pads index 0)
def elapsedstring(nsec, ywd = None):
    """ given a number of seconds return a human readable elapsed-time string.

    :param nsec: number of elapsed seconds
    :param ywd: when set, only years/weeks/days are reported (no h/m/s)
    :rtype: string .. or the integer 0 when nothing elapsed at all
    """
    nsec = int(float(nsec))
    year = 365*24*60*60
    week = 7*24*60*60
    day = 24*60*60
    hour = 60*60
    minute = 60
    # compensate for leap days (about 6 extra hours per year)
    nsec -= nsec * leapfactor
    years = int(nsec/year)
    nsec -= years*year
    weeks = int(nsec/week)
    nsec -= weeks*week
    days = int(nsec/day)
    nsec -= days*day
    hours = int(nsec/hour)
    nsec -= hours*hour
    minutes = int(nsec/minute)
    sec = int(nsec - minutes*minute)
    result = ''
    if (years > 1):
        result = str(years) + " years "
    if (years == 1):
        result = "1 year "
    if (weeks > 1):
        result += str(weeks) + " weeks "
    if (weeks == 1):
        result += "1 week "
    if (days > 1):
        if ywd:
            result += 'and '+ str(days) + " days"
        else:
            result += str(days) + " days "
    if (days == 1):
        if ywd:
            result += 'and 1 day'
        else:
            result += "1 day "
    # in years/weeks/days mode we are done here
    if ywd:
        return result
    if (hours > 1):
        result += str(hours) + " hours "
    if (hours == 1):
        result += "1 hour "
    if (minutes > 1):
        result += str(minutes) + " minutes "
    if (minutes == 1):
        result += "1 minute "
    if sec == 0:
        if result:
            return result
        else:
            # nothing at all elapsed
            return 0
    if (sec == 1):
        if result:
            result += "and 1 second "
        else:
            result = "1 second"
    else:
        if result:
            result += "and " + str(sec) + " seconds"
        else:
            result = str(sec) + " seconds"
    return result.strip()
def hourmin(ttime):
    """ return the zero-padded HH:MM of a unix timestamp (local time). """
    parsed = time.localtime(ttime)
    return "%02d:%02d" % (parsed[3], parsed[4])
def striptime(what):
    """ remove date (d-d-d, d-d) and time (d:d) indicators from a string. """
    result = str(what)
    for pattern in ('\d+-\d+-\d+', '\d+-\d+', '\d+:\d+'):
        result = re.sub(pattern, '', result)
    # collapse the whitespace left behind
    return re.sub('\s+', ' ', result).strip()
def now():
    """ return the current time as a ctime string, tz/dst compensated. """
    offset = int(time.timezone)
    if time.daylight:
        # crude dst correction: add one hour
        offset += 3600
    return time.ctime(time.time() + offset)
def today():
    """ return the unix timestamp of 0:00 of the current day. """
    offset = int(time.timezone)
    if time.daylight:
        offset += 3600
    ttime = time.ctime(time.time() + offset)
    # pick day, month and year out of the ctime string
    matched = re.search('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)', ttime)
    if matched:
        stamp = "%s %s %s" % (matched.group(3), matched.group(2), matched.group(7))
        return time.mktime(time.strptime(stamp, "%d %b %Y"))
def strtotime(what):
    """ convert a date/time string to a unix timestamp.

    accepts dd-mm-yyyy, dd-mm, hh:mm:ss and hh:mm fragments in any
    combination; returns None when nothing parseable is found.

    the original only looked for a time part when no full date was
    matched, and in the full-date branch referenced ``hms`` before
    assignment (a NameError that the catch-all except turned into a
    silent None). here the date and time parts are parsed independently.
    """
    daymonthyear = 0
    hms = 0
    try:
        text = str(what)
        dmyre = re.search('(\d+)-(\d+)-(\d+)', text)
        dmre = re.search('(\d+)-(\d+)', text)
        if dmyre:
            (day, month, year) = [int(x) for x in dmyre.groups()]
            if day > calendar.monthrange(year, month)[1]:
                return None
            date = "%s %s %s" % (day, bdmonths[month], year)
            daymonthyear = time.mktime(time.strptime(date, "%d %b %Y"))
        elif dmre:
            # no year given .. assume the current one
            year = time.localtime()[0]
            (day, month) = [int(x) for x in dmre.groups()]
            if day > calendar.monthrange(year, month)[1]:
                return None
            date = "%s %s %s" % (day, bdmonths[month], year)
            daymonthyear = time.mktime(time.strptime(date, "%d %b %Y"))
        hmsre = re.search('(\d+):(\d+):(\d+)', text)
        hmre = re.search('(\d+):(\d+)', text)
        if hmsre:
            (h, m, s) = [int(x) for x in hmsre.groups()]
            if h > 24 or h < 0 or m > 60 or m < 0 or s > 60 or s < 0:
                return None
            hms = h * 3600 + m * 60 + s
        elif hmre:
            (h, m) = [int(x) for x in hmre.groups()]
            if h > 24 or h < 0 or m > 60 or m < 0:
                return None
            hms = h * 3600 + m * 60
        if not daymonthyear and not hms:
            return None
        # fall back to today's midnight when only a time was given
        heute = daymonthyear or today()
        return heute + hms
    except (OverflowError, ValueError):
        return None
    except Exception:
        return None
def uurminsec(ttime):
    """ return the zero-padded HH:MM:SS of a unix timestamp (local time). """
    parsed = time.localtime(ttime)
    return "%02d:%02d:%02d" % (parsed[3], parsed[4], parsed[5])
def getdaymonth(ttime):
    """ return (day, monthname) of the given timestamp, or (None, None). """
    matched = re.search('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)', time.ctime(ttime))
    if matched:
        return (matched.group(3), matched.group(2))
    return (None, None)
def getdaymonthyear(ttime):
    """ return (day, monthname, year) of the given time, or (None, None, None).

    the original subscripted ``result.group[7]`` instead of calling
    ``result.group(7)``, raising a TypeError whenever the time matched.
    the ctime regex is inlined so the function is self-contained.
    """
    timestr = time.ctime(ttime)
    result = re.search('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)', timestr)
    if result:
        return (result.group(3), result.group(2), result.group(7))
    else:
        return (None, None, None)
def dmy(ttime):
    """ return 'day month year' of the given unix timestamp, or None. """
    matched = re.search('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)', time.ctime(ttime))
    if not matched:
        return None
    return "%s %s %s" % (matched.group(3), matched.group(2), matched.group(7))
| Python |
# gozerbot/pdod.py
#
#
""" pickled dicts of dicts """
__copyright__ = 'this file is in the public domain'
from gozerlib.utils.lazydict import LazyDict
from gozerlib.utils.locking import lockdec
from gozerlib.persist import Persist
import thread
pdodlock = thread.allocate_lock()
locked = lockdec(pdodlock)
class Pdod(Persist):
    """ pickled dicts of dicts, persisted through the Persist base class. """

    def __init__(self, filename):
        Persist.__init__(self, filename)
        # make sure there is a dict to work with after loading
        if not self.data:
            self.data = LazyDict()

    def __getitem__(self, name):
        """ return item with name .. None when missing. """
        if self.data.has_key(name):
            return self.data[name]

    #@locked
    def save(self):
        """ persist the data to disk. """
        Persist.save(self)

    #@locked
    def __delitem__(self, name):
        """ delete name item .. no-op when missing. """
        if self.data.has_key(name):
            return self.data.__delitem__(name)

    #@locked
    def __setitem__(self, name, item):
        """ set name item. """
        self.data[name] = item

    def __contains__(self, name):
        return self.data.__contains__(name)

    #@locked
    def setdefault(self, name, default):
        """ set default of name. """
        return self.data.setdefault(name, default)

    def has_key(self, name):
        """ check for a top-level key. """
        return self.data.has_key(name)

    def has_key2(self, name1, name2):
        """ check for a nested [name1][name2] key. """
        if self.data.has_key(name1):
            return self.data[name1].has_key(name2)

    def get(self, name1, name2):
        """ get data[name1][name2] .. None when either key is missing. """
        try:
            result = self.data[name1][name2]
            return result
        except KeyError:
            return None

    #@locked
    def set(self, name1, name2, item):
        """ set data[name1][name2] to item, creating the inner dict. """
        if not self.data.has_key(name1):
            self.data[name1] = {}
        self.data[name1][name2] = item
| Python |
# gozerlib/utils/id.py
#
#
from gozerlib.utils.generic import toenc
import uuid
def getrssid(url, time):
    """ return a stable uuid3-based id for an rss item (url + timestamp). """
    # NOTE(review): the 'time' parameter shadows the stdlib module name
    key = unicode(url) + unicode(time)
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, toenc(key)))
| Python |
# gozerlib/utils/log.py
#
#
""" log module. """
import logging
import sys
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def setloglevel(level_name):
    """ set the root logger's level by name ('debug' .. 'critical'). """
    # unknown names fall back to NOTSET
    level = LEVELS.get(level_name, logging.NOTSET)
    logging.info("setting loglevel to %s (%s)" % (str(level), level_name))
    logging.getLogger().setLevel(level)
| Python |
# gozerbot/dol.py
#
#
""" dict of lists """
__copyright__ = 'this file is in the public domain'
class Dol(dict):
    """ dol is a dict of lists.

    modernized: ``has_key`` (deprecated and removed in python 3) is
    replaced by the equivalent ``in`` operator; behavior is unchanged.
    """

    def insert(self, nr, item, issue):
        """ insert issue at position nr of item's list. always returns 1. """
        if item in self:
            self[item].insert(nr, issue)
        else:
            self[item] = [issue]
        return 1

    def add(self, item, issue):
        """ append issue to item's list. always returns 1. """
        if item in self:
            self[item].append(issue)
        else:
            self[item] = [issue]
        return 1

    def adduniq(self, item, issue):
        """ add issue to item only when not already there.

        :rtype: 1 when added, 0 when it was already present
        """
        if item in self and issue in self[item]:
            return 0
        self.add(item, issue)
        return 1

    def delete(self, item, number):
        """ delete entry <number> of item's list.

        :rtype: 1 on success, None when item or index is missing
        """
        number = int(number)
        if item in self:
            try:
                del self[item][number]
                return 1
            except IndexError:
                return None

    def remove(self, item, issue):
        """ remove issue (by value) from item's list.

        :rtype: 1 on success, None when issue is not in the list
        """
        try:
            self[item].remove(issue)
            return 1
        except ValueError:
            pass

    def has(self, item, issue):
        """ return 1 if item's list contains issue, None otherwise. """
        try:
            if issue in self[item]:
                return 1
            else:
                return None
        except KeyError:
            pass
| Python |
# gozerlib/utils/locking.py
#
#
""" generic functions """
## lib imports
from trace import whichmodule
from lockmanager import LockManager, RlockManager
## generic imports
import logging
import sys
## defines
locks = []
lockmanager = LockManager()
rlockmanager = RlockManager()
## classes
class Locked(object):
    """ class used to lock an entire object. UNTESTED"""

    def __getattribute__(self, attr):
        # every attribute access runs under the manager's reentrant lock
        where = whichmodule(1)
        logging.debug('locking - locking on %s' % where)
        # NOTE(review): the builtin ``object`` is passed as the lock key,
        # not ``self`` -- all Locked instances would share a single lock;
        # confirm whether that is intended
        rlockmanager.acquire(object)
        res = None
        try:
            res = super(Locked, self).__getattribute__(attr)
        finally:
            rlockmanager.release(object)
        return res
## functions
def lockdec(lock):
    """ decorator factory: wrap functions so they run holding <lock>. """
    def locked(func):
        # decorator for one specific function
        def lockedfunc(*args, **kwargs):
            """ the locked function. """
            caller = whichmodule(1)
            logging.debug('locking - locking on %s (%s)' % (caller, str(func)))
            lock.acquire()
            locks.append(str(func))
            res = None
            try:
                res = func(*args, **kwargs)
            finally:
                # always release and unregister, even on exceptions
                lock.release()
                locks.remove(str(func))
            return res
        return lockedfunc
    return locked
def funclocked(func):
    """ decorator that runs func under the rlockmanager keyed on func. """
    def lockedfunc(*args, **kwargs):
        """ the locked function. """
        caller = whichmodule(1)
        logging.debug('locking - locking on %s' % caller)
        rlockmanager.acquire(func)
        locks.append(str(func))
        res = None
        try:
            res = func(*args, **kwargs)
        finally:
            # always release and unregister, even on exceptions
            rlockmanager.release(func)
            locks.remove(str(func))
        return res
    return lockedfunc
| Python |
# gozerlib/utils/statdict.py
#
#
""" stats dict """
## classes
class StatDict(dict):
    """ dictionary for keeping counters/statistics.

    modernized: ``has_key``/``iteritems`` and the cmp-style sort are all
    removed in python 3; the ``in`` operator, ``items()`` and key-based
    sorting (available since python 2.4) behave identically.
    """

    def set(self, item, value):
        """ set item to value. """
        self[item] = value

    def upitem(self, item, value=1):
        """ increase item by value, initialising it when absent. """
        if item not in self:
            self[item] = value
            return
        self[item] += value

    def top(self, start=1, limit=None):
        """ return the items with value >= start, highest first.

        :param limit: optional maximum number of results
        :rtype: list of (item, value) tuples
        """
        result = [(item, value) for item, value in self.items() if value >= start]
        result.sort(key=lambda pair: pair[1], reverse=True)
        if limit:
            result = result[:limit]
        return result

    def down(self, end=100, limit=None):
        """ return the items with value <= end, lowest first.

        :param limit: optional maximum number of results
        :rtype: list of (item, value) tuples
        """
        result = [(item, value) for item, value in self.items() if value <= end]
        result.sort(key=lambda pair: pair[1])
        if limit:
            return result[:limit]
        else:
            return result
| Python |
# gozerlib/utils/name.py
#
#
""" name related helper functions. """
## basic imports
import string
import os
## define
allowednamechars = string.ascii_letters + string.digits + '!.@-' + os.sep
## functions
def stripname(name, allowed=""):
    """ replace every disallowed character of name by '-'.

    :param allowed: extra characters to accept besides allowednamechars
    """
    valid = allowednamechars + allowed
    return ''.join(ch if ord(ch) >= 31 and ch in valid else '-' for ch in name)
def testname(name):
    """ return True when name consists only of allowed characters.

    the original referenced the undefined name ``allowedchars``
    (NameError); the module constant is ``allowednamechars``.
    """
    for c in name:
        if c not in allowednamechars or ord(c) < 31:
            return False
    return True
| Python |
# gozerlib/utils/rsslist.py
#
#
""" create a list of rss data """
## lib imports
from exception import handle_exception
## basic imports
import xml.dom.minidom
## functions
def gettext(nodelist):
    """ concatenate the stripped text/cdata content of the given nodes. """
    chunks = []
    for node in nodelist:
        if node.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
            data = node.data.strip()
            if data:
                chunks.append(data)
    return ''.join(chunks)
def makersslist(xlist, nodes, d=None):
    """ recurse into the node tree, collecting rss element dicts into xlist.

    the original used a mutable default ``d={}``, which is shared across
    calls and accumulates stale keys from previous documents; a fresh
    dict is now created per call (explicitly passed dicts work as before).

    :param xlist: output list the element dicts are appended to
    :param nodes: dom nodes to walk
    :param d: dict for the current nesting level (internal)
    """
    if d is None:
        d = {}
    for node in nodes:
        if node.nodeType == node.ELEMENT_NODE:
            dd = d[node.nodeName] = {}
            makersslist(xlist, node.childNodes, dd)
            if dd:
                xlist.append(dd)
            # plain text content replaces the (empty) child dict
            txt = gettext(node.childNodes)
            if txt:
                d[node.nodeName] = txt
def rsslist(txt):
    """ parse an rss xml document into a list of data dictionaries. """
    items = []
    makersslist(items, xml.dom.minidom.parseString(txt).childNodes)
    return items
| Python |
# gozerbot/pdol.py
#
#
""" pickled dict of lists """
__copyright__ = 'this file is in the public domain'
from gozerlib.persist import Persist
class Pdol(Persist):
    """ pickled dict of lists, persisted through the Persist base class. """

    def __init__(self, fname):
        Persist.__init__(self, fname)
        # make sure there is a dict to work with after loading
        if not self.data:
            self.data = {}

    def __iter__(self, name):
        # NOTE(review): nonstandard __iter__ signature (extra name arg);
        # plain iter(pdol) will fail -- confirm callers use it this way
        return self.data[name].__iter__()

    def __getitem__(self, item):
        # returns None (not KeyError) when item is missing
        if self.data.has_key(item):
            return self.data[item]

    def __delitem__(self, item):
        if self.data.has_key(item):
            self.data.__delitem__(item)
            return 1

    def __setitem__(self, item, what):
        # setting appends to the item's list instead of replacing it
        if self.data.has_key(item):
            self.data[item].append(what)
        else:
            self.data[item] = [what]
        return 1

    def add(self, item, what):
        """ add what to item's list. """
        return self.__setitem__(item, what)

    def adduniq(self, item, what):
        """ add what to item's list only when not already present. """
        if not self.data.has_key(item):
            self.new(item)
        if what not in self.data[item]:
            return self.__setitem__(item, what)

    def get(self, item):
        """ get item's list. """
        return self.__getitem__(item)

    def new(self, what):
        """ reset the list of what to empty. """
        self.data[what] = []

    def delete(self, item, what):
        """ delete entry <what> of item's list.

        NOTE(review): 'what' is used as a list *index* here, unlike
        remove() which deletes by value -- confirm which is intended.
        """
        del self.data[item][what]

    def extend(self, item, what):
        """ extend item's list with the elements of what. """
        if not self.data.has_key(item):
            self.new(item)
        self.data[item].extend(what)

    def remove(self, item, what):
        """ remove what (by value) from item's list .. 1 on success, 0 otherwise. """
        try:
            self.data[item].remove(what)
            return 1
        except (ValueError, KeyError):
            return 0
| Python |
# lib/utils/url.py
#
# most code taken from maze
""" url related functions. """
## lib imports
from generic import fromenc
from gozerlib.config import cfg
## basic imports
import logging
import time
import sys
import re
import traceback
import Queue
import urllib
import urllib2
import urlparse
import socket
import random
import os
import sgmllib
import thread
import types
import httplib
import StringIO
import htmlentitydefs
import tempfile
import cgi
## defines
try:
import chardet
except ImportError:
chardet = None
class istr(str):
    # str subclass used so attributes (like .info in geturl2) can be
    # attached to returned strings .. plain str does not allow that
    pass
## functions
def useragent():
    """ build the http User-Agent string from the configured bot version. """
    name, version = cfg['version'].split()[0:2]
    return 'Mozilla/5.0 (compatible; %s %s; http://feedprovider.appspot.com)' % (name, version)
class CBURLopener(urllib.FancyURLopener):
    """ url opener that reports our own user agent string. """

    def __init__(self, version, *args):
        # fall back to the default agent string when no version is given
        self.version = version or useragent()
        urllib.FancyURLopener.__init__(self, *args)
def geturl(url, version=None):
    """ fetch an url with our CBURLopener and return the raw body. """
    urllib._urlopener = CBURLopener(version)
    logging.info('fetching %s' % url)
    response = urllib.urlopen(url)
    data = response.read()
    response.close()
    return data
def geturl2(url, decode=False):
    """ fetch an url via urllib2, optionally decoding with the detected charset.

    :rtype: istr with the http headers attached as .info
    """
    logging.info('fetching %s' % url)
    request = urllib2.Request(url)
    request.add_header('User-Agent', useragent())
    response = urllib2.build_opener().open(request)
    body = response.read()
    info = response.info()
    response.close()
    if decode:
        encoding = get_encoding(body)
        logging.info('%s encoding: %s' % (url, encoding))
        res = istr(fromenc(body, encoding, url))
    else:
        res = istr(body)
    # expose the http headers on the returned string
    res.info = info
    return res
def geturl3(url, myheaders={}, postdata={},keyfile='', certfile="", port=80):
    """ stub that just forwards to geturl2 .. NOT USED. """
    return geturl2(url)
def geturl4(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ fetch an url over httplib and return the raw response object.

    :param url: url to fetch
    :param myheaders: extra request headers merged into the defaults
    :param keyfile: use https with this client key (with certfile)
    :param port: port to use when the url does not specify one
    :rtype: httplib.HTTPResponse

    the original built the headers dict (including the User-Agent) but
    never passed it to the request; it is now sent. the bare except
    around the port parse is narrowed to the exceptions it can raise.
    """
    headers = {'Content-Type': 'text/html', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    # split host[:port] out of the parsed url
    urlparts = urlparse.urlparse(url)
    try:
        port = int(urlparts[1].split(':')[1])
        host = urlparts[1].split(':')[0]
    except (IndexError, ValueError):
        # no explicit port in the url .. keep the port argument
        host = urlparts[1]
    # set up the HTTP(S) connection
    if keyfile:
        connection = httplib.HTTPSConnection(host, port, keyfile, \
            certfile)
    elif 'https' in urlparts[0]:
        connection = httplib.HTTPSConnection(host, port)
    else:
        connection = httplib.HTTPConnection(host, port)
    # dicts are form-encoded (kept for signature compatibility)
    if type(postdata) == types.DictType:
        postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('GET', urlparts[2], headers=headers)
    # read the response and clean up
    return connection.getresponse()
def posturl(url, myheaders, postdata, keyfile=None, certfile="",port=80):
    """ do a basic HTTP POST and return the response object. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # https with a client certificate when a keyfile is given
    if keyfile:
        connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else:
        connection = httplib.HTTPConnection(urlparts[1])
    # dicts are form-encoded before posting
    if type(postdata) == types.DictType:
        postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('POST', urlparts[2], postdata, headers)
    return connection.getresponse()
def deleteurl(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ do a basic HTTP DELETE and return the response object. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # https needs both a key and a certificate here
    if keyfile and certfile:
        connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else:
        connection = httplib.HTTPConnection(urlparts[1])
    # dicts are form-encoded before sending
    if type(postdata) == types.DictType:
        postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('DELETE', urlparts[2], postdata, headers)
    return connection.getresponse()
def puturl(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ do a basic HTTP PUT and return the response object. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # https with a client certificate when a keyfile is given
    if keyfile:
        connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else:
        connection = httplib.HTTPConnection(urlparts[1])
    # dicts are form-encoded before sending
    if type(postdata) == types.DictType:
        postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('PUT', urlparts[2], postdata, headers)
    return connection.getresponse()
def getpostdata(event):
    """ extract the POST form data of the request in event as a plain dict. """
    ctype, pdict = cgi.parse_header(event.headers.getheader('content-type'))
    form = cgi.FieldStorage(fp=event.rfile, headers=event.headers, environ = {'REQUEST_METHOD':'POST'}, keep_blank_values = 1)
    # keep only the first value of each field
    return dict((name, form.getfirst(name)) for name in dict(form))
def decode_html_entities(s):
    """ smart decoding of html entities to a unicode string.

    named entities (&amp;) and numeric entities (&#38;) are both
    resolved; unknown named entities are left untouched.
    """
    re_ent_match = re.compile(u'&([^;]+);')
    re_entn_match = re.compile(u'&#([^;]+);')
    s = s.decode('utf-8', 'replace')
    def to_entn(match):
        """ resolve a named entity via htmlentitydefs, or keep it as-is. """
        if htmlentitydefs.entitydefs.has_key(match.group(1)):
            return htmlentitydefs.entitydefs[match.group(1)].decode('utf-8', \
                'replace')
        return match.group(0)
    def to_utf8(match):
        """ resolve a numeric entity to the corresponding character. """
        return unichr(long(match.group(1)))
    # named entities first, then numeric ones
    s = re_ent_match.sub(to_entn, s)
    s = re_entn_match.sub(to_utf8, s)
    return s
def get_encoding(data):
    """ determine the character encoding of fetched web data.

    tries, in order: the http content-type header (via the .info
    attribute geturl2 attaches), a <meta http-equiv> tag in the body,
    the optional chardet package, and finally the interpreter default.

    :param data: fetched page body, possibly an istr carrying .info
    :rtype: string .. encoding name
    """
    # first we try if we have the .info attribute to determine the encoding from
    if hasattr(data, 'info') and data.info.has_key('content-type') and \
        'charset' in data.info['content-type'].lower():
        charset = data.info['content-type'].lower().split('charset', 1)[1].\
            strip()
        # drop a leading '=' and anything after a ';'
        if charset[0] == '=':
            charset = charset[1:].strip()
        if ';' in charset:
            return charset.split(';')[0].strip()
        return charset
    # try to find the charset in the meta tags,
    # <meta http-equiv="content-type" content="text/html; charset=..." />
    if '<meta' in data.lower():
        metas = re.findall(u'<meta[^>]+>', data, re.I | re.M)
        if metas:
            for meta in metas:
                test_http_equiv = re.search('http-equiv\s*=\s*[\'"]([^\'"]+)[\'"]', meta, re.I)
                if test_http_equiv and test_http_equiv.group(1).lower() == 'content-type':
                    test_content = re.search('content\s*=\s*[\'"]([^\'"]+)[\'"]', meta, re.I)
                    if test_content:
                        test_charset = re.search('charset\s*=\s*([^\s\'"]+)', meta, re.I)
                        if test_charset:
                            return test_charset.group(1)
    # everything else failed, let's see if we can use chardet
    if chardet:
        test = chardet.detect(data)
        if test.has_key('encoding'):
            return test['encoding']
    # nothing found, let's fall back to the default encoding
    return sys.getdefaultencoding()
class Stripper(sgmllib.SGMLParser):
    """ sgml parser that discards markup and keeps only the text data. """

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)

    def strip(self, some_html):
        """ strip html .. returns the concatenated text content. """
        self.theString = ""
        self.feed(some_html)
        self.close()
        return self.theString

    def handle_data(self, data):
        """ data handler .. collect decoded text chunks. """
        self.theString += fromenc(data)
def striphtml(txt):
    """ return txt with all html markup removed. """
    return Stripper().strip(fromenc(txt))
| Python |
# gozerlib/monitor.py
#
#
""" monitor the bots output. """
## gozerlib imports
from gozerlib.config import cfg as config
from utils.exception import handle_exception
from utils.trace import calledfrom
from config import cfg as config
from threadloop import ThreadLoop
from runner import cbrunners
import threads as thr
## basic imports
import Queue
import sys
import logging
## classes
class Monitor(ThreadLoop):
    """ monitor base class. used as base class for jabber and irc
        output monitoring.

        :param name: name of the monitor
        :type name: string
    """

    def __init__(self, name="monitor"):
        ThreadLoop.__init__(self, name)
        # registered callbacks: [name, callback, precondition, threaded, active]
        self.outs = []

    def add(self, name, callback, pre, threaded=False):
        """ add a monitoring callback.

            :param name: name of the plugin using this monitor callback
            :type name: string
            :param callback: the callback to fire
            :type callback: function
            :param pre: precondition (function) to check if callback should fire
            :type pre: function
            :param threaded: whether callback should be called in its own thread
            :type threaded: boolean
            :rtype: boolean
        """
        # fall back to the calling plugin's name when none was given
        name = name or calledfrom(sys._getframe(0))
        # honour the configured plugin loadlist
        if config['loadlist'] and name not in config['loadlist']:
            return False
        # NOTE(review): new monitors start with active=False, so they only
        # fire after activate(name) is called -- confirm this is intended
        self.outs.append([name, callback, pre, threaded, False])
        logging.debug('irc - added monitor %s (%s)' % (name, str(callback)))
        return True

    def disable(self, name):
        """ set the active flag of all monitors of <name> to False. """
        name = name.lower()
        for i in range(len(self.outs)-1, -1, -1):
            if self.outs[i][0] == name:
                self.outs[i][4] = False

    def activate(self, name):
        """ set the active flag of all monitors of <name> to True. """
        name = name.lower()
        for i in range(len(self.outs)-1, -1, -1):
            if self.outs[i][0] == name:
                self.outs[i][4] = True

    def unload(self, name):
        """ delete monitor callback.

            :param name: name of the plugin which monitors need to be unloaded
            :type name: string
            :rtype: integer .. number of monitors removed
        """
        name = name.lower()
        nr = 0
        # iterate backwards so deletion does not shift pending indices
        for i in range(len(self.outs)-1, -1, -1):
            if self.outs[i][0] == name:
                del self.outs[i]
                nr += 1
        return nr

    def handle(self, *args, **kwargs):
        """ check if monitor callbacks need to be fired.

            :param args: arguments passed to the callback
            :type args: list
            :param kwargs: quoted arguments passed to the callback
            :type kwargs: dict
            :rtype: number of callbacks called
        """
        nr = 0
        for i in self.outs:
            # skip monitors that are not activated
            if not i[4]:
                continue
            # check if precondition is met
            try:
                if i[2]:
                    doit = i[2](*args, **kwargs)
                else:
                    doit = 1
            except Exception, ex:
                handle_exception()
                doit = 0
            if doit:
                # hand the callback to a runner, or to its own thread
                if not i[3]:
                    cbrunners[5].put("monitor-%s" % i[0], i[1], *args)
                else:
                    thr.start_new_thread(i[1], args, kwargs)
                nr += 1
        return nr
| Python |
# gozerbot/morphs.py
#
#
""" convert input/output stream. """
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.trace import calledfrom
## basic imports
import sys
## classes
class Morph(object):
    """
    transform stream.

    :param func: morphing function
    :type func: function
    """

    def __init__(self, func):
        # plugin that registered this morph, used by MorphList.unload
        self.plugname = calledfrom(sys._getframe(0))
        self.func = func
        # when False, do() becomes a no-op
        self.activate = True

    def do(self, *args, **kwargs):
        """
        do the morphing .. returns None when deactivated or on error.
        """
        if not self.activate:
            return
        try:
            return self.func(*args, **kwargs)
        except Exception, ex:
            handle_exception()
class MorphList(list):
    """ list of morphs, applied in order to an input/output stream.

    fixes three defects of the original: the indexed branch of add()
    instantiated the misspelled name ``Moprh`` (NameError), index=0 was
    treated as "append" because of a truthiness test, and activate()
    set the flag to False, making it identical to disable().
    """

    def add(self, func, index=None):
        """
        add a morph for func, optionally at a specific position.

        :param func: morphing function
        :type func: function
        :param index: index into the morphlist
        :type index: integer
        :rtype: self
        """
        if index is None:
            self.append(Morph(func))
        else:
            self.insert(index, Morph(func))
        return self

    def do(self, input, *args, **kwargs):
        """
        pass input through the whole morphing chain.

        a morph returning a falsy value leaves the input unchanged.

        :param input: data to do the morphing on
        :type input: string
        """
        for morph in self:
            input = morph.do(input, *args, **kwargs) or input
        return input

    def unload(self, plugname):
        """
        remove all morphs registered by plug <plugname>.

        :param plugname: the plugname to unload the morphs from
        :type plugname: string
        """
        for index in range(len(self)-1, -1, -1):
            if self[index].plugname == plugname:
                del self[index]

    def disable(self, plugname):
        """
        deactivate all morphs registered by plug <plugname>.

        :param plugname: the plugname to disable the morphs from
        :type plugname: string
        """
        for index in range(len(self)-1, -1, -1):
            if self[index].plugname == plugname:
                self[index].activate = False

    def activate(self, plugname):
        """
        reactivate all morphs registered by plug <plugname>.

        :param plugname: the plugname to activate the morphs from
        :type plugname: string
        """
        for index in range(len(self)-1, -1, -1):
            if self[index].plugname == plugname:
                self[index].activate = True
## INIT SECTION
# morphs used on input
inputmorphs = MorphList()
# morphs used on output
outputmorphs = MorphList()
## END INIT
| Python |
# gozerbot/periodical.py
#
#
__author__ = "Wijnand 'tehmaze' Modderman - http://tehmaze.com"
__license__ = "BSD License"
## gozerlib imports
from utils.exception import handle_exception
from utils.trace import calledfrom, whichmodule
from utils.locking import lockdec
from utils.timeutils import strtotime
import threads as thr
## basic imports
import datetime
import logging
import sys
import thread
import time
import types
## locks
plock = thread.allocate_lock()
locked = lockdec(plock)
pidcount = 0
## classes
class JobError(Exception):
    """
    raised when a job cannot be constructed (e.g. invalid start time).
    """
    pass
class Job(object):
    """
    base class for a job to be scheduled.
    """
    # group name, used by member() for bulk selection
    group = ''
    # unique job id, assigned from the module-wide counter in __init__
    pid = -1

    def __init__(self):
        global pidcount
        pidcount += 1
        self.pid = pidcount

    def id(self):
        """
        return job id.
        """
        return self.pid

    def member(self, group):
        """
        check for group membership.

        :param group: group to check for
        :type group: string
        :rtype: boolean
        """
        return self.group == group

    def do(self):
        """ run the job's callback, logging any exception it raises. """
        # self.func/args/kw are set by the subclasses (JobAt, JobInterval)
        try:
            self.func(*self.args, **self.kw)
        except Exception, ex:
            handle_exception()
class JobAt(Job):
    """
    job to run at a specific time/interval/repeat.

    :param start: start time (unix timestamp or a date/time string)
    :param interval: seconds between runs (days, when an int is given)
    :param repeat: number of repeats (0 or less runs forever)
    :param func: the function to execute
    :raises JobError: on an invalid start time or start type

    fixes two defects of the original: ``timedelta(days=n).seconds`` is
    only the sub-day remainder and therefore always 0, collapsing any
    integer interval to a zero delta; and an unsupported start type left
    ``self.next`` unset, causing an AttributeError later in check().
    """

    def __init__(self, start, interval, repeat, func, *args, **kw):
        Job.__init__(self)
        self.func = func
        self.args = args
        self.kw = kw
        self.repeat = repeat
        self.description = ""
        self.counts = 0
        # determine the first run time from the start argument
        if type(start) in [types.IntType, types.FloatType]:
            self.next = float(start)
        elif type(start) in [types.StringType, types.UnicodeType]:
            d = strtotime(start)
            if d and d > time.time():
                self.next = d
            else:
                raise JobError("invalid date/time")
        else:
            # the original fell through here with self.next unset
            raise JobError("invalid start type: %s" % type(start))
        # an int interval means a number of days
        if type(interval) in [types.IntType]:
            # the original used timedelta(days=interval).seconds, which is 0
            self.delta = interval * 24 * 60 * 60
        else:
            self.delta = interval

    def __repr__(self):
        """
        return a string representation of the JobAt object.
        """
        return '<JobAt instance next=%s, interval=%s, repeat=%d, function=%s>' % (str(self.next),
str(self.delta), self.repeat, str(self.func))

    def check(self):
        """
        run the job when it is due.

        :rtype: False when the job exhausted its repeats and must be
            removed from the scheduler, True otherwise
        """
        if self.next <= time.time():
            logging.debug('periodical - running %s - %s' % (str(self.func), self.description))
            self.func(*self.args, **self.kw)
            self.next += self.delta
            self.counts += 1
            if self.repeat > 0 and self.counts >= self.repeat:
                return False # remove this job
        return True
class JobInterval(Job):
    """
    job to be scheduled at certain interval.

    :param interval: time between alarms (in seconds)
    :type interval: integer
    :param repeat: number of repeats (0 .. repeat forever)
    :type repeat: integer
    :param func: the function to execute
    :type func: function
    """

    def __init__(self, interval, repeat, func, *args, **kw):
        Job.__init__(self)
        self.func = func
        self.args = args
        self.kw = kw
        self.repeat = int(repeat)
        self.counts = 0
        self.interval = float(interval)
        self.description = ""
        # first run is one full interval from now
        self.next = time.time() + self.interval
        # overrides the class level default of '' .. set by addjob()/decorators
        self.group = None

    def __repr__(self):
        """ return a string representation of the JobInterval object. """
        return '<JobInterval instance next=%s, interval=%s, repeat=%d, group=%s, \
function=%s>' % (str(self.next), str(self.interval), self.repeat, self.group,
str(self.func))

    def check(self):
        """
        run check to see if job needs to be scheduled .. returns False when
        the job is exhausted and should be removed.
        """
        if self.next <= time.time():
            logging.debug('periodical - running %s - %s' % (str(self.func), self.description))
            self.next = time.time() + self.interval
            # the callback runs in its own thread (see Job.do)
            thr.start_new_thread(self.do, ())
            self.counts += 1
        if self.repeat > 0 and self.counts >= self.repeat:
            return False # remove this job
        return True
class Periodical(object):
    """ periodical scheduler .. polls the joblist every SLEEPTIME seconds. """

    SLEEPTIME = 1 # smallest interval possible

    def __init__(self):
        self.jobs = []      # scheduled jobs
        self.running = []   # jobs that have been run at least once
        self.run = True     # main loop flag .. set False to stop checkloop

    def start(self):
        """
        start the periodical scheduler in its own thread.
        """
        thr.start_new_thread(self.checkloop, ())

    def addjob(self, sleeptime, repeat, function, description="" , *args, **kw):
        """
        add a periodical job.

        :param sleeptime: sleeptime between intervals
        :type sleeptime: float
        :param repeat: number of times to repeat (0 .. forever)
        :type repeat: integer
        :param function: function to execute
        :type function: function
        :param description: description of the periodical job
        :type description: string
        :rtype: integer .. the pid of the new job
        """
        job = JobInterval(sleeptime, repeat, function, *args, **kw)
        job.group = calledfrom(sys._getframe())
        job.description = str(description) or whichmodule()
        self.jobs.append(job)
        return job.pid

    def changeinterval(self, pid, interval):
        """
        change interval of a periodical job.

        :param pid: id of the periodical job
        :type pid: integer
        :param interval: interval to set
        :type interval: integer
        """
        # BUGFIX: used to iterate the module level "periodical" singleton ..
        # use this instance's own joblist so any Periodical instance works
        for job in self.jobs:
            if job.pid == pid:
                job.interval = interval
                job.next = time.time() + interval

    def looponce(self):
        """ do a single pass over the joblist. """
        for job in self.jobs:
            if job.next <= time.time():
                self.runjob(job)

    def checkloop(self):
        """
        main loop of the periodical scheduler.
        """
        while self.run:
            for job in self.jobs:
                if job.next <= time.time():
                    self.runjob(job)
            time.sleep(self.SLEEPTIME)

    def runjob(self, job):
        """
        run a periodical job .. remove it once its repeat count is reached.

        :param job: the job to be run
        :type job: Job
        """
        if not job.check():
            self.killjob(job.id())
        else:
            self.running.append(job)

    def kill(self):
        '''
        kill all jobs invoked by the calling module.
        '''
        group = calledfrom(sys._getframe())
        self.killgroup(group)

    def killgroup(self, group):
        '''
        kill all jobs with the same group.

        :param group: the group of jobs to kill
        :type group: string
        '''
        def shoot():
            """ knock down all jobs belonging to group. """
            deljobs = [job for job in self.jobs if job.member(group)]
            for job in deljobs:
                self.jobs.remove(job)
                try:
                    self.running.remove(job)
                except ValueError:
                    pass
            logging.debug('periodical - killed %d jobs for %s' % (len(deljobs), group))
            del deljobs
        shoot() # *pow* you're dead ;)

    def killjob(self, jobId):
        '''
        kill one job by its id.

        :param jobId: the id of the job to kill
        :type jobId: integer
        :rtype: integer .. number of jobs killed
        '''
        def shoot():
            deljobs = [x for x in self.jobs if x.id() == jobId]
            numjobs = len(deljobs)
            for job in deljobs:
                self.jobs.remove(job)
                try:
                    self.running.remove(job)
                except ValueError:
                    pass
            del deljobs
            return numjobs
        return shoot() # *pow* you're dead ;)
## functions
def interval(sleeptime, repeat=0):
    """
    interval decorator .. calling the decorated function schedules a job
    instead of running it directly.

    :param sleeptime: time to sleep between runs
    :type sleeptime: integer
    :param repeat: number of times to repeat the job (0 .. forever)
    :type repeat: integer
    """
    group = calledfrom(sys._getframe())
    def decorator(function):
        # NOTE(review): attributes are copied onto decorator here while at()
        # copies them onto wrapper .. looks inconsistent, confirm intent
        decorator.func_dict = function.func_dict
        def wrapper(*args, **kw):
            # schedule the job .. the function itself runs later from the loop
            job = JobInterval(sleeptime, repeat, function, *args, **kw)
            job.group = group
            job.description = whichmodule()
            periodical.jobs.append(job)
        return wrapper
    return decorator
def at(start, interval=1, repeat=1):
    """
    at decorator .. calling the decorated function schedules it to run at
    the given time.

    :param start: start time of the periodical job
    :type start: integer, float or string
    :param interval: time between jobs
    :type interval: integer
    :param repeat: number of times to repeat the job
    :type repeat: integer
    """
    group = calledfrom(sys._getframe())
    def decorator(function):
        decorator.func_dict = function.func_dict
        def wrapper(*args, **kw):
            # schedule the job .. the function itself runs later from the loop
            job = JobAt(start, interval, repeat, function, *args, **kw)
            job.group = group
            job.description = whichmodule()
            periodical.jobs.append(job)
        wrapper.func_dict = function.func_dict
        return wrapper
    return decorator
def persecond(function):
    """
    per second decorator .. schedule the decorated function to run every
    second.

    :param function: function to execute every second
    :type function: function
    """
    # BUGFIX: copied function attributes onto minutely() instead of
    # persecond() (copy-paste from the minutely decorator)
    persecond.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(1, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
    return wrapper
def minutely(function):
    """
    minute decorator .. schedule the decorated function to run every minute.

    :param function: function to execute every minute
    :type function: function
    """
    minutely.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(60, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
    return wrapper
def hourly(function):
    """
    hour decorator .. schedule the decorated function to run every hour.

    :param function: function to execute every hour
    :type function: function
    """
    hourly.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(3600, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
    return wrapper
def daily(function):
    """
    day decorator .. schedule the decorated function to run every day.

    :param function: function to execute every day
    :type function: function
    """
    daily.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(86400, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
    return wrapper
## define
# the periodical scheduler
periodical = Periodical()
| Python |
# gozerlib/wave/bot.py
#
#
""" google wave bot. """
## gozerlib imports
from gozerlib.persist import Persist
from gozerlib.botbase import BotBase
from gozerlib.plugins import plugs
from gozerlib.utils.generic import getversion
from gozerlib.callbacks import callbacks
from gozerlib.outputcache import add
from gozerlib.config import Config
from gozerlib.utils.locking import lockdec
## gaelib imports
from gozerlib.gae.utils.auth import finduser
from event import WaveEvent
from waves import Wave
## waveapi v2 imports
from waveapi import events
from waveapi import robot
from waveapi import element
from waveapi import ops
from waveapi import blip
from google.appengine.ext import webapp
from waveapi import appengine_robot_runner
from django.utils import simplejson
from google.appengine.api import urlfetch
import config.credentials as credentials
import google
import waveapi
## generic imports
import logging
import cgi
import os
import time
import thread
## defines

# waveid -> Wave mapping shared by all WaveBot instances
waves = {}

# lock serializing WaveBot.say() (see the @saylocked decorator)
saylock = thread.allocate_lock()
saylocked = lockdec(saylock)
## classes
class WaveBot(BotBase, robot.Robot):
    """
    bot to implement google wave stuff.

    :param name: bot's name
    :type param: string
    :param image_url: url pointing to the bots image
    :type image_url: string
    :param version: the bots version
    :type version: string
    :param profile_url: url pointing to the bots profile
    :type profile_url: string
    """

    def __init__(self, cfg=None, users=None, plugs=None, jid=None, domain=None,
                 image_url='http://feedprovider.appspot.com/assets/favicon.png',
                 profile_url='http://feedprovider.appspot.com/', *args, **kwargs):
        # the robot name is fixed to the appspot application name
        sname = 'feedprovider'
        BotBase.__init__(self, cfg, users, plugs, *args, **kwargs)
        self.type = 'wave'
        self.jid = jid
        if self.cfg:
            self.cfg['type'] = 'wave'
            self.cfg.save()
        self.type = "wave"
        # determine the wave domain .. the cfg argument wins over the
        # domain keyword argument
        if cfg:
            self.domain = cfg['domain'] or 'googlewave.com'
        else:
            self.domain = domain or 'googlewave.com'
        if self.cfg and self.cfg['domain'] != self.domain:
            self.cfg['domain'] = self.domain
            self.cfg.save()
        robot.Robot.__init__(self, name=sname, image_url=image_url, profile_url=profile_url)
        # oauth/verification credentials are kept per domain in
        # config.credentials
        self.set_verification_token_info(credentials.verification_token[self.domain], credentials.verification_secret[self.domain])
        self.setup_oauth(credentials.Consumer_Key[self.domain], credentials.Consumer_Secret[self.domain],
                         server_rpc_base=credentials.RPC_BASE[self.domain])
        # hook the waveapi events into this bot's handlers
        self.register_handler(events.BlipSubmitted, self.OnBlipSubmitted)
        self.register_handler(events.WaveletSelfAdded, self.OnSelfAdded)
        self.register_handler(events.WaveletParticipantsChanged, self.OnParticipantsChanged)
        self.iswave = True
        #self.channels = Persist("gozerstore" + os.sep + "fleet" + os.sep + self.name + os.sep + "channels")
        # module level waveid -> Wave mapping
        self.waves = waves

    def OnParticipantsChanged(self, event, wavelet):
        """ invoked when any participants have been added/removed. """
        wevent = WaveEvent()
        wevent.parse(self, event, wavelet)
        callbacks.check(self, wevent)

    def OnSelfAdded(self, event, wavelet):
        """ invoked when the robot has been added. """
        time.sleep(1)
        logging.warn('wave - joined "%s" (%s) wave' % (wavelet._wave_id, wavelet._title))
        wevent = WaveEvent()
        wevent.parse(self, event, wavelet)
        wevent.chan.save()
        wave = wevent.chan
        # show the running feeds (if any) and the clone number in the title
        if wave.data.feeds:
            wevent.set_title("FEEDPROVIDER - %s #%s" % (" - ".join(wave.data.feeds), str(wave.data.nrcloned)))
        else:
            wevent.set_title("FEEDPROVIDER - no feeds running - #%s" % str(wave.data.nrcloned))
        wevent.insert_root("\n")
        # drop the feed form gadget into the root blip
        wevent.insert_root(
            element.Gadget('http://feedprovider.appspot.com/feedform.xml'))
        #wevent.append(
        #    element.Installer('http://feedprovider.appspot.com/feeder.xml'))
        callbacks.check(self, wevent)

    def OnBlipSubmitted(self, event, wavelet):
        """ new blip added. here is where the command dispatching takes place. """
        wevent = WaveEvent()
        wevent.parse(self, event, wavelet)
        wevent.auth = wevent.userhost
        wave = wevent.chan
        wave.data.seenblips += 1
        wave.data.lastedited = time.time()
        self.doevent(wevent)

    @saylocked
    def say(self, waveid, txt):
        """
        output to the root id.

        :param waveid: id of the wave
        :type waveid: string
        :param txt: text to output
        :type txt: string
        :rtype: None
        """
        # make sure the rpc base matches the domain encoded in the wave id
        if not self.domain in self._server_rpc_base:
            rpc_base = credentials.RPC_BASE[waveid.split("!")[0]]
            self._server_rpc_base = rpc_base
            logging.warn("waves - %s - server_rpc_base is %s" % (waveid, self._server_rpc_base))
        wave = Wave(waveid)
        if wave and wave.data.waveid:
            wave.say(self, txt)
        else:
            logging.warn("we are not joined into %s" % waveid)

    def toppost(self, waveid, txt):
        """
        output to the root id.

        :param waveid: id of the wave
        :type waveid: string
        :param txt: text to output
        :type txt: string
        :rtype: None
        """
        if not self.domain in waveid:
            logging.warn("wave - not connected - %s" % waveid)
            return
        wave = Wave(waveid)
        if wave and wave.data.waveid:
            wave.toppost(self, txt)
        else:
            logging.warn("we are not joined to %s" % waveid)

    def newwave(self, domain=None, participants=None, submit=False):
        """
        create a new wave.
        """
        logging.warn("wave - new wave on domain %s" % domain)
        newwave = self.new_wave(domain or self.domain, participants=participants, submit=submit)
        return newwave

    def run(self):
        """ hand control to the waveapi appengine runner. """
        appengine_robot_runner.run(self, debug=True, extra_handlers=[])
| Python |
# gozerlib/wave/event.py
#
#
""" google wave events. """
## gozerlib imports
from gozerlib.eventbase import EventBase
from gozerlib.utils.exception import handle_exception
from gozerlib.gae.utils.auth import finduser
from gozerlib.gae.wave.waves import Wave
## basic imports
import logging
import cgi
import re
import time
## defines

# matches urls in outgoing text so they can be annotated as links
# NOTE(review): "(http://.*)?" also matches the empty string at every
# position .. write() skips those via its "if start" check - confirm
# before tightening this pattern
findurl = re.compile(u"(http://.*)?")
class NotConnected(Exception):
    """ not connected exception. """
class WaveEvent(EventBase):
""" a wave event. """
def __init__(self):
EventBase.__init__(self)
self.type = "wave"
self.msg = False
self.target = None
self.roottarget = None
self.rootreply = None
self.gadget = None
self.result = []
def parse(self, bot, event, wavelet):
""" parse properties and context into a WaveEvent. """
#logging.debug("WaveEvent created")
self.bot = bot
self.eventin = event
self.wavelet = wavelet
#logging.debug("eventin: %s" % dir(self.eventin))
#logging.debug("wavelet: %s" % dir(self.wavelet))
self.waveid = self.wavelet._wave_id
self.blipid = self.eventin.blip_id
self.blip = self.eventin.blip
self.chan = Wave(self.waveid)
self.chan.parse(self.eventin, self.wavelet)
if not self.blip:
logging.warn("can't get blip id: %s" % self.blipid)
self.contributors = []
self.txt = ""
self.cmnd = ""
self.userhost = ""
self.ispoller = False
else:
#logging.debug("blip: %s" % dir(self.blip))
self.contributors = self.blip._contributors
self.origtxt = self.blip._content
self.txt = self.origtxt.strip()
if len(self.txt) >= 2:
self.usercmnd = self.txt[1:].split()[0]
else:
self.usercmnd = None
#logging.debug("blipdata: %s" % self.txt)
self.userhost = self.blip._creator
self.elements = self.blip._elements
#logging.debug("elements: %s" % unicode(self.elements))
for nr, elem in self.elements.iteritems():
logging.debug("wave - element - %s - %s" % (str(elem), dir(elem)))
if elem.get('ispoller') == 'yes':
self.ispoller = True
if elem.get('gadgetcmnd') == 'yes':
self.cbtype = "GADGETCMND"
logging.debug("wave.event - gadgetcmnd - %s" % str(elem))
self.txt = u"!" + elem.get("cmnd")
self.channel = self.waveid = elem.get("waveid")
self.gadgetnr = nr
self.cmndhow = elem.get('how')
self.userhost = elem.get('who')
self.auth = self.userhost
logging.debug("wave - event - auth is %s" % self.auth)
self.root = wavelet
self.rootblipid = wavelet._root_blip.blip_id
#logging.debug("rootblip: %s" % self.rootblipid)
self.rootblip = wavelet._root_blip
#logging.debug("rootblip: %s" % dir(self.rootblip))
logging.debug("root: %s" % dir(self.root))
#logging.debug("raw_data: %s" % unicode(self.root._raw_data))
self.raw_data = self.root._raw_data
self.domain = self.wavelet.domain
self.channel = self.waveid
self.origin = self.channel
self.title = self.root._title or self.channel
self.cbtype = event.type
if 'sandbox' in self.waveid:
self.url = "https://wave.google.com/a/wavesandbox.com/#restored:wave:%s" % self.waveid.replace('w+','w%252B')
else:
self.url = "https://wave.google.com/wave/#restored:wave:%s" % self.waveid.replace('w+','w%252B')
self.makeargs()
logging.warn(u'wave - in - %s - %s - %s' % (self.title, self.userhost, self.txt))
def __deepcopy__(self, a):
""" deepcopy a wave event. """
e = WaveEvent()
e.copyin(self)
return e
def _raw(self, outtxt, root=None):
""" send raw text to the server .. creates a blip on the root. """
pass
#logging.info(u"wave - out - %s - %s" % (self.userhost, outtxt))
#self.append(outtxt)
#self.bot.outmonitor(self.origin, self.channel, outtxt)
def toppost(self, txt):
reply = self.rootblip.reply()
reply.append(txt)
if self.chan:
self.chan.data.seenblips += 1
self.chan.data.lastedited = time.time()
return reply
def insert_root(self, item):
reply = self.rootblip.append(item)
if self.chan:
self.chan.data.seenblips += 1
self.chan.data.lastedited = time.time()
return self
def set_title(self, title, cloned=False):
if cloned and self.chan and self.chan.data.nrcloned:
title = "#".join(title.split("#")[:-1])
title += "#%s" % str(self.chan.data.nrcloned)
logging.warn("wave - setting title - %s" % title)
self.root._set_title(title)
return self
def append(self, item, annotations=None):
if not self.target and self.blip:
self.target = self.blip.reply()
self.result.append(unicode(item))
try:
self.target.append(item)
except Exception, ex:
handle_exception()
logging.debug("wave - append - annotations are %s" % str(annotations))
if annotations:
for ann in annotations:
if ann[0]:
try:
self.target.range(ann[0], ann[1]).annotate(ann[2], ann[3])
except Exception, ex:
handle_exception()
if self.chan:
self.chan.data.seenblips += 1
self.chan.data.lastedited = time.time()
return self
def append_root(self, item , annotations=None):
if not self.roottarget:
self.roottarget = self.rootblip.reply()
self.roottarget.append(item)
self.result.append(unicode(item))
if self.chan:
self.chan.data.seenblips += 1
self.chan.data.lastedited = time.time()
return self.roottarget
def appendtopper(self, item):
self.rootblip.append(item)
self.result.append(unicode(item))
if self.chan:
self.chan.data.seenblips += 1
self.chan.data.lastedited = time.time()
return self.rootblip
def reply(self, txt, resultlist=[], nritems=False, dot=", ", *args, **kwargs):
""" reply to blip. """
if self.checkqueues(resultlist):
return
outtxt = self.makeresponse(txt, resultlist, nritems, dot, *args, **kwargs)
if not outtxt:
return
self.result.append(unicode(outtxt))
#self.doc.SetText(cgi.escape(outtxt))
(res1, res2) = self.less(outtxt)
self.write(res1)
if res2:
self.write(res2)
def replyroot(self, txt, resultlist=[], nritems=False, root=None, *args, **kwargs):
""" reply to wave root. """
if self.checkqueues(resultlist):
return
if resultlist:
outtxt = txt + u" " + u' .. '.join(resultlist)
else:
outtxt = txt
if not outtxt:
return
self.result.append(unicode(outtxt))
logging.debug("wave - reply root - %s - %s" % (self.root, root))
(res1, res2) = self.less(outtxt)
self.write_root(res1, root)
if res2:
self.write_root(res2, root)
def write(self, outtxt, end="\n"):
""" write outtxt to the server. """
logging.warn(u"wave - out - %s - %s" % (self.userhost, unicode(outtxt)))
try:
annotations = []
for url in re.findall(findurl, outtxt):
start = outtxt.find(url.strip())
if start:
annotations.append((start+1, start+len(url), "link/manual", url.strip()))
except Exception, ex:
handle_exception()
if self.gadgetnr:
if self.cmndhow == 'output':
self.blip.at(self.gadgetnr).update_element({'text': outtxt, 'target': self.userhost})
elif self.cmndhow == 'status':
self.blip.at(self.gadgetnr).update_element({'status': outtxt, 'target': self.userhost})
else:
self.append(outtxt + end , annotations)
self.replied = True
self.bot.outmonitor(self.origin, self.channel, outtxt, self)
def write_root(self, outtxt, end="\n", root=None):
""" write to the root of a wave. """
logging.warn(u"wave - out - %s - %s" % (self.userhost, unicode(outtxt)))
self.append_root(outtxt + end)
self.replied = True
self.bot.outmonitor(self.origin, self.channel, outtxt, self)
def submit(self):
self.bot.submit(self.wavelet)
| Python |
# gozerlib/wave/waves.py
#
#
""" class to repesent a wave. """
## gozerlib imports
from gozerlib.channelbase import ChannelBase
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.locking import lockdec
##
from simplejson import dumps
## google imports
import google
## basic imports
import logging
import copy
import os
import time
import re
import thread
## defines

# url matching regex used when annotating links in outgoing text
findurl = re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?/[-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]*[^]'\\.}>\\),\\\"]")

# shorthand for deepcopy
cpy = copy.deepcopy

# lock serializing Wave.say() (see the @saylocked decorator)
saylock = thread.allocate_lock()
saylocked = lockdec(saylock)
## classes
class Wave(ChannelBase):
""" a wave is seen as a channel. """
def __init__(self, waveid):
ChannelBase.__init__(self, 'gozerdata' + os.sep + 'waves' + os.sep + waveid)
self.data.seenblips = self.data.seenblips or 0
self.data.threshold = self.data.threshold or -1
self.data.nrcloned = self.data.nrcloned or 0
self.data.waveid = waveid
self.wavelet = None
self.event = None
logging.debug("created wave with id: %s" % waveid)
def parse(self, event, wavelet):
""" parse event into a Wave. """
self.data.json_data = event.json
self.data.title = wavelet._title
self.data.waveletid = wavelet._wavelet_id
self.wavelet = wavelet
self.event = event
logging.debug("parsed %s (%s) channel" % (self.data.waveid, self.data.title))
return self
def set_title(self, title, cloned=False):
self.event.set_title(title, cloned)
def clone(self, bot, event, title=None):
parts = list(event.root.participants)
newwave = bot.newwave(event.domain, parts)
logging.warn("wave - clone - populating wave with %s" % str(parts))
for id in parts:
newwave.participants.add(id)
if title:
if '#' in title:
title = "#".join(title.split("#")[:-1])
title += "#%s" % str(self.data.nrcloned + 1)
else:
title += " - #s" % str(self.data.nrcloned + 1)
newwave._set_title(title)
try:
txt = '\n'.join(event.rootblip.text.split('\n')[2:])
except IndexError:
txt = event.rootblip.text
newwave._root_blip.append(u'%s\n' % txt)
for element in event.rootblip.elements:
if element.type == 'GADGET':
newwave._root_blip.append(element)
#gadgetblip = newwave.reply()
#from waveapi.element import Gadget
#gadgetblip.append(Gadget("http://feedprovider.appspot.com/feedform.xml"))
blip = newwave.reply()
blip.append("\nthis wave is cloned from %s\n" % event.url)
wavelist = bot.submit(newwave)
logging.warn("wave - clone - %s - submit returned %s" % (list(newwave.participants), str(wavelist)))
if not wavelist:
logging.warn("submit of new wave failed")
return
try:
waveid = None
for item in wavelist:
try:
waveid = item['data']['waveId']
except (KeyError, ValueError):
continue
logging.warn("wave - newwave id is %s" % waveid)
if waveid and 'sandbox' in waveid:
url = "https://wave.google.com/a/wavesandbox.com/#restored:wave:%s" % waveid.replace('w+','w%252B')
else:
url = "https://wave.google.com/wave/#restored:wave:%s" % waveid.replace('w+','w%252B')
oldwave = Wave(event.waveid)
oldwave.data.threshold = -1
oldwave.save()
wave = Wave(waveid)
wave.parse(event, newwave)
wave.data.json_data = newwave.serialize()
wave.data.threshold = self.data.threshold or 200
wave.data.nrcloned = self.data.nrcloned + 1
wave.data.url = url
wave.save()
except Exception, ex:
handle_exception()
return
return wave
@saylocked
def say(self, bot, txt):
""" output some txt to the wave. """
if self.data.json_data:
logging.debug("wave - say - using BLIND - %s" % self.data.json_data)
wavelet = bot.blind_wavelet(self.data.json_data)
else:
logging.info("did not join channel %s" % self.id)
return
if not wavelet:
logging.error("cant get wavelet")
return
logging.warn('wave - out - %s - %s' % (self.data.title, txt))
try:
annotations = []
for url in txt.split():
if url.startswith("http://"):
logging.warn("wave - found url - %s" % str(url))
start = txt.find(url)
if start:
annotations.append((start+1, start+len(url), "link/manual", url))
except Exception, ex:
handle_exception()
logging.warn("annotations used: %s", annotations)
reply = wavelet.reply(txt)
if annotations:
for ann in annotations:
if ann[0]:
try:
reply.range(ann[0], ann[1]).annotate(ann[2], ann[3])
except Exception, ex:
handle_exception()
logging.warn("submitting to server: %s" % wavelet.serialize())
try:
bot.submit(wavelet)
except google.appengine.api.urlfetch_errors.DownloadError:
handle_exception()
#pass
self.data.seenblips += 1
self.data.lastedited = time.time()
self.save()
def toppost(self, bot, txt):
""" output some txt to the wave. """
if self.data.json_data:
logging.debug("wave - say - using BLIND - %s" % self.data.json_data)
wavelet = bot.blind_wavelet(self.data.json_data)
else:
logging.info("did not join channel %s" % self.id)
return
if not wavelet:
logging.error("cant get wavelet")
return
logging.warn('wave - out - %s - %s' % (self.data.title, txt))
try:
blip = wavelet._root_blip.reply()
blip.append(txt)
bot.submit(wavelet)
except google.appengine.api.urlfetch_errors.DownloadError:
handle_exception()
#pass
self.data.seenblips += 1
self.data.lastedited = time.time()
self.save()
| Python |
# gozerlib/utils/web.py
#
#
""" web related functions. """
## gozerlib imports
from gozerlib.utils.generic import fromenc, getversion
## gaelib imports
from auth import finduser
## google imports
from google.appengine.api import users as gusers
from google.appengine.ext.webapp import template
## basic imports
import os
import time
def mini(response, input=None):
    """ display start html so that bot output can follow.

        :param response: webapp response object written to
        :param input: optional extra template variables
        :type input: dict
    """
    # NOTE: default used to be a mutable dict ({}) .. use None to avoid the
    # shared default-argument pitfall
    inputdict = {'version': getversion()}
    if input:
        inputdict.update(input)
    temp = os.path.join(os.getcwd(), 'templates/mini.html')
    outstr = template.render(temp, inputdict)
    response.out.write(outstr)
def start(response, input=None):
    """ display start html so that bot output can follow.

        :param response: webapp response object written to
        :param input: optional extra template variables
        :type input: dict
    """
    # NOTE: default used to be a mutable dict ({}) .. use None to avoid the
    # shared default-argument pitfall
    inputdict = {'version': getversion()}
    if input:
        inputdict.update(input)
    temp = os.path.join(os.getcwd(), 'templates/start.html')
    outstr = template.render(temp, inputdict)
    response.out.write(outstr)
def commandbox(response, url="/dispatch/"):
    """ write the html form used to enter a command.

        :param url: target the form posts to
        :type url: string
    """
    form = """
<form action="%s" method="post">
<div><b>enter command:</b> <input type="commit" name="content"></div>
</form>
""" % url
    response.out.write(form)
def execbox(response, url="/exec/"):
    """ write html data for the exec box.

        NOTE(review): the url argument is currently unused .. the form
        submits back to the current page (action="") - confirm intent.
    """
    box = """
<form action="" method="GET">
<b>enter command:</b><input type="commit" name="input" value="">
// <input type="button" value="go" onClick="makePOSTRequest(this.form)"
</form>
"""
    response.out.write(box)
def closer(response):
    """ send closing html .. comes after the bot output.

        :param response: response object carrying a starttime attribute
    """
    response.out.write('</div><div class="footer">')
    # BUGFIX: '%4f' only set a minimum field width of 4 .. '%.4f' (elapsed
    # time with 4 decimals) was intended
    response.out.write('<b>%.4f seconds</b></div>' % (time.time() - response.starttime))
    response.out.write('</body></html>')
def loginurl(response):
    """ return the google login url. """
    url = gusers.create_login_url("/")
    return url
def logouturl(response):
    """ return the google logout url. """
    url = gusers.create_logout_url("/")
    return url
| Python |
# gozerlib/gae/utils/auth.py
#
#
""" google auth related functions. """
## gozerlib imports
from gozerlib.utils.trace import whichmodule
## google imports
from google.appengine.api import users as gusers
## basic imports
import logging
def finduser():
    """ return the email address of the currently logged in user or "". """
    current = gusers.get_current_user()
    if current:
        return current.email()
    return ""
def checkuser(response, request):
    """
    check for user based on web response. first try google
    otherwise return 'notauth@IP'

    :param response: response object
    :param request: request object
    :rtype: tuple of (userhost, gmail user, bot user, nick)
    """
    userhost = "notauth"
    u = "notauth"
    nick = "notauth"
    user = gusers.get_current_user()
    if not user:
        # not logged in through google .. construct an identity from the
        # request parameters and the remote address
        try:
            email = request.get('USER_EMAIL')
            if not email:
                email = "notauth"
            auth_domain = request.get('AUTH_DOMAIN')
            if not auth_domain:
                auth_domain = "nodomain"
            who = request.get('who')
            if not who:
                who = email
            userhost = nick = "%s!%s@%s" % (who, auth_domain, request.remote_addr)
        except KeyError:
            userhost = nick = "notauth@%s" % request.remote_addr
    else:
        userhost = user.email()
        if not userhost:
            userhost = nick = "notauth@%s" % request.remote_addr
        nick = user.nickname()
        u = userhost
    # log which module asked for the check (skip gozerlib internal frames)
    cfrom = whichmodule()
    if 'gozerlib' in cfrom:
        cfrom = whichmodule(1)
    if 'gozerlib' in cfrom:
        cfrom = whichmodule(2)
    logging.warn("auth - %s - %s - %s - %s - %s" % (cfrom, userhost, user, u, nick))
    return (userhost, user, u, nick)
| Python |
# gozerlib/web/bot.py
#
#
""" web bot. """
## gozerlib imports
from gozerlib.botbase import BotBase
from gozerlib.outputcache import add
class WebBot(BotBase):
    """ webbot .. replies are routed to the outputcache. """

    def __init__(self, cfg=None, users=None, plugs=None, *args, **kwargs):
        BotBase.__init__(self, cfg, users, plugs, *args, **kwargs)
        self.type = "web"
        if self.cfg:
            self.cfg['type'] = 'web'

    def say(self, channel, txt, *args, **kwargs):
        """ add txt to the outputcache of channel. """
        add(channel, [txt, ])

    # sayroot does exactly what say does
    sayroot = say
| Python |
# gozerlib/web/event.py
#
#
""" web event. """
## gozerlib imports
from gozerlib.eventbase import EventBase
from gozerlib.utils.generic import splittxt
from gozerlib.utils.xmpp import stripped
## gaelib imports
from gozerlib.gae.utils.auth import checkuser
from gozerlib.gae.wave.waves import Wave
## basic imports
import cgi
import logging
class WebEvent(EventBase):
    """ event originating from the web interface. """

    def __init__(self):
        EventBase.__init__(self)
        self.type = "web"

    def __deepcopy__(self, a):
        """ make a deepcopy of this WebEvent. """
        e = WebEvent()
        e.copyin(self)
        return e

    def parse(self, response, request):
        """ parse request/response into a WebEvent. """
        # the command comes in as "content" (POST) or as the query string
        input = request.get('content')
        if not input:
            input = request.get('QUERY_STRING')
        self.isweb = True
        self.origtxt = input.strip()
        self.txt = input
        self.usercmnd = self.txt and self.txt.split()[0]
        self.groupchat = False
        self.response = response
        self.request = request
        # determine identity via google login or request parameters
        (userhost, user, u, nick) = checkuser(response, request)
        self.user = user
        self.userhost = userhost
        self.nick = nick
        self.auth = userhost
        self.stripped = stripped(userhost)
        self.domain = None
        # when a waveid is passed the event comes from a wave gadget
        self.waveid = request.get('waveid')
        if self.waveid:
            self.isgadget = True
            wave = Wave(self.waveid)
            if wave:
                logging.warn('web - setting channel to %s - %s' % (self.waveid, wave.data.title))
            else:
                logging.warn('web - setting channel to %s' % self.waveid)
        if self.waveid:
            self.channel = self.waveid
            self.domain = self.waveid.split('!')[0]
        else:
            self.channel = stripped(userhost)
        self.makeargs()
        logging.warn(u'web - in - %s - %s' % (self.userhost, self.txt))
        return self

    def _raw(self, txt, end=""):
        """
        put txt onto the response object .. adding end string if provided.
        output is NOT escaped.
        """
        txt = unicode(txt)
        logging.info(u'web - out - %s - %s' % (self.userhost, txt))
        self.response.out.write(txt + end)
        self.bot.outmonitor(self.userhost, self.channel, txt, self)

    def write(self, txt, start=u"", end=u"<br>", raw=False):
        """
        put txt onto the response object .. adding end string if provided.
        output IS escaped unless raw is True.
        """
        if not raw:
            self._raw(start + cgi.escape(txt) + end)
        else:
            self._raw(start + txt + end)

    def makeresponse(self, txt, resultlist, nritems, dot, *args, **kwargs):
        """ construct a response string .. delegates to EventBase. """
        return EventBase.makeresponse(self, txt, resultlist, nritems, dot=dot, *args, **kwargs)

    def reply(self, txt, resultlist=[], nritems=False, dot=", ", raw=False, *args, **kwargs):
        """ send reply to the web user. """
        if self.checkqueues(resultlist):
            return
        result = self.makeresponse(txt, resultlist, nritems, dot, *args, **kwargs)
        # split overlong output in two parts
        (res1, res2) = self.less(result)
        self.write(res1, raw=raw)
        if res2:
            self.write(res2, raw=raw)
| Python |
# gaelib/plugs/gae.py
#
#
## lib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## commands
def handle_gaeflushcache(bot, ievent):
    """ flush the cache .. flush all with no arguments otherwise delete specific. """
    from google.appengine.api.memcache import flush_all, delete
    if not ievent.rest:
        flush_all()
    else:
        delete(ievent.rest)
    ievent.done()

cmnds.add('gae-flushcache', handle_gaeflushcache, 'OPER')
# BUGFIX: the example showed the stale 'admin-flushcache' command name
examples.add('gae-flushcache', 'flush the bots cache', 'gae-flushcache')
def handle_gaeadminstats(bot, ievent):
    """ show bot stats .. currently the memcache statistics. """
    from google.appengine.api.memcache import get_stats
    ievent.reply("cache: %s" % str(get_stats()))

cmnds.add('gae-stats', handle_gaeadminstats, 'OPER')
# BUGFIX: the example showed the stale 'admin-stats' command name
examples.add('gae-stats', 'show bots stats', 'gae-stats')
| Python |
# gozerlib/xmpp/bot.py
#
#
""" XMPP bot. """
## gozerlib imports
from gozerlib.botbase import BotBase
## google imports
from google.appengine.api import xmpp
## basic imports
import types
import logging
class XMPPBot(BotBase):
    """ XMPPBot just inherits from BotBase for now. """

    def __init__(self, cfg=None, users=None, plugs=None, jid="feedprovider@appspot.com", *args, **kwargs):
        BotBase.__init__(self, cfg, users, plugs, jid, *args, **kwargs)
        if self.cfg:
            self.cfg['type'] = 'xmpp'
        self.type = "xmpp"

    def say(self, jids, body, from_jid=None, message_type=xmpp.MESSAGE_TYPE_CHAT, raw_xml=False):
        """ output xmpp message to one jid or a list of jids. """
        # BUGFIX: type(jids) == types.StringType missed unicode jids, so a
        # single unicode jid was passed on unwrapped .. isinstance with
        # basestring catches both str and unicode
        if isinstance(jids, basestring):
            jids = [jids, ]
        logging.warn('xmpp - out - %s - %s' % (unicode(jids), unicode(body)))
        xmpp.send_message(jids, body, from_jid=from_jid, message_type=message_type, raw_xml=raw_xml)
| Python |
# gaelib/xmpp/event.py
#
#
""" an xmpp event. """
## gozerlib imports
from gozerlib.eventbase import EventBase
from gozerlib.utils.xmpp import stripped, resource
from gozerlib.utils.lazydict import LazyDict
## gaelibs imports
from gozerlib.gae.utils.auth import checkuser
## google imports
from google.appengine.api import xmpp
## basic imports
import cgi
import logging
class XMPPEvent(EventBase):
    """ an XMPP event. """

    def __init__(self):
        EventBase.__init__(self)
        self.type = "xmpp"
        self.cbtype = 'MESSAGE'

    def __deepcopy__(self, a):
        """ make a deepcopy of this XMPPEvent. """
        return XMPPEvent().copyin(self)

    def _raw(self, txt):
        """ output data to user. txt is NOT escaped. """
        txt = unicode(txt)
        # BUGFIX: the % operator was missing here, so this line raised
        # TypeError ("'unicode' object is not callable") on every call
        logging.warn(u"xmpp - out - %s - %s" % (self.userhost, txt))
        if txt:
            xmpp.send_message([self.userhost, ], txt)
        self.bot.outmonitor(self.origin, self.userhost, txt, self)

    def parse(self, request, response):
        """ parse incoming request/response into a XMPPEvent. """
        self.copyin(LazyDict(request.POST))
        (userhost, user, u, nick) = checkuser(response, request)
        self.userhost = stripped(self['from'])
        self.origin = self.channel
        if user:
            self.auth = user.email()
        else:
            self.auth = self.userhost
        logging.warn('xmpp - auth is %s' % self.auth)
        self.resource = resource(self['from'])
        self.jid = self['from']
        self.to = stripped(self['to'])
        self.channel = self.userhost
        self.origin = self.channel
        input = self.body
        self.origtxt = input
        # strip a leading "!" command prefix
        if len(input) > 1 and input[0] == '!':
            input = input[1:]
        self.txt = input
        self.usercmnd = self.txt.split()[0]
        self.makeargs()
        logging.warn(u'xmpp - in - %s - %s' % (self.userhost, self.txt))
        return self

    def reply(self, txt, resultlist=None, nritems=False, dot=", ", raw=False, *args, **kwargs):
        """ reply with txt and optional resultlist. result lists can be
            passed on onto the events queues.
        """
        # BUGFIX: resultlist used to default to a shared mutable []
        if resultlist is None:
            resultlist = []
        if self.checkqueues(resultlist):
            return
        result = self.makeresponse(txt, resultlist, nritems, dot, *args, **kwargs)
        (res1, res2) = self.less(result)
        self.write(res1, raw)
        if res2:
            self.write(res2, raw)

    def write(self, txt, raw=False):
        """ output txt to the user .. output IS escaped unless raw is set. """
        if txt:
            txt = unicode(txt)
            logging.warn(u"xmpp - out - %s - %s" % (self.userhost, txt))
            if not raw:
                xmpp.send_message([self.userhost, ], cgi.escape(txt))
            else:
                xmpp.send_message([self.userhost, ], txt)
            self.bot.outmonitor(self.origin, self.userhost, txt, self)
| Python |
# gozerlib/plugs/user.py
#
#
""" users related commands. """
## gozerlib imports
from gozerlib.utils.generic import getwho
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.name import stripname
from gozerlib.users import users
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## commands
def handle_whoami(bot, ievent):
    """ show the username of the caller. """
    username = bot.users.getname(ievent.auth)
    ievent.reply('%s' % username)

cmnds.add('user-whoami', handle_whoami, ['USER', 'GUEST'])
examples.add('user-whoami', 'get your username', 'user-whoami')
def handle_meet(bot, ievent):
    """ <nick> .. introduce a new user to the bot. """
    try:
        nick = ievent.args[0].lower()
    except IndexError:
        ievent.missing('<nick>')
        return
    if bot.users.exists(nick):
        ievent.reply('there is already a user with username %s' % nick)
        return
    userhost = getwho(bot, nick)
    if not userhost:
        ievent.reply("can't find userhost of %s" % nick)
        return
    username = bot.users.getname(userhost)
    if username:
        ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
        return
    # removed dead "result = 0" .. the add() call below always sets result
    name = stripname(nick.lower())
    result = bot.users.add(name, [userhost, ], ['USER', ])
    if result:
        ievent.reply('%s (%s) added to user database' % (nick, name))
    else:
        ievent.reply('add failed')

cmnds.add('user-meet', handle_meet, ['OPER', 'MEET'])
examples.add('user-meet', '<nick> .. introduce <nick> to the bot', 'user-meet dunker')
def handle_adduser(bot, ievent):
    """ user-add <name> <userhost> .. introduce a new user to the bot. """
    try:
        (name, userhost) = ievent.args
    except ValueError:
        ievent.missing('<name> <userhost>')
        return
    username = bot.users.getname(userhost)
    if username:
        ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
        return
    # removed dead "result = 0" .. the add() call below always sets result
    name = stripname(name.lower())
    result = bot.users.add(name, [userhost, ], ['USER', ])
    if result:
        ievent.reply('%s added to user database' % name)
    else:
        ievent.reply('add failed')

cmnds.add('user-add', handle_adduser, 'OPER')
examples.add('user-add', '<name> <userhost> .. add <name> with \
<userhost> to the bot', 'user-add dunker bart@localhost')
def handle_merge(bot, ievent):
    """ <name> <nick> .. merge the userhost of <nick> into existing user <name>. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <nick>')
        return
    (name, nick) = ievent.args
    name = name.lower()
    # merging into an OPER account requires OPER rights on the caller
    if bot.users.gotperm(name, 'OPER') and not bot.users.allowed(ievent.userhost, 'OPER'):
        ievent.reply("only OPER perm can merge with OPER user")
        return
    if name == 'owner' and not bot.ownercheck(ievent.userhost):
        ievent.reply("you are not the owner")
        return
    if not bot.users.exists(name):
        ievent.reply("we have no user %s" % name)
        return
    userhost = getwho(bot, nick)
    if not userhost:
        ievent.reply("can't find userhost of %s" % nick)
        return
    if bot.ownercheck(userhost):
        ievent.reply("can't merge with owner")
        return
    known = bot.users.getname(userhost)
    if known:
        ievent.reply('we already have a user with userhost %s (%s)' % (userhost, known))
        return
    if bot.users.merge(name, userhost):
        ievent.reply('%s merged' % nick)
    else:
        ievent.reply('merge failed')

cmnds.add('user-merge', handle_merge, ['OPER', 'MEET'])
examples.add('user-merge', '<name> <nick> .. merge record with <name> with userhost from <nick>', 'user-merge bart dunker')
def handle_import(bot, ievent):
    """ user-import <userhost> .. merge a userhost into the user giving the command. """
    if len(ievent.args) != 1:
        ievent.missing('<userhost>')
        return
    userhost = ievent.args[0]
    if bot.ownercheck(userhost):
        ievent.reply("can't merge owner")
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("i don't know you %s" % ievent.userhost)
        return
    if bot.users.merge(name, userhost):
        ievent.reply('%s imported' % userhost)
    else:
        ievent.reply('import failed')

cmnds.add('user-import', handle_import, ['IMPORT', 'OPER'])
examples.add('user-import', 'user-import <userhost> .. merge record with \
<name> with userhost from the person giving the command (self merge)', 'user-import bthate@gmail.com')
def handle_delete(bot, ievent):
    """ <name> .. remove a user (owner only). """
    if not bot.ownercheck(ievent.userhost):
        ievent.reply('only owner can use delete')
        return
    if len(ievent.args) == 0:
        ievent.missing('<name>')
        return
    # normalize once .. the old code lowercased the name three times
    name = stripname(ievent.args[0].lower())
    try:
        result = bot.users.delete(name)
        if result:
            ievent.reply('%s deleted' % name)
        else:
            # report failure instead of staying silent
            ievent.reply('delete failed')
    except KeyError:
        ievent.reply('no %s item in database' % name)

cmnds.add('user-del', handle_delete, 'OPER')
examples.add('user-del', 'user-del <name> .. delete user with <username>' , 'user-del dunker')
def handle_userscan(bot, ievent):
    """ <txt> .. scan the usernames for a substring. """
    try:
        target = ievent.args[0].lower()
    except IndexError:
        ievent.missing('<txt>')
        return
    # use the "in" operator instead of find() != -1 and drop the dead
    # trailing return the old loop-and-append version carried
    result = [username for username in bot.users.names() if target in username]
    if result:
        ievent.reply("users matching %s: " % target, result)
    else:
        ievent.reply('no users matched')

cmnds.add('user-scan', handle_userscan, 'OPER')
examples.add('user-scan', '<txt> .. search database for matching usernames', 'user-scan dunk')
def handle_names(bot, ievent):
    """ list all registered usernames. """
    registered = bot.users.names()
    ievent.reply("usernames: ", registered)

cmnds.add('user-names', handle_names, 'OPER')
examples.add('user-names', 'show names of registered users', 'user-names')
def handle_name(bot, ievent):
    """ user-name .. show the username of the caller. """
    username = bot.users.getname(ievent.userhost)
    ievent.reply('your name is %s' % username)

cmnds.add('user-name', handle_name, ['USER', 'GUEST'])
examples.add('user-name', 'show name of user giving the commands', 'user-name')
def handle_getname(bot, ievent):
    """ <nick> .. look up the username belonging to <nick>. """
    if not ievent.args:
        ievent.missing("<nick>")
        return
    nick = ievent.args[0]
    userhost = getwho(bot, nick)
    if not userhost:
        ievent.reply("can't find userhost of %s" % nick)
        return
    username = bot.users.getname(userhost)
    if not username:
        ievent.reply("can't find user for %s" % userhost)
        return
    ievent.reply(username)

cmnds.add('user-getname', handle_getname, ['USER', ])
examples.add('user-getname', 'user-getname <nick> .. get the name of <nick>', 'user-getname dunker')
def handle_addperm(bot, ievent):
    """ <name> <perm> .. add a permission to a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <perm>')
        return
    (name, perm) = ievent.args
    perm = perm.upper()
    name = name.lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    if bot.users.gotperm(name, perm):
        ievent.reply('%s already has permission %s' % (name, perm))
        return
    # removed dead "result = 0" assignment .. adduserperm() sets result
    result = bot.users.adduserperm(name, perm)
    if result:
        ievent.reply('%s perm added' % perm)
    else:
        ievent.reply('perm add failed')

cmnds.add('user-addperm', handle_addperm, 'OPER')
examples.add('user-addperm', 'user-addperm <name> <perm> .. add permissions to user <name>', 'user-addperm dunker rss')
def handle_getperms(bot, ievent):
    """ <name> .. show the permissions of <name>. """
    if not ievent.args:
        ievent.missing('<name>')
        return
    name = ievent.args[0].lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    perms = bot.users.getuserperms(name)
    if perms:
        ievent.reply("permissions of %s: " % name, perms)
    else:
        ievent.reply('%s has no permissions set' % name)

cmnds.add('user-getperms', handle_getperms, 'OPER')
examples.add('user-getperms', 'user-getperms <name> .. get permissions of <name>', 'user-getperms dunker')
def handle_perms(bot, ievent):
    """ show the permissions of the user giving the command. """
    if ievent.rest:
        ievent.reply("use getperms to get the permissions of somebody else")
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    perms = bot.users.getuserperms(name)
    if perms:
        ievent.reply("you have permissions: ", perms)
    else:
        # the old code stayed silent here .. report the empty case as well
        ievent.reply("you have no permissions set")

cmnds.add('user-perms', handle_perms, ['USER', 'GUEST'])
examples.add('user-perms', 'get permissions', 'user-perms')
def handle_delperm(bot, ievent):
    """ <name> <perm> .. remove a permission from a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <perm>')
        return
    (name, perm) = ievent.args
    name = name.lower()
    perm = perm.upper()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    if bot.users.deluserperm(name, perm):
        ievent.reply('%s perm removed' % perm)
    else:
        ievent.reply("%s has no %s permission" % (name, perm))

cmnds.add('user-delperm', handle_delperm, 'OPER')
examples.add('user-delperm', 'delete from user <name> permission <perm>', 'user-delperm dunker rss')
def handle_addstatus(bot, ievent):
    """ <name> <status> .. add a status to a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <status>')
        return
    (name, status) = ievent.args
    name = name.lower()
    status = status.upper()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    if bot.users.gotstatus(name, status):
        ievent.reply('%s already has status %s' % (name, status))
        return
    if bot.users.adduserstatus(name, status):
        ievent.reply('%s status added' % status)
    else:
        ievent.reply('add failed')

cmnds.add('user-addstatus', handle_addstatus, 'OPER')
examples.add('user-addstatus', 'user-addstatus <name> <status>', 'user-addstatus dunker #dunkbots')
def handle_getstatus(bot, ievent):
    """ <name> .. show the statuses of <name>. """
    if not ievent.args:
        ievent.missing('<name>')
        return
    name = ievent.args[0].lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    statuses = bot.users.getuserstatuses(name)
    if statuses:
        ievent.reply("status of %s: " % name, statuses)
    else:
        ievent.reply('%s has no status set' % name)

cmnds.add('user-getstatus', handle_getstatus, 'OPER')
examples.add('user-getstatus', 'user-getstatus <name> .. get status of <name>', 'user-getstatus dunker')
def handle_status(bot, ievent):
    """ show the statuses of the user giving the command. """
    statuses = bot.users.getstatuses(ievent.userhost)
    if not statuses:
        ievent.reply('you have no status set')
        return
    ievent.reply("you have status: ", statuses)

cmnds.add('user-status', handle_status, ['USER', 'GUEST'])
examples.add('user-status', 'get status', 'user-status')
def handle_delstatus(bot, ievent):
    """ <name> <status> .. remove a status from a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <status>')
        return
    (name, status) = ievent.args
    name = name.lower()
    status = status.upper()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    if bot.users.deluserstatus(name, status):
        ievent.reply('%s status deleted' % status)
    else:
        ievent.reply("%s has no %s status" % (name, status))

cmnds.add('user-delstatus', handle_delstatus, 'OPER')
examples.add('user-delstatus', '<name> <status>', 'user-delstatus dunker #dunkbots')
def handle_adduserhost(bot, ievent):
    """ <name> <userhost> .. add a userhost to a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <userhost>')
        return
    (name, userhost) = ievent.args
    name = name.lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    if bot.users.gotuserhost(name, userhost):
        ievent.reply('%s already has userhost %s' % (name, userhost))
        return
    if bot.users.adduserhost(name, userhost):
        ievent.reply('userhost added')
    else:
        ievent.reply('add failed')

cmnds.add('user-adduserhost', handle_adduserhost, 'OPER')
examples.add('user-adduserhost', 'user-adduserhost <name> <userhost>', 'user-adduserhost dunker bart@%.a2000.nl')
def handle_deluserhost(bot, ievent):
    """ <name> <userhost> .. remove a userhost from a user. """
    if len(ievent.args) != 2:
        ievent.missing('<name> <userhost>')
        return
    (name, userhost) = ievent.args
    name = name.lower()
    if bot.ownercheck(userhost):
        # message fixed: it used to read "can delete ..", dropping the "'t"
        ievent.reply("can't delete userhosts from owner")
        return
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    result = bot.users.deluserhost(name, userhost)
    if result:
        ievent.reply('userhost removed')
    else:
        ievent.reply("%s has no %s in userhost list" % (name, userhost))

cmnds.add('user-deluserhost', handle_deluserhost, 'OPER')
examples.add('user-deluserhost', 'user-deluserhost <name> <userhost> .. delete from usershosts of <name> userhost <userhost>','user-deluserhost dunker bart1@bla.a2000.nl')
def handle_getuserhosts(bot, ievent):
    """ <name> .. show the userhosts of <name>. """
    if not ievent.args:
        ievent.missing('<name>')
        return
    who = ievent.args[0].lower()
    userhosts = bot.users.getuserhosts(who)
    if userhosts:
        ievent.reply("userhosts of %s: " % who, userhosts)
    else:
        ievent.reply("can't find user %s" % who)

cmnds.add('user-getuserhosts', handle_getuserhosts, 'OPER')
examples.add('user-getuserhosts', 'user-getuserhosts <name> .. get userhosts of <name>', 'user-getuserhosts dunker')
def handle_userhosts(bot, ievent):
    """ show the userhosts of the user giving the command. """
    userhosts = bot.users.gethosts(ievent.userhost)
    if userhosts:
        ievent.reply("you have userhosts: ", userhosts)
    else:
        # the old code stayed silent when nothing was known .. report it
        ievent.reply("no userhosts found for %s" % ievent.userhost)

cmnds.add('user-userhosts', handle_userhosts, ['USER', 'GUEST'])
examples.add('user-userhosts', 'get userhosts', 'user-userhosts')
def handle_getemail(bot, ievent):
    """ <name> .. show the email address of <name>. """
    if not ievent.args:
        ievent.missing('<name>')
        return
    name = ievent.args[0].lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    email = bot.users.getuseremail(name)
    if email:
        ievent.reply(email)
    else:
        ievent.reply('no email set')

cmnds.add('user-getemail', handle_getemail, ['USER', ])
examples.add('user-getemail', 'user-getemail <name> .. get email from user <name>', 'user-getemail dunker')
def handle_setemail(bot, ievent):
    """ <name> <email> .. set the email address of a user. """
    try:
        (name, email) = ievent.args
    except ValueError:
        ievent.missing('<name> <email>')
        return
    # lowercase the username like every other user-* command does, so
    # "user-setemail Dunker .." targets the same record as "user-meet dunker"
    name = name.lower()
    if not bot.users.exists(name):
        ievent.reply("can't find user %s" % name)
        return
    bot.users.setemail(name, email)
    ievent.reply('email set')

cmnds.add('user-setemail', handle_setemail, 'OPER')
examples.add('user-setemail', 'user-setemail <name> <email>.. set email of user <name>', 'user-setemail dunker bart@gozerbot.org')
def handle_email(bot, ievent):
    """ show the email address of the user giving the command. """
    if len(ievent.args) != 0:
        ievent.reply('use getemail to get the email address of an user .. email shows your own mail address')
        return
    email = bot.users.getemail(ievent.userhost)
    if not email:
        ievent.reply('no email set')
        return
    ievent.reply(email)

cmnds.add('user-email', handle_email, ['USER', 'GUEST'])
examples.add('user-email', 'get email', 'user-email')
def handle_delemail(bot, ievent):
    """ remove all email addresses of the user giving the command. """
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find user for %s" % ievent.userhost)
        return
    if bot.users.delallemail(name):
        ievent.reply('email removed')
    else:
        ievent.reply('delete failed')

cmnds.add('user-delemail', handle_delemail, 'OPER')
examples.add('user-delemail', 'reset email', 'user-delemail')
def handle_addpermit(bot, ievent):
    """ <name> <permit> .. allow <name> to do <permit> on behalf of the caller. """
    try:
        (who, what) = ievent.args
    except ValueError:
        ievent.missing("<name> <permit>")
        return
    if not bot.users.exists(who):
        ievent.reply("can't find username of %s" % who)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("i dont know %s" % ievent.userhost)
        return
    # NOTE(review): gotpermit takes a (who, what) tuple here while
    # adduserpermit takes separate arguments .. verify against users api
    if bot.users.gotpermit(name, (who, what)):
        ievent.reply('%s is already allowed to do %s' % (who, what))
        return
    if bot.users.adduserpermit(name, who, what):
        ievent.reply('permit added')
    else:
        ievent.reply('add failed')

cmnds.add('user-addpermit', handle_addpermit, ['USER', 'GUEST'])
examples.add('user-addpermit', 'user-addpermit <nick> <what> .. permit nick access to <what> .. use setperms to add permissions', 'user-addpermit dunker todo')
def handle_permit(bot, ievent):
    """ show what the user giving the command permits to others. """
    if ievent.rest:
        ievent.reply("use the user-addpermit command to allow somebody something .. use getname <nick> to get the username of somebody .. this command shows what permits you have")
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find user for %s" % ievent.userhost)
        return
    permits = bot.users.getuserpermits(name)
    if not permits:
        ievent.reply("you don't have any permits")
        return
    ievent.reply("you permit the following: ", permits)

cmnds.add('user-permit', handle_permit, ['USER', 'GUEST'])
examples.add('user-permit', 'show permit of user giving the command', 'user-permit')
def handle_userdelpermit(bot, ievent):
    """ <name> <permit> .. revoke a permit given to <name>. """
    try:
        (who, what) = ievent.args
    except ValueError:
        ievent.missing("<name> <permit>")
        return
    if not bot.users.exists(who):
        ievent.reply("can't find registered name of %s" % who)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("i don't know you %s" % ievent.userhost)
        return
    if not bot.users.gotpermit(name, (who, what)):
        ievent.reply('%s is already not allowed to do %s' % (who, what))
        return
    if bot.users.deluserpermit(name, (who, what)):
        ievent.reply('%s denied' % what)
    else:
        ievent.reply('delete failed')

cmnds.add('user-delpermit', handle_userdelpermit, ['USER', 'GUEST'])
examples.add('user-delpermit', 'user-delpermit <name> <permit>', 'user-delpermit dunker todo')
def handle_check(bot, ievent):
    """ <nick> .. show the full user record belonging to <nick>. """
    if not ievent.args:
        ievent.missing('<nick>')
        return
    nick = ievent.args[0]
    userhost = getwho(bot, nick)
    if not userhost:
        ievent.reply("can't find userhost of %s" % nick)
        return
    name = bot.users.getname(userhost)
    if not name:
        ievent.reply("can't find user")
        return
    userhosts = bot.users.getuserhosts(name)
    perms = bot.users.getuserperms(name)
    email = bot.users.getuseremail(name)
    permits = bot.users.getuserpermits(name)
    status = bot.users.getuserstatuses(name)
    ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, str(userhosts), str(perms), str(email), str(permits), str(status)))

cmnds.add('user-check', handle_check, 'OPER')
examples.add('user-check', 'user-check <nick>', 'user-check dunker')
def handle_show(bot, ievent):
    """ <name> .. show the stored user record of <name>. """
    if not ievent.args:
        ievent.missing('<name>')
        return
    name = ievent.args[0].lower()
    user = bot.users.byname(name)
    if not user:
        ievent.reply("can't find user %s" % name)
        return
    record = user.data
    ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, str(record.userhosts), str(record.perms), str(record.email), str(record.permits), str(record.status)))

cmnds.add('user-show', handle_show, 'OPER')
examples.add('user-show', 'user-show <name> .. show data of <name>', 'user-show dunker')
def handle_match(bot, ievent):
    """ user-match <userhost> .. show the user record matching <userhost>. """
    if not ievent.args:
        ievent.missing('<userhost>')
        return
    userhost = ievent.args[0]
    user = bot.users.getuser(userhost)
    if not user:
        ievent.reply("can't find user with userhost %s" % userhost)
        return
    record = user.data
    ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (userhost, str(record.userhosts), str(record.perms), str(record.email), str(record.permits), str(record.status)))

cmnds.add('user-match', handle_match, ['OPER', ])
examples.add('user-match', 'user-match <userhost>', 'user-match test@test')
def handle_getuserstatus(bot, ievent):
    """ <status> .. list all users having <status>. """
    if not ievent.args:
        ievent.missing('<status>')
        return
    status = ievent.args[0].upper()
    found = bot.users.getstatususers(status)
    if found:
        ievent.reply("users with %s status: " % status, found)
    else:
        ievent.reply("no users with %s status found" % status)

cmnds.add('user-allstatus', handle_getuserstatus, 'OPER')
examples.add('user-allstatus', 'user-allstatus <status> .. get all users with <status> status', 'user-allstatus #dunkbots')
def handle_getuserperm(bot, ievent):
    """ <perm> .. list all users having permission <perm>. """
    if not ievent.args:
        ievent.missing('<perm>')
        return
    perm = ievent.args[0].upper()
    found = bot.users.getpermusers(perm)
    if found:
        ievent.reply('users with %s permission: ' % perm, found)
    else:
        ievent.reply("no users with %s permission found" % perm)

cmnds.add('user-allperm', handle_getuserperm, 'OPER')
examples.add('user-allperm', 'user-allperm <perm> .. get users with <perm> permission', 'user-allperm rss')
def handle_usersearch(bot, ievent):
    """ <txt> .. search the userhosts of all users for a match. """
    if not ievent.args:
        ievent.missing('<txt>')
        return
    what = ievent.args[0]
    found = bot.users.usersearch(what)
    if not found:
        ievent.reply('no userhost matching %s found' % what)
        return
    formatted = ["(%s) %s" % entry for entry in found]
    ievent.reply('users matching %s: ' % what, formatted)

cmnds.add('user-search', handle_usersearch, 'OPER')
examples.add('user-search', 'search users userhosts', 'user-search gozerbot')
def handle_addpermall(bot, ievent):
    """ <perm> .. add a permission to all users (OPER excluded). """
    if not ievent.args:
        ievent.missing('<perm>')
        return
    perm = ievent.args[0].upper()
    if perm == 'OPER':
        ievent.reply("can't add OPER permissions to all")
        return
    bot.users.addpermall(perm)
    ievent.reply('%s perm added' % perm)

#cmnds.add('user-addpermall', handle_addpermall, 'OPER')
#examples.add('user-addpermall', 'user-addpermall <perm> .. add <permission> to all users', 'addpermsall USER')
def handle_delpermall(bot, ievent):
    """ <perm> .. delete a permission from all users (OPER excluded). """
    if not ievent.args:
        ievent.missing('<perm>')
        return
    perm = ievent.args[0].upper()
    if perm == 'OPER':
        ievent.reply("can't delete OPER permissions from all")
        return
    bot.users.delpermall(perm)
    ievent.reply('%s perm deleted' % perm)

#cmnds.add('user-delpermall', handle_delpermall, 'OPER')
#examples.add('user-delpermall', 'user-delpermall <perm> .. delete <permission> from all users', 'delpermsall BLA')
| Python |
# gozerlib/plugs/outputcache.py
#
#
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.outputcache import get, set
from gozerlib.callbacks import callbacks
from gozerlib.examples import examples
## callbacks
def handle_outputcachepollerwave(bot, event):
    """ callback used in gadget polling .. replay cached output for the channel. """
    for cached in get(event.channel):
        event.reply(cached)

callbacks.add('POLLER', handle_outputcachepollerwave)
#gn_callbacks.add('POLLER', handle_outputcachepollerwave)
def handle_outputcachepollerweb(bot, event):
    """ send the outputcache when a WEB event is triggered. """
    for cached in get(event.channel):
        event.reply(cached)

callbacks.add('WEB', handle_outputcachepollerweb)
#callbacks.add('EXEC', handle_outputcachepollerweb)
## commands
def handle_outputcache(bot, event):
    """ forward the output cache to the user. """
    cached = get(event.channel)
    if not cached:
        event.reply('no data in cache')
        return
    for item in cached:
        event.reply(item)

cmnds.add('outputcache', handle_outputcache, 'USER')
examples.add('outputcache', 'forward the outputcache to the user.', 'outputcache')
def handle_outputcacheflush(bot, event):
    """ <channel or JID> .. flush the outputcache of a user (defaults to the current channel). """
    target = event.rest or event.channel
    set(target, [])
    event.done()

cmnds.add('outputcache-flush', handle_outputcacheflush, 'OPER')
examples.add('outputcache-flush', 'flush output cache of a user', '1) outputcache-flush 2) outputcache-flush bthate@gmail.com')
| Python |
# gozerlib/plugs/reverse.py
#
#
__copyright__ = 'this file is in the public domain'
__author__ = 'Hans van Kranenburg <hans@knorrie.org>'
## gozerlib imports
from gozerlib.utils.generic import waitforqueue
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## basic imports
import types
def handle_reverse(bot, ievent):
    """ reverse a string or a pipelined list.

    with pipeline input the collected list is reversed element-wise,
    otherwise the argument text is reversed character-wise.
    """
    if ievent.inqueue:
        result = waitforqueue(ievent.inqueue, 5)
    elif not ievent.rest:
        ievent.missing('<text to reverse>')
        return
    else:
        result = ievent.rest
    # isinstance instead of type(..) == types.ListType: also matches list
    # subclasses and works on both python 2 and python 3
    if isinstance(result, list):
        ievent.reply("results: ", result[::-1])
    else:
        ievent.reply(result[::-1])
# register the reverse command .. available to USER and CLOUD, runs in a thread
cmnds.add('reverse', handle_reverse, ['USER', 'CLOUD'], threaded=True)
examples.add('reverse', 'reverse text or pipeline', '1) reverse gozerbot 2) list | reverse')
| Python |
# gozerbot/plugs/reload.py
#
#
""" reload plugin. """
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.plugins import plugs
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.admin import plugin_packages
from gozerlib.boot import savecmndtable, savepluginlist
## basic imports
import os
def handle_reload(bot, ievent):
""" <list of plugins> .. reload plugins. """
try:
pluglist = ievent.args
except IndexError:
ievent.missing('<list plugins>')
return
reloaded = []
errors = []
for plug in pluglist:
for package in plugin_packages:
modname = "%s.%s" % (package, plug)
try:
if plugs.reload(modname):
reloaded.append(modname)
except Exception, ex:
if 'No module named' in str(ex):
continue
errors.append(str(ex))
if reloaded:
ievent.reply('reloaded: ', reloaded)
if errors:
ievent.reply('errors: ', errors)
cmnds.add('reload', handle_reload, 'OPER')
examples.add('reload', 'reload <plugin>', 'reload core')
def handle_unload(bot, ievent):
    """ <plugin> .. unload and disable a plugin. """
    if not ievent.args:
        ievent.missing('<plugin>')
        return
    what = ievent.args[0].lower()
    if what not in plugs:
        ievent.reply('there is no %s module' % what)
        return
    got = plugs.unload(what)
    ievent.reply("unloaded and disabled: ", got)

cmnds.add('unload', handle_unload, 'OPER')
examples.add('unload', 'unload <plugin>', 'unload relay')
| Python |
# gozerlib/plugs/not.py
#
#
""" negative grep. """
## gozerlib imports
from gozerlib.examples import examples
from gozerlib.commands import cmnds
from gozerlib.utils.generic import waitforqueue
## basic imports
import getopt
import re
def handle_not(bot, ievent):
""" negative grep. """
if not ievent.inqueue:
ievent.reply('use not in a pipeline')
return
if not ievent.rest:
ievent.reply('not <txt>')
return
try:
(options, rest) = getopt.getopt(ievent.args, 'r')
except getopt.GetoptError, ex:
ievent.reply(str(ex))
return
result = waitforqueue(ievent.inqueue, 10)
if not result:
ievent.reply('no data to grep on')
return
doregex = False
for i, j in options:
if i == '-r':
doregex = True
res = []
if doregex:
try:
reg = re.compile(' '.join(rest))
except Exception, ex:
ievent.reply("can't compile regex: %s" % str(ex))
return
for i in result:
if not re.search(reg, i):
res.append(i)
else:
for i in result:
if ievent.rest not in str(i):
res.append(i)
if not res:
ievent.reply('no result')
else:
ievent.reply('results', res)
cmnds.add('not', handle_not, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('not', 'reverse grep used in pipelines', 'list | not todo')
| Python |
# gozerlib/plugs/userstate.py
#
#
""" userstate is stored in gozerdata/userstates. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.persiststate import UserState
from gozerlib.errors import NoSuchUser
## commands
def handle_userstate(bot, ievent):
    """ <item> <value> .. let the user manage its own state.

    without arguments the current state is shown; with an item and a
    value the item is set first and the full state shown afterwards.
    """
    try:
        (item, value) = ievent.args
    except ValueError:
        item = value = None
    username = bot.users.getname(ievent.userhost)
    if not username:
        # message fixed: it used to read "we have not state of .."
        ievent.reply("we have no state of %s" % ievent.userhost)
        return
    userstate = UserState(username)
    if item and value:
        userstate[item] = value
        userstate.save()
    result = []
    for key, val in userstate.data.iteritems():
        result.append("%s=%s" % (key, val))
    if result:
        ievent.reply("userstate of %s: " % username, result)
    else:
        ievent.reply('no userstate of %s known' % username)

cmnds.add('userstate', handle_userstate, 'USER')
examples.add('userstate', 'get or set userstate', '1) userstate 2) userstate TZ -1')
def handle_userstateget(bot, ievent):
    """ <username> .. show the state of another user. """
    if not ievent.rest:
        ievent.missing('<username>')
        return
    userstate = UserState(ievent.rest)
    result = ["%s=%s" % (key, val) for key, val in userstate.data.iteritems()]
    if result:
        ievent.reply("userstate of %s: " % ievent.rest, result, dot=True)
    else:
        ievent.reply('no userstate of %s known' % ievent.rest)

cmnds.add('userstate-get', handle_userstateget, 'OPER')
examples.add('userstate-get', 'get the userstate of another user', 'userstate-get dunker')
def handle_userstateset(bot, ievent):
    """ <username> <item> <value> .. set a userstate variable of another user. """
    try:
        (username, item, value) = ievent.args
    except ValueError:
        ievent.missing('<username> <item> <value>')
        return
    state = UserState(username)
    state[item] = value
    state.save()
    ievent.reply('userstate %s set to %s' % (item, value))

cmnds.add('userstate-set', handle_userstateset, 'OPER')
examples.add('userstate-set', 'set userstate variable of another user', 'userstate-set dunker TZ -1')
def handle_userstatedel(bot, ievent):
    """ [username] <item> .. remove value from user state. """
    args = ievent.args
    if len(args) == 2:
        (username, item) = args
    else:
        username = bot.users.getname(ievent.userhost)
        if not args:
            ievent.missing('[username] <item>')
            return
        item = args[0]
    if not username:
        ievent.reply("i dont have any state for %s" % ievent.userhost)
        return
    state = UserState(username)
    if item not in state.data:
        ievent.reply('no such item')
        return
    del state.data[item]
    state.save()
    ievent.reply('item %s deleted' % item)
cmnds.add('userstate-del', handle_userstatedel, 'OPER')
examples.add('userstate-del', 'delete userstate variable', '1) userstate-del TZ 2) userstate-del dunker TZ')
def handle_unset(bot, ievent):
    """ <item> .. remove value from user state of the user giving the command. """
    if not ievent.args:
        ievent.missing('<item>')
        return
    item = ievent.args[0]
    username = bot.users.getname(ievent.userhost)
    if not username:
        ievent.reply("we have no state of you")
        return
    state = UserState(username)
    if item not in state.data:
        ievent.reply('no such item')
        return
    del state.data[item]
    state.save()
    ievent.reply('item %s deleted' % item)
cmnds.add('unset', handle_unset, 'USER')
examples.add('unset', 'delete userstate variable for user gving the command', '1) unset TZ')
| Python |
# gozerlib/plugs/welcome.py
#
#
from gozerlib.commands import cmnds
def handle_welcome(bot, event):
    """ greet the user with the feedprovider welcome line. """
    txt = "Welcome to FEEDPROVIDER .. The JSON everywhere bot ;] for wave/web/xmpp/IRC/console"
    event.reply(txt)
cmnds.add('welcome', handle_welcome, ['USER', 'GUEST'])
| Python |
# gozerlib/plugs/grep.py
#
#
""" grep the output of bot comamnds. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.utils.generic import waitforqueue
from gozerlib.examples import examples
## basic imports
import getopt
import re
def handle_grep(bot, ievent):
    """ <txt> .. grep the result list (use in a pipeline).

        options: -r regex match .. -i case insensitive .. -v invert match.
        options can now be combined freely (see fix notes below).
    """
    if not ievent.inqueue:
        ievent.reply('use grep in a pipeline')
        return
    if not ievent.rest:
        ievent.reply('grep <txt>')
        return
    try:
        (options, rest) = getopt.getopt(ievent.args, 'riv')
    except getopt.GetoptError as ex:
        ievent.reply(str(ex))
        return
    result = waitforqueue(ievent.inqueue, 30)
    if not result:
        ievent.reply('no data to grep on')
        return
    flags = [opt for (opt, val) in options]
    doregex = '-r' in flags
    docasein = '-i' in flags
    doinvert = '-v' in flags
    res = []
    if doregex:
        try:
            reg = re.compile(' '.join(rest), re.I if docasein else 0)
        except Exception as ex:
            ievent.reply("can't compile regex: %s" % str(ex))
            return
        for item in result:
            # one loop handles both normal and inverted matching
            if bool(re.search(reg, item)) != doinvert:
                res.append(item)
    else:
        what = ' '.join(rest)
        if docasein:
            what = what.lower()
        for item in result:
            target = str(item.lower()) if docasein else str(item)
            # fix: -i and -v can now be combined .. the old elif chain
            # silently ignored -v whenever -i was given
            if (what in target) != doinvert:
                res.append(item)
    if not res:
        ievent.reply('no result')
    else:
        ievent.reply('results: ', res)
cmnds.add('grep', handle_grep, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('grep', 'grep the output of a command', 'list | grep core')
| Python |
# gozerlib/plugs/irc.py
#
#
""" irc related commands. """
## gozerbot imports
from gozerlib.callbacks import callbacks
from gozerlib.socket.partyline import partyline
from gozerlib.commands import cmnds
from gozerlib.examples import examples
import gozerlib.threads as thr
## basic imports
import Queue
## define
ignorenicks = []
## commands
def handle_broadcast(bot, ievent):
    """ broadcast txt to all joined channels. """
    txt = ievent.rest
    if not txt:
        ievent.missing('<txt>')
        return
    ievent.reply('broadcasting')
    partyline.say_broadcast(txt)
    ievent.reply('done')
cmnds.add('broadcast', handle_broadcast, 'OPER')
examples.add('broadcast', 'send a message to all channels and dcc users', 'broadcast good morning')
def dojoin(bot, ievent):
    """ <channel> [password] .. join a channel/wave"""
    if not ievent.args:
        ievent.missing("<channel> [password]")
        return
    channel = ievent.args[0]
    password = ievent.args[1] if len(ievent.args) > 1 else None
    bot.join(channel, password=password)
cmnds.add('join', dojoin, ['OPER', 'JOIN'])
examples.add('join', 'join <channel> [password]', '1) join #test 2) join #test mekker')
def delchan(bot, ievent):
    """ <channel> .. remove channel from bot.channels. """
    if not ievent.args:
        ievent.missing("<channel>")
        return
    chan = ievent.args[0].lower()
    if bot.state:
        joined = bot.state.data['joinedchannels']
        if chan in joined:
            joined.remove(chan)
            bot.state.save()
    if bot.channels:
        if chan in bot.channels.data:
            del bot.channels.data[chan]
            bot.channels.save()
        else:
            ievent.reply("no channel %s in database" % chan)
    ievent.done()
cmnds.add('delchan', delchan, 'OPER')
examples.add('delchan', 'delchan <channel> .. remove channel from bot.channels', 'delchan #mekker')
def dopart(bot, ievent):
    """ [<channel>] .. leave. """
    chan = ievent.rest or ievent.channel
    ievent.reply('leaving %s chan' % chan)
    bot.part(chan)
    ievent.done()
cmnds.add('part', dopart, 'OPER')
examples.add('part', 'part [<channel>]', '1) part 2) part #test')
def handle_channels(bot, ievent):
    """ channels .. show joined channels. """
    joined = bot.state['joinedchannels']
    if not joined:
        ievent.reply('no channels joined')
        return
    ievent.reply("joined channels: ", joined, dot=True)
cmnds.add('channels', handle_channels, ['USER', 'WEB'])
examples.add('channels', 'show what channels the bot is on', 'channels')
def handle_chat(bot, ievent):
    """ chat .. start a bot initiated dcc chat session. """
    if bot.type != 'irc':
        ievent.reply("chat only works on irc bots")
        return
    thr.start_new_thread(bot._dcclisten, (ievent.nick, ievent.userhost, ievent.channel))
    ievent.reply('dcc chat request sent')
cmnds.add('chat', handle_chat, 'USER')
examples.add('chat', 'start a dcc chat session', 'chat')
def handle_cycle(bot, ievent):
    """ cycle .. recycle channel (part and rejoin with the stored key). """
    chan = ievent.channel
    ievent.reply('cycling %s' % chan)
    bot.part(chan)
    try:
        key = bot.channels[chan.lower()]['key']
    except (KeyError, TypeError):
        key = None
    bot.join(chan, password=key)
    ievent.done()
cmnds.add('cycle', handle_cycle, 'OPER')
examples.add('cycle', 'part/join channel', 'cycle')
def handle_jump(bot, ievent):
    """ <server> <port> .. change server. """
    if bot.jabber:
        ievent.reply('jump only works on irc bots')
        return
    args = ievent.args
    if len(args) != 2:
        ievent.missing('<server> <port>')
        return
    (server, port) = args
    ievent.reply('changing to server %s' % server)
    bot.shutdown()
    bot.server = server
    bot.port = port
    bot.connect()
    ievent.done()
cmnds.add('jump', handle_jump, 'OPER')
examples.add('jump', 'jump <server> <port> .. switch server', 'jump localhost 6667')
def modecb(bot, ievent):
    """ callback to detect change of channel key. """
    postfix = ievent.postfix
    if '+k' in postfix:
        newkey = postfix.split('+k')[1]
        bot.channels[ievent.channel.lower()]['key'] = newkey
callbacks.add('MODE', modecb)
def handle_nick(bot, ievent):
    """ <nickname> .. change bot's nick. """
    if bot.jabber:
        ievent.reply('nick works only on irc bots')
        return
    if not ievent.args:
        ievent.missing('<nickname>')
        return
    newnick = ievent.args[0]
    ievent.reply('changing nick to %s' % newnick)
    bot.donick(newnick, setorig=1, save=1)
    ievent.done()
cmnds.add('nick', handle_nick, 'OPER', threaded=True)
examples.add('nick', 'nick <nickname> .. set nick of the bot', 'nick mekker')
def handle_sendraw(bot, ievent):
    """ <txt> .. send raw text to the server. """
    rawtxt = ievent.rest
    ievent.reply('sending raw txt')
    bot._raw(rawtxt)
    ievent.done()
cmnds.add('sendraw', handle_sendraw, 'SENDRAW')
examples.add('sendraw', 'sendraw <txt> .. send raw string to the server', \
'sendraw PRIVMSG #test :yo!')
def handle_nicks(bot, ievent):
    """ [<channel>] .. return nicks on a channel (irc only).

        sends a NAMES request and collects the 353 (name reply) events
        from a queue until the 366 (end of names) event arrives.
    """
    if bot.jabber:
        ievent.reply('nicks only works on irc bots')
        return
    try:
        chan = ievent.args[0]
    except IndexError:
        # no argument .. default to the channel the command was given in
        chan = ievent.channel
    queue = Queue.Queue()
    # set callback for name info response
    if bot.wait:
        wait353 = bot.wait.register('353', chan, queue)
        # 366 is end of names response list
        wait366 = bot.wait.register('366', chan, queue)
    result = ""
    ievent.reply('searching for nicks')
    bot.names(chan)
    if bot.wait:
        # drain the queue until end-of-names (366) or a None sentinel
        # (presumably pushed on shutdown/timeout — TODO confirm)
        while(1):
            qres = queue.get()
            if qres == None:
                break
            if qres.cmnd == '366':
                break
            else:
                result += "%s " % qres.txt
    if bot.wait:
        # always unregister the waiters again
        bot.wait.delete(wait353)
        bot.wait.delete(wait366)
    if result:
        res = result.split()
        # filter out nicks matching any substring in the module-level
        # ignorenicks list (empty by default, so usually a no-op)
        # NOTE(review): res.remove() inside "for nick in res" mutates the
        # list while iterating and can skip entries — only matters when
        # ignorenicks is non-empty
        for nick in res:
            for i in ignorenicks:
                if i in nick:
                    try:
                        res.remove(nick)
                    except ValueError:
                        pass
        res.sort()
        ievent.reply("nicks on %s (%s): " % (chan, bot.server), res)
    else:
        ievent.reply("can't get nicks of channel %s" % chan)
cmnds.add('nicks', handle_nicks, ['OPER', 'WEB'], threaded=True)
examples.add('nicks', 'show nicks on channel the command was given in', 'nicks')
def handle_silent(bot, ievent):
    """ set silent mode of channel. """
    if ievent.rest:
        channel = ievent.rest.split()[0].lower()
    elif ievent.cmnd == 'DCC':
        return
    else:
        channel = ievent.channel
    ievent.reply('putting %s to silent mode' % channel)
    try:
        bot.channels[channel]['silent'] = 1
    except (KeyError, TypeError):
        ievent.reply("no %s channel in database" % channel)
        return
    ievent.done()
cmnds.add('silent', handle_silent, 'OPER')
examples.add('silent', 'set silent mode on channel the command was given in', 'silent')
def handle_loud(bot, ievent):
    """ loud .. enable output to the channel (disable silent mode). """
    if ievent.rest:
        channel = ievent.rest.split()[0].lower()
    else:
        if ievent.cmnd == 'DCC':
            return
        channel = ievent.channel
    # fix: reply used ievent.channel, which is wrong when a channel
    # argument was given on the command line
    ievent.reply('putting %s into loud mode' % channel)
    try:
        bot.channels[channel]['silent'] = 0
    except (KeyError, TypeError):
        ievent.reply("no %s channel in database" % channel)
        return
    ievent.done()
cmnds.add('loud', handle_loud, 'OPER')
examples.add('loud', 'disable silent mode of channel command was given in', 'loud')
def handle_withnotice(bot, ievent):
    """ withnotice .. make bot use notice in channel. """
    if ievent.rest:
        channel = ievent.rest.split()[0].lower()
    elif ievent.cmnd == 'DCC':
        return
    else:
        channel = ievent.channel
    ievent.reply('setting notice in %s' % channel)
    try:
        bot.channels[channel]['notice'] = 1
    except (KeyError, TypeError):
        ievent.reply("no %s channel in database" % channel)
        return
    ievent.done()
cmnds.add('withnotice', handle_withnotice, 'OPER')
examples.add('withnotice', 'make bot use notice on channel the command was given in', 'withnotice')
def handle_withprivmsg(bot, ievent):
    """ withprivmsg .. make bot use privmsg in channel. """
    if ievent.rest:
        channel = ievent.rest.split()[0].lower()
    else:
        if ievent.cmnd == 'DCC':
            return
        channel = ievent.channel
    # fix: reply used ievent.channel, which is wrong when a channel
    # argument was given on the command line
    ievent.reply('setting privmsg in %s' % channel)
    try:
        bot.channels[channel]['notice'] = 0
    except (KeyError, TypeError):
        ievent.reply("no %s channel in database" % channel)
        return
    ievent.done()
cmnds.add('withprivmsg', handle_withprivmsg, 'OPER')
examples.add('withprivmsg', 'make bot use privmsg on channel command was given in', 'withprivmsg')
def handle_reconnect(bot, ievent):
    """ reconnect .. drop the connection and reconnect to the server. """
    ievent.reply('reconnecting')
    bot.reconnect()
    ievent.done()
cmnds.add('reconnect', handle_reconnect, 'OPER', threaded=True)
examples.add('reconnect', 'reconnect to server', 'reconnect')
def handle_channelmode(bot, ievent):
    """ show channel mode. """
    if bot.type != 'irc':
        ievent.reply('channelmode only works on irc bots')
        return
    if ievent.args:
        chan = ievent.args[0].lower()
    else:
        chan = ievent.channel.lower()
    if chan not in bot.state['joinedchannels']:
        ievent.reply("i'm not on channel %s" % chan)
        return
    ievent.reply('channel mode of %s is %s' % (chan, bot.channels.get(chan, 'mode')))
cmnds.add('channelmode', handle_channelmode, 'OPER')
examples.add('channelmode', 'show mode of channel', '1) channelmode 2) channelmode #test')
def handle_action(bot, ievent):
    """ <channel> <txt> .. make the bot send an action string. """
    parts = ievent.rest.split(' ', 1)
    if len(parts) != 2:
        ievent.missing('<channel> <txt>')
        return
    (channel, txt) = parts
    bot.action(channel, txt)
cmnds.add('action', handle_action, ['ACTION', 'OPER'])
examples.add('action', 'send an action message', 'action #test yoo dudes')
def handle_say(bot, ievent):
    """ <channel> <txt> .. make the bot say something. """
    parts = ievent.rest.split(' ', 1)
    if len(parts) != 2:
        ievent.missing('<channel> <txt>')
        return
    (channel, txt) = parts
    bot.say(channel, txt)
cmnds.add('say', handle_say, ['SAY', 'OPER'], speed=1)
examples.add('say', 'send txt to channel/user', 'say #test good morning')
def handle_server(bot, ievent):
    """ show the hostname of the server the bot is connected to. """
    ievent.reply(bot.server)
cmnds.add('server', handle_server, 'OPER')
examples.add('server', 'show server hostname of bot', 'server')
def handle_voice(bot, ievent):
    """ <nick> [<nick> ..] .. give voice to the given nicks (irc only). """
    if bot.type != 'irc':
        ievent.reply('voice only works on irc bots')
        return
    if len(ievent.args) == 0:
        ievent.missing('<nickname>')
        return
    # fix: message said "voide"
    ievent.reply('setting voice on %s' % str(ievent.args))
    # fix: used sets.Set, but the sets module was never imported
    # (NameError at runtime) .. use the builtin set instead
    for nick in set(ievent.args):
        bot.voice(ievent.channel, nick)
    ievent.done()
cmnds.add('voice', handle_voice, 'OPER')
examples.add('voice', 'give voice to user', 'voice test')
| Python |
# gozerlib/plugs/tail.py
#
#
""" tail bot results. """
## gozerlib imports
from gozerlib.utils.generic import waitforqueue
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## commands
def handle_tail(bot, ievent):
    """ used in a pipeline .. show last <nr> elements. """
    if not ievent.inqueue:
        ievent.reply("use tail in a pipeline")
        return
    try:
        count = int(ievent.args[0])
    except (ValueError, IndexError):
        ievent.reply('tail <nr>')
        return
    data = waitforqueue(ievent.inqueue, 5)
    if data:
        ievent.reply('results: ', data[-count:])
    else:
        ievent.reply('no data to tail')
cmnds.add('tail', handle_tail, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('tail', 'show last <nr> lines of pipeline output', 'list | tail 5')
| Python |
# gozerlib/plugs/count.py
#
#
""" count number of items in result queue. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.utils.generic import waitforqueue
from gozerlib.examples import examples
def handle_count(bot, ievent):
    """ show nr of elements in result list. """
    if not ievent.inqueue:
        ievent.reply("use count in a pipeline")
        return
    items = waitforqueue(ievent.inqueue, 5)
    ievent.reply(str(len(items)))
cmnds.add('count', handle_count, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('count', 'count nr of items', 'list | count')
| Python |
# gozerlib/plugs/uniq.py
#
# used in a pipeline .. unique elements """
# Wijnand 'tehmaze' Modderman - http://tehmaze.com
# BSD License
""" used in a pipeline .. unique elements """
__author__ = "Wijnand 'tehmaze' Modderman - http://tehmaze.com"
__license__ = 'BSD'
## gozerlib imports
from gozerlib.examples import examples
from gozerlib.commands import cmnds
from gozerlib.utils.generic import waitforqueue
def handle_uniq(bot, ievent):
    """ uniq the result list """
    if not ievent.inqueue:
        ievent.reply('use uniq in a pipeline')
        return
    result = waitforqueue(ievent.inqueue, 30)
    if not result:
        ievent.reply('no data')
        return
    # fix: the old code only did list(result) and never removed
    # duplicates .. drop them here, keeping first-occurrence order
    seen = set()
    uniqed = []
    for item in result:
        if item not in seen:
            seen.add(item)
            uniqed.append(item)
    if not uniqed:
        ievent.reply('no result')
    else:
        ievent.reply(uniqed, dot=True)
cmnds.add('uniq', handle_uniq, ['USER', 'GUEST', 'CLOUD'])
examples.add('uniq', 'sort out multiple elements', 'list | grep uniqe')
| Python |
# gozerlib/plugs/core.py
#
#
""" core bot commands. """
## gozerbot imports
from gozerlib.utils.timeutils import elapsedstring
from gozerlib.utils.generic import getversion
from gozerlib.utils.exception import handle_exception
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.plugins import plugs
from gozerlib.admin import plugin_packages
from gozerlib.boot import getpluginlist, boot
from gozerlib.persist import Persist
## basic imports
import time
import threading
import sys
import re
import os
import copy
import cgi
## define
def handle_ccadd(bot, event):
    """ add a control character (bot wide). """
    if not bot.cfg:
        event.reply("bot.cfg is not set.")
        return
    current = bot.cfg.cc
    if not current:
        bot.cfg.cc = event.rest
    elif event.rest in current:
        event.reply("%s is already in cc list" % event.rest)
        return
    else:
        bot.cfg.cc += event.rest
    bot.cfg.save()
    event.done()
cmnds.add('cc-add', handle_ccadd, 'OPER')
examples.add('cc-add', 'add a control charater (bot wide)', 'cc-add @')
def handle_ccremove(bot, event):
    """ remove a control character from the bot's cc list. """
    try:
        # bot.cfg.cc may be a plain string (cc-add concatenates) .. a str
        # has no remove(), so also catch AttributeError instead of crashing
        bot.cfg.cc.remove(event.rest)
        bot.cfg.save()
        event.done()
    except (ValueError, AttributeError):
        event.reply("can't remove %s from %s" % (event.rest, bot.cfg.cc))
# fix: this used to register 'cc-add'/handle_ccadd a second time instead
# of registering the cc-remove command
cmnds.add('cc-remove', handle_ccremove, 'OPER')
examples.add('cc-remove', 'remove a control character (bot wide)', 'cc-remove @')
def handle_encoding(bot, ievent):
    """ show the interpreter's default encoding. """
    enc = sys.getdefaultencoding()
    ievent.reply('default encoding is %s' % enc)
cmnds.add('encoding', handle_encoding, ['USER', 'OPER'])
examples.add('encoding', 'show default encoding', 'encoding')
def handle_uptime(bot, ievent):
    """ show how long the bot has been running. """
    delta = time.time() - bot.starttime
    ievent.reply("uptime is %s" % elapsedstring(delta))
cmnds.add('uptime', handle_uptime, ['USER', 'WEB', 'GUEST'])
examples.add('uptime', 'show uptime of the bot', 'uptime')
def handle_list(bot, ievent):
    """ [<plugin>] .. list loaded plugins or list commands provided by plugin. """
    try:
        what = ievent.args[0]
    except IndexError:
        # no arguments given .. show plugins
        # fix: was a bare except, which also swallowed unrelated errors
        result = []
        for plug in plugs:
            if '__init__' in plug:
                continue
            result.append(plug.split('.')[-1])
        ievent.reply('loaded plugins: ', result)
        return
    # show commands of <what> plugin
    result = []
    for i, j in cmnds.iteritems():
        if what == j.plugname and i:
            result.append(i)
    if result:
        result.sort()
        ievent.reply('%s has the following commands: ' % what, result)
    else:
        ievent.reply('no commands found for plugin %s' % what)
#cmnds.add('list', handle_list, ['USER', 'WEB', 'CLOUD'], threaded=True)
def handle_available(bot, ievent):
    """ show plugins that can be loaded .. to enable use !reload. """
    available = getpluginlist()
    ievent.reply("available plugins: ", available)
cmnds.add('list', handle_available, ['USER', 'GUEST'])
examples.add('list', 'list available plugins', 'list')
def handle_commands(bot, ievent):
    """ <plugin> .. show commands of <plugin>. """
    if not ievent.args:
        ievent.missing('<plugin> .. see the list command for available plugins')
        return
    plugin = ievent.args[0].lower()
    if not plugs.has_key(plugin):
        ievent.reply('no %s plugin is loaded .. see the available command for available plugins (reload to enable)' % plugin)
        return
    found = []
    for name, cmnd in dict(cmnds).iteritems():
        if plugin == cmnd.plugname and name:
            found.append(name)
    if found:
        found.sort()
        ievent.reply('%s has the following commands: ' % plugin, found)
    else:
        ievent.reply('no commands found for plugin %s' % plugin)
cmnds.add('commands', handle_commands, ['USER', 'GUEST', 'CLOUD'])
examples.add('commands', 'show commands of <plugin>', '1) commands core')
def handle_perm(bot, ievent):
    """ <command> .. get permission of command. """
    try:
        cmnd = ievent.args[0]
    except IndexError:
        ievent.missing("<cmnd>")
        return
    try:
        perms = cmnds.perms(cmnd)
    except KeyError:
        # fix: message was the literal "no %sw command registered" and
        # never interpolated the command name
        ievent.reply("no %s command registered" % cmnd)
        return
    if perms:
        ievent.reply("%s command needs %s permission" % (cmnd, perms))
        return
    ievent.reply("can't find perm for %s" % cmnd)
cmnds.add('perm', handle_perm, ['USER', 'GUEST', 'WEB'])
examples.add('perm', 'show permission of command', 'perm quit')
def handle_version(bot, ievent):
    """ show the version string of the bot. """
    ievent.reply(getversion(bot.type.upper()))
cmnds.add('version', handle_version, ['USER', 'GUEST'])
examples.add('version', 'show version of the bot', 'version')
def handle_whereis(bot, ievent):
    """ <cmnd> .. locate a command. """
    if not ievent.args:
        ievent.missing('<cmnd>')
        return
    cmnd = ievent.args[0]
    plugname = cmnds.whereis(cmnd)
    if not plugname:
        ievent.reply("can't find " + cmnd)
    else:
        ievent.reply("%s command is in: %s" % (cmnd, plugname))
cmnds.add('whereis', handle_whereis, ['USER', 'GUEST'])
examples.add('whereis', 'whereis <cmnd> .. show in which plugins <what> is', 'whereis test')
def handle_help(bot, event):
    """ help [<cmnd>|<plugin>]. """
    if not event.rest:
        event.reply("see !help <plugin> for help on a plugin and !list for a list of available plugins.")
        return
    event.txt = 'help ' + event.rest
    handle_helpplug(bot, event)
cmnds.add('help', handle_help, ['USER', 'GUEST'])
def handle_helpplug(bot, ievent):
    """ help [<plugin>] .. show help on plugin/command or show basic help msg.

        with no argument the persisted plugin list is shown; otherwise the
        plugin is (re)loaded and its docstring plus the commands the caller
        is permitted to use are listed.
    """
    try:
        what = ievent.args[0]
    except IndexError:
        # no argument .. show the plugin list persisted in run/pluginlist
        pluginslist = Persist('run' + os.sep + 'pluginlist').data
        ievent.reply("available plugins: ", pluginslist)
        ievent.reply('see commmands <plugin> for list of commands.')
        return
    plugin = None
    modname = ""
    # try each configured plugin package until the module imports
    for package in plugin_packages:
        try:
            modname = "%s.%s" % (package, what)
            plugin = plugs.reload(modname)
            if plugin:
                break
        except(KeyError, ImportError):
            pass
    if not plugin:
        ievent.reply("no %s plugin loaded" % what)
        return
    try:
        phelp = plugin.__doc__
    except (KeyError, AttributeError):
        ievent.reply('no description of %s plugin available' % what)
        return
    cmndresult = []
    if phelp:
        # collect the permissions of the calling user (GUEST when no
        # user database is available)
        if bot.users:
            perms = list(bot.users.getperms(ievent.userhost))
        else:
            perms = ['GUEST', ]
        # gather every command of this plugin the caller may use
        # NOTE(review): a command matching more than one of the user's
        # perms is appended multiple times — confirm whether j.perms can
        # overlap with several user perms in practice
        for i, j in cmnds.iteritems():
            if what == j.plugname:
                for perm in j.perms:
                    if perm in perms:
                        # NOTE(review): "if True:" looks like a leftover
                        # from a removed condition
                        if True:
                            try:
                                descr = j.func.__doc__
                                # cmndhelp is looked up but unused; kept
                                # because a failing gethelp() lands in the
                                # AttributeError fallback below
                                cmndhelp = cmnds.gethelp(i)
                                try:
                                    cmndresult.append(u" !%s %s - examples: %s" % (i, descr, examples[i].example))
                                except KeyError:
                                    cmndresult.append(u" !%s %s - no examples" % (i, descr))
                            except AttributeError:
                                cmndresult.append(i)
    if cmndresult and phelp:
        res = []
        # web bots get html-escaped output wrapped in <code> tags
        for r in cmndresult:
            if bot.type in ['web', ]:
                res.append("<code>%s</code>" % cgi.escape(r))
            else:
                res.append(r)
        res.sort()
        if bot.type in ['web', ]:
            res.insert(0, u'%s - %s<br>' % (what, phelp.strip()))
            ievent.reply('HELP ON %s<br><br>' % what, res, dot="<br>", raw=True)
        else:
            res.insert(0, u'%s - %s\n' % (what, phelp.strip()))
            ievent.reply('HELP ON %s\n\n' % what, res, dot="\n", raw=True)
    else:
        ievent.reply('no commands available')
cmnds.add('help-plug', handle_helpplug, ['USER', 'GUEST'])
examples.add('help-plug', 'get help on <cmnd> or <plugin>', '1) help-plug test 2) help-plug misc')
def handle_apro(bot, ievent):
    """ <cmnd> .. apropos for command. """
    if not ievent.args:
        ievent.missing('<what>')
        return
    what = ievent.args[0]
    perms = bot.users.getperms(ievent.userhost)
    matches = [name for name in cmnds.apropos(re.escape(what))]
    if matches:
        ievent.reply("commands matching %s: " % what, matches, nr=1)
    else:
        ievent.reply('no matching commands found for %s (%s)' % (what, ' .. '.join(perms)))
cmnds.add('apro', handle_apro, ['USER', 'GUEST'])
examples.add('apro', 'apro <what> .. search for commands that contain <what>', 'apro com')
def handle_whatcommands(bot, ievent):
    """ show all commands with permission. """
    perm = ievent.rest
    if not perm:
        ievent.missing('<perm>')
        return
    matching = sorted(cmnd.cmnd for cmnd in cmnds.values() if perm in cmnd.perms)
    if matching:
        ievent.reply('commands known for permission %s: ' % perm, matching)
    else:
        ievent.reply('no commands known for permission %s' % perm)
cmnds.add('whatcommands', handle_whatcommands, ['USER', 'GUEST'])
examples.add('whatcommands', 'show commands with permission <perm>', 'whatcommands USER')
def handle_versions(bot, ievent):
    """ show versions of all loaded modules (if available). """
    versions = {}
    for modname in list(sys.modules):
        ver = getattr(sys.modules[modname], '__version__', None)
        if ver is not None:
            versions[modname] = ver
    versions['python'] = sys.version
    ievent.reply("versions ==> %s" % str(versions))
cmnds.add('versions', handle_versions, 'OPER')
examples.add('versions', 'show versions of all loaded modules', 'versions')
| Python |
# plugs/choice.py
#
#
""" the choice command can be used with a string or in a pipeline. """
## gozerlib imports
from gozerlib.utils.generic import waitforqueue
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## basic imports
import random
def handle_choice(bot, ievent):
    """ make a random choice out of different words or list elements. """
    if ievent.inqueue:
        candidates = waitforqueue(ievent.inqueue, 5)
    elif ievent.args:
        candidates = ievent.args
    else:
        ievent.missing('<space seperated list>')
        return
    if candidates:
        ievent.reply(random.choice(candidates))
    else:
        ievent.reply('nothing to choose from')
cmnds.add('choice', handle_choice, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('choice', 'make a random choice', '1) choice a b c 2) list | choice')
| Python |
# plugs/more.py
#
#
""" access the output cache. """
from gozerlib.commands import cmnds
from gozerlib.examples import examples
def handle_less(bot, ievent):
    """ get entry from the output cache. """
    try:
        args = ievent.args
        if len(args) == 3:
            (who, start, end) = args
        elif len(args) == 2:
            who = ievent.userhost
            (start, end) = args
        else:
            who = ievent.userhost
            start = 0
            end = args[0]
        start = int(start)
        end = int(end)
    except IndexError:
        ievent.missing('[<who>] [<index1>] <index2>')
        return
    except ValueError:
        ievent.reply('i need integers as arguments')
        return
    txt = bot.outcache.get(who, start, end)
    if not txt:
        ievent.reply('no data available for %s %s %s' % (who, start, end))
        return
    ievent.reply(txt, raw=True)
#cmnds.add('less', handle_less, ['USER', 'CLOUD'])
#examples.add('less', "less [<who>] [<index1>] <index2> .. get txt from bots output cache", '1) less 0 2) less 0 2 3) less bart 1 0')
def handle_lesssize(bot, ievent):
    """ show size of output cache. """
    if ievent.args:
        who = ievent.args[0]
    else:
        who = ievent.nick
    ievent.reply("outputcache for %s: %s" % (who, str(bot.outcache.size(who))))
#cmnds.add('less-size', handle_lesssize, ['USER', ])
#examples.add('less-size', "show sizes of data in bot's ouput cache", 'less-size')
def handle_more(bot, ievent):
    """ pop message from the output cache. """
    if ievent.args:
        who = ievent.args[0]
    else:
        who = ievent.auth
    (what, size) = bot.outcache.more(who, 0)
    if not what:
        ievent.reply('no more data available for %s' % who)
        return
    if size:
        ievent.reply("%s (+%s)" % (what.strip(), size), raw=True)
    else:
        ievent.reply(what.strip(), raw=True)
cmnds.add('more', handle_more, ['USER', 'GUEST', 'CLOUD'], threaded=True)
examples.add('more', 'return txt from output cache', '1) more 2) more test')
| Python |
# gozerlib/plugs/sort.py
#
# Sorting
""" sort bot results. """
__author__ = "Wijnand 'maze' Modderman <http://tehmaze.com>"
__license__ = "BSD"
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.utils.generic import waitforqueue
from gozerlib.examples import examples
## basic imports
import optparse
## classes
class SortError(Exception): pass
class SortOptionParser(optparse.OptionParser):
    """ options parser for the sort command .. raises SortError instead of
        printing usage or exiting like the stock OptionParser.
    """

    def __init__(self):
        optparse.OptionParser.__init__(self)
        # (short, long, dest, help) .. all options are store_true flags
        specs = [
            ('-f', '--ignore-case', 'ignorecase',
             'fold lower case to upper case characters'),
            ('-n', '--numeric-sort', 'numeric',
             'compare according to string numerical value'),
            ('-r', '--reverse', 'reverse',
             'reverse the result of comparisons'),
            ('-u', '--unique', 'unique',
             'output only the first of an equal run'),
        ]
        for short, longopt, dest, helptxt in specs:
            self.add_option(short, longopt, default=False,
                action='store_true', dest=dest, help=helptxt)

    def format_help(self, formatter=None):
        raise SortError('sort [-fnru] [--ignore-case] [--numeric-sort] [--reverse] [--unique]')

    def error(self, msg):
        return self.exit(msg=msg)

    def exit(self, status=0, msg=None):
        if msg:
            raise SortError(msg)
        raise SortError()
## functions
def numeric_compare(x, y):
    """ cmp-style comparator: compare numerically when both values parse
        as int, otherwise fall back to plain cmp() ordering. """
    # fix: was two bare excepts, which also swallowed KeyboardInterrupt
    # and the like .. only conversion failures are expected here
    try:
        a = int(x)
        b = int(y)
    except (ValueError, TypeError):
        return cmp(x, y)
    return a - b
## commands
def handle_sort(bot, ievent):
    """ sort the result list (pipeline input or command arguments). """
    parser = SortOptionParser()
    if ievent.inqueue:
        result = waitforqueue(ievent.inqueue, 30)
        try:
            options, args = parser.parse_args(ievent.rest.split())
        except SortError as err:
            ievent.reply(str(err))
            return
    else:
        if not ievent.args:
            ievent.missing('<input>')
            return
        try:
            options, result = parser.parse_args(ievent.args)
        except SortError as err:
            ievent.reply(str(err))
            return
    if not result:
        ievent.reply('no data to sort')
        return
    if options.unique:
        result = list(set(result))
    if options.numeric:
        result.sort(numeric_compare)
    else:
        result.sort()
    if options.ignorecase:
        result.sort(lambda a, b: cmp(a.upper(), b.upper()))
    if options.reverse:
        result.reverse()
    ievent.reply("results: ", result)
cmnds.add('sort', handle_sort, ['USER', 'GUEST'], threaded=True)
examples.add('sort', 'sort the output of a command', 'list | sort')
| Python |
# feedprovider basic plugins
#
#
""" register all .py files """
import os
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
if i.endswith('.py'):
__all__.append(i[:-3])
elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
__all__.append(i)
try:
__all__.remove('__init__')
except:
pass
__plugs__ = __all__
| Python |
# gozerlib/plugs/misc.py
#
#
""" misc commands. """
## gozerbot imports
from gozerlib.utils.exception import handle_exception
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.persiststate import UserState
## basic imports
import time
import os
import threading
import thread
import copy
cpy = copy.deepcopy
def handle_test(bot, ievent):
    """ give test response. """
    txt = "%s .. it works!" % ievent.userhost
    ievent.reply(txt)
cmnds.add('test', handle_test, ['USER', 'GUEST', ])
examples.add('test', 'give test response',' test')
def handle_testevent(bot, ievent):
    """ give dump of event. """
    dump = cpy(ievent)
    # strip the attributes that would make the dump unreadable
    try:
        del dump.cfg
        del dump.plugs
        del dump.bot
    except Exception as ex:
        handle_exception()
    ievent.reply(str(dump))
cmnds.add('test-event', handle_testevent, ['USER', 'GUEST', ])
examples.add('test-event', 'dump the event',' test-event')
def handle_source(bot, ievent):
    """ show where to fetch the bot source. """
    url = 'see http://feedprovider.googlecode.com'
    ievent.reply(url)
cmnds.add('source', handle_source, ['USER', 'GUEST'])
examples.add('source', 'show source url', 'source')
def handle_time(bot, ievent):
    """ show current time """
    authuser = username = ievent.userhost
    if not authuser:
        ievent.reply(get_time('UTC', '', ''))
        return
    userstate = UserState(username)
    try:
        tz = userstate['TZ']
    except KeyError:
        ievent.reply("%s doesn't have a timezone set .. use !set TZ " % username)
        return
    ievent.reply(get_time(tz, username, authuser))
cmnds.add('time', handle_time, ['USER', 'CLOUD'], threaded=True)
examples.add('time', 'show current time (of a user)', 'time test')
def handle_timezone(bot, ievent):
""" <timezone> (integer) .. set users timezone in the userstate. """
username = ievent.userhost
if not ievent.rest:
ievent.missing('<timezone> (integer)')
return
if username:
userstate = UserState(username)
if ievent.rest:
try:
timezone = int(ievent.rest)
set_timezone(bot, ievent, userstate, timezone)
except ValueError:
ievent.reply('TZ needs to be an integer')
return
else:
ievent.reply("can't determine timezone")
cmnds.add('timezone', handle_timezone, ['USER'], threaded=True)
examples.add('timezone', 'set current timezone', 'timezone +1')
def handle_ask_timezone(bot, ievent):
    """ ask for a users timezone.

        returns the raw response text, or None when no usable answer
        came back.
    """
    # NOTE(review): waitforuser is not defined or imported anywhere in
    # this module .. calling this helper will raise NameError — confirm
    # which module it should come from
    ievent.reply('what is your timezone ? for example -1 or +4')
    response = waitforuser(bot, ievent.userhost)
    if response:
        return response.txt
    else:
        ievent.reply("can't determine timezone .. not setting it")
        return
def set_timezone(bot, ievent, userstate, timezone):
    """ store a users timezone offset .. returns True on success,
        False when the value does not parse as an integer. """
    try:
        tz = int(timezone)
    except ValueError:
        ievent.reply('timezone needs to be an integer')
        return False
    userstate['TZ'] = tz
    userstate.save()
    ievent.reply("timezone set to %s" % tz)
    return True
def get_time(zone, username, authuser):
    """ render the current time shifted by the user's timezone offset. """
    # non-numeric zones (e.g. 'UTC') fall back to a zero offset
    try:
        offset = int(zone)
    except ValueError:
        offset = 0
    now = time.time() + int(time.timezone) + offset * 3600
    return time.ctime(now)
| Python |
# gozerlib/plugs/admin.py
#
#
""" admin related commands. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.persist import Persist
from gozerlib.boot import savecmndtable, savepluginlist, boot
from gozerlib.admin import plugin_packages
from gozerlib.config import cfg
from gozerlib.plugins import plugs
from gozerlib.botbase import BotBase
## simplejson imports
from simplejson import dump
## commands
def handle_adminboot(bot, ievent):
    """ boot the bot .. do some initialisation. """
    # force regeneration of the command/plugin/callback tables
    boot(force=True)
    ievent.done()
# the command is reachable as both admin-boot and admin-init
cmnds.add('admin-boot', handle_adminboot, 'OPER')
cmnds.add('admin-init', handle_adminboot, 'OPER')
examples.add('admin-boot', 'initialize the bot .. cmndtable and pluginlist', 'admin-init')
def handle_loadall(bot, ievent):
    """ load all available plugins. """
    # plugin_packages lists the packages scanned for plugins
    plugs.loadall(plugin_packages)
    ievent.done()
cmnds.add('admin-loadall', handle_loadall, 'OPER')
examples.add('admin-loadall', 'load all plugins', 'admin-loadall')
def handle_adminmakebot(bot, ievent):
    """ <name> <type> .. create a bot of given type. """
    args = ievent.args
    if len(args) != 2:
        ievent.missing("<name> <type>")
        return
    botname, bottype = args
    # the new bot inherits the owner of the bot that created it
    newbot = BotBase()
    newbot.name = botname
    newbot.type = bottype
    newbot.owner = bot.owner
    newbot.save()
    ievent.done()
cmnds.add('admin-makebot', handle_adminmakebot, 'OPER')
examples.add('admin-makebot', 'create a bot', 'admin-makebot cmndxmpp xmpp')
| Python |
# gozerlib/persiststate.py
#
#
""" persistent state classes. """
## gozerlib imports
from gozerlib.utils.trace import calledfrom
from persist import Persist
## basic imports
import types
import os
import sys
import logging
class PersistState(Persist):
    """ persistent state base class .. dict-style access backed by Persist. """
    def __init__(self, filename):
        Persist.__init__(self, filename)
        # remember the type of every loaded value so define() can spot changes
        self.types = {}
        for key, value in self.data.iteritems():
            self.types[key] = type(value)
    def __getitem__(self, key):
        """ get state item. """
        return self.data[key]
    def __setitem__(self, key, value):
        """ set state item. """
        self.data[key] = value
    def define(self, key, value):
        """ define a state item .. only (re)set when new or of a changed type. """
        known = self.data.has_key(key) and type(value) == self.types[key]
        if known:
            return
        # normalise strings to unicode and ints to long before storing
        if type(value) == types.StringType:
            value = unicode(value)
        if type(value) == types.IntType:
            value = long(value)
        self.data[key] = value
class PlugState(PersistState):
    """ state for plugins. """
    def __init__(self, *args, **kwargs):
        # the calling plugin's name determines where its state lives
        self.plugname = calledfrom(sys._getframe())
        logging.debug('persiststate - initialising %s' % self.plugname)
        statefile = os.sep.join(['gozerdata', 'state', 'plugs', self.plugname, 'state'])
        PersistState.__init__(self, statefile)
class ObjectState(PersistState):
    """ state for usage in constructors. """
    def __init__(self, *args, **kwargs):
        # key the state file on the module that constructs the object
        caller = calledfrom(sys._getframe(1))
        statefile = os.sep.join(['gozerdata', 'state', caller]) + '.state'
        PersistState.__init__(self, statefile)
class UserState(PersistState):
    """ state for users. """
    def __init__(self, username, filename="state", *args, **kwargs):
        assert username
        # every user gets an own state directory under gozerdata/state/users
        userdir = os.sep.join(['gozerdata', 'state', 'users', username])
        PersistState.__init__(self, userdir + os.sep + filename)
| Python |
# gozerlib/boot.py
#
#
""" admin related data and functions. """
## gozerlib imports
from gozerlib.persist import Persist
from gozerlib.plugins import plugs
from gozerlib.commands import cmnds
from gozerlib.admin import plugin_packages, default_plugins
from gozerlib.callbacks import callbacks
import admin
import users
## basic imports
import logging
import os
import sys
sys.path.insert(0, os.getcwd())
## define
rundir = "run"
## functions
def boot(force=False):
    """ initialize the bot.

    creates the persisted cmnd/plugin/callback tables when missing and
    (re)generates their contents by loading all plugins. when nothing
    needed regeneration only the default plugins get loaded.

    :param force: regenerate all tables even when they already have data
    """
    logging.warn("booting ..")
    # make sure the persisted tables exist
    if not admin.cmndtable:
        admin.cmndtable = Persist(rundir + os.sep + 'cmndtable')
    if not admin.pluginlist:
        admin.pluginlist = Persist(rundir + os.sep + 'pluginlist')
    if not admin.callbacktable:
        admin.callbacktable = Persist(rundir + os.sep + 'callbacktable')
    if not admin.cmndtable.data or force:
        plugs.loadall(plugin_packages)
        admin.loaded = True
        savecmndtable()
    if not admin.pluginlist.data or force:
        if not admin.loaded:
            plugs.loadall(plugin_packages)
            admin.loaded = True
        savepluginlist()
    if not admin.callbacktable.data or force:
        if not admin.loaded:
            plugs.loadall(plugin_packages)
            # BUGFIX: this used to assign the module-global 'loaded' instead
            # of admin.loaded, so every check below still saw the bot as
            # not loaded after regenerating the callback table
            admin.loaded = True
        savecallbacktable()
    if not admin.loaded:
        # nothing triggered a full load .. bring up the default plugins only
        for plug in default_plugins:
            plugs.load(plug)
def savecmndtable():
    """ persist the command name -> plugin module mapping. """
    table = {}
    for cmndname, c in cmnds.iteritems():
        table[cmndname] = c.modname
    admin.cmndtable.data = table
    logging.debug("saving command table")
    admin.cmndtable.save()
def getcmndtable():
    """ return the command -> plugin table, booting first when needed. """
    if admin.cmndtable:
        return admin.cmndtable.data
    boot()
    return admin.cmndtable.data
def savecallbacktable():
    """ persist the callback type -> plugin names table. """
    table = {}
    for cbtype, cbs in callbacks.cbs.iteritems():
        for cb in cbs:
            table.setdefault(cbtype, []).append(cb.plugname)
    admin.callbacktable.data = table
    logging.debug("saving callback table")
    admin.callbacktable.save()
def getcallbacktable():
    """ return the callback table, booting first when needed. """
    if admin.callbacktable:
        return admin.callbacktable.data
    boot()
    return admin.callbacktable.data
def savepluginlist():
    """ persist the sorted list of plugins that provide commands. """
    plugins = []
    for cmndname, c in cmnds.iteritems():
        if c.plugname not in plugins:
            plugins.append(c.plugname)
    plugins.sort()
    admin.pluginlist.data = plugins
    logging.debug("saving plugin list")
    admin.pluginlist.save()
def getpluginlist():
    """ return the plugin list, booting first when needed. """
    if admin.pluginlist:
        return admin.pluginlist.data
    boot()
    return admin.pluginlist.data
| Python |
# gozerlib/commands.py
#
#
""" commands are the first word. """
## lib imports
from utils.xmpp import stripped
from utils.trace import calledfrom, whichmodule
from utils.exception import handle_exception
from utils.lazydict import LazyDict
from errors import NoSuchCommand
## basic imports
import logging
import sys
import types
## classes
class Command(LazyDict):
    """ a command object .. holds the function and metadata of one command. """
    def __init__(self, modname, cmnd, func, perms=None):
        LazyDict.__init__(self)
        self.modname = modname
        # the plugin name is the last part of the dotted module name
        self.plugname = self.modname.split('.')[-1]
        self.cmnd = cmnd
        self.func = func
        # BUGFIX: perms defaulted to a shared mutable list ([]) .. appending
        # to one command's perms would leak into every other command
        if perms is None:
            perms = []
        # allow a single permission to be passed as a plain string
        if type(perms) == types.StringType:
            perms = [perms, ]
        self.perms = perms
        self.plugin = self.plugname
class Commands(LazyDict):
""" the commands object holds all commands of the bot. """
def add(self, cmnd, func, perms, threaded=False, *args, **kwargs):
""" add a command. """
modname = calledfrom(sys._getframe())
self[cmnd] = Command(modname, cmnd, func, perms)
return self
def dispatch(self, bot, event):
""" dispatch an event if cmnd exists and user is allowed to exec this command. """
cmnd = event.usercmnd
try:
c = self[cmnd]
except KeyError:
raise NoSuchCommand(cmnd)
id = event.auth or event.userhost
## core business
if bot.allowall:
return self.doit(bot, event, c)
elif not bot.users or bot.users.allowed(id, c.perms, bot=bot):
return self.doit(bot, event, c)
elif bot.cfg and bot.cfg.auto_register:
bot.users.addguest(event.userhost)
if bot.users.allowed(id, c.perms, bot=bot):
return self.doit(bot, event, c)
return []
def doit(self, bot, event, target):
id = event.auth or event.userhost
logging.warn('dispatching %s for %s' % (event.usercmnd, id))
result = []
try:
target.func(bot, event)
result = event.result
except Exception, ex:
logging.error('commands - %s - error executing %s' % (whichmodule(), str(target.func)))
handle_exception(event)
if event.queues:
for queue in event.queues:
queue.put_nowait(None)
return result
def unload(self, modname):
""" remove modname registered commands from store. """
delete = []
for name, cmnd in self.iteritems():
if cmnd.modname == modname:
delete.append(cmnd)
for cmnd in delete:
del cmnd
return self
def apropos(self, search):
""" search existing commands for search term. """
result = []
for name, cmnd in self.iteritems():
if search in name:
result.append(name)
return result
def perms(self, cmnd):
""" show what permissions are needed to execute cmnd. """
try:
return self[cmnd].perms
except KeyError:
return []
def whereis(self, cmnd):
""" return plugin name in which command is implemented. """
try:
return self[cmnd].plugname
except KeyError:
return ""
def gethelp(self, cmnd):
""" get the docstring of a command. used for help. """
try:
return self[cmnd].func.__doc__
except KeyError:
return
## defines
cmnds = Commands()
| Python |
# gozerlib/eggs.py
#
#
"""
eggs related functions
this module is used to load the eggs on which gozerlib depends from
specified dir .. most of the time this is the jsbnest dir.
"""
## gozerlib imports
from utils.exception import handle_exception
from gozerlib.config import cfg as config
## basic imports
import os
import sys
import logging
## define
mainenv = None
def init(eggdir, log=False):
    """
    make sure setuptools is available.
    :param eggdir: directory to scan for eggs
    :type eggdir: string
    :param log: whether to log the registration of the setuptools egg
    :type log: True or False
    """
    try:
        import setuptools
    except ImportError, ex:
        # setuptools is not importable yet .. put the egg dir and its eggs
        # on sys.path so a bundled copy can be picked up
        try:
            sys.path.insert(0, eggdir)
            for egg in os.listdir(eggdir):
                # NOTE(review): despite the docstring this skips eggs whose
                # name starts with 'setuptools' — confirm the condition is
                # not inverted
                if not egg.startswith('setuptools'):
                    log and logging.warn('eggs - loaded %s' % egg)
                    sys.path.insert(0, eggdir + os.sep + egg)
        except OSError:
            # eggdir does not exist .. nothing to load
            pass
latest = {}
def enable_egg(env, egg, log=True):
"""
search for the latest version of an egg in the enviroment and put
it on sys.path.
:param env: the environment to search the egg in
:type env: pkg_resources.Environment
:param egg: egg to load or find a newer version for
:param log: determine if we should log the enabling of the egg
"""
try:
from pkg_resources import DistributionNotFound, VersionConflict, working_set, parse_requirements, require
if not latest.has_key(egg.project_name):
latest[egg.project_name] = egg
req = egg.as_requirement()
reqstr = str(req)
reqq = parse_requirements([reqstr.replace('==', '>='), ])
for e in working_set.resolve(reqq, mainenv):
if e.location not in sys.path:
env.add(e)
working_set.add(e)
working_set.add_entry(e.location)
latest[egg.project_name] = e
sys.path.insert(0, egg.location)
log and logging.warn('eggs - loaded %s' % e)
else:
log and logging.warn('eggs - %s already on path' % e)
except DistributionNotFound, ex:
env.add(egg)
working_set.add(egg)
working_set.add_entry(egg.location)
latest[egg.project_name] = egg
sys.path.insert(0, egg.location)
log and logging('eggs - loaded %s' % egg)
except VersionConflict, ex:
if egg > ex[0]:
env.add(egg)
working_set.add_entry(egg.location)
working_set.add(egg)
latest[egg.project_name] = egg
sys.path.insert(0, egg.location)
log and logging.warn('eggs - override %s' % egg)
def loadegg(name, eggdirs=['gozernest',], log=True):
"""
scan eggdir for a egg matching `name`.
:param name: piece of txt which should be in the egg projectname
:type name: string
:param eggdirs: directories to search in
:type eggdirs: list
:param log: boolean which indicates whether loading should be logged
:type log: boolean
"""
try:
from pkg_resources import find_distributions, Environment
global mainenv
for eggdir in eggdirs:
if mainenv:
mainenv += Environment(eggdir)
else:
mainenv = Environment(eggdir)
eggs = find_distributions(eggdir)
for egg in eggs:
if name.lower() in egg.project_name.lower():
enable_egg(mainenv, egg, log)
except ImportError:
return
except Exception, ex:
handle_exception()
def loadeggs(eggdir, log=True):
"""
load all eggs in a directory.
:param eggdir: directory to load eggs from
:type eggdir: string
"""
logging.warn('eggs - scanning %s' % eggdir)
try:
from pkg_resources import find_distributions, Environment
global mainenv
if mainenv:
mainenv += Environment(eggdir)
else:
mainenv = Environment(eggdir)
eggs = find_distributions(eggdir)
for egg in eggs:
if not egg.project_name.startswith('setuptools'):
enable_egg(mainenv, egg, log)
except ImportError:
return
except Exception, ex:
handle_exception()
res = []
for name, egg in latest.iteritems():
res.append("%s: %s" % (name, egg.version))
logging.warn('eggs - loaded: %s' % ' .. '.join(res))
# INIT SECTION
# bootstrap setuptools only when the google package is absent .. presumably
# the App Engine runtime provides its own packages (NOTE(review): confirm)
try:
    import google
except ImportError:
    # first search for setuptools and load it
    init(os.getcwd())
    init(os.getcwd() + os.sep + 'gozernest')
# END INIT
| Python |
# gozerlib/channelbase.py
#
#
""" provide a base class for channels (waves, xmpp, web). """
## gozerlib imports
from gozerlib.utils.lazydict import LazyDict
from gozerlib.persist import Persist
## basic imports
import time
class ChannelBase(Persist):
    """
    Base class for all channel objects.
    :param name: name of the channel
    :type name: string
    :param type: type of channel
    :type type: string
    """
    def __init__(self, id, type="notset"):
        Persist.__init__(self, id)
        self.id = id
        self.type = type
        self.lastmodified = time.time()
        # make sure the persisted structures exist
        self.data.feeds = self.data.feeds or []
        self.data.passwords = self.data.passwords or {}
    def setpass(self, name, key):
        """ store a password for the given service and persist it. """
        self.data.passwords[name] = key
        self.save()
    def getpass(self, name='IRC'):
        """ return the stored password for a service, or None. """
        try:
            return self.data.passwords[name]
        except KeyError:
            return
    def delpass(self, name='IRC'):
        """ remove a stored password .. returns True on success. """
        try:
            del self.data.passwords[name]
            self.save()
            return True
        except KeyError:
            return
    def parse(self, event, wavelet=None):
        """
        parse an event for channel related data and constuct the
        channel with it. Overload this.
        :param event: event to parse
        :type event: gozerlib.eventbase.EventBase
        :rtype: self
        """
        pass
| Python |
# gozerlib/persist/persist.py
#
#
"""
allow data to be written to disk or BigTable in JSON format. creating
the persisted object restores data.
"""
## lib imports
from utils.trace import whichmodule, calledfrom
from utils.lazydict import LazyDict
from utils.exception import handle_exception
from utils.name import stripname
from utils.locking import lockdec
from datadir import datadir
## simplejson imports
from simplejson import load, dump, loads, dumps
## basic imports
import thread
import logging
import os
import types
import copy
import sys
## try google first
try:
## google imports
from google.appengine.ext import db
from google.appengine.api.memcache import get, set, replace
from google.appengine.api.datastore_errors import Timeout
logging.debug("persist - using BigTable based Persist")
## classes
    class JSONindb(db.Model):
        """ model to store json files in. """
        # modification/creation timestamps maintained by the datastore
        modtime = db.DateTimeProperty(auto_now=True, indexed=False)
        createtime = db.DateTimeProperty(auto_now_add=True, indexed=False)
        # original filename of the persisted JSON blob
        filename = db.StringProperty()
        # the JSON payload itself
        content = db.TextProperty(indexed=False)
    class Persist(object):
        """ persist data attribute to database backed JSON file. """
        def __init__(self, filename, default={}):
            # record which plugin constructs us, skipping library frames
            self.plugname = calledfrom(sys._getframe())
            if 'lib' in self.plugname:
                self.plugname = calledfrom(sys._getframe(1))
            self.fn = unicode(stripname(filename.strip())) # filename to save to
            self.key = None
            self.obj = None
            # try memcache first before hitting the datastore
            jsontxt = get(self.fn)
            if type(default) == types.DictType:
                default2 = LazyDict()
                default2.update(default)
            else:
                default2 = copy.deepcopy(default)
            if jsontxt is None:
                logging.debug("persist - %s - loading from db" % self.fn)
                try:
                    # retry once on a datastore Timeout
                    try:
                        self.obj = JSONindb.get_by_key_name(self.fn)
                    except Timeout:
                        self.obj = JSONindb.get_by_key_name(self.fn)
                except Exception, ex:
                    # datastore failed .. fall back to the default data
                    handle_exception()
                    self.data = default2
                    return
                if self.obj == None:
                    # no entry yet .. create a fresh model with the default
                    logging.debug("persist - %s - no entry found" % self.fn)
                    self.obj = JSONindb(key_name=self.fn)
                    self.obj.content = unicode(default)
                    self.data = default2
                    return
                jsontxt = self.obj.content
                if jsontxt:
                    # prime the memcache for the next construction
                    set(self.fn, jsontxt)
                logging.debug('persist - jsontxt is %s' % jsontxt)
                gotcache = False
            else:
                gotcache = True
            self.data = loads(jsontxt)
            # wrap plain dicts in a LazyDict for attribute-style access
            if type(self.data) == types.DictType:
                d = LazyDict()
                d.update(self.data)
                self.data = d
            # figure out the calling module for the log line, skipping
            # gozerlib internal frames
            cfrom = whichmodule()
            if 'gozerlib' in cfrom:
                cfrom = whichmodule(2)
            if 'gozerlib' in cfrom:
                cfrom = whichmodule(3)
            if gotcache:
                logging.warn('persist - %s - loaded %s (%s) *cache*' % (cfrom, self.fn, len(jsontxt)))
            else:
                logging.warn('persist - %s - loaded %s (%s)' % (cfrom, self.fn, len(jsontxt)))
        def save(self):
            """ save json data to database. """
            bla = dumps(self.data)
            if self.obj == None:
                self.obj = JSONindb(key_name=self.fn)
                self.obj.content = bla
            else:
                self.obj.content = bla
            self.obj.filename = self.fn
            key = self.obj.put()
            # figure out the calling module for the log line
            cfrom = whichmodule(0)
            if 'gozerlib' in cfrom:
                cfrom = whichmodule(2)
            if 'gozerlib' in cfrom:
                cfrom = whichmodule(3)
            logging.warn('persist - %s - saved %s (%s)' % (cfrom, self.fn, len(bla)))
            # keep memcache in sync with the datastore
            set(self.fn, bla)
except ImportError:
# no google so use file based persist
logging.debug("using file based Persist")
## defines
persistlock = thread.allocate_lock()
persistlocked = lockdec(persistlock)
## classes
    class Persist(object):
        """ persist data attribute to JSON file. """
        def __init__(self, filename, default=None, init=True):
            """ Persist constructor """
            self.fn = stripname(filename.strip()) # filename to save to
            self.lock = thread.allocate_lock() # lock used when saving)
            self.data = LazyDict() # attribute to hold the data
            if init:
                if default == None:
                    default = LazyDict()
                self.init(default)
        @persistlocked
        def init(self, default={}):
            """ initialize the data. """
            logging.debug('persist - reading %s' % self.fn)
            # see if file exists .. if not initialize data to default
            try:
                datafile = open(self.fn, 'r')
            except IOError, ex:
                if not 'No such file' in str(ex):
                    # unexpected read error .. fall back to default, re-raise
                    logging.error('persist - failed to read %s: %s' % (self.fn, str(ex)))
                    self.data = copy.deepcopy(default)
                    raise
                else:
                    # file simply does not exist yet
                    # NOTE(review): the default is NOT applied on this path,
                    # self.data stays the empty LazyDict from the
                    # constructor — confirm this is intended
                    return
            # load the JSON data into attribute
            try:
                self.data = load(datafile)
                datafile.close()
                # wrap plain dicts in a LazyDict for attribute-style access
                if type(self.data) == types.DictType:
                    d = LazyDict()
                    d.update(self.data)
                    self.data = d
            except Exception, ex:
                logging.error('persist - ERROR: %s' % self.fn)
                raise
        @persistlocked
        def save(self):
            """ persist data attribute. """
            # save data
            try:
                #self.lock.acquire()
                dirr = []
                for p in self.fn.split(os.sep)[:-1]:
                    dirr.append(p)
                pp = os.sep.join(dirr)
                if not os.path.isdir(pp):
                    # NOTE(review): only creates the last path component
                    # (os.mkdir, not os.makedirs) — confirm intermediate
                    # directories always exist
                    logging.warn("persist - creating %s dir" % pp)
                    os.mkdir(pp)
                tmp = self.fn + '.tmp' # tmp file to save to
                # first save to temp file and when done rename
                try:
                    datafile = open(tmp, 'w')
                except IOError, ex:
                    logging.error("persist - can't save %s: %s" % (self.fn, str(ex)))
                    return
                # dump JSON to file
                #cp = copy.copy(self.data)
                dump(self.data, datafile)
                datafile.close()
                try:
                    os.rename(tmp, self.fn)
                except OSError:
                    handle_exception()
                    # no atomic operation supported on windows! error is thrown when destination exists
                    os.remove(self.fn)
                    os.rename(tmp, self.fn)
                logging.warn('persist - %s saved' % self.fn)
            finally:
                #self.lock.release()
                pass
## common classes
class PlugPersist(Persist):
    """
    persist plug related data. data is stored in gozerdata/plugs/{plugname}/{filename}.

    :param filename: file (under the plugin's state dir) to persist to
    :param default: optional initial data handed to the Persist backend
    """
    def __init__(self, filename, default=None):
        # retrieve plugname where object is constructed
        plugname = calledfrom(sys._getframe())
        target = datadir + os.sep + 'plugs' + os.sep + plugname + os.sep + filename
        # BUGFIX: the default argument used to be silently ignored .. forward
        # it to the backend when the caller provided one
        if default is None:
            Persist.__init__(self, target)
        else:
            Persist.__init__(self, target, default)
| Python |
# gozerlib/threadloop.py
#
#
""" class to implement start/stoppable threads. """
## lib imports
from threads import start_new_thread
## basic imports
import Queue
import time
import logging
## classes
class ThreadLoop(object):
    """ implement startable/stoppable threads. """
    def __init__(self, name="", queue=None):
        self.name = name or 'idle'
        self.stopped = False
        self.running = False
        self.outs = []
        self.queue = queue or Queue.Queue()
        self.nowrunning = "none"
    def _loop(self):
        """ poll the queue and hand every task to handle() until stopped. """
        logging.debug('%s - starting threadloop' % self.name)
        self.running = True
        while 1:
            if self.stopped:
                break
            try:
                item = self.queue.get_nowait()
            except Queue.Empty:
                if self.stopped:
                    break
                time.sleep(0.1)
                continue
            # a None/empty item is the signal to shut down
            if self.stopped or not item:
                break
            self.handle(*item)
        self.running = False
        logging.debug('%s - stopping threadloop' % self.name)
    def put(self, *data):
        """ put data on task queue. """
        self.queue.put_nowait(data)
    def start(self):
        """ start the thread. """
        if not self.running:
            start_new_thread(self._loop, ())
    def stop(self):
        """ stop the thread. """
        self.stopped = True
        self.running = False
        # wake the loop up so it can notice the stop flag
        self.queue.put(None)
    def handle(self, *args, **kwargs):
        """ overload this. """
        pass
class RunnerLoop(ThreadLoop):
    """ dedicated threadloop for bot commands/callbacks. """
    def _loop(self):
        """ blocking variant of ThreadLoop._loop that records what it runs. """
        logging.debug('%s - starting threadloop' % self.name)
        self.running = True
        while not self.stopped:
            try:
                data = self.queue.get()
            except Queue.Empty:
                if self.stopped:
                    break
                time.sleep(0.1)
                continue
            if self.stopped:
                break
            if not data:
                break
            # remember which job is active for introspection
            self.nowrunning = data[0]
            logging.debug('%s - now running %s' % (self.name, self.nowrunning))
            self.handle(*data)
        self.running = False
        # BUGFIX: this used to call self.debug(), a method that does not
        # exist .. use the logging module like the rest of the class
        logging.debug('%s - stopping threadloop' % self.name)
| Python |
# gozerlib/errors.py
#
#
""" gozerlib exceptions. """
from gozerlib.utils.trace import calledfrom
import sys
class FeedProviderError(Exception):
    """ base class for all gozerlib errors. """
    pass
class CantSaveConfig(FeedProviderError):
    """ raised when the config file cannot be written. """
    pass
class NoOwnerSet(FeedProviderError):
    """ raised when no owner is configured for the bot. """
    pass
class NoSuchUser(FeedProviderError):
    """ raised when a user lookup fails. """
    pass
class NoSuchBotType(FeedProviderError):
    """ raised when an unknown bot type is requested. """
    pass
class NoSuchWave(FeedProviderError):
    """ raised when a wave lookup fails. """
    pass
class NoSuchCommand(FeedProviderError):
    """ raised when a command lookup fails. """
    pass
class NoSuchPlugin(FeedProviderError):
    """ raised when a plugin lookup fails. """
    pass
# BUGFIX: NoOwnerSet was defined twice .. the duplicate definition is removed
class PlugsNotConnected(FeedProviderError):
    """ raised when the plugin store is not connected. """
    pass
| Python |
# gozerlib/eventbase.py
#
#
""" base class of all events. """
## imports
from utils.lazydict import LazyDict
from utils.generic import splittxt
## simplejson imports
from simplejson import dumps, loads
## basic imports
from xml.sax.saxutils import unescape
import copy
import logging
## defines
cpy = copy.deepcopy
## classes
class EventBase(LazyDict):
    """ basic event class. """
    def __init__(self, input=None):
        """ EventBase constructor """
        LazyDict.__init__(self)
        if input:
            self.copyin(input)
        self.result = []
    def __deepcopy__(self, a):
        """ deepcopy an event. """
        e = EventBase()
        e.copyin(self)
        return e
    def _raw(self, txt):
        """ put rawstring to the server .. overload this """
        logging.info(u"eventbase - out - %s - %s" % (self.userhost, unicode(txt)))
        print u"> " + txt
        self.result.append(txt)
    def parse(self, *args, **kwargs):
        """ overload this. """
        pass
    def copyin(self, eventin):
        """ copy in an event. """
        self.update(eventin)
        # take a fresh list so the queues are not shared by reference
        if eventin.queues:
            self.queues = list(eventin.queues)
        #if eventin.inqueue:
        #    self.inqueue = cpy(eventin.inqueue)
        return self
    def reply(self, txt, result=[], *args, **kwargs):
        """ reply to this event """
        # output destined for waiting queues is not echoed to the channel
        if self.checkqueues(result):
            return
        resp = self.makeresponse(txt, result, *args, **kwargs)
        if self.bot:
            self.bot.say(self.channel, resp)
        else:
            # no bot attached .. dump straight to stdout
            # NOTE(review): _raw() also appends to self.result, so resp is
            # recorded twice on this path — confirm intended
            self._raw(resp)
        self.result.append(resp)
        return self
    def missing(self, txt):
        """ display missing arguments. """
        self.reply("%s %s" % (self.usercmnd, txt))
        return self
    def done(self):
        """ tell the user we are done. """
        self.reply('done')
        return self
    def makeargs(self):
        """ make arguments and rest attributes from self.txt. """
        # the first word is the command itself .. the rest are the arguments
        try:
            self.args = self.txt.split()[1:]
            self.rest = ' '.join(self.args)
        except:
            # no (usable) txt available
            self.args = None
    def checkqueues(self, resultlist):
        """ check if resultlist is to be sent to the queues. if so do it. """
        if self.queues:
            for queue in self.queues:
                for item in resultlist:
                    queue.put_nowait(item)
            return True
        return False
    def makeresponse(self, txt, result, nritems=False, dot=", ", *args, **kwargs):
        """ build the response string from txt and/or a result list. """
        # NOTE(review): when both txt and result are given there is no
        # separator between txt and the first result item — confirm
        if txt:
            return txt + dot.join(result)
        elif result:
            return dot.join(result)
        return ""
    def less(self, what):
        """ return the first output block and cache the remainder. """
        what = what.strip()
        txtlist = splittxt(what, 1000)
        size = 0
        # send first block
        res = txtlist[0]
        # see if we need to store output in less cache
        result = ""
        if len(txtlist) > 2:
            logging.warn("addding %s lines to %s outputcache" % (len(txtlist), self.userhost))
            self.bot.outcache.add(self.userhost, txtlist[1:])
            size = len(txtlist) - 2
            result = txtlist[1:2][0]
        if size:
            # show how many more blocks are cached
            result += " (+%s)" % size
        else:
            if len(txtlist) == 2:
                result = txtlist[1]
        return [res, result]
| Python |
# gozerlib/tasks.py
#
#
## gozerlib imports
from gozerlib.utils.trace import calledfrom
from gozerlib.plugins import plugs
## basic imports
import logging
import sys
class Task(object):
    """ a named callable wrapper used by the task manager. """
    def __init__(self, name, func):
        self.name = name
        self.func = func
    def handle(self, *args, **kwargs):
        """ run the wrapped function with the given arguments. """
        self.func(*args, **kwargs)
class TaskManager(object):
    """ maps task names to handler functions and the plugins providing them. """
    def __init__(self):
        self.handlers = {}
        self.plugins = {}
    def add(self, taskname, func):
        """ register a task handler under taskname. """
        logging.debug("added task %s - %s" % (taskname, func))
        self.handlers[taskname] = func
        # remember which plugin registered the task so dispatch can reload it
        self.plugins[taskname] = calledfrom(sys._getframe())
        return True
    def dispatch(self, taskname, *args, **kwargs):
        """ reload the providing plugin and run the named task. """
        try:
            plugin = self.plugins[taskname]
        except KeyError:
            logging.debug('tasks - no plugin for %s found' % taskname)
            return
        logging.debug('loading %s for taskmanager' % plugin)
        plugs.reload(plugin)
        try:
            handler = self.handlers[taskname]
        except KeyError:
            logging.debug('tasks - no handler for %s found' % taskname)
            return
        logging.info("dispatching task %s - %s" % (taskname, str(handler)))
        return handler(*args, **kwargs)
taskmanager = TaskManager()
| Python |
# gozerlib/users.py
#
#
""" bot's users in JSON file. NOT USED AT THE MOMENT. """
## lib imports
from utils.exception import handle_exception, exceptionmsg
from utils.generic import stripped
from persist import Persist
from utils.lazydict import LazyDict
from datadir import datadir
from config import cfg as mainconfig
from errors import NoSuchUser
## basic imports
import re
import types
import os
import time
import logging
## classes
class JsonUser(Persist):
    """ LazyDict representing a user.

    :param name: user name, also determines the file the user is stored in
    :param userhosts: initial userhosts for a fresh user
    :param perms: initial permissions
    :param permits: initial permits
    :param status: initial statuses
    :param email: initial email addresses
    """
    def __init__(self, name, userhosts=None, perms=None, permits=None, status=None, email=None):
        assert name
        Persist.__init__(self, datadir + os.sep + 'users' + os.sep + name)
        self.data.datadir = datadir
        self.data.name = self.data.name or name
        # BUGFIX: the defaults used to be shared mutable lists ([] in the
        # signature) .. appending to one user's lists leaked the entries
        # into every other freshly created user
        self.data.userhosts = self.data.userhosts or userhosts or []
        self.data.perms = self.data.perms or perms or []
        self.data.permits = self.data.permits or permits or []
        self.data.status = self.data.status or status or []
        self.data.email = self.data.email or email or []
class Users(Persist):
    """ class representing all users .. keeps a userhost -> name index. """
    def __init__(self, ddir=None, filename=None):
        # the index file lives in <datadir>/mainusers unless overridden
        self.datadir = ddir or datadir
        self.filename = filename or 'mainusers'
        Persist.__init__(self, self.datadir + os.sep + self.filename)
        if not self.data:
            self.data = LazyDict()
        # names maps userhost -> username (see merge/add below)
        self.data.names = self.data.names or {}
    def exists(self, name):
        """ see if name occurs in the names index.

            NOTE(review): self.data.names is keyed on userhost (see merge),
            so this actually tests for a userhost, not a username — confirm.
        """
        return name in self.data.names
    def all(self):
        """ get all users. """
        # values() yields one name per userhost .. multi-host users appear
        # multiple times
        result = []
        for name in self.data['names'].values():
            result.append(JsonUser(name))
        return result
    ### Misc. Functions
    def size(self):
        """ return nr of users. """
        return len(self.data['names'])
    def names(self):
        """ get names of all users. """
        return self.data.names
    def byname(self, name):
        """ return user by name. """
        try:
            user = JsonUser(name)
            # a user without any userhosts is treated as non-existing
            if user.data.userhosts:
                return user
        except KeyError:
            raise NoSuchUser(name)
    def merge(self, name, userhost):
        """ add userhosts to user with name """
        user = self.byname(name)
        if user:
            if not userhost in user.data.userhosts:
                user.data.userhosts.append(userhost)
                user.save()
            # keep the userhost -> name index in sync
            self.data.names[userhost] = name
            self.save()
            logging.warn("users - %s merged with %s" % (userhost, name))
            return 1
    def usersearch(self, userhost):
        """ search for users with a userhost like the one specified """
        result = []
        for u, name in self.data.names.iteritems():
            # substring match on the indexed userhosts
            if userhost in u:
                result.append((name, u))
        return result
    def getuser(self, userhost):
        """ return the user owning userhost, or None when unknown. """
        try:
            user = self.byname(self.data.names[userhost])
            if user:
                return user
        except KeyError:
            return
    ### Check functions
    def exist(self, name):
        """ see if user with <name> exists """
        return self.byname(name)
    def allowed(self, userhost, perms, log=True, bot=None):
        """ check if user with userhosts is allowed to execute perm command """
        if not type(perms) == types.ListType:
            perms = [perms, ]
        # ANY permissions and allow-all bots short-circuit the check
        if 'ANY' in perms:
            return 1
        if bot and bot.allowall:
            return 1
        res = None
        user = self.getuser(userhost)
        if not user:
            logging.warn('users - %s userhost denied' % userhost)
            return res
        else:
            # permissions the user has in common with the requested ones
            uperms = set(user.data.perms)
            sperms = set(perms)
            intersection = sperms.intersection(uperms)
            res = list(intersection) or None
        if not res and log:
            logging.warn("users - %s perm %s denied (%s)" % (userhost, str(perms), str(uperms)))
        return res
    def permitted(self, userhost, who, what):
        """ check if (who,what) is in users permit list """
        user = self.getuser(userhost)
        res = None
        if user:
            if '%s %s' % (who, what) in user.data.permits:
                res = 1
        return res
    def status(self, userhost, status):
        """ check if user with <userhost> has <status> set """
        user = self.getuser(userhost)
        res = None
        if user:
            if status.upper() in user.data.status:
                res = 1
        return res
    def gotuserhost(self, name, userhost):
        """ check if user has userhost """
        # NOTE(review): byname may return None here, which raises — confirm
        # callers guard against unknown names
        user = self.byname(name)
        return userhost in user.data.userhosts
    def gotperm(self, name, perm):
        """ check if user had permission """
        user = self.byname(name)
        if user:
            return perm.upper() in user.data.perms
    def gotpermit(self, name, permit):
        """ check if user permits something. permit is a (who, what) tuple """
        user = self.byname(name)
        if user:
            return '%s %s' % permit in user.data.permits
    def gotstatus(self, name, status):
        """ check if user has status """
        # NOTE(review): byname may return None here, which raises — confirm
        user = self.byname(name)
        return status.upper() in user.data.status
    ### Get Functions
    def getname(self, userhost):
        """ get name of user belonging to <userhost> """
        # try the index first, fall back to a full user lookup
        try:
            return self.data.names[userhost]
        except:
            user = self.getuser(userhost)
            if user:
                return user.data.name
    def gethosts(self, userhost):
        """ return the userhosts of the user associated with the specified userhost """
        user = self.getuser(userhost)
        if user:
            return user.data.userhosts
    def getemail(self, userhost):
        """ return the email of the specified userhost """
        user = self.getuser(userhost)
        if user:
            # the first address is treated as the primary one (see setemail)
            if user.data.email:
                return user.data.email[0]
    def getperms(self, userhost):
        """ return permission of user"""
        user = self.getuser(userhost)
        if user:
            return user.data.perms
    def getpermits(self, userhost):
        """ return permits of the specified userhost"""
        user = self.getuser(userhost)
        if user:
            return user.data.permits
    def getstatuses(self, userhost):
        """ return the list of statuses for the specified userhost. """
        user = self.getuser(userhost)
        if user:
            return user.data.status
    def getuserhosts(self, name):
        """ return the userhosts associated with the specified user. """
        user = self.byname(name)
        if user:
            return user.data.userhosts
    def getuseremail(self, name):
        """ get email of user. """
        user = self.byname(name)
        if user:
            if user.data.email:
                return user.data.email[0]
    def getuserperms(self, name):
        """ return permission of user. """
        user = self.byname(name)
        if user:
            return user.data.perms
    def getuserpermits(self, name):
        """ return permits of user. """
        user = self.byname(name)
        if user:
            return user.data.permits
    def getuserstatuses(self, name):
        """ return the list of statuses for the specified user. """
        user = self.byname(name)
        if user:
            return user.data.status
    def getpermusers(self, perm):
        """ return all users that have the specified perm. """
        result = []
        # NOTE(review): self.data.names is keyed on userhost, so JsonUser is
        # constructed from userhosts here — confirm this is intended
        for name in self.data.names:
            user = JsonUser(name)
            if perm.upper() in user.data.perms:
                result.append(user.data.name)
        return result
    def getstatususers(self, status):
        """ return all users that have the specified status. """
        result = []
        # NOTE(review): iterates userhost index keys, see getpermusers
        for name in self.data.names:
            user = JsonUser(name)
            if status in user.data.status:
                result.append(user.data.name)
        return result
    ### Set Functions
    def setemail(self, name, email):
        """ set email of user .. the new address becomes the primary one. """
        user = self.byname(name)
        if user:
            # drop an existing occurrence so the insert below cannot duplicate
            try:
                user.data.email.remove(email)
            except:
                pass
            user.data.email.insert(0, email)
            user.save()
            return True
        return False
    ### Add functions
    def add(self, name, userhosts, perms):
        """ add an user. """
        user = self.byname(name)
        logging.debug("users - %s - %s" % (name, str(user)))
        if not user:
            newuser = JsonUser(name, userhosts, perms)
            # index every userhost so getuser() can find the new user
            for userhost in userhosts:
                self.data.names[userhost] = name
            newuser.save()
            self.save()
            logging.warn('users - %s %s %s added to user database' % (name, userhosts, perms))
            return True
    def addguest(self, userhost):
        """ register an unknown userhost as a GUEST user named after it. """
        if not self.getname(userhost):
            self.add(userhost, [userhost, ], ["GUEST",])
    def addemail(self, userhost, email):
        """ add an email address to the userhost. """
        user = self.getuser(userhost)
        if user:
            user.data.email.append(email)
            user.save()
            return 1
    def addperm(self, userhost, perm):
        """ add the specified perm to the userhost. """
        user = self.getuser(userhost)
        if user:
            # permissions are stored upper case
            user.data.perms.append(perm.upper())
            user.save()
            return 1
    def addpermit(self, userhost, permit):
        """ add the given (who, what) permit to the given userhost. """
        user = self.getuser(userhost)
        if user:
            user.data.permits.append(permit)
            user.save()
            return 1
    def addstatus(self, userhost, status):
        """ add status to given userhost. """
        user = self.getuser(userhost)
        if user:
            user.data.status.append(status.upper())
            user.save()
            return 1
    def adduserhost(self, name, userhost):
        """ add a userhost to the named user, creating the user if needed. """
        user = self.byname(name)
        if not user:
            # NOTE(review): sibling methods construct JsonUser and do not use
            # a self.users mapping -- confirm `User` and `self.users` exist;
            # this branch looks inconsistent with the rest of the class.
            user = self.users[name] = User(name=name)
        user.data.userhosts.append(userhost)
        user.save()
        return 1
def adduseremail(self, name, email):
""" add email to specified user. """
user = self.byname(name)
if user:
user.data.email.append(email)
user.save()
return 1
def adduserperm(self, name, perm):
""" add permission. """
user = self.byname(name)
if user:
perm = perm.upper()
user.data.perms.append(perm)
user.save()
return 1
def adduserpermit(self, name, who, permit):
""" add (who, what) permit tuple to sepcified user. """
user = self.byname(name)
if user:
p = '%s %s' % (who, permit)
user.data.permits.append(p)
user.save()
return 1
def adduserstatus(self, name, status):
""" add status to given user. """
user = self.byname(name)
if user:
user.data.status.append(status.upper())
user.save()
return 1
def addpermall(self, perm):
""" add permission to all users. """
for name in self.data.names:
user = JsonUser(name)
user.data.perms.append(perm.upper())
user.save()
### Delete functions
def delemail(self, userhost, email):
""" delete email from userhost. """
user = self.getuser(userhost)
if user:
if email in user.emails:
user.data.emails.remove(email)
user.save()
return 1
def delperm(self, userhost, perm):
""" delete perm from userhost. """
user = self.getuser(userhost)
if user:
p = perm.upper()
if p in user.perms:
user.data.perms.remove(p)
user.save()
return 1
def delpermit(self, userhost, permit):
""" delete permit from userhost. """
user = self.getuser(userhost)
if user:
p = '%s %s' % permit
if p in user.permits:
user.data.permits.remove(p)
user.save()
return 1
def delstatus(self, userhost, status):
""" delete status from userhost. """
user = self.getuser(userhost)
if user:
st = status.upper()
if st in user.data.status:
user.data.status.remove(st)
user.save()
return 1
def delete(self, name):
""" delete user with name. """
try:
del self.data.names[name]
self.save()
except ValueError:
pass
def deluserhost(self, name, userhost):
""" delete the userhost entry. """
user = self.byname(name)
if user:
if userhost in user.data.userhosts:
user.data.userhosts.remove(userhost)
user.save()
return 1
def deluseremail(self, name, email):
""" delete email. """
user = self.byname(name)
if user:
if email in user.data.email:
user.data.email.remove(email)
user.save()
return 1
def deluserperm(self, name, perm):
""" delete permission. """
user = self.byname(name)
if user:
p = perm.upper()
if p in user.data.perms:
user.data.perms.remove(p)
user.save()
return 1
def deluserpermit(self, name, permit):
""" delete permit. """
user = self.byname(name)
if user:
p = '%s %s' % permit
if p in user.data.permits:
user.data.permits.remove(p)
user.save()
return 1
def deluserstatus(self, name, status):
""" delete the status from the given user. """
user = self.byname(name)
if user:
st = status.upper()
if st in user.data.status:
user.data.status.remove(status)
user.save()
return 1
def delallemail(self, name):
""" delete all emails for the specified user. """
user = self.byname(name)
if user:
user.data.email = []
user.save()
return 1
    def make_owner(self, userhosts):
        """ see if owner already has a user account if not add it.

            userhosts .. a single userhost string or a list of them
        """
        if not userhosts:
            logging.warn("no usershosts provided in make_owner")
            return
        owner = []
        # normalize to a list
        # NOTE(review): relies on the `types` module being imported at module
        # level -- not visible in this chunk, confirm.
        if type(userhosts) != types.ListType:
            owner.append(userhosts)
        else:
            owner = userhosts
        for userhost in owner:
            username = self.getname(unicode(userhost))
            # only touch userhosts not already bound to the 'owner' account;
            # try merging into an existing owner account before creating one
            if not username or username != 'owner':
                if not self.merge('owner', unicode(userhost)):
                    self.add('owner', [unicode(userhost), ], ['USER', 'OPER'])
## define
# module-level singleton, created by users_boot()
users = None
def users_boot():
    """ initialize the global users database instance. """
    global users
    users = Users()
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
# internal exception hierarchy: conditions the parser handles itself
class ThingsNobodyCaresAboutButMe(Exception): pass
# the declared character encoding was overridden (e.g. by HTTP headers)
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# no usable character encoding could be determined
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# served Content-Type was not an XML media type
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# the feed used a namespace prefix it never declared
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
    # on modern Pythons, FeedParserDict can subclass the builtin dict
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        """ minimal dict() replacement: build a mapping from (key, value) pairs. """
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """ dictionary whose keys are also readable as attributes and which
        transparently maps legacy element names (channel, items, modified,
        tagline, ...) onto their modern equivalents. """
    # legacy key -> canonical key; a list value means try each candidate in order
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated',
              'issued': 'published',
              'issued_parsed': 'published',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category'/'categories' are synthesized views over the 'tags' list
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            # several canonical candidates: return the first one present
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # prefer the key as given, fall back to its canonical alias
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # always store under the canonical key
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # route through __getitem__ so key aliasing applies
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # attribute access falls through to item access, e.g. feed.title
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # private attributes and 'data' live in __dict__; the rest are items
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """ replace FeedParserDict with a plain-dict factory (no attribute
        access, no key aliasing) for environments such as Zope that cannot
        handle the custom class. """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc
# lazily-built EBCDIC -> ASCII translation table
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """ translate an EBCDIC-encoded byte string to ASCII. """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # byte value i in EBCDIC maps to emap[i] in ASCII
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# collapses redundant slashes directly after the scheme, e.g. 'http:///a'
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """ join a possibly-relative uri against base, after normalizing the
        scheme part of the uri. """
    uri = _urifixer.sub(r'\1\3', uri)
    return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """ set up parser output containers and the (large) state machine.

            baseuri  .. base for resolving relative URIs (xml:base seed)
            baselang .. default language (xml:lang seed)
            encoding .. character encoding of the document
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # build the lowercased namespace lookup table once, lazily
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        """ dispatch an opening tag: track xml:base/xml:lang and namespaces,
            pass inline xhtml content through verbatim, then call the
            element's _start_* handler (or push a generic element). """
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """ dispatch a closing tag: call the element's _end_* handler (or
            pop a generic element), close inline xhtml content, and unwind
            the xml:base / xml:lang stacks. """
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        """ handle a named entity reference, e.g. ref is 'copy' for &copy;.

            markup-significant entities stay escaped; everything else is
            resolved to its utf-8 encoded character, and unknown entities
            are passed through unchanged. """
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                # map an entity name to its unicode codepoint
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data; ignore
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions carry no feed data; ignore
        pass
    def handle_decl(self, text):
        # markup declarations (<!...>) are ignored here; CDATA sections are
        # intercepted earlier, in parse_declaration
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            # CDATA: escape the literal contents and emit as character data
            k = self.rawdata.find(']]>', i)
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            # resume scanning just past the ']]>' terminator
            return k+3
        else:
            # any other declaration: skip to the closing '>'
            k = self.rawdata.find('>', i)
            return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """ record a declared namespace and use well-known namespace URIs
            to detect the feed version when it is not yet known. """
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: remember its canonical prefix
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        """ resolve a possibly-relative uri against the current xml:base. """
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses (strict vs loose parser); default is identity
        return data
    def push(self, element, expectingText):
        # open an element: remember its name, whether text content is
        # expected, and a list that accumulates its character data
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """ close the element on top of the stack, post-process its
            accumulated text (base64, relative URIs, entities, sanitizing)
            and store the result in the feed or current entry. Returns the
            processed text. """
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        # make sure we return unicode when we know the encoding
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """ open an inline-content element: record its type, language and
            base URI in contentparams, then push it like a normal element. """
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
    def _getAttribute(self, attrsD, name):
        # look up an attribute after normalizing its namespace prefix
        return attrsD.get(self._mapToStandardPrefix(name))
    def _isBase64(self, attrsD, contentparams):
        """ guess whether the current content block is base64-encoded:
            either declared via mode='base64', or a non-text, non-xml type. """
        if attrsD.get('mode', '') == 'base64':
            return 1
        # NOTE(review): the checks below read self.contentparams rather than
        # the `contentparams` argument -- looks intentional but confirm.
        if self.contentparams['type'].startswith('text/'):
            return 0
        if self.contentparams['type'].endswith('+xml'):
            return 0
        if self.contentparams['type'].endswith('/xml'):
            return 0
        return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value):
        # store value in the current context, without overwriting an
        # existing value for the same key
        context = self._getContext()
        context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # the presence of <dlhottitles> marks the (rare) Hot RSS format
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # entering <channel> puts us at feed level
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries lastmod/href as attributes; replay them through the
        # normal start/end element machinery
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
    def _end_channel(self):
        # leaving <channel>/<feed>: no longer at feed level
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        self.inimage = 1
        # image sub-elements carry no mixed text content
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # author elements (and their RSS/DC/iTunes spellings) share handlers
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # split the collected author string into name/email detail fields
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> is stored under the 'publisher' key
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor has no sub-elements; its text is the name
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever construct is currently open
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
    def _start_url(self, attrsD):
        """Handle opening <url>/<homepage>/<uri>; element text is an href."""
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        """Route the accumulated href to whichever scope is open:
        author, contributor, image, or textinput."""
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        """Handle opening <email>/<itunes:email>."""
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        """Route the accumulated email address to the open scope:
        publisher, author, or contributor."""
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        """Store one author-detail field (name/email/href) on the current
        context under '<prefix>_detail', then re-derive the combined
        display string."""
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        """Store one field on the most recently started contributor."""
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep context[key] (combined display string) and
        context[key + '_detail'] (structured dict) consistent, deriving
        whichever side is missing from the one that has data."""
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            # structured data wins: rebuild the "Name (email)" string
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            # only the combined string exists: split it into name + email
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            # (strips the email plus any parentheses that surrounded it)
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        """Handle opening <subtitle>/<tagline>/<itunes:subtitle>."""
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        """Handle closing subtitle elements."""
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        """Handle opening <rights>/<dc:rights>/<copyright>."""
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        """Handle closing rights elements."""
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        """Handle opening <item>/<entry>/<product>: start a new entry and
        enter entry scope."""
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        # RDF feeds carry the entry id in rdf:about
        # (NOTE: local 'id' shadows the builtin; kept as-is)
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        """Handle closing </item>/</entry>: leave entry scope."""
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        """Handle opening <dc:language>/<language>."""
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        """Handle closing language elements; remember the feed language."""
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        """Handle opening <dc:publisher>/<webMaster>."""
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        """Handle closing publisher elements, then re-sync publisher detail."""
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        """Handle opening <published>/<issued>/<dcterms:issued>."""
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        """Handle closing published-date elements."""
        value = self.pop('published')
        #self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        """Handle opening <updated>/<modified>/<pubDate>/<dc:date>."""
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        """Handle closing updated-date elements."""
        value = self.pop('updated')
        # NOTE(review): parsed_value is computed but the _save call below
        # was disabled upstream, so the parse result is currently unused
        parsed_value = _parse_date(value)
        #self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        """Handle opening <created>/<dcterms:created>."""
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        """Handle closing created-date elements."""
        value = self.pop('created')
        #self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        """Handle opening <expirationDate>."""
        self.push('expired', 1)
    def _end_expirationdate(self):
        """Handle closing </expirationDate> (parsed save disabled upstream)."""
        pass
        #self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        """Handle <cc:license rdf:resource=...>: the attribute value is
        injected as if it were element text, then popped immediately."""
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        """Handle opening <creativeCommons:license>."""
        self.push('license', 1)
    def _end_creativecommons_license(self):
        """Handle closing </creativeCommons:license>."""
        self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        """Handle <category>/<dc:subject>/<keywords>: record the attribute
        data as a tag; element text may fill the term in _end_category."""
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        """Split whitespace-separated itunes keywords into individual tags."""
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        """Handle <itunes:category>; the 'text' attribute is the term."""
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        """Attach the element text to the tag opened in _start_category,
        or record it as a new tag."""
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            # element text becomes the term of the attribute-only tag
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        """Handle <cloud>: store its attributes verbatim on the context."""
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Handle <link>: collect every link into context['links'] and
        expose the alternate HTML link as context['link']."""
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        # only expect element text when inside feed/entry/source scope
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        """Handle closing </link>; textinput/image links use the text."""
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        """Handle <guid>; a permalink-style guid doubles as the link."""
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        """Handle closing </guid>: save the id and, when appropriate,
        reuse it as the entry link."""
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        """Handle <title>/<dc:title>/<media:title>."""
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        """Handle closing title elements; textinput/image titles are
        stored on their own sub-dicts."""
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        """Handle <description>; if a summary already exists, treat this
        description as full content instead."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        """Handle <abstract>; plain-text variant of description."""
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        """Handle closing description/abstract, routing to content when
        _start_description demoted it."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        """Handle <info>/<feedburner:browserFriendly>."""
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        """Handle closing info elements."""
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        """Handle <generator>; its attributes become generator_detail."""
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        """Handle closing </generator>; element text is the generator name."""
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        """Handle <admin:generatorAgent rdf:resource=...>."""
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            # inject the attribute value as if it were element text
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        """Handle <admin:errorReportsTo rdf:resource=...>."""
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        """Handle <summary>; a second summary is demoted to content."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        """Handle closing summary elements, routing to content when
        _start_summary demoted it."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        """Handle <enclosure>; the href also serves as the entry id
        fallback when no id was seen."""
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                context['id'] = href
    def _start_source(self, attrsD):
        """Handle opening <source>: subsequent data collects in sourcedata."""
        self.insource = 1
    def _end_source(self):
        """Handle closing </source>: snapshot the collected source data
        onto the current entry and reset the buffer."""
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        """Handle <content>, honouring an out-of-line 'src' attribute."""
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        """Handle <prodLink>; treated as HTML content."""
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        """Handle <body>/<xhtml:body>; treated as XHTML content."""
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        """Handle <content:encoded>/<fullitem>; treated as HTML content."""
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        """Handle closing content; text-like content also doubles as the
        entry description."""
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        """Handle <itunes:image>/<itunes:link>; href attribute is the image."""
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        """Handle closing </itunes:block>: normalize 'yes' to 1, else 0."""
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        """Handle closing </itunes:explicit>: normalize 'yes' to 1, else 0."""
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX-driven feed parser: translates namespaced SAX events into
        the mixin's unknown_starttag/unknown_endtag/handle_data calls."""
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            # bozo/exc record whether the document failed to parse cleanly
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            """Record each declared namespace prefix on the mixin."""
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            """Translate a namespaced start-element event into a lowercase
            'prefix:localname' tag plus a flat attribute dict."""
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            """Forward character data to the mixin."""
            self.handle_data(text)
        def endElementNS(self, name, qname):
            """Translate a namespaced end-element event into a lowercase
            'prefix:localname' tag for the mixin."""
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            """Recoverable parse error: flag the feed as bozo."""
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            """Unrecoverable parse error: flag bozo and re-raise."""
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML-based HTML pass-through processor.

    Feeds markup through sgmllib and reconstructs it verbatim into
    self.pieces; subclasses override the handle_*/unknown_* hooks to
    filter or rewrite markup, then call output() for the result.
    """
    # HTML elements that take no end tag
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        # encoding: byte encoding applied when feed() receives unicode
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # extend (the sgmllib constructor calls this as well)
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        """Rewrite an XML-style empty tag (<x/>) as valid HTML."""
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        """Normalize *data* and hand it to the SGML parser."""
        # escape markup declarations other than DOCTYPE/comments/marked
        # sections so sgmllib doesn't choke on them; the replacement must
        # be '&lt;!\1' (the entity had been corrupted to a literal '<',
        # which made this substitution a no-op)
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        # decode numeric quote entities up front; these literals had been
        # corrupted to raw quote characters (one of them a syntax error)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        """Lowercase attribute names (and rel/type values); utility
        method to be called by descendants."""
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        """Reconstruct a start tag verbatim.

        attrs is a list of (attr, value) tuples, e.g. for
        <pre class='screen'>, tag='pre', attrs=[('class', 'screen')].
        """
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        """Scan a declaration name starting at *i*; return (name, end)
        or (None, -1) when the buffer ends mid-name."""
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            # unparseable declaration: emit remaining buffer as data
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser (SGML-based) used when strict XML parsing is
    unavailable or the document is not well-formed."""
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        """Normalize character references in *data*.

        Numeric (decimal and hex) references for the five XML special
        characters are canonicalized to their named-entity forms; when
        the current content is not XML, the named entities are then fully
        decoded to literal characters.

        The entity string literals below were restored from upstream
        feedparser: HTML-entity decoding had corrupted them to raw
        characters, leaving several no-op replaces and a syntax error
        (a bare ''' literal).
        """
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # non-XML content: decode the named entities to literal chars
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites known relative-URI attributes to
    absolute URIs against a base URI."""
    # (tag, attribute) pairs whose values may be relative URIs
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]
    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        # base URI that relative attribute values are resolved against
        self.baseuri = baseuri
    def resolveURI(self, uri):
        """Resolve *uri* against the configured base URI."""
        return _urljoin(self.baseuri, uri)
    def unknown_starttag(self, tag, attrs):
        """Resolve any relative-URI attributes before reconstruction."""
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    """Return *htmlSource* with known relative URI attributes resolved
    against *baseURI*."""
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    p = _RelativeURIResolver(baseURI, encoding)
    p.feed(htmlSource)
    return p.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Whitelist-based HTML sanitizer: keeps only known-safe elements and
    attributes, and drops the text inside <script>/<applet> entirely."""
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']
    # elements whose textual content must be suppressed, not just the tags
    unacceptable_elements_with_end_tag = ['script', 'applet']
    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # nesting depth inside content-suppressed elements
        self.unacceptablestack = 0
    def unknown_starttag(self, tag, attrs):
        """Emit only whitelisted tags, with only whitelisted attributes."""
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
    def unknown_endtag(self, tag):
        """Emit only whitelisted end tags; track leaving suppressed blocks."""
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped
        pass
    def handle_decl(self, text):
        # DOCTYPEs are dropped
        pass
    def handle_data(self, text):
        # drop text inside suppressed elements (e.g. script bodies)
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    """Strip unsafe markup from *htmlSource*; when a Tidy binding is
    installed (and TIDY_MARKUP is set), also tidy the result into
    well-formed XHTML and keep only the body contents."""
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # best-effort: this interface isn't installed, try the next
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # tidy produces a complete document; keep only the body contents
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that follows redirects, retries basic->digest auth,
    and attaches the HTTP status code to the returned file-like object
    instead of raising on error codes."""
    def http_error_default(self, req, fp, code, msg, headers):
        """Treat any 3xx except 304 as a redirect; otherwise return the
        response with its status attached."""
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        """Follow the redirect when a Location header is present."""
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        """Follow a permanent redirect when a Location header is present."""
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # best-effort: any failure (old Python, no base64, malformed
            # headers) falls back to the default error handler
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: use it as-is
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL; they are sent
                    # as a Basic Authorization header below instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise whichever decompression schemes are importable
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            # NOTE(review): this appears to rely on opener.close() only
            # detaching the opener's handlers, not closing the response
            # being returned -- confirm against the urllib2 version in use
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# expand each template into a full regex with named groups, plus an
# optional time-of-day / timezone suffix
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl # list comprehensions leak their loop variable in Python 2
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex # ditto
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each precompiled template match in order; the template list is
    # ordered so the most specific pattern wins (greedy matching).
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    # The empty-string template matches anything with a zero-width span;
    # treat that as "no match".
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # missing year defaults to the current (GMT) year
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    # ordinal rides in the yday slot; mktime() normalizes it along with
    # any hour/minute overflow introduced by the tz adjustment below
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Unicode code points for the Korean date-component markers and AM/PM words.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# Date formats used by the OnBlog and Nate Korean blogging services.
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if not match: return
    # Rebuild the date as W3DTF with the fixed KST offset and delegate.
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % match.groups()
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match: return
    # Nate uses a 12-hour clock with a Korean AM/PM word in group 4.
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    # Rebuild as W3DTF (zero-padding the hour) with the fixed KST offset.
    w3dtfdate = '%s-%s-%sT%02d:%s:%s+09:00' % \
                (match.group(1), match.group(2), match.group(3),
                 hour, match.group(6), match.group(7))
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# MS SQL-style timestamps: 'YYYY-MM-DD hh:mm:ss' with optional fraction.
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # NOTE(review): zonediff is hard-coded to +09:00 (KST), mirroring the
    # Korean handlers above -- looks copy-pasted; confirm intended offset.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps Greek month/weekday abbreviations (several spelling variants) to
# their English equivalents so the date can be re-cast as RFC 822.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# RFC-822-like layout with Greek weekday/month names.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    match = _greek_date_format_re.match(dateString)
    if not match: return
    # Translate the Greek weekday/month abbreviations; anything unknown
    # means this is not a Greek date after all.
    try:
        wday = _greek_wdays[match.group(1)]
        month = _greek_months[match.group(3)]
    except:
        return
    # Reassemble as an RFC 822 date and delegate.
    rfc822date = '%s, %s %s %s %s:%s:%s %s' % \
                 (wday, match.group(2), month, match.group(4),
                  match.group(5), match.group(6), match.group(7), match.group(8))
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps Hungarian month names to two-digit month numbers.
# NOTE(review): u'febru\u00e1ri' ('februari') and u'm\u00e1ujus' ('maujus')
# look misspelled compared to the standard 'februar'/'majus' forms --
# presumably matching the feeds this was written for; verify before changing.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# 'YYYY-<monthname>-D?D' followed by a time and a mandatory zone offset.
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    match = _hungarian_date_format_re.match(dateString)
    if not match: return
    try:
        # Unknown month names abort the parse (KeyError caught below).
        month = _hungarian_months[match.group(2)]
        day = match.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = match.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        return
    # Reassemble as W3DTF (no seconds in this format) and delegate.
    w3dtfdate = '%s-%s-%sT%s:%s%s' % \
                (match.group(1), month, day, hour, match.group(5), match.group(6))
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    # Returns a 9-tuple in GMT, or None if dateString is not valid W3DTF.
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals an invalid date.
        year = int(m.group('year'))
        if year < 100:
            # two-digit years are pinned to the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Convert an ordinal (day-of-year) into month/day by starting
            # from a 30-day-month estimate and iteratively correcting it
            # against the yday reported by gmtime/mktime.
            # NOTE(review): 'julian / 30' relies on Python 2 integer
            # division -- would produce a float on Python 3.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hours, minutes, seconds), defaulting to midnight.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # sign is inverted: the return value is added to the local epoch
        # time below to shift it back toward UTC
        if tzd[0] == '+':
            return -offset
        return offset
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the whole string must match, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime() interprets gmt as local time; compensate with time.timezone
    # and the explicit tz designator to land back in GMT.
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    # Uses the Python 2 'rfc822' module (imported at file top).
    data = dateString.split()
    # drop a leading weekday name ('Mon,' / 'Mon.' / 'mon')
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # asctime-ish form where the zone is glued on with '+':
        # split 'hh:mm:ss+zone' into separate time and zone fields
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        # date-only input: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # Handlers are tried newest-first (see registerDateHandler); the first
    # one that returns a valid all-numeric 9-tuple wins.
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # Validate that every field is numeric.
            # NOTE(review): relies on Python 2's eager map() -- on Python 3
            # the lazy map would never run int() and nothing is validated.
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # a failing handler just means "not my format"; try the next one
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    Returns a 5-tuple: (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Each branch transcodes xml_data to UTF-8 so the declaration regex
    # below can run on ASCII-compatible bytes.
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # a generic multi-byte encoding name in the declaration is refined
        # to the byte order actually sniffed above
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # Precedence rules per RFC 3023 as described in the docstring above.
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns the document re-encoded as UTF-8 bytes with its XML declaration
    rewritten (or prepended) to say encoding='utf-8'.  Raises whatever
    unicode() raises if the data is not valid in the given encoding.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); if the BOM contradicts the caller's
    # encoding, trust the BOM and switch.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # replace the existing XML declaration, or prepend one if absent
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    '''
    # Entity declarations are removed first so they never reach the parser.
    entity_re = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    doctype_re = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    data = entity_re.sub('', data)
    found = doctype_re.findall(data)
    if found:
        doctype = found[0] or ''
    else:
        doctype = ''
    # Netscape's RSS 0.91 DOCTYPE is the only version hint taken from here.
    version = None
    if doctype.lower().count('netscape'):
        version = 'rss091n'
    return version, doctype_re.sub('', data)
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string

    Returns a FeedParserDict with (at least) 'feed', 'entries', and 'bozo'
    keys; on HTTP fetches also 'href', 'status', 'headers', 'etag', and
    'modified'.  Never raises: download/parse errors set result['bozo'] = 1
    and store the exception in result['bozo_exception'].
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # accept a single handler instance as well as a list (Python 2 classic
    # instances only, per types.InstanceType)
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None

    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''

    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            #result['modified'] = _parse_date(last_modified)
            result['modified'] = last_modified
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)

    result['version'], data = _stripDoctype(data)

    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)

    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result

    # if there was a problem downloading, we're done
    if not data:
        return result

    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): 'documented declared as' reads like a typo for
        # 'document declared as' -- existing message left as-is.
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            # fall through to the loose parser below
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
# Command-line driver: parse each URL given on the command line and
# pretty-print the result (Python 2 print statements).
if __name__ == '__main__':
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # enable the Zope-compatible plain-dict results for console output
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
#  useless.  Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| Python |
# gozerlib/plugins.py
#
#
""" holds all the plugins. plugins are imported modules. """
## lib imports
from commands import cmnds
from eventbase import EventBase
from persist import Persist
from utils.lazydict import LazyDict
from utils.exception import handle_exception
from admin import cmndtable
from errors import NoSuchPlugin
from jsbimport import force_import, _import
## basic imports
import os
import logging
import Queue
import copy
import sys
import logging
## defines
cpy = copy.deepcopy
## classes
class Plugins(LazyDict):
    """ the plugins object contains all the plugins, keyed by module name. """
    def loadall(self, paths, default=[], force=False):
        """
        load all plugins from the given package paths.

        each path must be a package whose __init__.py defines __plugs__,
        a list of plugin module names to load.

        NOTE(review): the default and force arguments are currently unused
        (and default=[] is a mutable default) -- confirm whether loading
        only the default_plugins list when force is False still needs to
        be implemented.
        """
        imp = None
        for module in paths:
            try:
                imp = _import(module)
            except ImportError:
                logging.warn("plugins - no %s plugin package found" % module)
                continue
            logging.warn("plugins - got plugin package %s" % module)
            try:
                # __plugs__ lists the plugin modules inside the package
                for plug in imp.__plugs__:
                    try:
                        self.load("%s.%s" % (module,plug), replace=True)
                    except KeyError:
                        logging.debug("failed to load plugin package %s" % module)
            except AttributeError:
                logging.warn("no plugins in %s .. define __plugs__ in __init__.py" % module)
    def unload(self, modname):
        """ unload plugin .. remove related commands from cmnds object. """
        logging.debug("plugins - unloading %s" % modname)
        try:
            cmnds.unload(modname)
        except KeyError:
            return False
        try:
            # give the plugin a chance to clean up after itself
            self[modname].shutdown()
            logging.debug('plugins - called %s shutdown' % modname)
        except (AttributeError, KeyError):
            pass
        return True
    def load(self, modname, replace=False):
        """ load a plugin .. return the module or None when import fails. """
        if not replace:
            # reuse an already imported module when replace is not requested
            if modname in sys.modules:
                logging.debug("plugins - %s is already loaded" % modname)
                self[modname] = sys.modules[modname]
                return sys.modules[modname]
        logging.warn("plugins - loading %s" % modname)
        try:
            mod = _import(modname)
        except ImportError, ex:
            logging.info("can't import %s - %s" % (modname, str(ex)))
            return
        try:
            self[modname] = mod
        except KeyError:
            logging.error("plugins - failed to load %s" % modname)
            raise NoSuchPlugin(modname)
        try:
            # call the plugin's init() hook if it defines one
            init = getattr(self[modname], 'init')
        except AttributeError:
            return self[modname]
        init()
        logging.debug('plugins - %s init called' % modname)
        return self[modname]
    def reload(self, modname):
        """ reload a plugin. just load for now. """
        #self.unload(modname)
        return self.load(modname, replace=True)
    def dispatch(self, bot, event, *args, **kwargs):
        """
        dispatch event onto the cmnds object. check for pipelines first.

        NOTE: returns None when event.txt is empty.
        """
        result = []
        if event.txt and not ' | ' in event.txt:
            # plain command .. maybe load its plugin on demand first
            self.needreloadcheck(bot, event)
            result = cmnds.dispatch(bot, event, *args, **kwargs)
            if event.queues:
                # signal end-of-output to any waiting queues
                for queue in event.queues:
                    queue.put_nowait(None)
            return result
        if event.txt and ' | ' in event.txt:
            return self.pipelined(bot, event, *args, **kwargs)
    def pipelined(self, bot, event, *args, **kwargs):
        """ split cmnds, create events for them, chain the queues and dispatch. """
        origqueues = event.queues
        event.queues = []
        event.allowqueue = True
        events = []
        # split commands .. one copied event per pipeline stage
        for item in event.txt.split(' | '):
            e = copy.deepcopy(event)
            e.queues = []
            e.onlyqueues = True
            e.txt = item.strip()
            e.usercmnd = e.txt.split()[0]
            logging.debug('creating event for %s' % e.txt)
            e.bot = bot
            e.makeargs()
            events.append(e)
        # loop over events .. chain queues so each command reads the
        # previous command's output from its inqueue
        prevq = None
        for e in events[:-1]:
            q = Queue.Queue()
            e.queues.append(q)
            if prevq:
                e.inqueue = prevq
            prevq = q
        events[-1].inqueue = prevq
        # only the last command in the pipeline produces real output
        events[-1].onlyqueues = False
        if origqueues:
            events[-1].queues = origqueues
        # do the dispatch
        for e in events:
            self.dispatch(bot, e)
        return events[-1].result
    def needreloadcheck(self, bot, event, target=None):
        """
        check if event requires a plugin to be reloaded. if so
        reload the plugin.
        """
        logging.debug("plugins - checking for reload of %s (%s)" % (event.usercmnd, event.userhost))
        plugloaded = None
        try:
            from boot import getcmndtable
            plugin = getcmndtable()[event.usercmnd]
        except KeyError:
            logging.warn("can't find plugin to reload for %s" % event.usercmnd)
            return
        #logging.warn('cmnd: %s plugin: %s' % (event.usercmnd, plugin))
        if plugin in self:
            # plugin already loaded .. nothing to do
            return False
        plugloaded = self.reload(plugin)
        logging.warn("plugins - loaded %s on demand (%s)" % (plugin, event.usercmnd))
        return plugloaded
## define
# module-level Plugins instance shared across the bot code
plugs = Plugins()
| Python |
# gozerlib/runner.py
#
#
""" threads management to run jobs. """
__copyright__ = 'this file is in the public domain'
## gozerlib imports
from gozerlib.threads import getname, start_new_thread
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.locking import lockdec
from gozerlib.threadloop import RunnerLoop
from gozerlib.periodical import minutely
## basic imports
import Queue
import logging
import random
import thread
import time
## define
# locks
runlock = thread.allocate_lock()
locked = lockdec(runlock)
## classes
class Runner(RunnerLoop):
"""
a runner is a thread with a queue on which jobs can be pushed.
jobs scheduled should not take too long since only one job can
be executed in a Runner at the same time.
:param name: name of the runner
:type name: string
"""
def __init__(self, name="runner"):
RunnerLoop.__init__(self, name)
self.working = False
self.starttime = time.time()
self.elapsed = self.starttime
self.finished = time.time()
def handle(self, descr, func, *args, **kwargs):
"""
schedule a job.
:param descr: description of the job
:type descr: string
:param func: function to call
:type func: function
"""
self.working = True
try:
name = getname(str(func))
stats.up('runners', name)
logging.debug('runner - running %s: %s' % (descr, name))
self.starttime = time.time()
func(*args, **kwargs)
self.finished = time.time()
self.elapsed = self.finished - self.starttime
if self.elapsed > 3:
logging.debug('runner - ALERT %s %s job taking too long: %s seconds' % (descr, str(func), self.elapsed))
except Exception, ex:
handle_exception()
self.working = False
class CommandRunner(Runner):
def handle(self, descr, func, bot, ievent, *args, **kwargs):
"""
schedule a bot command.
:param descr: description of the job
:type descr: string
:param func: function to call
:type func: function
:param bot: bot on which the command is called
:type bot: gozerbot.botbase.BotBase
:param ievent: event that triggered this command
:type ievent: gozerbot.eventbase.EventBase
"""
self.working = True
try:
name = getname(str(func))
stats.up('runners', name)
stats.up('runners', bot.name)
logging.debug('runner - %s (%s) running %s: %s at speed %s' % (ievent.nick, ievent.userhost, descr, str(func), ievent.speed))
self.starttime = time.time()
func(bot, ievent, *args, **kwargs)
for queue in ievent.queues:
queue.put_nowait(None)
self.finished = time.time()
self.elapsed = self.finished - self.starttime
if self.elapsed > 3:
logging.debug('runner - ALERT %s %s job taking too long: %s seconds' % (descr, str(func), self.elapsed))
except Exception, ex:
handle_exception(ievent)
self.working = False
class Runners(object):

    """
    runners is a collection of runner objects.

    :param max: maximum number of runners in the collection
    :type max: integer
    :param runnertype: Runner class to instantiate runners with
    :type runnertype: Runner
    """

    def __init__(self, max=100, runnertype=Runner):
        self.max = max
        self.runners = []
        self.cleanup()
        self.runnertype = runnertype

    def runnersizes(self):
        """
        return sizes of runner objects.

        :rtype: list .. list of runner queue sizes
        """
        return [runner.queue.qsize() for runner in self.runners]

    def stop(self):
        """ stop all runners and remove the idle ones. """
        for runner in self.runners:
            runner.stop()
        self.cleanup()

    def start(self):
        """ overload this if needed. """
        pass

    def put(self, *data):
        """ put a job on a free runner .. create a new one if all are busy. """
        for runner in self.runners:
            if not runner.working:
                runner.put(*data)
                return
        runner = self.makenew()
        runner.put(*data)

    def running(self):
        """
        return list of currently running jobs.

        :rtype: list
        """
        return [runner.nowrunning for runner in self.runners if runner.working]

    def makenew(self):
        """
        create a new runner while below the maximum .. otherwise pick a
        random existing runner to queue the job on.

        :rtype: Runner
        """
        if len(self.runners) < self.max:
            runner = self.runnertype()
            runner.start()
            self.runners.append(runner)
        else:
            runner = random.choice(self.runners)
        return runner

    def cleanup(self):
        """ stop and remove all idle runners. """
        # BUGFIX: iterate over a copy .. removing items from self.runners
        # while iterating it skipped every other idle runner
        for runner in self.runners[:]:
            if not runner.working:
                runner.stop()
                self.runners.remove(runner)
# start all runners
def runners_start():
    """ start every callback and command runner pool. """
    for pool in cbrunners + cmndrunners:
        pool.start()
# stop all runners
def runners_stop():
    """ stop every callback and command runner pool. """
    for pool in cbrunners + cmndrunners:
        pool.stop()
## INIT SECTION
# callback runner pools .. maxima 12 down to 3
cbrunners = [Runners(12-i) for i in range(10)]
# command runner pools (CommandRunner) .. maxima 20 down to 11
cmndrunners = [Runners(20-i, CommandRunner) for i in range(10)]
# sweep over all runners .. registered to run via the @minutely periodical
@minutely
def cleanall():
    """ stop and remove idle runners in every pool. """
    for runners in cbrunners + cmndrunners:
        runners.cleanup()
cleanall()
## END INIT
| Python |
# gozerlib/datadir.py
#
#
""" the datadir of the bot. """
## basic imports
import re
import os
## define
datadir = 'gozerdata'
## functions
def makedirs(ddir=None):
    """
    create the datadir layout: users, db, fleet, pgp, plugs and old subdirs.

    :param ddir: directory to create the layout in (defaults to datadir)
    :type ddir: string
    :rtype: boolean
    """
    ddir = ddir or datadir
    if not os.path.isdir(ddir):
        os.mkdir(ddir)
    # create each subdirectory if it doesn't exist yet .. os.path.join
    # keeps the paths portable
    for sub in ('users', 'db', 'fleet', 'pgp', 'plugs', 'old'):
        target = os.path.join(ddir, sub)
        if not os.path.isdir(target):
            os.mkdir(target)
    return True
| Python |
# gozerlib/jsbimport.py
#
#
""" use the imp module to import modules. """
import time
import sys
import imp
import os
import thread
import logging
def _import(name):
    """
    import a (possibly dotted) module name and return the leaf module.

    :param name: dotted name of the module to import
    :type name: string
    :rtype: module
    """
    logging.debug("jsbimport - trying %s" % name)
    # BUGFIX/cleanup: the old per-prefix loop rebound a local named "imp",
    # shadowing the imp module, and duplicated what __import__ already
    # does: importing "a.b.c" imports a, a.b and a.b.c in one call.
    __import__(name)
    # __import__ returns the top-level package .. look the leaf up instead
    logging.debug("jsbimport - got module %s" % sys.modules[name])
    return sys.modules[name]
def __import(name, path=None):
    """
    import module <name> with the imp module .. will reload the module
    if it is already in sys.modules.

    :param name: name of the module to import (may contain dots)
    :type name: string
    :param path: optional path to search in
    :type path: string
    :rtype: module
    """
    logging.debug('import - importing %s' % name)
    splitted = name.split('.')
    for plug in splitted:
        fp, pathname, description = imp.find_module(plug, path)
        try:
            result = imp.load_module(plug, fp, pathname, description)
            try:
                # descend into the package for the next name component
                path = result.__path__
            except AttributeError:
                # BUGFIX: was a bare except .. only a missing __path__
                # (plain module, not a package) is expected here
                pass
        finally:
            if fp:
                fp.close()
        # NOTE(review): this returns after the FIRST component is loaded,
        # so later components of a dotted name are never reached --
        # confirm whether this early return is intended.
        if result:
            return result
def force_import(name):
    """
    force import of module <name> by replacing it in sys.modules.

    :param name: name of module to import
    :type name: string
    :rtype: module
    """
    # drop any cached module so _import performs a fresh load
    sys.modules.pop(name, None)
    return _import(name)
| Python |
# gozerlib/botbase.py
#
#
""" base class for all bots. """
## lib imports
from utils.lazydict import LazyDict
from plugins import plugs as coreplugs
from callbacks import callbacks, gn_callbacks
from eventbase import EventBase
from errors import NoSuchCommand, PlugsNotConnected, NoOwnerSet
from datadir import datadir
from commands import Commands
from config import cfg as mainconfig
from utils.pdod import Pdod
from less import Less
from boot import boot
from utils.locking import lockdec
## basic imports
import time
import logging
import copy
import sys
import getpass
import os
import thread
## define
cpy = copy.deepcopy
eventlock = thread.allocate_lock()
eventlocked = lockdec(eventlock)
## classes
class BotBase(LazyDict):
def __init__(self, cfg=None, usersin=None, plugs=None, jid=None, *args, **kwargs):
LazyDict.__init__(self)
self.starttime = time.time()
self.type = "base"
if cfg:
self.cfg = cfg
self.update(cfg)
else:
self.cfg = mainconfig
self.owner = self.cfg.owner
if not self.owner:
logging.warn("owner is not set in %s" % self.cfg.cfile)
self.setusers(usersin)
logging.warn("botbase - owner is %s" % self.owner)
self.users.make_owner(self.owner)
self.plugs = plugs or coreplugs
if jid:
self.jid = jid
else:
self.jid = "default"
# set datadir to datadir/fleet/<botname>
self.fleetdir = 'fleet' + os.sep + self.jid
self.datadir = datadir + os.sep + self.fleetdir
self.outcache = Less(1)
try:
if not os.isdir(self.datadir):
os.mkdir(self.datadir)
except:
pass
#if not self.cfg:
# self.cfg = Config(self.fleetdir + os.sep + 'config')
# self.update(self.cfg)
def setstate(self, state=None):
self.state = state or Pdod(self.datadir + os.sep + 'state')
if self.state and not 'joinedchannels' in self.state.data:
self.state.data.joinedchannels = []
def setusers(self, users=None):
if users:
self.users = users
return
import gozerlib.users as u
if not u.users:
u.users_boot()
self.users = u.users
else:
self.users = u.users
def loadplugs(self, dirlist):
self.plugs.loadall(dirlist)
return self.plugs
def start(self):
while 1:
sys.stdout.write("> ")
try:
input = sys.stdin.readline()
except KeyboardInterrupt:
print "\nbye!"
os._exit(0)
if input:
event = EventBase()
event.auth = getpass.getuser()
event.userhost = event.auth
event.txt = input
event.usercmnd = input.split()[0]
event.makeargs()
try:
result = self.plugs.dispatch(self, event)
except NoSuchCommand:
print "no such command: %s" % event.usercmnd
@eventlocked
def doevent(self, event):
""" dispatch an event. """
self.curevent = event
go = False
if self.cfg:
cc = self.cfg.cc or "!"
else:
cc = "!"
logging.warn("cc for %s is %s" % (event.title or event.channel, cc))
if event.txt and event.txt[0] in cc:
event.txt = event.txt[1:]
if event.txt:
event.usercmnd = event.txt.split()[0]
else:
event.usercmnd = None
event.makeargs()
go = True
starttime = time.time()
e = cpy(event)
if event.isremote:
logging.debug('doing REMOTE callback')
gn_callbacks.check(self, e)
else:
callbacks.check(self, e)
if event.isremote and not event.remotecmnd:
logging.debug("event is remote but not command .. not dispatching")
return
try:
if go or event.type in ['web', 'xmpp', 'irc']:
result = self.plugs.dispatch(self, event)
else:
result = []
except NoSuchCommand:
event.reply("no such command: %s" % event.usercmnd)
result = []
if event.chan:
if event.chan.data.lastedited > starttime:
event.chan.save()
return result
def ownercheck(self, userhost):
""" check if provided userhost belongs to an owner. """
if 'owner' in self:
if userhost in self.owner:
return True
return False
def _raw(self, txt):
""" override this. """
sys.stdout.write(u"> %s\n" % unicode(txt))
def say(self, channel, txt, result=[], event=None, *args, **kwargs):
""" override this. """
print u"> " + txt + u', '.join(result)
def outmonitor(self, origin, channel, txt, event=None):
""" create an OUTPUT event with provided txt and send it to callbacks. """
e = EventBase()
if event:
e.copyin(event)
e.origin = origin
e.ruserhost = event.userhost
e.userhost = self.name
e.channel = channel
e.txt = txt
e.cbtype = 'OUTPUT'
callbacks.check(self, e)
def docmnd(self, origin, channel, txt, event=None):
""" do a command. """
e = EventBase()
if event:
e.copyin(event)
e.bot = self
e.origin = origin
e.ruserhost = origin
e.auth = origin
e.userhost = origin
e.channel = channel
e.txt = txt
e.nick = e.userhost.split('@')[0]
e.usercmnd = e.txt.split()[0]
e.cbtype = 'DOCMND'
e.makeargs()
if self.plugs:
result = self.plugs.dispatch(self, e)
logging.info("bot - got result - %s" % result)
return result
else:
raise PlugsNotConnected()
def less(self, who, what, nr=365):
""" split up in parts of <nr> chars overflowing on word boundaries. """
what = what.strip()
txtlist = splittxt(what, nr)
size = 0
# send first block
res = txtlist[0]
# see if we need to store output in less cache
result = ""
if len(txtlist) > 2:
logging.warn("addding %s lines to %s outputcache" % (len(txtlist), who))
self.outcache.add(who, txtlist[1:])
size = len(txtlist) - 2
result = txtlist[1:2][0]
if size:
result += " (+%s)" % size
else:
if len(txtlist) == 2:
result = txtlist[1]
return [res, result]
def join(self, *args, **kwargs):
pass
def part(self, *args, **kwargs):
pass
def action(self, *args, **kwargs):
pass
def reconnect(self, *args, **kwargs):
pass
def donick(self, *args, **kwargs):
pass
def shutdown(self, *args, **kwargs):
pass
def quit(self, *args, **kwargs):
pass
def connect(self, *args, **kwargs):
pass
def names(self, *args, **kwargs):
pass
def save(self, *args, **kwargs):
if self.state:
self.state.save()
| Python |
# gozerlib package
#
#
""" gozerlib core package. """
__version__ = "0.2.1"
__all__ = ['persistconfig', 'rest', 'jsbimport', 'admin', 'boot', 'botbase', 'callbacks', 'channelbase', 'commands', 'config', 'contrib', 'datadir', 'eggs', 'errors', 'eventbase', 'examples', 'fleet', 'gae', 'gozernet', 'less', 'monitor', 'outputcache', 'periodical', 'persist', 'persiststate', 'plugins', 'plugs', 'runner', 'socket', 'tasks', 'threadloop', 'threads', 'users', 'utils']
| Python |
# gozerlib/less.py
#
#
""" maintain bot output cache. """
# gozerlib imports
from utils.limlist import Limlist
class Less(object):

    """
    output cache .. caches upto <nr> items of txt lines per nick.

    :param nr: size of backlog
    :type nr: integer
    """

    def __init__(self, nr):
        self.data = {}     # nick -> limited list of cached txt lists
        self.index = {}    # nick -> index of the next 'more' line
        self.nr = nr

    def add(self, nick, listoftxt):
        """
        add listoftxt to nick's output .. reset the index used by the
        more commands.

        :param nick: nick to add txt to cache for
        :type nick: string
        :param listoftxt: list of txt to cache
        :type listoftxt: list
        """
        # see if we already have cached output .. if not create limited list
        # (FIX: "in" instead of py2-only dict.has_key)
        if nick not in self.data:
            self.data[nick] = Limlist(self.nr)
        # newest output goes in front
        self.data[nick].insert(0, listoftxt)
        self.index[nick] = 1

    def get(self, nick, index1, index2):
        """
        return less entry .. entry is self.data[nick][index1][index2].

        :param nick: nick to get data for
        :type nick: string
        :param index1: number of txtlines back
        :type index1: integer
        :param index2: index into the txtlines
        :type index2: integer
        :rtype: string or None when not cached
        """
        try:
            return self.data[nick][index1][index2]
        except (KeyError, IndexError):
            return None

    def more(self, nick, index1):
        """
        return more entry pointed to by index .. increase index.

        :param nick: nick to fetch data for
        :type nick: string
        :param index1: index into cache data
        :type index1: integer
        :rtype: tuple .. (txt, number of lines left)
        """
        try:
            nr = self.index[nick]
        except KeyError:
            nr = 1
        try:
            txt = self.data[nick][index1][nr]
            size = len(self.data[nick][index1]) - nr
            self.index[nick] = nr + 1
        except (KeyError, IndexError):
            txt = None
            size = 0
        return (txt, size - 1)

    def size(self, nick):
        """
        return sizes of cached output.

        :param nick: nick to get cache sizes for
        :type nick: string
        :rtype: list .. list of sizes
        """
        if nick not in self.data:
            return []
        return [len(item) for item in self.data[nick]]
| Python |
# gozerlib/examples.py
#
#
""" examples is a dict of example objects. """
## basic imports
import re
class Example(object):

    """
    a single example: a description plus the example text itself.

    :param descr: description of the example
    :type descr: string
    :param ex: the example
    :type ex: string
    """

    def __init__(self, descr, ex):
        self.descr = descr      # human readable description
        self.example = ex       # the example text
class Examples(dict):

    """ examples holds all the examples, keyed by lowercased name. """

    def add(self, name, descr, ex):
        """
        add description and example under the lowercased name.

        :param name: name of the example
        :type name: string
        :param descr: description of the example
        :type descr: string
        :param ex: the example
        :type ex: string
        """
        self[name.lower()] = Example(descr, ex)

    def size(self):
        """
        return size of examples dict.

        :rtype: integer
        """
        # FIX: len(self) instead of len(self.keys()) .. no key list copy
        return len(self)

    def getexamples(self):
        """
        get all individual examples in one flat list.

        :rtype: list
        """
        result = []
        for item in self.values():
            ex = item.example.lower()
            # examples look like "1) first 2) second" .. split on numbering
            # (FIX: raw string so \d is a proper regex escape)
            for part in re.split(r'\d\)', ex):
                part = part.strip()
                if part:
                    result.append(part)
        return result
# main examples object .. module-level instance plugins add their examples to
examples = Examples()
| Python |
# gozerlib/callbacks.py
#
#
"""
bot callbacks .. callbacks occure on registered events. a precondition
function can optionaly be provided to see if the callback should fire.
"""
## gozerlib imports
from threads import getname
from utils.exception import handle_exception
from utils.trace import calledfrom, whichplugin
from utils.dol import Dol
from plugins import plugs
## basic imports
import sys
import copy
import thread
import logging
## classes
class Callback(object):

    """
    class representing a callback.

    :param func: function to execute
    :param prereq: prerequisite function
    :param plugname: plugin to register this callback with
    :param kwargs: dict to pass on to the callback
    :param threaded: whether the callback should be executed in its own thread
    :param speed: determines which runnerspool to run this callback on
    """

    def __init__(self, func, prereq, plugname, kwargs, threaded=False, \
                 speed=5):
        self.func = func            # the callback function
        self.prereq = prereq        # pre condition function
        self.plugname = plugname    # plugin name
        self.kwargs = kwargs        # kwargs to pass on to function
        # cleanup: deepcopy of the bool/int arguments was pointless ..
        # plain assignment behaves identically for immutable values
        self.threaded = threaded    # run callback in thread
        self.speed = speed          # speed to execute callback with
        self.activate = False       # toggled via Callbacks.activate/disable
class Callbacks(object):
    """
    dict of lists containing callbacks. the Callbacks object takes care of
    dispatching the callbacks based on incoming events. see Callbacks.check()
    """
    def __init__(self):
        # self.cbs holds the dict of lists .. keys are event types (strings)
        self.cbs = Dol()
    def size(self):
        """ return number of registered event types. """
        return len(self.cbs)
    def add(self, what, func, prereq=None, kwargs=None, threaded=False, nr=False, speed=5):
        """
        add a callback.
        :param what: event to fire callback for
        :param func: function to execute
        :param prereq: prerequisite function
        :param kwargs: dict to pass on to the callback
        :param threaded: whether the callback should be executed in its own thread
        :param nr: optional index to insert the callback at (False appends)
        :param speed: determines which runnerspool to run this callback on
        """
        what = what.upper()
        # get the plugin this callback was registered from
        #plugname = calledfrom(sys._getframe(0))
        plugname = whichplugin()
        # see if kwargs is set if not init to {}
        if not kwargs:
            kwargs = {}
        # add callback to the dict of lists
        if nr != False:
            self.cbs.insert(nr, what, Callback(func, prereq, plugname, kwargs, threaded, speed))
        else:
            self.cbs.add(what, Callback(func, prereq, plugname, kwargs, threaded, speed))
        logging.debug('callbacks - added %s (%s)' % (what, plugname))
    def unload(self, plugname):
        """ unload all callbacks registered in a plugin. """
        unload = []
        # look for all callbacks in a plugin
        for name, cblist in self.cbs.iteritems():
            index = 0
            for item in cblist:
                if item.plugname == plugname:
                    unload.append((name, index))
                index += 1
        # delete callbacks .. reversed so earlier indexes stay valid
        for callback in unload[::-1]:
            self.cbs.delete(callback[0], callback[1])
            logging.debug('callbacks - unloaded %s' % callback[0])
    def disable(self, plugname):
        """ disable all callbacks registered in a plugin. """
        # NOTE(review): unload and index are leftovers .. they are never used
        unload = []
        # look for all callbacks in a plugin
        for name, cblist in self.cbs.iteritems():
            index = 0
            for item in cblist:
                if item.plugname == plugname:
                    item.activate = False
    def activate(self, plugname):
        """ activate all callbacks registered in a plugin. """
        # NOTE(review): unload and index are leftovers .. they are never used
        unload = []
        # look for all callbacks in a plugin
        for name, cblist in self.cbs.iteritems():
            index = 0
            for item in cblist:
                if item.plugname == plugname:
                    item.activate = True
    def whereis(self, cmnd):
        """ return list of plugins that registered callbacks for CMND. """
        result = []
        cmnd = cmnd.upper()
        # locate callbacks for CMND
        for c, callback in self.cbs.iteritems():
            if c == cmnd:
                for item in callback:
                    if not item.plugname in result:
                        result.append(item.plugname)
        return result
    def list(self):
        """ return names of all callback functions. """
        result = []
        # loop over callbacks and collect callback functions
        for cmnd, callbacks in self.cbs.iteritems():
            for cb in callbacks:
                result.append(getname(cb.func))
        return result
    def check(self, bot, event):
        """
        check for callbacks to be fired.
        :param bot: bot where event originates from
        :param event: event that needs to be checked
        .. literalinclude:: ../../gozerlib/callbacks.py
            :pyobject: Callbacks.check
        """
        # check for "ALL" callbacks .. these fire on every event
        if self.cbs.has_key('ALL'):
            for cb in self.cbs['ALL']:
                self.callback(cb, bot, event)
        # NOTE(review): 'type' shadows the builtin here
        type = event.cbtype or event.cmnd
        self.reloadcheck(event)
        #logging.info("callbacks - %s - %s" % (event.userhost, type))
        # check for CMND callbacks
        if self.cbs.has_key(type):
            for cb in self.cbs[type]:
                self.callback(cb, bot, event)
def callback(self, cb, bot, event):
"""
do the actual callback with provided bot and event as arguments.
:param cb: the callback to fire
:param bot: bot to call the callback on
:param event: the event that triggered the callback
.. literalinclude:: ../../gozerlib/callbacks.py
:pyobject: Callbacks.callback
"""
try:
# see if the callback pre requirement succeeds
if cb.prereq:
logging.debug('callbacks - excecuting in loop %s' % str(cb.prereq))
if not cb.prereq(bot, event):
return
# check if callback function is there
if not cb.func:
return
logging.warning('callbacks - excecuting callback %s' % str(cb.func))
event.iscallback = True
# launch the callback
cb.func(bot, event)
return True
except Exception, ex:
handle_exception()
def reloadcheck(self, event):
#logging.debug("callbacks - checking for reload of %s (%s)" % (event.cmnd, event.userhost))
plugloaded = []
target = event.cbtype or event.cmnd
try:
from boot import getcallbacktable
plugins = getcallbacktable()[target]
except KeyError:
#logging.warn("can't find plugin to reload for %s" % event.cmnd)
return
logging.info('callback: %s plugin: %s' % (target, plugins))
for name in plugins:
if name in plugs:
continue
else:
logging.debug("on demand reloading of %s" % name)
try:
plugloaded.append(plugs.reload(name))
except Exception, ex:
handle_exception()
return plugloaded
## define

# main callbacks object .. events get dispatched through this one
callbacks = Callbacks()
# second, independent callbacks object -- NOTE(review): what "gn" stands for
# is not visible from this file; presumably a separate bot type, confirm
gn_callbacks = Callbacks()
| Python |
# gozerlib/admin.py
#
#
""" admin related data and functions. """
## gozerlib imports
from gozerlib.persist import Persist
## defines

# packages that are scanned for plugins
plugin_packages = ['gozerlib.plugs', 'commonplugs', 'myplugs', 'waveplugs', 'socketplugs']
# plugins that are always loaded
default_plugins = ['gozerlib.plugs.admin', ]
# flag, starts out False -- presumably flipped once plugins are loaded; confirm against loader
loaded = False
# command table .. initialised to None here, populated elsewhere -- TODO confirm
cmndtable = None
# plugin list .. initialised to None here, populated elsewhere -- TODO confirm
pluginlist = None
# callback table .. initialised to None here, populated elsewhere -- TODO confirm
callbacktable = None
| Python |
'''
Created on 10/01/2011
@author: jguerrer
'''
class Vertex:
    """
    A DCEL vertex: an identifier, a coordinate tuple and a pointer to one
    half-edge originating at this vertex.
    """

    def __init__(self, id, coords, incidentEdge):
        """
        Constructor.

        :param id: vertex identifier (string)
        :param coords: coordinate tuple of the vertex
        :param incidentEdge: one half-edge originating here (may be None
                             and assigned later, once edges are built)
        """
        #print "VErtice: " + id + " " + str(coords)
        self.id = id
        self.coords = coords
        self.incidentEdge = incidentEdge

    def getId(self):
        """ return the vertex identifier. """
        return self.id

    def getCoords(self):
        """ return the coordinate tuple. """
        return self.coords

    def getIncidentEdge(self):
        """ return the incident half-edge (or None). """
        return self.incidentEdge

    def toString(self):
        """ return a printable representation; fields that are None are skipped. """
        # fixed: the original used "not x == None" -- identity test is the idiom
        data = ''
        if self.id is not None:
            data += "v" + self.id
        if self.coords is not None:
            data += "\t\tCoords: " + str(self.coords)
        if self.incidentEdge is not None:
            data += "\t\tIEdge: " + self.incidentEdge.id
        return data
"""
GM1050 Advanced Principles and Algorithm of GIS
2010/2011
- Mini Project - The simplification of a map
A shapefile of departments in western Europe is given.
By the Douglas-Peuker algorithm this shapefile is simplified.
Where several topological relations should remain
@author: Bas, Josafat and Elise
"""
#-- general import
import math
#from Vertex import Vertex
from dcel1 import DCEL
#-- OGR
try:
from osgeo import ogr
except ImportError:
import ogr
#-- Shapely
from shapely.wkb import loads as wkbloads
from shapely.wkt import loads as wktloads
from shapely.geometry import LineString
def main():
#1) Load all polygons
#shapefile = 'final.shp';
#shapefile = 'departementsmall.shp';
shapefile = 'europe_nuts.shp';
print "reading: " + shapefile
shapelyPolygons = read_shapefile(shapefile)
print "Number of Features: " + str(len(shapelyPolygons))
#2) Extract edges and nodes - (not line segments, but a graph).
#3) Structure for each polygon which edges is bounding him
createTopology(shapelyPolygons)
#construct_graph(polygons)
#total_edges = edge_counter
#print total_edges
#edges = graph[1]
#4) Douglas - Peuker algorithm for each line
#new_edges = {}
#print "Give tolerance (e.g.: 0.2)"
#tolerance = raw_input('--> ')
#for i in range(0,total_edges):
# new_line = simplify_points(list(edges[i].coords), tolerance)
# new_edges[i] = LineString(new_line)
#5) Build each polygon again, edge by edge
#1) Load all polygons
def read_shapefile(infile):
    """
    Read a shapefile, print diagnostics about it and return its features
    as Shapely geometries.

    Input:
        infile: shapefile to read
    Output:
        list of Shapely geometries, one per feature (the diagnostics are
        printed in the console)
    """
    print "Reading", infile
    ge = ogr.Open(infile)
    layer = ge.GetLayer(0) #-- a simple shapefile always has 1 layer only
    ld = layer.GetLayerDefn()
    #-- Find and print the geometry type of the shapefile
    print ld.GetGeomType()
    s = "Geometry type: "
    if ld.GetGeomType() == ogr.wkbPoint:
        s += "Point"
    elif ld.GetGeomType() == ogr.wkbLineString:
        s += "LineString"
    elif ld.GetGeomType() == ogr.wkbPolygon:
        s += "Polygon"
    else:
        s += "Unknown"
    print s
    #-- Find and print the number of geometries
    noFeat = layer.GetFeatureCount()
    print "Number of features:", noFeat
    #-- Find and print the attributes
    print "Attributes:"
    for i in range(ld.GetFieldCount()):
        print "\t", ld.GetFieldDefn(i).GetNameRef()
    #-- convert each feature to a Shapely geometry through its WKT
    print "The features are:"
    lsFeatures = [] #-- create an empty list of Features (of geometries)
    wktFeatures = [] # NOTE(review): never filled nor returned
    for i in xrange(noFeat):
        #print "-- --"
        f = layer.GetFeature(i)
        wkb = f.GetGeometryRef().ExportToWkb() #-- Well-Known Binary -- NOTE(review): unused
        wkt = f.GetGeometryRef().ExportToWkt() #-- Well-Known Text
        #print str(i) + "\t" + wkt
        #wktFeatures.append({'geom': str(i) +"\t" + wkt})
        pshape = wktloads(wkt)
        #print str(i) + "\t" + pshape.to_wkt()
        lsFeatures.append(pshape)
    return lsFeatures
#2) Extract edges and nodes - (not line segments, but a graph).
def createTopology(shapelyPolygons):
#first we require to store information.
#DCEL looks like the most convinient thing, but instead of storing graphs and vertexes, store nodes.
# print "testing"
dcel = DCEL()
dcel.convert(shapelyPolygons)
print "----- End of conversion"
#dcel.printData();
dcel.printLineSegments();
def construct_graph(polygons):
    """
    Inspect every ordered pair of polygons and collect their shared
    boundaries into several parallel dictionaries keyed by an edge counter.

    :param polygons: list of Shapely polygons forming a planar partition

    NOTE(review): the final "return graph" is commented out, so the
    collected dictionaries are discarded; as written this function only
    prints diagnostics and sets the global edge_counter.
    """
    #-- several dictionaries to keep the graph organized
    # each line segment has an starting and end node
    nodes = {}
    # an linestring or linearring, describing (a part of) the boundary
    edge = {}
    # each line has one or two neighbours
    neighbour = {}
    # a check to see if the line is the boundary of an island
    island = {}
    # a counter for each edge
    global edge_counter
    edge_counter = 1
    # for each polygon pair (both orders are visited)
    for i in range(len(polygons)):
        print "------------------------------------"
        for j in range(len(polygons)):
            if i != j:
                print str(i) + " " + str(j)
                print polygons[i]
                print polygons[j]
                intersect = polygons[i].intersection(polygons[j])
                test1 = polygons[i].intersection(intersect)
                test2 = polygons[j].intersection(intersect)
                print "test1: " , test1;
                print "test2: " , test2;
                #print "This polygon produces " + str(len(intersect)) + " SEGMENTS"
                # if they do not intersect, the result is GEOMETRYCOLLECTION EMPTY
                print "INTERSECT : " , intersect
                try:
                    # NOTE(review): "type" shadows the builtin of the same name
                    type = intersect.geom_type
                    if type != "GeometryCollection":
                        if type != "MultiLineString":
                            print type
                            print str(i) + " " + str(j)
                    # the common case of two neighbouring polygons result in an
                    # linearstring of linesegments
                    if type == 'GeometryCollection':
                        pass;
                    # NOTE(review): the parenthesised or-expression always yields
                    # its first (truthy) tuple, so only (first, last) is ever
                    # tested against nodes -- the reversed orientation is not
                    if type == 'LineString' and (((intersect.coords[0], intersect.coords[-1]) or \
                        (intersect.coords[-1], intersect.coords[0])) not in nodes):
                        #print "LineString"
                        nodes[edge_counter] = (intersect.coords[0], intersect.coords[-1])
                        edge[edge_counter] = intersect
                        neighbour[edge_counter] = (i, j)
                        island[edge_counter] = False
                        edge_counter += 1
                    # a interseection can have several loose attachments to another polygon
                    # this intersection results in an multi-linestring
                    elif type == 'MultiLineString':
                        pass;
                        #print "MultiLineString";
                        # new_intersect = combine(intersect)
                        # for k in range(0,len(intersect)-1):
                        #    if (((intersect[k].coords[0],intersect[k].coords[-1]) or \
                        #       (intersect[k].coords[-1],intersect[k].coords[0])) not in nodes):
                        #        nodes[edge_counter] = (intersect[k].coords[0],intersect[k].coords[-1])
                        #        edge[edge_counter] = intersect[k]
                        #        neighbour[edge_counter] = (i,j)
                        #        island[edge_counter] = False
                        #        edge_counter += 1
                    # the intersection of an island with its surrounding neighbour results
                    # in an linear ring
                    elif type == 'LinearRing':
                        #print "LinearRing"
                        # each island gets 3 linestrings
                        step = len(intersect.coords) / 3
                        stepping = (0, step, 2 * step, 0)
                        cor = list(intersect.coords)
                        # NOTE(review): range(0, 2) only creates two of the
                        # three announced linestrings -- confirm intent
                        for k in range(0, 2):
                            nodes[edge_counter] = (intersect.coords[stepping[k]], \
                                intersect.coords[stepping[k + 1]])
                            edge[edge_counter] = LineString(cor[stepping[k]:stepping[k + 1]])
                            neighbour[edge_counter] = (i, j)
                            island[edge_counter] = True
                            edge_counter += 1
                    elif type == 'MultiPolygon':
                        #print "Multipolygon"
                        print 'Not a planar partition, polygon ', i, ' & ', j, ' overlap!'
                except ValueError:
                    match = False
    #return graph
def combine(MultiLineString):
    '''
    An algorithm to combine line-segments into connected lines.

    Input: Shapely MultiLineString (iterable of LineStrings)
    Output: list of Shapely LineStrings, one per connected chain

    NOTE(review): the original implementation crashed (iterated an int,
    removed a chain that was never an element of the working list, and
    re-appended results into its own work queue) and returned None; it is
    rewritten here to implement the documented contract.
    '''
    # work on plain coordinate lists
    segments = [list(seg.coords) for seg in MultiLineString]
    Multistring = []
    while segments:
        # seed a new chain, then greedily attach segments that share an
        # endpoint with either end of the chain (domino-style)
        chain = [segments.pop(0)]
        changed = True
        while segments and changed:
            changed = False
            for seg in segments:
                if chain[-1][-1] == seg[0]:
                    # candidate continues the chain at its tail
                    chain.append(seg)
                    segments.remove(seg)
                    changed = True
                    break
                if chain[0][0] == seg[-1]:
                    # candidate continues the chain at its head
                    chain.insert(0, seg)
                    segments.remove(seg)
                    changed = True
                    break
        # flatten the chained segments into one coordinate list,
        # dropping the duplicated joint coordinates
        coords = list(chain[0])
        for seg in chain[1:]:
            coords.extend(seg[1:])
        Multistring.append(LineString(coords))
    return Multistring
def domino(list, rest):
    """
    Greedily extend *list* (a chain of coordinate-lists) with segments
    from *rest* that share an endpoint with the chain, domino-style.

    Matching segments are moved from *rest* into the chain; the (possibly
    grown) chain is returned.  A segment is attached at the tail when its
    first coordinate equals the chain's last coordinate, and at the head
    when its last coordinate equals the chain's first coordinate.

    NOTE(review): the original compared list[1][1] / rest[i][1], which
    raised IndexError on a one-segment chain and only worked for two-point
    segments; the endpoints are now addressed with [-1].
    """
    while len(rest) != 0:
        change = False
        for i in range(len(rest)):
            if list[-1][-1] == rest[i][0]:
                # candidate continues the chain at its tail
                list.append(rest[i])
                rest.remove(rest[i])
                change = True
                break
            if list[0][0] == rest[i][-1]:
                # candidate continues the chain at its head
                list.insert(0, rest[i])
                rest.remove(rest[i])
                change = True
                break
        if not change:
            # no segment connects to either end: the chain is complete
            break
    return list
#3) Structure for each polygon which edges is bounding him
#4) Douglas - Peuker algorithm for each line
def simplify_points (pts, tolerance):
    '''
    Douglas-Peucker line simplification (iterative, stack based): keep
    every point whose distance to the local anchor-floater segment exceeds
    *tolerance*, and return the survivors in their original order.

    this code was written by Schuyler Erle <schuyler@nocat.net>
    '''
    kept = set()
    pending = [(0, len(pts) - 1)]
    while pending:
        lo, hi = pending.pop()
        # unit vector of the segment pts[lo] -> pts[hi]
        if pts[hi] == pts[lo]:
            ux = uy = span = 0.0
        else:
            ux = float(pts[hi][0] - pts[lo][0])
            uy = float(pts[hi][1] - pts[lo][1])
            span = math.sqrt(ux ** 2 + uy ** 2)
            ux /= span
            uy /= span
        # find the interior point farthest from the segment
        far_dist, far_idx = 0.0, lo + 1
        for i in range(lo + 1, hi):
            dx = float(pts[i][0] - pts[lo][0])
            dy = float(pts[i][1] - pts[lo][1])
            span = math.sqrt(dx ** 2 + dy ** 2)
            proj = dx * ux + dy * uy
            if proj < 0.0:
                # point lies before the anchor: use distance to the anchor
                d = span
            else:
                dx = float(pts[i][0] - pts[hi][0])
                dy = float(pts[i][1] - pts[hi][1])
                span = math.sqrt(dx ** 2 + dy ** 2)
                proj = dx * (-ux) + dy * (-uy)
                if proj < 0.0:
                    # point lies past the floater: use distance to the floater
                    d = span
                else:
                    # perpendicular distance to the line (pythagorean theorem)
                    d = math.sqrt(abs(span ** 2 - proj ** 2))
            if far_dist < d:
                far_dist, far_idx = d, i
        if far_dist <= tolerance:
            # the segment approximates this stretch well enough: keep only
            # its two endpoints
            kept.add(lo)
            kept.add(hi)
        else:
            # split at the farthest point and examine both halves
            pending.append((lo, far_idx))
            pending.append((far_idx, hi))
    return [pts[i] for i in sorted(kept)]
#5) Build each polygon again, edge by edge
# start of the program
if __name__ == "__main__":
    # run the pipeline only when executed as a script, not when imported
    print "Running"
    main()
    #raw_input('press enter')
| Python |
"""
GM1050 Advanced Principles and Algorithm of GIS
2010/2011
- Mini Project - The simplification of a map
A shapefile of departments in western Europe is given.
By the Douglas-Peuker algorithm this shapefile is simplified.
Where several topological relations should remain
@author: Bas, Josafat and Elise
"""
#-- general import
import math
import os
#from Vertex import Vertex
from dcel1 import DCEL
#from Douglas_Peucker import algorithm
from Douglas_Peucker2 import simplify_points
#-- OGR
try:
from osgeo import ogr
except ImportError:
import ogr
#-- Shapely
from shapely.wkb import loads as wkbloads
from shapely.wkt import loads as wktloads
from shapely.geometry import LineString
def main():
#1) Load all polygons
#shapefile = 'final.shp';
#shapefile = 'departementsmall.shp';
#shapefile = 'europe_nuts.shp';
shapefile = 'europe_nuts_mini2.shp';
print "reading: " + shapefile
shapelyPolygons = read_shapefile(shapefile)
print "Number of Features: " + str(len(shapelyPolygons))
#2) Extract edges and nodes - (not line segments, but a graph).
#3) Structure for each polygon which edges is bounding him
coords = createTopology(shapelyPolygons)
text = createWkt(coords)
outfile = raw_input('give filename (+.shp) of output file ... ')
write_shapefile(text, outfile)
#construct_graph(polygons)
#total_edges = edge_counter
#print total_edges
#edges = graph[1]
#4) Douglas - Peuker algorithm for each line
#new_edges = {}
#print "Give tolerance (e.g.: 0.2)"
#tolerance = raw_input('--> ')
#for i in range(0,total_edges):
# new_line = simplify_points(list(edges[i].coords), tolerance)
# new_edges[i] = LineString(new_line)
#5) Build each polygon again, edge by edge
#1) Load all polygons
def read_shapefile(infile):
    """
    Read a shapefile, print diagnostics about it and return its features
    as Shapely geometries.

    Input:
        infile: shapefile to read
    Output:
        list of Shapely geometries, one per feature (the diagnostics are
        printed in the console)
    """
    print "Reading", infile
    ge = ogr.Open(infile)
    layer = ge.GetLayer(0) #-- a simple shapefile always has 1 layer only
    ld = layer.GetLayerDefn()
    #-- Find and print the geometry type of the shapefile
    print ld.GetGeomType()
    s = "Geometry type: "
    if ld.GetGeomType() == ogr.wkbPoint:
        s += "Point"
    elif ld.GetGeomType() == ogr.wkbLineString:
        s += "LineString"
    elif ld.GetGeomType() == ogr.wkbPolygon:
        s += "Polygon"
    else:
        s += "Unknown"
    print s
    #-- Find and print the number of geometries
    noFeat = layer.GetFeatureCount()
    print "Number of features:", noFeat
    #-- Find and print the attributes
    print "Attributes:"
    for i in range(ld.GetFieldCount()):
        print "\t", ld.GetFieldDefn(i).GetNameRef()
    #-- convert each feature to a Shapely geometry through its WKT
    print "The features are:"
    lsFeatures = [] #-- create an empty list of Features (of geometries)
    wktFeatures = [] # NOTE(review): never filled nor returned
    for i in xrange(noFeat):
        #print "-- --"
        f = layer.GetFeature(i)
        wkb = f.GetGeometryRef().ExportToWkb() #-- Well-Known Binary -- NOTE(review): unused
        wkt = f.GetGeometryRef().ExportToWkt() #-- Well-Known Text
        #print str(i) + "\t" + wkt
        #wktFeatures.append({'geom': str(i) +"\t" + wkt})
        pshape = wktloads(wkt)
        #print str(i) + "\t" + pshape.to_wkt()
        lsFeatures.append(pshape)
    return lsFeatures
#2) Extract edges and nodes - (not line segments, but a graph).
def createTopology(shapelyPolygons):
    """
    Build a DCEL from the polygons, simplify every shared line segment
    with Douglas-Peucker and return the simplified polylines.

    :param shapelyPolygons: list of Shapely polygons
    :return: list of simplified polylines (lists of coordinate tuples)
    """
    #first we require to store information.
    #DCEL looks like the most convinient thing, but instead of storing graphs and vertexes, store nodes.
    # print "testing"
    dcel = DCEL()
    dcel.convert(shapelyPolygons)
    print "----- End of conversion"
    dcel.printData();
    lineSegments = dcel.findLineSegments();
    lines = []
    # NOTE(review): the prompt says "percentage" but the value is passed to
    # simplify_points as the distance tolerance -- confirm the intended units
    tolerance = float(raw_input('Please give percentage to keep ... [0...1]'))
    #percentage = float(raw_input('Give the percentage of points to be kept [0...1] ...'))
    for lineSegmentKey in iter(lineSegments):
        lineSegment = lineSegments[lineSegmentKey]
        #print "Segment: " + lineSegmentKey
        # simplify each segment and write it back into the DCEL
        oldline = dcel.convertLine2Tuple(lineSegment);
        print oldline
        newline = simplify_points(oldline, tolerance);
        print newline
        dcel.replaceLine(oldline, newline);############check for errors
        lines.append(newline)
        #dcel.printData();
    return lines
def createWkt(lines):
    """
    Convert polylines (lists of coordinate tuples) into WKT LINESTRING
    strings, one per polyline.

    :param lines: list of polylines
    :return: list of WKT strings
    """
    wkt = []
    for polyline in lines:
        # join the coordinates as "x y , x y , ..." then trim the
        # trailing comma-space
        parts = ''
        for coord in polyline:
            parts = parts + str(coord[0]) + ' ' + str(coord[1]) + ' , '
        wkt.append('LINESTRING (' + parts[0:-2] + ')')
    return wkt
def write_shapefile(lsFeat, outfile):
    """
    Create a new shapefile and writes it to the disk.

    Input:
        lsFeat: list of WKT strings, one LINESTRING per feature
        outfile: path for the new file to create
    Output:
        - (the shapefile is written to the disk)
    """
    driver = ogr.GetDriverByName('ESRI Shapefile') #-- we create a new SHP file
    if os.path.exists(outfile):
        driver.DeleteDataSource(outfile) #-- if it exists, overwrite it
    ds = driver.CreateDataSource(outfile)
    layer = ds.CreateLayer(outfile, geom_type = ogr.wkbLineString) #-- a SHP with linestrings
    #-- create one (empty) attribute field -- NOTE(review): FieldDefn() gets
    #-- no name/type here; confirm this is intentional
    fd = ogr.FieldDefn()
    layer.CreateField(fd)
    for i in lsFeat:
        # build each feature from its WKT and append it to the layer
        f = ogr.Feature(feature_def = layer.GetLayerDefn())
        p = ogr.CreateGeometryFromWkt(i)
        f.SetGeometry(p)
        layer.CreateFeature(f)
        f.Destroy()
    ds.Destroy()
    print "\nShapefile saved as:", outfile
def construct_graph(polygons):
    """
    Inspect every ordered pair of polygons and collect their shared
    boundaries into several parallel dictionaries keyed by an edge counter.

    :param polygons: list of Shapely polygons forming a planar partition

    NOTE(review): the final "return graph" is commented out, so the
    collected dictionaries are discarded; as written this function only
    prints diagnostics and sets the global edge_counter.
    """
    #-- several dictionaries to keep the graph organized
    # each line segment has an starting and end node
    nodes = {}
    # an linestring or linearring, describing (a part of) the boundary
    edge = {}
    # each line has one or two neighbours
    neighbour = {}
    # a check to see if the line is the boundary of an island
    island = {}
    # a counter for each edge
    global edge_counter
    edge_counter = 1
    # for each polygon pair (both orders are visited)
    for i in range(len(polygons)):
        print "------------------------------------"
        for j in range(len(polygons)):
            if i != j:
                print str(i) + " " + str(j)
                print polygons[i]
                print polygons[j]
                intersect = polygons[i].intersection(polygons[j])
                test1 = polygons[i].intersection(intersect)
                test2 = polygons[j].intersection(intersect)
                print "test1: " , test1;
                print "test2: " , test2;
                #print "This polygon produces " + str(len(intersect)) + " SEGMENTS"
                # if they do not intersect, the result is GEOMETRYCOLLECTION EMPTY
                print "INTERSECT : " , intersect
                try:
                    # NOTE(review): "type" shadows the builtin of the same name
                    type = intersect.geom_type
                    if type != "GeometryCollection":
                        if type != "MultiLineString":
                            print type
                            print str(i) + " " + str(j)
                    # the common case of two neighbouring polygons result in an
                    # linearstring of linesegments
                    if type == 'GeometryCollection':
                        pass;
                    # NOTE(review): the parenthesised or-expression always yields
                    # its first (truthy) tuple, so only (first, last) is ever
                    # tested against nodes -- the reversed orientation is not
                    if type == 'LineString' and (((intersect.coords[0], intersect.coords[-1]) or \
                        (intersect.coords[-1], intersect.coords[0])) not in nodes):
                        #print "LineString"
                        nodes[edge_counter] = (intersect.coords[0], intersect.coords[-1])
                        edge[edge_counter] = intersect
                        neighbour[edge_counter] = (i, j)
                        island[edge_counter] = False
                        edge_counter += 1
                    # a interseection can have several loose attachments to another polygon
                    # this intersection results in an multi-linestring
                    elif type == 'MultiLineString':
                        pass;
                        #print "MultiLineString";
                        # new_intersect = combine(intersect)
                        # for k in range(0,len(intersect)-1):
                        #    if (((intersect[k].coords[0],intersect[k].coords[-1]) or \
                        #       (intersect[k].coords[-1],intersect[k].coords[0])) not in nodes):
                        #        nodes[edge_counter] = (intersect[k].coords[0],intersect[k].coords[-1])
                        #        edge[edge_counter] = intersect[k]
                        #        neighbour[edge_counter] = (i,j)
                        #        island[edge_counter] = False
                        #        edge_counter += 1
                    # the intersection of an island with its surrounding neighbour results
                    # in an linear ring
                    elif type == 'LinearRing':
                        #print "LinearRing"
                        # each island gets 3 linestrings
                        step = len(intersect.coords) / 3
                        stepping = (0, step, 2 * step, 0)
                        cor = list(intersect.coords)
                        # NOTE(review): range(0, 2) only creates two of the
                        # three announced linestrings -- confirm intent
                        for k in range(0, 2):
                            nodes[edge_counter] = (intersect.coords[stepping[k]], \
                                intersect.coords[stepping[k + 1]])
                            edge[edge_counter] = LineString(cor[stepping[k]:stepping[k + 1]])
                            neighbour[edge_counter] = (i, j)
                            island[edge_counter] = True
                            edge_counter += 1
                    elif type == 'MultiPolygon':
                        #print "Multipolygon"
                        print 'Not a planar partition, polygon ', i, ' & ', j, ' overlap!'
                except ValueError:
                    match = False
    #return graph
def combine(MultiLineString):
    '''
    An algorithm to combine line-segments into connected lines.

    Input: Shapely MultiLineString (iterable of LineStrings)
    Output: list of Shapely LineStrings, one per connected chain

    NOTE(review): the original implementation crashed (iterated an int,
    removed a chain that was never an element of the working list, and
    re-appended results into its own work queue) and returned None; it is
    rewritten here to implement the documented contract.
    '''
    # work on plain coordinate lists
    segments = [list(seg.coords) for seg in MultiLineString]
    Multistring = []
    while segments:
        # seed a new chain, then greedily attach segments that share an
        # endpoint with either end of the chain (domino-style)
        chain = [segments.pop(0)]
        changed = True
        while segments and changed:
            changed = False
            for seg in segments:
                if chain[-1][-1] == seg[0]:
                    # candidate continues the chain at its tail
                    chain.append(seg)
                    segments.remove(seg)
                    changed = True
                    break
                if chain[0][0] == seg[-1]:
                    # candidate continues the chain at its head
                    chain.insert(0, seg)
                    segments.remove(seg)
                    changed = True
                    break
        # flatten the chained segments into one coordinate list,
        # dropping the duplicated joint coordinates
        coords = list(chain[0])
        for seg in chain[1:]:
            coords.extend(seg[1:])
        Multistring.append(LineString(coords))
    return Multistring
def domino(list, rest):
    """
    Greedily extend *list* (a chain of coordinate-lists) with segments
    from *rest* that share an endpoint with the chain, domino-style.

    Matching segments are moved from *rest* into the chain; the (possibly
    grown) chain is returned.  A segment is attached at the tail when its
    first coordinate equals the chain's last coordinate, and at the head
    when its last coordinate equals the chain's first coordinate.

    NOTE(review): the original compared list[1][1] / rest[i][1], which
    raised IndexError on a one-segment chain and only worked for two-point
    segments; the endpoints are now addressed with [-1].
    """
    while len(rest) != 0:
        change = False
        for i in range(len(rest)):
            if list[-1][-1] == rest[i][0]:
                # candidate continues the chain at its tail
                list.append(rest[i])
                rest.remove(rest[i])
                change = True
                break
            if list[0][0] == rest[i][-1]:
                # candidate continues the chain at its head
                list.insert(0, rest[i])
                rest.remove(rest[i])
                change = True
                break
        if not change:
            # no segment connects to either end: the chain is complete
            break
    return list
#3) Structure for each polygon which edges is bounding him
#4) Douglas - Peuker algorithm for each line
#5) Build each polygon again, edge by edge
# start of the program
if __name__ == "__main__":
    # run the pipeline only when executed as a script, not when imported
    print "Running"
    main()
    #raw_input('press enter')
| Python |
'''
Created on 05/01/2011
@author: Josafat Guerrero josafatisai@gmail.com
'''
import math
from Face import Face
from Vertex import Vertex
from HalfEdge import HalfEdge
from shapely.coords import CoordinateSequence
from spyderlib.widgets.editortools import EdgeLine
#-- OGR
try:
from osgeo import ogr
except ImportError:
import ogr
#-- Shapely
from shapely.wkt import loads
from shapely.geometry import LineString
class DCEL:
'''
classdocs
'''
    def convert(self, polygons):
        """
        Build the DCEL from a list of polygon records.

        :param polygons: iterable of records where p['geom'] is a Shapely
                         polygon (only the exterior ring is used) --
                         NOTE(review): the simplification scripts pass bare
                         Shapely polygons; confirm the expected record shape.

        Fills self.faces / self.vertices / self.halfEdges plus the lookup
        indexes, and creates the unbounded world face 'z'.
        """
        ##start storing the faces
        ## indexes to start enumerating everything
        #we require a world face
        outerFace = Face('z', None, None)
        self.faces['z'] = outerFace;
        lastVertex = None#used for computing edges
        currentVertex = None
        #Basically, all the polygons id start at 1,
        for p in polygons:
            # print "-------------------------------------------------------------"
            # print "Appending things at Face " + str(self.faceId)
            currentFace = Face(str(self.faceId), None, None)
            self.faces[str(self.faceId)] = currentFace;
            self.faceId += 1;#not gonna use this anymore until next face
            coordinates = list(p['geom'].exterior.coords)# is a coordinate sequence
            #print "Coordinates for " + currentFace.toString() + ":"
            #print coordinates# printing as a list
            #############################################
            ## checking the first vertex
            tmpVertexCoords = coordinates.pop(0)#TOOK OUT THE FIRST VERTEX
            if str(tmpVertexCoords) in self.verticesIdx: ## if the vertex exists, just retrieve info
                lastVertex = self.verticesIdx[str(tmpVertexCoords)]#retrieve the vertex
            else:
                ###print "ADD Vertex: " + str(self.vertexIndex)# the current vertex idx
                tmpVertex = Vertex(str(self.vertexIndex), tmpVertexCoords, None)# storing self.vertexIndex +=1;
                self.verticesIdx[str(tmpVertexCoords)] = tmpVertex
                self.vertices[str(self.vertexIndex)] = tmpVertex
                self.vertexIndex += 1;
                lastVertex = tmpVertex
            #now we start with the remaining objects
            ##
            edgesList = [];# this list is used to fill the remaining objects as next, prev, inner object, outerobject
            #edgesListTwin =[]
            ########################################## start processing the rest of the coordinates
            #note all coordinates are defined clockwise
            for i in xrange(len(coordinates)):
                coordsi = coordinates[i]# for each coordinate pair
                coordsKey = str(coordsi)
                # if the vertex already exists,
                if coordsKey in self.verticesIdx:
                    ###print "RR Repeated Vertex: " + self.verticesIdx[coordsKey].id + " " + coordsKey
                    tmpVertex = self.verticesIdx[coordsKey]
                    currentVertex = tmpVertex
                else:# add a new vertex to the list
                    ###print "ADD Vertex: " + str(self.vertexIndex)# the current vertex idx
                    tmpVertex = Vertex(str(self.vertexIndex), coordsi, None) # storing currentself.vertexIndex +=1;
                    self.verticesIdx[coordsKey] = tmpVertex
                    self.vertices[str(self.vertexIndex)] = tmpVertex #just copy the reference
                    self.vertexIndex += 1;
                    currentVertex = tmpVertex
                next = None
                prev = None
                #An edge can always be created as 2 vertices always exists
                edgeKey = lastVertex.getId() + "_" + currentVertex.getId();
                edgeKeyTwin = currentVertex.getId() + "_" + lastVertex.getId();
                #########################################################################################
                #if the edge doesnt exists, add it to the idx and also its twin
                # print "ckecking " + edgeKey
                if edgeKey not in self.halfEdgesIdx: ## edge v1v2 is different than v2v1
                    #if is not in the index, add it
                    name1 = "E_" + str(self.edgeIndex) + "_1" # I use this names E_1_1, E_1_2
                    name2 = "E_" + str(self.edgeIndex) + "_2"
                    ###print "ADD Edge " + name1 + " Origin: " + lastVertex.id
                    ###print "ADD Edge " + name2 +" TWIN" + " Origin: " + currentVertex.id
                    ##################### fill in most of the information
                    newEdge = HalfEdge(name1 , ##id
                                       lastVertex, # pointer to the origin
                                       None, #twin
                                       currentFace, #incidentFAce pointer
                                       None, #next
                                       None, #prev
                                       currentVertex
                                       )
                    newEdgeTwin = HalfEdge(name2 ,
                                           currentVertex, #origin
                                           newEdge, #twin pointer
                                           outerFace, #dont know who is incident to , default outerFace
                                           None, #no next
                                           None, # No prev)
                                           lastVertex
                                           );
                    newEdge.twin = newEdgeTwin #now i can add the twin
                    self.halfEdgesIdx[edgeKey] = newEdge# indexed by vertex1_vertex2, yields EdgeId11
                    self.halfEdgesIdx[edgeKeyTwin ] = newEdgeTwin# indexed by vertex1_vertex2, yields EdgeId12
                    ##
                    self.halfEdges[name1] = self.halfEdgesIdx[edgeKey]
                    self.halfEdges[name2] = self.halfEdgesIdx[edgeKeyTwin ]
                    ####
                    edgesList.append(newEdge); #keep the record for the next pass, to fill next and prev values
                    #edgesListTwin.append(newEdgeTwin);
                    ####
                    #print "ADD Edge " + newEdge.id + " Origin: " + newEdge.origin.id + " TWIN: " + newEdge.twin.id + " Incident: " + newEdge.incidentFace.id
                    #print "ADD Edge " + newEdgeTwin.id + " Origin: " + newEdgeTwin.origin.id + " TWIN: " + newEdgeTwin.twin.id + " Incident: " #+ newEdgeTwin.incidentFace.id
                    ###print "ADD Edge " + newEdge.toString();
                    ###print "ADD Edge " + newEdgeTwin.toString();
                    lastVertex.incidentEdge = newEdge
                    currentVertex.incidentEdge = newEdgeTwin# adding incident edge to this vertex
                    self.edgeIndex += 1;
                    ###################
                else:# the edge does already exist:
                    #it may have been added from the other side, i.e. from the twin
                    #in that case it is missing the incident face
                    #next and prev are still missing too, but those wait until the second pass
                    #twin must already be assigned
                    currentEdge = self.halfEdgesIdx[edgeKey]
                    # print "EXisting edge " + currentEdge.toString();
                    # print "UPdating Info ... "
                    # print " Incident Face: " + currentFace.toString();
                    # print currentEdge.toString();
                    # print "Updated ... "
                    currentEdge.incidentFace = currentFace;
                    edgesList.append(currentEdge);
                    self.halfEdgesIdx[edgeKey] = currentEdge
                    self.halfEdges[currentEdge.id] = currentEdge;
                    # print self.halfEdgesIdx[edgeKey].toString()
                lastVertex = currentVertex#
            ## end of: for coordinates, no more vertexes added, no more edges added
            #################################### a second round to complete all the remaining references on edged is needed.
            #completing next and prev references on edges list only on my side
            ### print "------------------------------------------- UPDATING EDGES next and prev"
            ### for e in edgesList:
            ###     print e.toString()
            ### print "-------------------------------------------- "
            for i in xrange(len(edgesList) - 1):
                ###print i
                edge = edgesList[i]
                #edgeTwin = edgesListTwin[i]
                ###print "Before " + edge.toString()
                edge.prev = edgesList[i - 1]
                edge.next = edgesList[i + 1]
                ### if everything fails remove this comments
                #edge.prev.twin = edgesListTwin[i - 1 ]
                #edge.next.twin = edgesListTwin[i + 1]
                ###
                ###print "After " + edge.toString();
            #processing the last element
            lastIndex = len(edgesList) - 1
            #print lastIndex
            edge = edgesList[lastIndex]
            ###print "Before " + edge.toString()
            edge.prev = edgesList[lastIndex - 1]
            edge.next = edgesList[0]
            #
            #edge.prev.twin = edgesListTwin[lastIndex -2]
            #edge.next.twin = edgesListTwin[0]
            ###print "After " + edge.toString();
            edgesList = []# clear everything
            #print edge.toString();
            #######################3
            ###print "------------------------------------------- ENDOF UPDATING EDGES next and prev"
            ### updating this face, adding the first edge
            currentFace.outerComponent = edge.twin; # and outer edge
            #currentFace.innerComponents= edge.twin; # and outer edge
        # build the (face, vertex) -> half-edge lookup indexes
        for key in iter(self.halfEdges):
            tmpedge = self.halfEdges[key];
            edgekeystart = tmpedge.incidentFace.id + "_" + tmpedge.origin.id
            edgekeyend = tmpedge.incidentFace.id + "_" + tmpedge.end.id
            self.faceStartVertexIdx[edgekeystart ] = tmpedge
            self.faceEndVertexIdx[edgekeyend ] = tmpedge
        print "Updating f0 edges"
        f0edges = self.findEdgesAtFace(outerFace);# single time operation
        #those edges need to be filled
        for e in f0edges:
            if e.next == None:
                e.next = self.findEdgeAtVertexFace(e.end, e.incidentFace)
                pass;
            if e.prev == None:
                e.prev = self.findEdgeAtEndVertexNFace(e.origin, e.incidentFace);
                pass;
            # print e.toString()
        print "Updated f0 edges"
#############################
###creating indexes
def __init__(self):
'''
Constructor
'''
self.faces = {}
self.verticesIdx = {} # key is a pair of coordinates, value is a Vertex object
self.vertices = {} # key is vertex id
self.halfEdges = {} # key is # E_x_y
self.halfEdgesIdx = {}# key is a pair of vertices, v1_v2, v1 is origin, v2 is end
self.faceStartVertexIdx = {} #holds a pointer to the edge with such face and such vertex as origin
self.faceEndVertexIdx = {} #holds a pointer to the edge with such face and such vertex as end
self.faceId = 0;
self.vertexIndex = 1;
self.edgeIndex = 1;
def printData(self):
'''
This method tries to print a nice screen with all the information
'''
print "****************************************************************"
print "****************************************************************"
print "****************************************************************"
print "VERTICES"
for v in iter(self.vertices):
print self.vertices[ v ].toString()
print "****************************************************************"
print "FACES"
for f in iter(self.faces):
print self.faces[f].toString()
print "****************************************************************"
print "EDGES"
for e in iter(self.halfEdges):
print self.halfEdges[e].toString()
print "****************************************************************"
def printLineSegments(self):
lineSegments = self.findLineSegments();
for lineSegmentKey in iter(lineSegments):
lineSegment = lineSegments[lineSegmentKey]
print "Segment: " + lineSegmentKey
for s in lineSegment:
print s.id
conv = self.convertLine2Tuple(lineSegment);
for c in conv:
print c
def convertLine2Tuple(self, listOfVertices):
'''
This method recieves a single list of vertices (v1,v2,v3,v4, ... vn)
and returns the corresponding coordinates on a list
( )
'''
listOfTupleCoordinates = []
for v in listOfVertices:
vertex = self.vertices[v.id]
listOfTupleCoordinates.append(vertex.coords);
#print listOfTupleCoordinates
return listOfTupleCoordinates
    def findLineSegments(self):
        '''
        This method is used to extract all the pair of nodes for all the polygons.
        The pairs are stored on a dictionary as (vi,vj,face_k)
        Is noted that (v1,vj,face_k) == (vj,vi,face_l) , ie, the order of the vertices doesnt matter.

        Terminology: a "node" is a vertex whose arity (out-degree, see
        getGraphArity) is 3 or more, i.e. a point where the boundaries of
        several faces meet.  For every face the boundary is walked once and
        cut at its nodes; every piece (a list of Vertex objects from one
        node to the next) is stored under the key
        "startNodeId_endNodeId_startEdgeId".  A piece that was already
        recorded while walking the neighbouring face (same node pair in the
        opposite direction, keyed via the twin edge) is skipped.
        Returns the dictionary of line segments.
        '''
        print "Finding line Segments"
        lineSegments = {};
        counter = self.getGraphArity();# arity (number of outgoing half-edges) per vertex id
        # for each face, make a walk and find all the vertexes with more than 2 incident edges, that means a node
        for fkey in iter(self.faces):
            # NOTE(review): the outer-face key 'z' is remapped to '1' here -- TODO confirm intent
            if fkey == 'z':
                fkey = '1'
#            print "************************************* Face: -" + fkey + "-"
            localNodeList = [];
            face = self.faces[fkey]
            edgeStart = self.findFirstEdgeAtFace(face)#the first edge depends on the actual order of the keys
#            print "Walking around " + fkey + " looking for a node to start"
            edge = edgeStart;
            if counter[ edge.origin.id ] > 2:
#                print "Found a Node on first vertex: " + edge.origin.id
                #if edge.origin.id == '63225':
                #    print "-************************************"
                localNodeList.append(edge.origin);
            else:# have to first encounter a node
#                print "Node not found on first vertex: " + edge.origin.id
                edge = edge.next;
                # advance until a node is reached, or until the walk is back at the start
                while (counter[ edge.origin.id ] < 3) & (not (edgeStart.origin.id == edge.origin.id)) :
#                    print "Looking for a node on : " + edge.origin.id
                    edge = edge.next;
#            print "Stop Counter: " + str(counter[ edge.origin.id ]) + " VERTICES: " + " = " + edge.origin.id + " = " + edgeStart.origin.id + " EDge:" + edge.id
            localNodeList.append(edge.origin);# on both cases, just add such node as a starting node
            # restart the boundary walk from the node just found
            edgeStart = edge
            edge = edge.next;
            #now, everythime, i start on a node and i know that such node must end my nodes list
            while not edge.id == edgeStart.id:
#                print "Walking on EDge " + edge.id
                result = counter[ edge.origin.id ]
                if result > 2:
#                    print "Node FOUND " + edge.origin.id
                    localNodeList.append(edge.origin);
                edge = edge.next
            # i will always end with more at least one node
            # close the cycle: the first node also terminates the last segment
            localNodeList.append(localNodeList[0]);
#            print "List of nodes for FAce f " + fkey
#            for node in localNodeList:
#                print node.toString();
            #
            #########################
            #now i have to create a set of tuples for the node pairs
            # it should look like
            # [a,b,c,d,e,f,g,a]
            # so i must find an edge with a as vertex and a face f
            #print 'Creating the list of segments'
            segmentCounter = 0
            #########################
            #############################
            # consume consecutive node pairs until only the closing node remains
            while (len(localNodeList) > 1):
                #print "Trying segment " + str(segmentCounter)
                tmpVertexList = []
                startNode = localNodeList.pop(0);# a node is a vertex
                endNode = localNodeList[0]
                #print "Start Node: " + startNode.toString()
                #print "End Node: " + endNode.toString()
                # walk along the face boundary from startNode to endNode,
                # collecting every intermediate vertex
                currentEdge = self.findEdgeAtVertexFace(startNode, face);
                startEdge = currentEdge;
                #tmpVertexList.append(startNode);# a node is a vertex
                tmpVertexList.append(currentEdge.origin);#add the id of the vertex
                currentEdge = currentEdge.next
                while not currentEdge.origin == endNode:#while we cant find the next node
                    #print " End Node " + endNode.id + " not found yet at origin of" + currentEdge.toString()
                    tmpVertexList.append(currentEdge.origin);#add the id of the vertex
                    currentEdge = currentEdge.next
                tmpVertexList.append(endNode);#add the last node vertex
#                print "Trying to add line segment: "
#                for v in tmpVertexList:
#                    print v.toString()
                ### try to add to the line segment list
                # key2 is the key this segment would have been stored under
                # when it was walked from the adjacent face (twin direction)
                key1 = startNode.id + "_" + endNode.id + "_" + startEdge.id
                key2 = endNode.id + "_" + startNode.id + "_" + currentEdge.prev.twin.id
                test1 = key1 in lineSegments;
                test2 = key2 in lineSegments;
                if not test1 | test2:# if is already included, dont do anything
                    #add to the edges list
#                    print "SEgment added " + key1
                    lineSegments[key1] = tmpVertexList;# this is all that must be done for this line segment
                else:
                    #print "segment not added"
                    pass;
            ###
            ### getting back to face for
            ## getting back to the next face
        return lineSegments;
## completely deprecated
    def findLineSegments2(self):
        '''
        This method is used to extract all the pair of nodes for all the polygons.
        The pairs are stored on a dictionary as (vi,vj,face_k)
        Is noted that (v1,vj,face_k) == (vj,vi,face_l) , ie, the order of the vertices doesnt matter.

        DEPRECATED: superseded by findLineSegments (see the comment above
        this method).  Kept for reference only -- unlike the newer version
        it also tries to patch up degenerate cases: faces with one node,
        faces with no node at all, and isolated polygons ("islands").
        '''
        print "Finding line Segments"
        lineSegments = {};
        counter = self.getGraphArity();# indexed per vertex, arity
        # for each face, make a walk and find all the vertexes with more than 2 incident edges, that means a node
        for fkey in iter(self.faces):
            # NOTE(review): the outer-face key 'z' is remapped to '1' here -- TODO confirm intent
            if fkey == 'z':
                fkey = '1'
            print "************************************* Face: -" + fkey + "-"
            localNodeList = [];
            face = self.faces[fkey]
            edgeStart = self.findFirstEdgeAtFace(face)#the first edge depends on the actual order of the keys
#            print "Walking around " + fkey
            edge = edgeStart;
            if counter[ edge.origin.id ] > 2:
                #print "Found a Node: " + edge.origin.id
                #if edge.origin.id == '63225':
                #    print "-************************************"
                localNodeList.append(edge.origin);
            edge = edgeStart.next;
            ### now start walking around the edges looking for nodes
            # the condition is quite weak
            while not edge.id == edgeStart.id:
                #print "Walking on EDge " + edge.id
                result = counter[ edge.origin.id ]
                if result > 2:
                    #print "Found a Node: " + edge.origin.id
                    localNodeList.append(edge.origin);
                edge = edge.next
            # I think I still have to do the last verification
            ################ this code corrects all the inconsistencies on the missing nodes
            ### have several cases
            if(len(localNodeList) > 1):#if at least have 2 node
                print "Found at least " + str(len(localNodeList)) + " node"
                localNodeList.append(localNodeList[0]);
            if(len(localNodeList) == 1):#if at least have a node
                localNodeList.append(edgeStart.prev.origin)## another quick fix
                print "Found only " + str(len(localNodeList)) + " node. Adding no node"
                pass;
            if(len(localNodeList) == 0):# have no node and have to add an artificial one, but this will lead to errors
                print "No nodes found"
                ################### mega fix, only for islands
                localNodeList.append(edgeStart.origin)
                localNodeList.append(edgeStart.prev.origin)## because this one was skipped for the islands
                localNodeList.append(edgeStart.origin)
            #################################
#            print "List of nodes for FAce f " + fkey
#            for node in localNodeList:
#                print node.toString();
            #
            #########################
            #now i have to create a set of tuples for the node pairs
            # it should look like
            # [a,b,c,d,e,f,g,a]
            # so i must find an edge with a as vertex and a face f
            #print 'Creating the list of segments'
            segmentCounter = 0
            #########################
            #need to create an exception if is an isolated polygon
            if (len(localNodeList) == 1):
#                print "Island"
                # island case: walk the whole boundary once, starting and
                # ending at the single remaining vertex
                tmpVertexList = []
                startvertex = localNodeList.pop(0)
                currentEdge = self.findEdgeAtVertexFace(startvertex, face);
                while not (currentEdge.end == startvertex):
                    #print currentEdge.toString()
                    tmpVertexList.append(currentEdge.origin)
                    currentEdge = currentEdge.next
                tmpVertexList.append(startvertex);
                print "Trying to add line segment: "
                for v in tmpVertexList:
                    print v.toString()
                key1 = startvertex.id + "_" + startvertex.id + "_" + currentEdge.id
                #print key1
                test1 = key1 in lineSegments;
                if not test1 :# if is already included, dont do anything
#                    print "SEgment added " + key1
                    lineSegments[key1] = tmpVertexList;# this is all that must be done for this line segment
                    #print tmpVertexList
            else:
                #############################
                # general case: same node-pair walk as findLineSegments
                while (len(localNodeList) > 1):
                    #print "Trying segment " + str(segmentCounter)
                    tmpVertexList = []
                    startNode = localNodeList.pop(0);# a node is a vertex
                    endNode = localNodeList[0]
                    #print "Start Node: " + startNode.toString()
                    #print "End Node: " + endNode.toString()
                    currentEdge = self.findEdgeAtVertexFace(startNode, face);
                    startEdge = currentEdge;
                    #tmpVertexList.append(startNode);# a node is a vertex
                    while not currentEdge.origin == endNode:#while we cant find the next node
                        #print " End Node " + endNode.id + " not found yet at origin of" + currentEdge.toString()
                        tmpVertexList.append(currentEdge.origin);#add the id of the vertex
                        currentEdge = currentEdge.next
                    tmpVertexList.append(endNode);#add the last node vertex
#                    print "Trying to add line segment: "
#                    for v in tmpVertexList:
#                        print v.toString()
                    ### try to add to the line segment list
                    key1 = startNode.id + "_" + endNode.id + "_" + startEdge.id
                    key2 = endNode.id + "_" + startNode.id + "_" + currentEdge.prev.twin.id
                    test1 = key1 in lineSegments;
                    test2 = key2 in lineSegments;
                    if not test1 | test2:# if is already included, dont do anything
                        #add to the edges list
#                        print "SEgment added " + key1
                        lineSegments[key1] = tmpVertexList;# this is all that must be done for this line segment
                    else:
                        #print "segment not added"
                        pass;
            ###
            ### getting back to face for
            ## getting back to the next face
        return lineSegments;
#needs to be tested
def replaceLine(self, oldLine, newLine):
'''
Each line is a list of tuples
THis code attempts to replace an old line which the simplified version, or in general ,
any other line, bigger or smaller.
A line looks like this: (v1,v2, .. Vn-1,Vn) , where v1 = n1 and vn = n2,
starts on node N1 and ends at N2
The general procedure when simplifing lines s the following
1.- Drop the unused vertices or add new vertices accordingly.
2.- Keep track of
2.1 The edge that has it origin at N1, and ends at v2
2.2 its twin
2.3 edge that has it origin at N2, and ends at vn-1
2.4 its twin
3.- Keep track of the affected edges in an ordered way and separated on by face
4.- Create new edges accordingly keeping them in a proper order
5.- Update the affected vertices with the new edges
6.- Updated the affected faces with the first edge, and the other face with the twin of the last edge
'''
# print "Replacing Line"
if (len(oldLine) == len(newLine)):
# print "Replacing Line ... Nothing done"
return None; # we still consider only simplification
dropVertices = []
#checks if a point on the original line is present on the new line
#if the point is not present, is kept
# print "Replacing Line..."
for ov in oldLine:
# print ov
for v in newLine:
#print v;
if v == ov: # if the new point is contained on old list
found = True;
if not found:
#print ov
dropVertices.append(ov);# the missing points are kept, why???
###################
## keep track of the important edges
#find an edge that start on the node and ends on the next vertex of the original line
#find an edge that start on the last node and ends on the previous vertex of the original line
# with this we keep track of the previous edges
# print "-- -- -- "
# print self.verticesIdx[str(oldLine[0])].id + "_" + str (self.verticesIdx[str(oldLine[0])].coords)
# print self.verticesIdx[str(oldLine[1])].id + "_" + str(self.verticesIdx[str(oldLine[1])].coords)
# print self.verticesIdx[str(oldLine[-2])].id + "_" + str(self.verticesIdx[str(oldLine[-2])].coords)
# print self.verticesIdx[str(oldLine[-1])].id + "_" + str(self.verticesIdx[str(oldLine[-1])].coords)
firstEdgeKey = self.verticesIdx[str(oldLine[0])].id + "_" + self.verticesIdx[str(oldLine[1])].id
lastEdgeKey = self.verticesIdx[str(oldLine[-2])].id + "_" + self.verticesIdx[str(oldLine[-1])].id
# print firstEdgeKey
# print lastEdgeKey
#old first edges
firstEdge = self.halfEdgesIdx[firstEdgeKey];
lastEdge = self.halfEdgesIdx[lastEdgeKey];
#start creating new edges
face1 = firstEdge.incidentFace;
face2 = firstEdge.twin.incidentFace;
edgesList = [];
# creating new edges with the new vertices
for vindex in xrange(len(newLine) - 1):# create new edge and its twin
vorig = self.verticesIdx[str(newLine[vindex])];
vend = self.verticesIdx[str(newLine[vindex + 1])];
edgeKey = vorig.id + "_" + vend.id;
edgeKeyTwin = vend.id + "_" + vorig.id;
#########################################################################################
#the edge shouldnt exists, as we are droping them
name1 = "E_" + str(self.edgeIndex) + "_1" # I use this names E_1_1, E_1_2
name2 = "E_" + str(self.edgeIndex) + "_2"
##################### lleno la mayoria de la informacion
newEdge = HalfEdge(name1 ,
vorig, # pointer to the origin
None, #twin
face1, # need to find it
None, #next
None, #prev
vend
);
newEdgeTwin = HalfEdge(name2 ,
vend, #origin #############################
newEdge, #twin pointer
face2, #dont know who is incident to , default outerFace
None, #no next
None, # No prev)
vorig
);
newEdge.twin = newEdgeTwin #now i can add the twin
##add to the index the new edges
self.halfEdgesIdx[edgeKey] = newEdge# indexo por vertice1_vertice2, obtengo EdgeId11
self.halfEdgesIdx[edgeKeyTwin ] = newEdgeTwin# indexo por vertice1_vertice2, obtengo EdgeId12
##
self.halfEdges[name1] = self.halfEdgesIdx[edgeKey]
self.halfEdges[name2] = self.halfEdgesIdx[edgeKeyTwin ]
########## indexes
self.faceStartVertexIdx[ face1.id + "_" + vorig.id ] = newEdge
self.faceEndVertexIdx[face1.id + "_" + vend.id] = newEdge
self.faceStartVertexIdx[ face2.id + "_" + vend.id ] = newEdgeTwin
self.faceEndVertexIdx[face2.id + "_" + vorig.id] = newEdgeTwin
### end of indexes
edgesList.append(newEdge); #keep the record for the next pass, to fill next and prev values
vorig.incidentEdge = newEdge
#vend.incidentEdge = newEdgeTwin# addinf incident edge to this vertex
face1.outerComponent = newEdge;
face2.outerComponent = newEdge.twin;
#print newEdge.toString();
#print newEdgeTwin.toString();
#### creating indexes
self.edgeIndex += 1;
###################
#### now need to update the remaining info on such edges, prev and next
##### also need to update faces outerComponent and maybe their inner components
## set the first and last edge
# first a bulk pointer assignment
# first and las pointer will be wrong
for i in xrange(len (edgesList) - 1):
e = edgesList[i]
#print e.toString()
e.prev = edgesList[i - 1]
e.next = edgesList[i + 1]
#rewriting things
edgesList[0].prev = firstEdge.prev
edgesList[0].twin.next = firstEdge.twin.next# on the twin side
edgesList[0].prev.next = edgesList[0]
edgesList[0].twin.next.prev = edgesList[0].twin
##ok
edgesList[-1].next = lastEdge.next
edgesList[-1].twin.prev = lastEdge.twin.prev
edgesList[-1].next.prev = edgesList[-1]
edgesList[-1].twin.prev.next = edgesList[-1].twin
print (len(edgesList))
if len(edgesList) > 1:
edgesList[-1].prev = edgesList[-2]
edgesList[-1].twin.next = edgesList[-2].twin
#dropping old edges
for vindex in xrange(len(oldLine) - 1):# create new edge and its twin
key = self.verticesIdx[str(oldLine[vindex])].id + "_" + self.verticesIdx[str(oldLine[vindex + 1])].id
keyTwin = self.verticesIdx[str(oldLine[vindex + 1])].id + "_" + self.verticesIdx[str(oldLine[vindex])].id
self.halfEdgesIdx[key] = None
self.halfEdgesIdx[keyTwin] = None
del self.halfEdgesIdx[key]
del self.halfEdgesIdx[keyTwin]
##dropping some other indexes
## but not today
def findEdgeAtEndVertexNFace(self, vertex, face):
return self.faceEndVertexIdx[face.id + "_" + vertex.id]
def findEdgeAtEndVertexNFace2(self, vertex, face):
edge = None;
for edgeKey in iter(self.halfEdges):
tmpEdge = self.halfEdges[edgeKey]
if (tmpEdge.end.id == vertex.id) & (tmpEdge.incidentFace.id == face.id):
return tmpEdge;
return None;
def findEdgeAtStartEndVertexNFace2(self, startVertex, endVertex, face):
edge = None;
for edgeKey in iter(self.halfEdges):
tmpEdge = self.halfEdges[edgeKey]
if (tmpEdge.origin.id == startVertex.id) & (tmpEdge.end.id == endVertex.id) & (tmpEdge.incidentFace.id == face.id):
return tmpEdge;
return None;
def findEdgeAtVertexFace(self, vertex, face):
#print "Looking: " + face.id + "_" + vertex.id
return self.faceStartVertexIdx[face.id + "_" + vertex.id]
def findEdgeAtVertexFace2(self, vertex, face):
'''
This operation is time consuming, some improvements are needed
Right now is a linear search
'''
# print "Finding edge at Vertex " + vertex.id + " at Face " + face.id
edge = None;
for edgeKey in iter(self.halfEdges):
tmpEdge = self.halfEdges[edgeKey]
if (tmpEdge.origin.id == vertex.id) & (tmpEdge.incidentFace.id == face.id):
# print "Edge Found : "+ tmpEdge.toString()
return tmpEdge;
else:
#print "Not in Edge " + tmpEdge.toString()
pass;
return None;
def findFirstEdgeAtFace(self, face):
# print "Finding Edge at face " + face.id
for edgeKey in iter(self.halfEdges):
# print "Trying with edge " + edgeKey
tmpEdge = self.halfEdges[edgeKey]
if (tmpEdge.incidentFace.id == face.id):
# print "Found an edge incident on " + face.id
# print tmpEdge.toString()
return tmpEdge;
return None;
def findEdgesAtFace(self, face):
edges = []
# print "Finding Edges at face " + face.id
for edgeKey in iter(self.halfEdges):
#print "Trying with edge " + edgeKey
tmpEdge = self.halfEdges[edgeKey]
if (tmpEdge.incidentFace.id == face.id):
# print "Found an edge incident on Face " + face.id
# print tmpEdge.toString()
edges.append(tmpEdge);
return edges;
def getVertexArity(self, vertex):
'''
Method to compute the actual arity of a vertex.
Requires a full search, linear search on the edge list.
For each edge, check if the origin vector is the requested one.
If so, add one to the counter and also store the edge
'''
print "Cheking Vertex: " + Vertex.id
incidentEdges = [];
counter = 0;
for e in iter(self.halfEdges):
if e.origin.id == Vertex.id:
incidentEdges.append(e);
print e.origin.id;
counter += 1;
return counter;
def getGraphArity(self):
'''
This method computes the actual arity of each vertex
'''
counter = {};
#first set all he keys
for vkey in iter(self.vertices):
counter[str(self.vertices[vkey].id)] = 0
for e in iter(self.halfEdges):
counter[self.halfEdges[e].origin.id] += 1
# for c in iter(counter):
# print "V " + c + "- = " + str( counter[c])
return counter;
    def createPolygons2(self):
        '''
        Rebuild every polygon (except the outer face 'z') from the DCEL.

        Walks each face boundary via the faceStartVertexIdx index
        (findEdgeAtVertexFace) rather than the next pointers, so it relies
        on that index being up to date (replaceLine maintains it).
        Returns a dict: face key -> list of coordinate tuples, with the
        start vertex repeated at the end so the ring is closed.
        '''
        print "Creating Polygons"
        lineSegments = {};
        polygons = {};
        # just made a simple walk
        for fkey in iter(self.faces):
            if not fkey == 'z':
                #print "Polygon " + fkey
                vertices = []
                face = self.faces[fkey]
                edgeStart = face.outerComponent
                #print edgeStart.toString()
                vstart = edgeStart.origin
                vend = edgeStart.end
                vertices.append(vstart.coords);
                edge = self.findEdgeAtVertexFace(vend, face)
                vertex = edge.origin
                while not (vstart == vertex):
                    ## basically: look up the edge that starts at the current end vertex
                    vertices.append(vertex.coords);
                    edge = self.findEdgeAtVertexFace(edge.end, face)
                    vertex = edge.origin;
                # close the ring with the start vertex reached by the walk
                vertices.append(vertex.coords);
                polygons[fkey] = vertices;
        return polygons;
def createPolygons(self):
'''
This method is used to extract all the pair of nodes for all the polygons.
The pairs are stored on a dictionary as (vi,vj,face_k)
Is noted that (v1,vj,face_k) == (vj,vi,face_l) , ie, the order of the vertices doesnt matter.
'''
print "Creating Polygons"
lineSegments = {};
polygons = {};
# just made a simple walk
for fkey in iter(self.faces):
if not fkey == 'z':
#print "Polygon " + fkey
vertices = []
face = self.faces[fkey]
edgeStart = face.outerComponent
#print edgeStart.toString()
vertices.append(edgeStart.origin.coords);
edge = edgeStart
edge = edge.next
#print edge.toString()
while (not (edgeStart.origin.id == edge.origin.id)) :
#print edge.toString()
#print "Appending vertex " + edge.origin.id
#print "Appending vertex " + edge.origin.id
vertices.append(edge.origin.coords);
edge = edge.next
# i think I am still missing a last point
vertices.append(edge.origin.coords);## test if is correct
polygons[fkey] = vertices;
return polygons;
| Python |
import os
import sys
#-- general import
import os
#-- OGR
try:
from osgeo import ogr
except ImportError:
import ogr
#-- Shapely
from shapely.wkb import loads as wkbloads
from shapely.wkt import loads as wktloads
from shapely.geometry import Point, LineString, Polygon
from shapely.ops import cascaded_union
'''
Created on 12/01/2011
@author: jguerrer
'''
def writeBackShapeFile(eleFile, nodeFile):
# the elefile contains the actual triangles
# the nodefile contains the list of vertexes or points
#first create a dictionary of nodefiles
print "Reading node file: " + nodeFile
f=open(nodeFile,'r')
headerNode = f.readline()
elements = headerNode.split()
nodesNum = elements[0];
nodesDim = elements[1];
#skip the next elements like the attributes
nodes = {}
data=f.readlines()
for x in xrange(len(data)-1):#for each line
d = data[x]
#print d
elements = d.split()
# pt_number x_coordinate y_coordinate z_attribute
#print "0:" + elements[0] + " 1:" + str(elements[1]) + " 2:" + str(elements[2]) + " 3:" + str(elements[3])
nodes[elements[0]] = { 'id':elements[0] ,'x':elements[1] , 'y':elements[2], 'z':elements[3] }
#now points are ready to use
print "Reading ele file: " + eleFile
f=open(eleFile,'r')
headerNode = f.readline()
elements = headerNode.split()
triangles = [];
data=f.readlines()
for x in xrange(len(data)-1):#for each line
d = data[x]
#print d
elements = d.split()
p1 = elements[1]
p2 = elements[2]
p3 = elements[3]
############ triangles
wkt = "POLYGON(( " + \
nodes[p1]['x'] + " " + nodes[p1]['y'] + " , " + \
nodes[p2]['x'] + " " + nodes[p2]['y'] + " , " + \
nodes[p3]['x'] + " " + nodes[p3]['y'] + " , " + \
nodes[p1]['x'] + " " + nodes[p1]['y'] + "))";
#print wkt;
triangles.append({'geom':wkt });
##start writing a shapefile os send it to a method
return triangles;
def write_shapefile(lsFeat, outfile):
"""
Create a new shapefile and writes it to the disk.
Input:
lsFeat: list of Features (a dico with geom/name/pop)
outfile: path for the new file to create
Output:
- (the shapefile is written to the disk)
"""
driver = ogr.GetDriverByName('ESRI Shapefile') #-- we create a new SHP file
if os.path.exists(outfile):
driver.DeleteDataSource(outfile) #-- if it exists, overwrite it
ds = driver.CreateDataSource(outfile)
layer = ds.CreateLayer(outfile, geom_type=ogr.wkbPolygon) #-- we create a SHP with polygons
#-- create the 1 attributes (called Fields in OGR jargon) "name" and "population"
fd = ogr.FieldDefn()
fd.SetName('height')
fd.SetType(ogr.OFTString) #-- type is String
layer.CreateField(fd)
for i in lsFeat:
f = ogr.Feature(feature_def=layer.GetLayerDefn())
p = ogr.CreateGeometryFromWkt(i['geom'])
f.SetGeometry(p)
# f.SetField(0, i['height'])
f.SetField(0, 0)
layer.CreateFeature(f)
f.Destroy()
ds.Destroy()
print "\nShapefile saved as:", outfile
def main():
    '''Convert the hard-coded Triangle output pair into a shapefile.'''
    nodefile = 'test.1.node'
    elefile = 'test.1.ele'
    outputShapefile = 'outputTriangulation.shp'
    write_shapefile(writeBackShapeFile(elefile, nodefile), outputShapefile)
# allow running this module as a script
if __name__ == '__main__':
    main()
| Python |
'''
Created on 10/01/2011
@author: jguerrer
'''
class Face(object):
    '''
    A face record of the DCEL.

    id              -- plain string identifier
    outerComponent  -- a half-edge on the outer boundary (or None)
    innerComponents -- iterable of half-edges, one per hole (or None)
    '''
    def __init__(self, id, outerCompoment , innerComponents):
        '''Store the face id and its boundary pointers.'''
        self.id = id
        self.outerComponent = outerCompoment    # parameter name kept for existing callers
        self.innerComponents = innerComponents
        self.attributes = []                    # free-form attribute storage
    def toString(self):
        '''Return a one-line, tab-separated description of the face.'''
        parts = []
        if self.id != None:
            parts.append("f" + self.id)
        if self.outerComponent != None:
            parts.append("\t\tOuter: " + self.outerComponent.id)
        if self.innerComponents != None:
            for inner in self.innerComponents:
                parts.append("\t\tInner: " + inner.id)
        return "".join(parts)
'''
Created on 10/01/2011
@author: jguerrer
'''
class HalfEdge(object):
    '''
    A half-edge record of the DCEL.  Each geometric edge is represented
    by two half-edges (twins) running in opposite directions.
    '''
    def __init__(self,id,origin, twin, incidentFace,next,prev,end):
        '''
        Store all pointers of the half-edge.

        origin/end   -- Vertex records at the two endpoints
        twin         -- the opposite half-edge
        incidentFace -- the face this half-edge borders
        next/prev    -- neighbours along the face boundary
        '''
        self.id=id
        self.origin = origin
        self.twin = twin
        self.incidentFace = incidentFace
        self.next = next
        self.prev = prev
        self.end = end
    def toString(self):
        '''Return a one-line description; None pointers are omitted.'''
        info = str(self.id)
        if not self.origin == None:
            info = info + "\t\tOrigin: " + self.origin.id
        if not self.end == None:
            info = info + "\t\tEND: " + self.end.id
        if not self.twin == None:
            info = info + "\t\tTWIN: " + self.twin.id
        if not self.incidentFace == None:
            info = info + "\t\tIF: " + self.incidentFace.id
        if not self.next == None:
            info = info + "\t\tNEXT: " + self.next.id
        if not self.prev == None:
            info = info + "\t\tPREV: " + self.prev.id
        return info
    def equals(self,otherHalfEdge):
        '''
        Two half-edges are considered equal when they share both the
        origin vertex and the next pointer.

        Bug fix: the original used the bitwise '&', which binds tighter
        than '==' and therefore compared the wrong operands (raising
        TypeError for plain objects); logical 'and' is what was intended.
        '''
        if self.origin == otherHalfEdge.origin and self.next == otherHalfEdge.next:
            return True
        else:
            return False
| Python |
"""
GM1050 Advanced Principles and Algorithm of GIS
2010/2011
- Mini Project - The simplification of a map
A shapefile of departments in western Europe is given.
By the Douglas-Peuker algorithm this shapefile is simplified.
Where several topological relations should remain
@author: Bas, Josafat and Elise
"""
#-- general import
import math
import os
#from Vertex import Vertex
from dcel1 import DCEL
#from Douglas_Peucker import algorithm
from Douglas_Peucker2 import simplify_points
#-- OGR
try:
from osgeo import ogr
except ImportError:
import ogr
#-- Shapely
from shapely.wkb import loads as wkbloads
from shapely.wkt import loads as wktloads
from shapely.geometry import LineString
def main():
    '''Entry point: run the polygon simplification pipeline.'''
    createPolygons()
def createPolygons():
#1) Load all polygons
#shapefile = 'final.shp';
#shapefile = 'departementsmall.shp';
shapefile = 'europe_nuts.shp';
#shapefile = 'europe_nuts_mini2.shp';
print "reading: " + shapefile
shapelyFeatures = read_shapefile(shapefile)
print "Number of Features: " + str(len(shapelyFeatures))
polygons = simplifyPolygons(shapelyFeatures)# i just recieve a list of indexed coordinates
wktpolygons = createPolygonsWkt(polygons)
outfile = raw_input('give filename (+.shp) of output file ... ')
writePolygonShapefile(shapelyFeatures, wktpolygons, outfile)
#construct_graph(polygons)
def createLines():
    '''
    Experimental variant of createPolygons that was apparently meant to
    write the simplified boundary lines to a shapefile.

    NOTE(review): this calls write_shapefile(), which is not defined in
    this module (writeLineShapefile() is) -- running it would raise
    NameError.  It also feeds POLYGON WKT (createPolygonsWkt) into the
    writer.  Looks unfinished; confirm intent before using.
    '''
    #1) Load all polygons
    #shapefile = 'final.shp';
    #shapefile = 'departementsmall.shp';
    shapefile = 'europe_nuts.shp';
    #shapefile = 'europe_nuts_mini2.shp';
    print "reading: " + shapefile
    shapelyPolygons = read_shapefile(shapefile)
    print "Number of Features: " + str(len(shapelyPolygons))
    #2) Extract edges and nodes - (not line segments, but a graph).
    #3) Structure for each polygon which edges is bounding him
    polygons = simplifyPolygons(shapelyPolygons)
    text = createPolygonsWkt(polygons)
    outfile = raw_input('give filename (+.shp) of output file ... ')
    write_shapefile(text, outfile)
    #construct_graph(polygons)
def read_shapefile(infile):
    """
    Function that reads a shapefile and prints some info about it.
    Input:
        infile: shapefile to read
    Output:
        - A list of feature dictionaries with keys:
          'id' (feature index as string), 'geom' (Shapely geometry),
          'fieldNames' (list of attribute names), plus one entry per
          attribute value, keyed by the attribute index as a string.
    """
    print "Reading", infile
    ge = ogr.Open(infile)
    layer = ge.GetLayer(0) #-- a simple shapefile always has 1 layer only
    ld = layer.GetLayerDefn()
    #-- Find and print the geometry type of the shapefile
    print ld.GetGeomType()
    s = "Geometry type: "
    if ld.GetGeomType() == ogr.wkbPoint:
        s += "Point"
    elif ld.GetGeomType() == ogr.wkbLineString:
        s += "LineString"
    elif ld.GetGeomType() == ogr.wkbPolygon:
        s += "Polygon"
    else:
        s += "Unknown"
    print s
    #-- Find and print the number of geometries
    noFeat = layer.GetFeatureCount()
    print "Number of features:", noFeat
    #-- Find and print the attributes
    print "Attributes:"
    listOfAttributeTypes = []
    for i in range(ld.GetFieldCount()):
        listOfAttributeTypes.append(ld.GetFieldDefn(i).GetNameRef())#add the attribute name to the list
        print "\t", ld.GetFieldDefn(i).GetNameRef() + " "
    print "The features are:"
    lsFeatures = [] #-- create an empty list of Features (of geometries)
    for i in xrange(noFeat):
        f = layer.GetFeature(i)
        wkt = f.GetGeometryRef().ExportToWkt() #-- Well-Known Text
        #print str(i) + "\t" + wkt
        # convert the WKT into a Shapely geometry object
        pshape = wktloads(wkt)
        feature = {'id': str(i), 'geom': pshape , 'fieldNames' :listOfAttributeTypes}
        # copy every attribute value, keyed by its field index (as string)
        for j in xrange(len(listOfAttributeTypes)):
            feature[ str(j) ] = f.GetFieldAsString(j);
        lsFeatures.append(feature)
    return lsFeatures
#2) Extract edges and nodes - (not line segments, but a graph).
def simplifyPolygons(shapelyPolygons):
    '''
    Import the features into a DCEL, Douglas-Peucker-simplify every
    shared boundary line, and rebuild the polygons.

    Returns a dict mapping face key -> list of coordinate tuples.
    '''
    dcel = DCEL()
    dcel.convert(shapelyPolygons)           # import the simple features
    lineSegments = dcel.findLineSegments()  # dict of vertex lists
    tolerance = float(raw_input('Please give tolerance (e.g.: 0.03) [deg] '))
    for key in lineSegments:
        segment = lineSegments[key]
        oldline = dcel.convertLine2Tuple(segment)
        newline = simplify_points(oldline, tolerance)
        dcel.replaceLine(oldline, newline)  # splice the simplified line back in
    # rebuild {face key: [coords]} from the updated DCEL
    return dcel.createPolygons2()
def simplifyLines(shapelyPolygons):
    '''
    Like simplifyPolygons, but collects and returns the simplified
    boundary lines themselves instead of rebuilding polygons.
    '''
    dcel = DCEL()
    dcel.convert(shapelyPolygons)
    lineSegments = dcel.findLineSegments()
    tolerance = float(raw_input('Please give percentage to keep ... [0...1]'))
    simplified = []
    for key in lineSegments:
        segment = lineSegments[key]
        oldline = dcel.convertLine2Tuple(segment)
        newline = simplify_points(oldline, tolerance)
        dcel.replaceLine(oldline, newline)  # keep the DCEL consistent too
        simplified.append(newline)
    return simplified
def createLinesWkt(lines):
    '''
    Turn each polyline (a list of coordinate tuples) into a LINESTRING
    WKT string.  The trailing ", " of the joined coordinate list is
    stripped before closing the parenthesis.
    '''
    wkt = []
    for polyline in lines:
        body = ''
        for pt in polyline:
            body = body + str(pt[0]) + ' ' + str(pt[1]) + ' , '
        wkt.append('LINESTRING (' + body[0:-2] + ')')
    return wkt
def createPolygonsWkt(polygons):
    '''
    Convert a dict of polygons into a list of POLYGON WKT strings.

    Expects *polygons* to be keyed by consecutive stringified indices
    '0'...'n-1', each value a list of (x, y) tuples.

    NOTE(review): polygon.pop() removes the LAST vertex (mutating the
    caller's list) but emits it first, so a closed input ring
    [a, b, c, a] is written as "a , a , b , c", which is not explicitly
    closed at the end -- confirm that OGR accepts/auto-closes such rings.
    '''
    wkt = []
    #print polygons
    for i in xrange(len(polygons)):
        polyline = 'POLYGON (('
        polygon = polygons[str(i)]
        # last vertex becomes the leading coordinate of the ring
        tuple1 = polygon.pop()
        #print tuple1
        polyline = polyline + str(tuple1[0]) + " " + str(tuple1[1])
        for tuple in polygon:
            polyline = polyline + " , " + str(tuple[0]) + " " + str(tuple[1])
        polyline = polyline + "))"
        wkt.append(polyline)
    return wkt
def writeLineShapefile(lsFeat, outfile):
"""
Create a new shapefile and writes it to the disk.
Input:
lsFeat: list of Features (a dico with geom/name/pop)
outfile: path for the new file to create
Output:
- (the shapefile is written to the disk)
"""
driver = ogr.GetDriverByName('ESRI Shapefile') #-- we create a new SHP file
if os.path.exists(outfile):
driver.DeleteDataSource(outfile) #-- if it exists, overwrite it
ds = driver.CreateDataSource(outfile)
layer = ds.CreateLayer(outfile, geom_type = ogr.wkbLineString) #-- we create a SHP with polygons
#-- create the 1 attributes (called Fields in OGR jargon) "name" and "population"
fd = ogr.FieldDefn()
layer.CreateField(fd)
for i in lsFeat:
f = ogr.Feature(feature_def = layer.GetLayerDefn())
p = ogr.CreateGeometryFromWkt(i)
f.SetGeometry(p)
layer.CreateFeature(f)
f.Destroy()
ds.Destroy()
print "\nShapefile saved as:", outfile
def writePolygonShapefile(shapelyFeatures, wktpolygons , outfile):
    """
    Create a new polygon shapefile and write it to the disk.
    Input:
        shapelyFeatures: features as returned by read_shapefile (used for
                         the attribute names and values)
        wktpolygons:     one POLYGON WKT string per feature, same order
        outfile:         path for the new file to create (overwritten)
    Output:
        - (the shapefile is written to the disk)
    """
    driver = ogr.GetDriverByName('ESRI Shapefile') #-- we create a new SHP file
    if os.path.exists(outfile):
        driver.DeleteDataSource(outfile) #-- if it exists, overwrite it
    ds = driver.CreateDataSource(outfile)
    layer = ds.CreateLayer(outfile, geom_type = ogr.wkbPolygon) #-- we create a SHP with polygons
    #-- recreate one string field per attribute of the input shapefile
    fd = ogr.FieldDefn()
    feat = shapelyFeatures[0]
    names = feat['fieldNames']
    print "-----------"
    print names
    print "-----------"
    # the same FieldDefn object is reused for every column; presumably
    # CreateField copies it -- TODO confirm against the OGR docs
    for i in names:
        print i
        fd.SetName(i)
        fd.SetType(ogr.OFTString) #-- type is String
        layer.CreateField(fd)
    for i in xrange(len(wktpolygons)):
        f = ogr.Feature(feature_def = layer.GetLayerDefn())
        p = ogr.CreateGeometryFromWkt(wktpolygons[i])
        f.SetGeometry(p)
        # copy the attribute values of the matching input feature,
        # stored under their stringified field index
        feature = shapelyFeatures[i]
        attributeNames = feature['fieldNames']
        for j in xrange(len(attributeNames)):
            f.SetField(j , feature[str(j)])
        layer.CreateFeature(f)
        f.Destroy()
    ds.Destroy()
    print "\nShapefile saved as:", outfile
if __name__ == "__main__":
print "Running"
main()
#raw_input('press enter')
| Python |
import math
def simplify_points (pts, tolerance):
    """Douglas-Peucker line simplification.

    pts: list of (x, y) tuples; tolerance: max allowed perpendicular distance.
    A closed ring (first point == last point) is split in half and each half
    is simplified recursively. Returns the reduced point list.
    """
    anchor = 0
    floater = len(pts) - 1
    # NOTE(review): size is computed but never used in this variant
    size = math.ceil(len(pts)*tolerance)
    stack = []
    keep = set()
    stack.append((anchor, floater))
    if pts[0] == pts[-1]:
        # closed loop: too small to reduce, or split into two open halves
        if len(pts) <= 3:
            return pts
        else:
            middle = int(math.floor(len(pts)/2))
            first_half = pts[0:middle]
            first_half= simplify_points(first_half, tolerance)
            # NOTE(review): pts[middle] itself is dropped by this split -- confirm intended
            second_half = pts[(middle+1):]
            second_half = simplify_points(second_half, tolerance)
            pts = first_half + second_half
            print pts
            return pts
    else:
        while stack:
            anchor, floater = stack.pop()
            # initialize line segment
            if pts[floater] != pts[anchor]:
                anchorX = float(pts[floater][0] - pts[anchor][0])
                anchorY = float(pts[floater][1] - pts[anchor][1])
                seg_len = math.sqrt(anchorX ** 2 + anchorY ** 2)
                # get the unit vector
                anchorX /= seg_len
                anchorY /= seg_len
            else:
                anchorX = anchorY = seg_len = 0.0
            # inner loop: find the point farthest from the anchor-floater segment
            max_dist = 0.0
            farthest = anchor + 1
            for i in range(anchor + 1, floater):
                dist_to_seg = 0.0
                # compare to anchor
                vecX = float(pts[i][0] - pts[anchor][0])
                vecY = float(pts[i][1] - pts[anchor][1])
                seg_len = math.sqrt( vecX ** 2 + vecY ** 2 )
                # dot product:
                proj = vecX * anchorX + vecY * anchorY
                if proj < 0.0:
                    # point lies behind the anchor: distance is to the anchor itself
                    dist_to_seg = seg_len
                else:
                    # compare to floater
                    vecX = float(pts[i][0] - pts[floater][0])
                    vecY = float(pts[i][1] - pts[floater][1])
                    seg_len = math.sqrt( vecX ** 2 + vecY ** 2 )
                    # dot product:
                    proj = vecX * (-anchorX) + vecY * (-anchorY)
                    if proj < 0.0:
                        dist_to_seg = seg_len
                    else: # calculate perpendicular distance to line (pythagorean theorem):
                        dist_to_seg = math.sqrt(abs(seg_len ** 2 - proj ** 2))
                if max_dist < dist_to_seg:
                    max_dist = dist_to_seg
                    farthest = i
            if max_dist <= tolerance: # use line segment
                keep.add(anchor)
                keep.add(floater)
            else:
                # farthest point exceeds tolerance: recurse on both sub-segments
                stack.append((anchor, farthest))
                stack.append((farthest, floater))
        keep = list(keep)
        keep.sort()
        print len(pts)-len(keep), 'point(s) of ', len(pts) ,' points deleted'
        return [pts[i] for i in keep]
if __name__ == "__main__":
    # run any doctests when executed as a script
    import doctest
    doctest.testmod()
| Python |
# pure-Python Douglas-Peucker line simplification/generalization
#
# this code was written by Schuyler Erle <schuyler@nocat.net> and is
# made available in the public domain.
#
# the code was ported from a freely-licensed example at
# http://www.3dsoftware.com/Cartography/Programming/PolyLineReduction/
#
# the original page is no longer available, but is mirrored at
# http://www.mappinghacks.com/code/PolyLineReduction/
import math
def simplify_points(pts, tolerance, percentage, recursion):
    """Reduce a polyline with a Douglas-Peucker-style stack sweep.

    pts         list of (x, y) tuples, with coordinates
    tolerance   distance tolerance (kept for interface compatibility; this
                variant selects points by count, not by distance)
    percentage  value from 0..1 with the fraction of points to be kept
    recursion   flag set to True on recursive calls for ring halves

    Returns the reduced list of points.
    """
    anchor = 0
    floater = len(pts) - 1
    # budget of points we are allowed to keep
    size = math.ceil(len(pts) * percentage)
    stack = []
    keep = set()
    # a recursive fragment this small cannot be reduced further
    if recursion and len(pts) < 3:
        return pts
    if pts[0] == pts[-1]:  # is the string a closed loop?
        if len(pts) <= 3:
            return pts
        else:
            # split the ring into two open halves and reduce each independently
            recursion = True
            middle = int(math.floor(len(pts) / 2))
            first_half = pts[0:middle]
            first_half = simplify_points(first_half, tolerance, percentage, recursion)
            second_half = pts[(middle + 1):]
            second_half = simplify_points(second_half, tolerance, percentage, recursion)
            return first_half + second_half
    else:
        stack.append((anchor, floater))
        while stack:
            anchor, floater = stack.pop()
            # initialize line segment
            if pts[floater] != pts[anchor]:
                anchorX = float(pts[floater][0] - pts[anchor][0])
                anchorY = float(pts[floater][1] - pts[anchor][1])
                seg_len = math.sqrt(anchorX ** 2 + anchorY ** 2)
                # get the unit vector
                anchorX /= seg_len
                anchorY /= seg_len
            else:
                anchorX = anchorY = seg_len = 0.0
            # inner loop: locate the point farthest from the current segment
            max_dist = 0.0
            farthest = anchor + 1
            for i in range(anchor + 1, floater):
                dist_to_seg = 0.0
                # compare to anchor
                vecX = float(pts[i][0] - pts[anchor][0])
                vecY = float(pts[i][1] - pts[anchor][1])
                seg_len = math.sqrt(vecX ** 2 + vecY ** 2)
                # dot product:
                proj = vecX * anchorX + vecY * anchorY
                if proj < 0.0:
                    dist_to_seg = seg_len
                else:
                    # compare to floater
                    vecX = float(pts[i][0] - pts[floater][0])
                    vecY = float(pts[i][1] - pts[floater][1])
                    seg_len = math.sqrt(vecX ** 2 + vecY ** 2)
                    # dot product:
                    proj = vecX * (-anchorX) + vecY * (-anchorY)
                    if proj < 0.0:
                        dist_to_seg = seg_len
                    else:  # perpendicular distance to the line (Pythagoras)
                        dist_to_seg = math.sqrt(abs(seg_len ** 2 - proj ** 2))
                if max_dist < dist_to_seg:
                    max_dist = dist_to_seg
                    farthest = i
            if len(keep) <= size:  # still under the point budget: keep the ends
                keep.add(anchor)
                keep.add(floater)
            else:
                stack.append((anchor, farthest))
                stack.append((farthest, floater))
        keep = list(keep)
        keep.sort()
        # BUG FIX: the original did `keep = keep.append(keep[0])`, rebinding
        # keep to None (list.append returns None) and crashing at len(keep)
        # below; it also raised IndexError on a single-element list. Guard the
        # comparison and mutate in place instead.
        if len(keep) >= 2 and keep[0] == keep[1]:
            keep = keep[1:]
            keep.append(keep[0])
        print('%d point(s) of %d points deleted' % (len(pts) - len(keep), len(pts)))
        return [pts[i] for i in keep]
if __name__ == "__main__":
    # run any doctests when executed as a script
    import doctest
    doctest.testmod()
| Python |
#!/cygdrive/c/Python27/python
import feedparser
import urllib2
import codecs
import sqlite3
import datetime
import sys
from wx import Frame, DefaultPosition, Size, Menu, MenuBar, App, grid
from wx import EVT_MENU, EVT_CLOSE
from twisted.python import log
from twisted.internet import wxreactor
wxreactor.install()
# import t.i.reactor only after installing wxreactor:
from twisted.internet import reactor
# should just check file type is audio/video whatevs
def addToFileList(stringToTest):
    """Append *stringToTest* to the global fileList when it contains a known file type.

    NOTE(review): fileTypeList and fileList are not defined anywhere in this
    file, so the bare except silently swallows the resulting NameError (and
    anything else) -- confirm where these globals are meant to come from.
    """
    try:
        for fileType in fileTypeList:
            if(stringToTest.find(fileType) > -1):
                fileList.append(stringToTest)
    except:
        print('Couldn\'t add')
# this is just a traversal function, not used but retained as is useful for trouble shooting
def itObj(parent):
    """Recursively walk a feedparser-style structure, feeding every string to addToFileList.

    Uses '' + x as a duck-typed "is this a string?" probe: it raises TypeError
    for non-strings. Retained for troubleshooting only (see comment above def).
    """
    try:
        # string probe: succeeds only when parent is itself a string
        '' + parent
        addToFileList(parent)
    except TypeError:
        try:
            # if the parent is an array the child will be the actual parent[child]
            for child in parent:
                try:
                    '' + child # will this cause unwanted type errors?
                    itObj(parent[child])
                except TypeError:
                    try:
                        itObj(child)
                    except TypeError:
                        try:
                            '' + parent[child]
                        except TypeError:
                            '' + child
        except:
            # NOTE(review): bare except; also fails if parent is not a string here
            print('error' + parent)
    return
# just a hack for now, needs to check with operating system?
# maybe just don't download text/html or get the user to OK certain types
def isMedia(type):
    """Return True when *type* is a MIME type we treat as downloadable media.

    Currently only 'audio/mpeg' qualifies (see the to-do comments above:
    this should eventually consult the OS or a user-approved list).
    Fixed: the original returned an implicit None for non-media types;
    now returns an explicit bool. `type` shadows the builtin but is kept
    so keyword callers keep working.
    """
    return type == 'audio/mpeg'
# this needs to be non-blocking
def download(url):
    """Download *url* into the local podcasts/ directory, printing progress.

    Python 2 code (urllib2, print statement). Blocks until complete -- the
    comment above the def notes it should become non-blocking.
    """
    # last path segment is used as the local file name
    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url)
    # todo: check folder exists
    # todo: check if we've already downloaded it
    # todo: podcasts go into own directory which is user selectable
    f = open('podcasts/' + file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        # progress line; trailing chr(8)s try to rewind the cursor in-place
        status = r"%10d mb [%3.2f%%]" % (file_size_dl/ 1024/1024 , file_size_dl * 100. / file_size)
        status = status + chr(8)*(len(status)+1)
        print(status,)
    f.close()
def dictFactory(cursor, row):
    """sqlite3 row_factory: map each column name in cursor.description to its row value."""
    return {column[0]: row[position]
            for position, column in enumerate(cursor.description)}
# wx menu item identifiers used by mainFrame
ID_EXIT = 101
ID_DOWNLOAD = 102
def createDatabase():
    """Create the feed/entry/mediaType tables if missing (uses the module-global conn)."""
    cursor = conn.cursor()
    ddl_statements = (
        "CREATE TABLE IF NOT EXISTS feed(feedId INTEGER PRIMARY KEY, name text, url text, lastChecked timestamp)",
        "CREATE TABLE IF NOT EXISTS entry(entryId INTEGER PRIMARY KEY, title text, summary text, createDate timestamp)",
        "CREATE TABLE IF NOT EXISTS mediaType(mediaTypeId INTEGER PRIMARY KEY, type text, download bool)",  # audio/mpeg etc
    )
    for statement in ddl_statements:
        cursor.execute(statement)
#>>> import wx
#>>> import wx.grid
#>>> class MyApp(wx.App):
#... def OnInit(self):
#... frame = wx.Frame(None, -1, title = "wx.Grid - Bitmap example")
#... grid = wx.grid.Grid(frame)
#... grid.CreateGrid(1,1)
#... img = wx.Bitmap("python-logo.png", wx.BITMAP_TYPE_PNG)
#... imageRenderer = MyImageRenderer(img)
#... grid.SetCellRenderer(0,0,imageRenderer)
#... grid.SetColSize(0,img.GetWidth()+2)
#... grid.SetRowSize(0,img.GetHeight()+2)
#... frame.Show(True)
#... return True
#
#>>> class MyImageRenderer(wx.grid.PyGridCellRenderer):
#... def __init__(self, img):
#... wx.grid.PyGridCellRenderer.__init__(self)
#... self.img = img
#... def Draw(self, grid, attr, dc, rect, row, col, isSelected):
#... image = wx.MemoryDC()
#... image.SelectObject(self.img)
#... dc.SetBackgroundMode(wx.SOLID)
#... if isSelected:
#... dc.SetBrush(wx.Brush(wx.BLUE, wx.SOLID))
#... dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
#... else:
#... dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
#... dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
#... dc.DrawRectangleRect(rect)
#... width, height = self.img.GetWidth(), self.img.GetHeight()
#... if width > rect.width-2:
#... width = rect.width-2
#... if height > rect.height-2:
#... height = rect.height-2
#... dc.Blit(rect.x+1, rect.y+1, width, height, image, 0, 0, wx.COPY, True)
#
#>>> app = MyApp(0)
#>>> app.MainLoop()
class mainFrame(Frame):
    """Main application window: File menu (Exit/Download) plus a 1x1 grid."""
    def __init__(self, parent, ID, title):
        Frame.__init__(self, parent, ID, title, DefaultPosition, Size(300, 200))
        menu = Menu()
        menu.Append(ID_EXIT, "E&xit", "Terminate the program")
        menu.Append(ID_DOWNLOAD, "&Download", "Download stuff")
        menuBar = MenuBar()
        menuBar.Append(menu, "&File")
        self.SetMenuBar(menuBar)
        EVT_MENU(self, ID_EXIT, self.DoExit)
        EVT_MENU(self, ID_DOWNLOAD, self.DoDownload)
        g = grid.Grid(self)
        g.CreateGrid(1,1)
        # make sure reactor.stop() is used to stop event loop:
        EVT_CLOSE(self, lambda evt: reactor.stop())
    def DoExit(self, event):
        """File->Exit handler: stop the twisted reactor (ends the wx loop too)."""
        reactor.stop()
    def DoDownload(self, event):
        """File->Download handler: currently a stub."""
        pass
        #download("www.google.com.au")
class feedGrabber(App):
    """wx App that shows the main frame and walks the stored feeds at startup."""
    def twoSecondsPassed(self):
        """Demo callback scheduled via the twisted reactor."""
        print("two seconds passed")
    def OnInit(self):
        """Build the UI, then read every feed row and schedule media downloads.

        NOTE(review): all of the feed/database work below runs inside OnInit
        and blocks startup -- the inline comments say it should move into
        reactor.callLater.
        """
        frame = mainFrame(None, -1, "Hello, world")
        frame.Show(True)
        self.SetTopWindow(frame)
        # look, we can use twisted calls!
        reactor.callLater(2, self.twoSecondsPassed)
        # return True
        conn = sqlite3.connect('feed.sqlite')
        conn.row_factory = dictFactory
        c = conn.cursor()
        """
        for each feed we want to track:
        how often deleted without listening
        how often listened through to end
        " " skipped stuff
        " " paused
        number of episodes listened to
        whether they tried the feed and unsubscribed
        how soon after downloading (if not automatically downloaded) the episode was listened to
        """
        # insert a new feed
        # feed = feedparser.parse('http://www.google.com/reader/public/atom/user%2F01961322947913245777%2Fbundle%2FListen%20Subscriptions')
        # c.execute("insert into feed (name, url, lastChecked) values (?,?,?)", (feed.feed.title, feed.feed.links[0].href, datetime.datetime.now()))
        # this stuff needs to be done in like reactor.callLater or something
        c.execute('select * from feed')
        print("got feeds")
        for row in c:
            feed = feedparser.parse(row['url'])
            #print (feed.feed)
            print (feed.feed.title)
            for entry in feed.entries:
                print('\n' + entry.title + ' ' + entry.updated)
                for link in entry.links:
                    # schedule the first media enclosure of each entry
                    if(isMedia(link.type)):
                        reactor.callLater(1, download, link.href)
                        # download(link.href)
                        break
        conn.close()
        return True
if __name__ == "__main__":
    # log to stdout, hand the wx app to the wxreactor, and enter the loop
    log.startLogging(sys.stdout)
    print("App start")
    app = feedGrabber(0)
    reactor.registerWxApp(app)
    reactor.run()
| Python |
#!/cygdrive/c/Python27/python
import sys, os
from twisted.internet import wxreactor
wxreactor.install ()
from twisted.internet import reactor, defer
from twisted.python import log
import wx
# not sure why i can't go wx.grid, but here we are
from wx import grid
ID_EXIT = 101
ID_DOWNLOAD = 102
class Frame (wx.Frame):
    """Test window demonstrating twisted deferreds interacting with wx modal dialogs."""
    def __init__ (self, parent, id, title):
        wx.Frame.__init__ (self, parent, id, title)
        menu = wx.Menu()
        menu.Append(ID_EXIT, "E&xit", "Terminate the program")
        menu.Append(ID_DOWNLOAD, "&Download", "Download stuff")
        menuBar = wx.MenuBar()
        menuBar.Append(menu, "&File")
        self.SetMenuBar(menuBar)
        wx.EVT_MENU(self, ID_EXIT, self.DoExit)
        wx.EVT_MENU(self, ID_DOWNLOAD, self.DoDownload)
        g = grid.Grid(self)
        g.CreateGrid(1,1)
        # make sure reactor.stop() is used to stop event loop:
        # wx.EVT_CLOSE(self, lambda evt: reactor.stop())
        btn_dlg = wx.Button (self, -1, "Press", size=(240, 180))
        btn_dlg.Bind (wx.EVT_BUTTON, self.click)
    def DoExit (self, event):
        """Menu stub -- intentionally does nothing in this test harness."""
        pass
    def DoDownload (self, event):
        """Menu stub -- intentionally does nothing in this test harness."""
        pass
    def click (self, event):
        """Button handler: schedule tester() and kick off the deferred chain."""
        reactor.callLater(10,self.tester)
        d = defer.Deferred ()
        self.prepare (d)
    def tester(self):
        print("yay")
    def prepare (self, d):
        """
        uncomment reactor.callLater or d.callback to test
        """
        d.addCallback (self.show)
        reactor.callLater (1.0, d.callback, 1) # this will fail
        #d.callback (1) # this will work
    def show (self, val):
        """Deferred callback: queue 20 timed prints, then open a modal dialog."""
        for i in range (20):
            reactor.callLater (i*0.2, self.dosth, i) # these will be printed _after_ dialog has been closed
        dlg = wx.MessageDialog (self, "I am a dialog", "Dialog", style=wx.OK|wx.CANCEL)
        ruckgabe = dlg.ShowModal ()
        dlg.Destroy ()
        if ruckgabe == wx.ID_OK:
            print("okay")
        else:
            print("not okay")
    def dosth (self, idx):
        print("doing something nr" + str(idx) )
class App (wx.App):
    """Minimal wx application wrapper around Frame."""
    def OnInit (self):
        self.frame = Frame (None, -1, "Test")
        self.frame.Show (1)
        self.SetTopWindow (self.frame)
        return True
if __name__ == "__main__":
    # hand the app to the installed wxreactor and start the event loop
    app = App (1)
    reactor.registerWxApp (app)
    reactor.run ()
#!/usr/bin/python
import os
def main():
    """Convert each packaged .prx into a C header via the external bin2c tool.

    Aborts (assert) as soon as bin2c reports a non-zero exit status.
    """
    prx_files = [
        "ISODrivers/Galaxy/galaxy.prx",
        "ISODrivers/March33/march33.prx",
        "ISODrivers/March33/march33_620.prx",
        "ISODrivers/Inferno/inferno.prx",
        "Popcorn/popcorn.prx",
        "Satelite/satelite.prx",
        "Stargate/stargate.prx",
        "SystemControl/systemctrl.prx",
        "contrib/usbdevice.prx",
        "Vshctrl/vshctrl.prx",
        "Recovery/recovery.prx",
    ]
    for rel_path in prx_files:
        source = "../" + rel_path
        # header/symbol name is the bare file name without its extension
        stem = os.path.splitext(os.path.basename(rel_path))[0]
        status = os.system("bin2c %s %s.h %s" % (source, stem, stem))
        assert status == 0
if __name__ == "__main__":
    # script entry point
    main()
| Python |
#!/usr/bin/python
class FakeTime:
    """Stand-in for the time module with a fixed timestamp.

    Installed as gzip.time so gzip archives embed a constant mtime and
    rebuilds are byte-for-byte reproducible.
    """
    _FROZEN_TIMESTAMP = 1225856967.109  # arbitrary but fixed

    def time(self):
        return self._FROZEN_TIMESTAMP
import os, gzip, StringIO
# Monkey-patch gzip's time source so every archive embeds the same mtime
# (makes output deterministic across builds).
gzip.time = FakeTime()
def create_gzip(input, output):
    """Gzip-compress file *input* into file *output* (Python 2: uses StringIO).

    Compresses into an in-memory buffer first, then copies the buffer to the
    output file.
    """
    f_in=open(input, 'rb')
    temp=StringIO.StringIO()
    f=gzip.GzipFile(fileobj=temp, mode='wb')
    f.writelines(f_in)
    f.close()
    f_in.close()
    fout=open(output, 'wb')
    # rewind the in-memory archive before copying it out
    temp.seek(0)
    fout.writelines(temp)
    fout.close()
    temp.close()
def cleanup():
    """Delete the intermediate .gz build artifacts; already-missing files are ignored."""
    for leftover in ("installer.prx.gz", "Rebootex.prx.gz"):
        try:
            os.remove(leftover)
        except OSError:
            pass
def main():
    """Gzip the installer/rebootex PRXs, embed them as C headers, then clean up."""
    create_gzip("../../Installer/installer.prx", "installer.prx.gz")
    create_gzip("../../Rebootex/Rebootex.prx", "Rebootex.prx.gz")
    # external bin2c tool turns each archive into a C array header
    os.system("bin2c installer.prx.gz installer.h installer")
    os.system("bin2c Rebootex.prx.gz Rebootex_prx.h Rebootex_prx")
    cleanup()
if __name__ == "__main__":
    # script entry point
    main()
| Python |
#!/usr/bin/python
from hashlib import *
import sys, struct
def sha512(psid):
    """Return the PSP-style hash of a 16-byte PSID: SHA-1 iterated 512 times.

    (The name is historical -- the algorithm is repeated sha1, not SHA-512.)
    Any input that is not exactly 16 bytes yields an empty byte string.
    """
    if len(psid) != 16:
        return b""
    digest = psid
    for _ in range(512):
        digest = sha1(digest).digest()
    return digest
def get_psid(str):
    """Decode a 32-character hex string into its 16 raw PSID bytes.

    Returns an empty byte string when the input is not exactly 32 characters.
    (The parameter name shadows the builtin `str`; kept for interface
    compatibility with existing callers.)
    """
    if len(str) != 32:
        return b""
    byte_values = [int(str[pos:pos + 2], 16) for pos in range(0, len(str), 2)]
    return struct.pack('16B', *byte_values)
def main():
    """CLI entry: hash the PSID given on the command line and print a C array.

    NOTE(review): struct.unpack('B', xhash[i]) expects a one-byte string, so
    this loop only works on Python 2 (on Python 3 xhash[i] is already an int).
    The trailing commas after print(...) are the Python 2 "no newline" idiom.
    """
    if len(sys.argv) < 2:
        print ("Usage: sha512.py psid")
        exit(0)
    psid = get_psid(sys.argv[1])
    xhash = sha512(psid)
    if len(xhash) == 0:
        print ("wrong PSID")
        exit(0)
    # emit the digest as a C initializer, 8 bytes per line
    print ("{\n\t"),
    for i in range(len(xhash)):
        if i != 0 and i % 8 == 0:
            print ("\n\t"),
        print ("0x%02X, "%(struct.unpack('B', xhash[i])[0])),
    print ("\n},")
if __name__ == "__main__":
    # script entry point
    main()
| Python |
#!/usr/bin/python
import sys, hashlib
def toNID(name):
    """Return the PSP NID for *name*: the first 4 bytes of its SHA-1 digest,
    byte-swapped to little-endian, formatted as an uppercase 0x-hex string."""
    digest_hex = hashlib.sha1(name.encode()).hexdigest().upper()
    swapped = "".join(digest_hex[pos:pos + 2] for pos in (6, 4, 2, 0))
    return "0x" + swapped
if __name__ == "__main__":
    # sanity-check against a known NID before converting the user's names
    assert(toNID("sceKernelCpuSuspendIntr") == "0x092968F4")
    for name in sys.argv[1:]:
        print ("%s: %s"%(name, toNID(name)))
| Python |
#!/usr/bin/python
"""
pspbtcnf_editor: A script that adds modules to a pspbtcnf.bin boot configuration
"""
import sys, os, re
from getopt import *
from struct import *
# signature expected at offset 0 of every pspbtcnf.bin
BTCNF_MAGIC=0x0F803001
# toggled by the -v command-line flag in main()
verbose = False
def print_usage():
    """Print command-line usage for this script."""
    print ("%s: pspbtcnf.bin [-o output.bin] [-a add_module_name:before_module_name:flag]" %(os.path.split(sys.argv[0]))[-1])
def replace_binary(data, offset, newdata):
    """Overwrite *data* at *offset* with *newdata*, keeping the total length.

    Asserts that the patch stays inside the original buffer.
    """
    patched = data[:offset] + newdata + data[offset + len(newdata):]
    assert(len(data) == len(patched))
    return patched
def dump_binary(data, offset, size):
    """Return exactly *size* bytes of *data* starting at *offset*.

    Asserts that the requested range is fully available.
    """
    chunk = data[offset:offset + size]
    assert(len(chunk) == size)
    return chunk
def dump_binary_str(data, offset):
    """Return the NUL-terminated string stored in *data* (bytes) at *offset*.

    Scans forward to the first 0 byte and decodes everything before it.
    Fixed: the original re-packed each byte with struct.pack('b'), a *signed*
    format that raises struct.error for any byte >= 0x80; slicing up to the
    terminator avoids the round-trip entirely.
    Raises ValueError if no terminator exists (the original raised IndexError).
    """
    terminator = data.index(0, offset)
    return data[offset:terminator].decode()
def add_prx_to_bootconf(srcfn, before_modname, modname, modflag):
    """Insert module *modname* into a pspbtcnf.bin, just before *before_modname*.

    srcfn           path to the source pspbtcnf.bin
    before_modname  existing module path the new entry is inserted before
    modname         path string of the module to add
    modflag         low 16 bits of the new module's flags (OR'd with 0x80010000)

    Returns the patched bootconf as bytes. Raises Exception on a malformed
    file or when before_modname is not found.
    """
    fn=open(srcfn, "rb")
    bootconf = fn.read()
    fn.close()
    if len(bootconf) < 64:
        raise Exception("Bad bootconf")
    # 64-byte header: magic, devkit version, then (offset, count) pairs for
    # the mode table, module table and module-name pool
    signature, devkit, modestart, nmodes, modulestart, nmodules, modnamestart, modnameend = unpack('LL8xLL8xLL8xLL8x', bootconf[:64])
    if verbose:
        print ("Devkit: 0x%08X"%(devkit))
        print ("modestart: 0x%08X"%(modestart))
        print ("nmodes: %d"%(nmodes))
        print ("modulestart: 0x%08X"%(modulestart))
        print ("nmodules: 0x%08X"%(nmodules))
        print ("modnamestart: 0x%08X"%(modnamestart))
        print ("modnameend: 0x%08X"%(modnameend))
    if signature != BTCNF_MAGIC or nmodules <= 0 or nmodes <= 0:
        raise Exception("Bad bootconf")
    # append the new module's name (NUL-terminated) to the end of the file
    bootconf = bootconf + modname.encode() + b'\0'
    modnameend += len(modname) + 1
    # locate the 32-byte module record we must insert before
    i=0
    while i < nmodules:
        module_path, module_flags = unpack('L4xL4x16x', bootconf[modulestart+i*32:modulestart+(i+1)*32])
        module_name = dump_binary_str(bootconf, modnamestart+module_path)
        if verbose:
            print ("[%02d]: Module path: %s flag: 0x%08X"%(i, module_name, module_flags))
        if before_modname == module_name:
            break
        i+=1
    if i >= nmodules:
        raise Exception("module %s not found"%(before_modname))
    # build the new record by cloning the one at the insertion point and
    # patching its name offset and flags
    module_path = modnameend - len(modname) - 1 - modnamestart
    module_flag = 0x80010000 | (modflag & 0xFFFF)
    newmod = dump_binary(bootconf, modulestart+i*32, 32)
    newmod = replace_binary(newmod, 0, pack('L', module_path))
    newmod = replace_binary(newmod, 8, pack('L', module_flag))
    bootconf = bootconf[0:modulestart+i*32] + newmod + bootconf[modulestart+i*32:]
    # fix up the header: one more module, and the name pool shifted by 32 bytes
    nmodules+=1
    bootconf = replace_binary(bootconf, 0x24, pack('L', nmodules))
    modnamestart += 32
    bootconf = replace_binary(bootconf, 0x30, pack('L', modnamestart))
    modnameend += 32
    bootconf = replace_binary(bootconf, 0x34, pack('L', modnameend))
    # bump the module count stored in each 32-byte mode record
    i = 0
    while i < nmodes:
        num = unpack('H', bootconf[modestart+i*32:modestart+i*32+2])[0]
        num += 1
        bootconf = replace_binary(bootconf, modestart + i * 32, pack('H', num))
        i += 1
    return bootconf
def write_file(output_fn, data):
    """Write *data* to *output_fn*, truncating any existing file."""
    with open(output_fn, "wb") as out:
        out.write(data)
def main():
    """Parse the command line and apply -a (add module) to the input bootconf.

    Usage: pspbtcnf.bin [-o output.bin] [-a add:before:flag] [-v] [-h]
    With dst_filename '-' (the default) the result is discarded.
    """
    global verbose
    try:
        optlist, args = gnu_getopt(sys.argv, "a:o:vh")
    except GetoptError as err:
        print(err)
        print_usage()
        sys.exit(1)
    # default configure
    verbose = False
    dst_filename = "-"
    add_module = ""
    for o, a in optlist:
        if o == "-v":
            verbose = True
        elif o == "-h":
            print_usage()
            sys.exit()
        elif o == "-o":
            dst_filename = a
        elif o == "-a":
            add_module = a
        else:
            assert False, "unhandled option"
    if verbose:
        print (optlist, args)
    if len(args) < 2:
        print ("Missing input pspbtcnf.bin")
        sys.exit(1)
    src_filename = args[1]
    if verbose:
        print ("src_filename: " + src_filename)
        print ("dst_filename: " + dst_filename)
    # check add_module
    if add_module != "":
        # NOTE(review): re.I is being passed as re.split's *maxsplit* argument
        # (re.I == 2), not as a flag -- it happens to limit the split to 3
        # parts, which the len check below relies on. Confirm before changing.
        t = (re.split(":", add_module, re.I))
        if len(t) != 3:
            print ("Bad add_module input")
            sys.exit(1)
        add_module, before_module, add_module_flag = (re.split(":", add_module, re.I))
        if verbose:
            print ("add_module: " + add_module)
            print ("before_module: " + before_module)
            print ("add_module_flag: " + add_module_flag)
    if add_module != "":
        # flag string is parsed as hexadecimal
        result = add_prx_to_bootconf(src_filename, before_module, add_module, int(add_module_flag, 16))
        if dst_filename == "-":
            # print("Bootconf result:")
            # print(result)
            pass
        else:
            write_file(dst_filename, result)
if __name__ == "__main__":
    # script entry point
    main()
| Python |
#!/usr/bin/python
class FakeTime:
    """time-module stand-in returning a constant timestamp.

    Installed as gzip.time below so compressed output is reproducible.
    """
    _FIXED = 1225856967.109  # arbitrary but constant mtime

    def time(self):
        return self._FIXED
import sys, os, struct, gzip, hashlib, StringIO
# Monkey-patch gzip's time source so archives embed a constant mtime
# (deterministic, byte-for-byte reproducible output).
gzip.time = FakeTime()
def binary_replace(data, newdata, offset):
    """Return *data* with the bytes at *offset* replaced by *newdata* (length preserved)."""
    head = data[:offset]
    tail = data[offset + len(newdata):]
    return head + newdata + tail
def prx_compress(output, hdr, input, mod_name="", mod_attr=0xFFFFFFFF):
    """Gzip a PRX/ELF and prepend a patched 0x150-byte PSP file header.

    output    path of the compressed file to write
    hdr       path of the 0x150-byte header template
    input     path of the source ELF/PRX
    mod_name  optional module name patched into the header (padded/cut to 28)
    mod_attr  optional module attributes; 0xFFFFFFFF means "leave template value"

    Returns 0 on success, -1 when *input* is not an ELF. Python 2 code
    (StringIO). gzip output is deterministic thanks to the FakeTime patch.
    """
    a=open(hdr, "rb")
    fileheader = a.read();
    a.close()
    # sanity-check the ELF magic of the input before doing any work
    a=open(input, "rb")
    elf = a.read(4);
    a.close()
    if (elf != '\x7fELF'.encode()):
        print ("not a ELF/PRX file!")
        return -1
    uncompsize = os.stat(input).st_size
    # gzip the whole input into an in-memory buffer
    f_in=open(input, 'rb')
    temp=StringIO.StringIO()
    f=gzip.GzipFile(fileobj=temp, mode='wb')
    f.writelines(f_in)
    f.close()
    f_in.close()
    prx=temp.getvalue()
    temp.close()
    digest=hashlib.md5(prx).digest()
    filesize = len(fileheader) + len(prx)
    if mod_name != "":
        # module name field is exactly 28 bytes, NUL-padded
        if len(mod_name) < 28:
            mod_name += "\x00" * (28-len(mod_name))
        else:
            mod_name = mod_name[0:28]
        fileheader = binary_replace(fileheader, mod_name.encode(), 0xA)
    if mod_attr != 0xFFFFFFFF:
        fileheader = binary_replace(fileheader, struct.pack('H', mod_attr), 0x4)
    # patch sizes and the MD5 of the compressed payload into the header
    fileheader = binary_replace(fileheader, struct.pack('L', uncompsize), 0x28)
    fileheader = binary_replace(fileheader, struct.pack('L', filesize), 0x2c)
    fileheader = binary_replace(fileheader, struct.pack('L', len(prx)), 0xb0)
    fileheader = binary_replace(fileheader, digest, 0x140)
    a=open(output, "wb")
    assert(len(fileheader) == 0x150)
    a.write(fileheader)
    a.write(prx)
    a.close()
    # best-effort removal of a stale scratch file from older versions
    try:
        os.remove("tmp.gz")
    except OSError:
        pass
    return 0
def main():
    """CLI entry: prx_compress(outfile, prxhdr, infile[, modname[, modattr]]).

    modattr, when given, is parsed as hexadecimal.
    """
    if len(sys.argv) < 4:
        print ("Usage: %s outfile prxhdr infile [modname] [modattr]\n"%(sys.argv[0]))
        exit(-1)
    if len(sys.argv) < 5:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3])
    elif len(sys.argv) < 6:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5], 16))
if __name__ == "__main__":
    # script entry point
    main()
| Python |
# coding: utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.support.wait import WebDriverWait
import unittest, time, re
class Untitled(unittest.TestCase):
    """Selenium UI tests for the php4dvd demo application (Python 2 syntax).

    Assumes php4dvd is served at http://localhost/php4dvd/ with an
    admin/admin account -- TODO confirm fixture data (the search and delete
    tests presume at least one movie named 'somefilm' exists).
    """
    # landing page after login (movie list sorted by name ascending)
    main_page = "/php4dvd/#!/sort/name%20asc/"
    def setUp(self):
        """Start a fresh Firefox session before every test."""
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://localhost"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_login(self):
        """Log in as admin/admin and verify we left the login page."""
        driver = self.driver
        # open | /php4dvd/ |
        driver.get(self.base_url + "/php4dvd/")
        # type | id=username | admin
        username = driver.find_element_by_id("username")
        username.clear()
        username.send_keys("admin")
        # type | name=password | admin
        password = driver.find_element_by_name("password")
        password.clear()
        password.send_keys("admin")
        # click | name=submit |
        driver.find_element_by_name("submit").click()
        # make sure we are not at login page
        self.assertNotEqual(self.driver.current_url , self.base_url + "/php4dvd/")
    def test_create_film(self):
        """Add a movie with all fields filled; expect a redirect to the movie page."""
        self.test_login()
        driver = self.driver
        # open | /php4dvd/#!/sort/name%20asc/ |
        driver.get(self.base_url + self.main_page)
        # click | css=img[alt="Add movie"] |
        driver.find_element_by_css_selector("img[alt=\"Add movie\"]").click()
        # type | name=name | Somefilm
        name = driver.find_element_by_name("name")
        name.clear()
        name.send_keys("Somefilm")
        # type | name=year | 2014
        year = driver.find_element_by_name("year")
        year.clear()
        year.send_keys("2014")
        # type | name=notes | very nice movie. Would like to see it again.
        notes = driver.find_element_by_name("notes")
        notes.clear()
        notes.send_keys("Very nice movie. Would like to see it again.")
        # type | id=text_languages_0 | english, russian
        text_languages = driver.find_element_by_id("text_languages_0")
        text_languages.clear()
        text_languages.send_keys("english, russian")
        # type | name=subtitles | russian
        subtitles = driver.find_element_by_name("subtitles")
        subtitles.clear()
        subtitles.send_keys("russian")
        # click | id=submit |
        driver.find_element_by_id("submit").click()
        # need to redirect to another page
        self.assertEqual(self.driver.current_url.split('&')[0] , self.base_url + "/php4dvd/?go=movie")
    def test_create_film_without_required_fields(self):
        """Submit the add-movie form without the required year; expect an error, no redirect."""
        self.test_login()
        driver = self.driver
        # open | /php4dvd/#!/sort/name%20asc/ |
        driver.get(self.base_url + self.main_page)
        # click | css=img[alt="Add movie"] |
        driver.find_element_by_css_selector("img[alt=\"Add movie\"]").click()
        # type | name=name | Somefilm
        name = driver.find_element_by_name("name")
        name.clear()
        name.send_keys("Somefilm")
        # click | id=submit |
        driver.find_element_by_id("submit").click()
        # looking for error msg
        self.assertTrue(self.is_element_present(By.CLASS_NAME, 'error'))
        # need to stay on same page
        self.assertEqual(self.driver.current_url , self.base_url + "/php4dvd/?go=add")
    def test_delete_film(self):
        """Remove the first listed movie, confirming the JS confirmation dialog."""
        self.test_login()
        driver = self.driver
        # open | /php4dvd/#!/sort/name%20asc/ |
        driver.get(self.base_url + self.main_page)
        # click | css=div.nocover |
        driver.find_element_by_css_selector("div.nocover").click()
        # click | css=img[alt="Remove"] |
        driver.find_element_by_css_selector("img[alt=\"Remove\"]").click()
        # assertConfirmation | Are you sure you want to remove this? |
        self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Are you sure you want to remove this[\s\S]$")
        # need to redirect to another page
        self.assertEqual(self.driver.current_url , self.base_url + self.main_page)
    def test_find_existing_film(self):
        """Search for an existing title; expect at least one result link."""
        self.test_login()
        driver = self.driver
        # open | /php4dvd/#!/sort/name%20asc/ |
        driver.get(self.base_url + self.main_page)
        # type | id=q | somefilm
        search = driver.find_element_by_id("q")
        search.clear()
        search.send_keys("somefilm")
        search.send_keys(Keys.ENTER)
        # looking for some links to film
        self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="results"]/a[1]'))
    def test_find_non_existing_film(self):
        """Search for a missing title; expect the 'No movies where found.' block."""
        self.test_login()
        driver = self.driver
        # open | /php4dvd/#!/sort/name%20asc/ |
        driver.get(self.base_url + self.main_page)
        # type | id=q | another
        search = driver.find_element_by_id("q")
        search.clear()
        search.send_keys("anotherfilm")
        search.send_keys(Keys.ENTER)
        # looking there is div inside #id=result with .content (No movies where found.)
        self.assertTrue(self.is_element_present(By.XPATH, '//*[@id="results"]/div[@class="content"]'))
    # initially I thought part of the description needed to be deleted
    #def test_delete_description(self):
    #    self.test_login()
    #    driver = self.driver
    #    # open | /php4dvd/#!/sort/name%20asc/ |
    #    driver.get(self.base_url + "/php4dvd/#!/sort/name%20asc/")
    #    # click | css=div.nocover |
    #    driver.find_element_by_css_selector("div.nocover").click()
    #    # click | css=img[alt="Edit"] |
    #    driver.find_element_by_css_selector("img[alt=\"Edit\"]").click()
    #    # type | name=notes |
    #    driver.find_element_by_name("notes").clear()
    #    driver.find_element_by_name("notes").send_keys("")
    #    # type | id=text_languages_0 |
    #    driver.find_element_by_id("text_languages_0").clear()
    #    driver.find_element_by_id("text_languages_0").send_keys("")
    #    # type | name=subtitles |
    #    driver.find_element_by_name("subtitles").clear()
    #    driver.find_element_by_name("subtitles").send_keys("")
    #    # click | id=submit |
    #    driver.find_element_by_id("submit").click()
    #
    #    # need to redirect to ?go=movie&... page
    #    self.assertEqual(self.driver.current_url.split('&')[0] , self.base_url + "/php4dvd/?go=movie")
    #def test_delete_description_with_required_fields(self):
    #    self.test_login()
    #    driver = self.driver
    #    # open | /php4dvd/#!/sort/name%20asc/ |
    #    driver.get(self.base_url + "/php4dvd/#!/sort/name%20asc/")
    #    # click | css=div.nocover |
    #    driver.find_element_by_css_selector("div.nocover").click()
    #    # click | css=img[alt="Edit"] |
    #    driver.find_element_by_css_selector("img[alt=\"Edit\"]").click()
    #    # type | name=year |
    #    driver.find_element_by_name("year").clear()
    #    driver.find_element_by_name("year").send_keys("")
    #    # type | name=notes |
    #    driver.find_element_by_name("notes").clear()
    #    driver.find_element_by_name("notes").send_keys("")
    #    # type | id=text_languages_0 |
    #    driver.find_element_by_id("text_languages_0").clear()
    #    driver.find_element_by_id("text_languages_0").send_keys("")
    #    # type | name=subtitles |
    #    driver.find_element_by_name("subtitles").clear()
    #    driver.find_element_by_name("subtitles").send_keys("")
    #    # click | id=submit |
    #    driver.find_element_by_id("submit").click()
    #
    #    # need to stay on same page after submiting
    #    self.assertEqual(self.driver.current_url.split('&')[0] , self.base_url + "/php4dvd/?go=edit")
    def is_element_present(self, how, what):
        """Return True when locator (how, what) finds an element without raising."""
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e:
            return False
        return True
    def is_alert_present(self):
        """Return True when a JS alert is currently open.

        NOTE(review): NoAlertPresentException is not imported at the top of
        this file, so the except clause itself raises NameError when no alert
        is present -- import it from selenium.common.exceptions.
        """
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException, e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert (per accept_next_alert) and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        """Close the browser and fail the test if any verification errors accumulated."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # run the Selenium test suite
    unittest.main()
| Python |
from django.db import models
from apps.common.models import CommonItem
# Create your models here.
class Post(CommonItem):
    """A blog post.

    NOTE(review): created_at is presumably inherited from CommonItem --
    confirm against apps.common.models.
    """
    title = models.CharField(max_length=90)
    slug = models.SlugField(max_length=40)
    body = models.TextField()
    @models.permalink
    def get_absolute_url(self):
        """Return the ('post_view', args, kwargs) tuple that @permalink reverses to a URL."""
        return ('post_view', (), {
            'year': self.created_at.year,
            'month': self.created_at.month,
            'day': self.created_at.day,
            'slug': self.slug})
    def __unicode__(self):
        return self.title
class Comment(CommonItem):
    """A reader comment attached to a blog Post."""
    post = models.ForeignKey(Post)
    body = models.TextField()
    def __unicode__(self):
        # Bug fix: __unicode__ must return a unicode string; the original
        # returned the raw integer primary key, which makes Django raise
        # TypeError whenever the comment is rendered (admin, templates, ...).
        return unicode(self.id)
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder smoke test proving the Django test runner is wired up."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        total = 1 + 1
        self.assertEqual(total, 2)
| Python |
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import Site
class BlogFeed(Feed):
    """Syndication feed of the ten most recent blog posts.

    NOTE(review): title and description evaluate Site.objects.get_current()
    at import time, which queries the database when this module first loads;
    consider converting them to methods. Left unchanged here.
    """
    title = str(Site.objects.get_current().name)
    link = '/'
    description = 'The latest stories from ' + title
    def items(self):
        # Import needs to be inside due to dependency issue
        from feedbag.apps.blog.models import Post
        return Post.objects.all() \
            .order_by('-created_at')[:10]
    def item_title(self, item):
        return item.title
    def item_description(self, item):
        return item.body
    def item_pubdate(self, item):
        return item.created_at
    def item_link(self, item):
        return item.get_absolute_url()
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
def blog(request):
    """Render the blog index page."""
    ctx = RequestContext(request)
    return render_to_response('blog.html', {}, context_instance=ctx)
def post(request, year, month, day, slug):
    """Render a single post page (the date/slug arguments are currently unused)."""
    ctx = RequestContext(request)
    return render_to_response('post.html', {}, context_instance=ctx)
def archive_day(request, year, month, day):
    """Render the per-day archive page (date arguments are currently unused)."""
    ctx = RequestContext(request)
    return render_to_response('archive.html', {}, context_instance=ctx)
def archive_month(request, year, month):
    """Render the per-month archive page (date arguments are currently unused)."""
    ctx = RequestContext(request)
    return render_to_response('archive.html', {}, context_instance=ctx)
def archive_year(request, year):
    """Render the per-year archive page (the year argument is currently unused)."""
    ctx = RequestContext(request)
    return render_to_response('archive.html', {}, context_instance=ctx)
from feedbag.apps.blog.models import Post
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
def post_sitemap():
    """Build a GenericSitemap covering all blog posts, dated by created_at."""
    sitemap_config = {
        'queryset': Post.objects.all(),
        'date_field': 'created_at',
    }
    return GenericSitemap(sitemap_config, priority=0.6)
from django.contrib import admin
from django.db import models
from feedbag.apps.blog.models import Post, Comment
class PostAdmin(admin.ModelAdmin):
    """Admin options for Post: newest first, slug auto-filled from the title."""
    ordering = ('-created_at', 'id')
    prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
class CommentAdmin(admin.ModelAdmin):
    """Admin options for Comment: newest first."""
    ordering = ('-created_at', 'id')
admin.site.register(Comment, CommentAdmin)
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.