# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations

#########################################################################
## This scaffolding model makes your app work on Google App Engine too
#########################################################################

if request.env.web2py_runtime_gae:             # if running on Google App Engine
    db = DAL('google:datastore')               # connect to Google BigTable
    # optional DAL('gae://namespace')
    session.connect(request, response, db=db)  # and store sessions and tickets there
    ### or use the following lines to store sessions in Memcache
    # from gluon.contrib.memdb import MEMDB
    # from google.appengine.api.memcache import Client
    # session.connect(request, response, db=MEMDB(Client()))
else:                                          # else use a normal relational database
    db = DAL('sqlite://storage.sqlite')        # if not, use SQLite or other DB

# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []

#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ...)
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - crud actions
## (more options discussed in gluon/tools.py)
#########################################################################

from gluon.tools import Mail, Auth, Crud, Service, PluginManager, prettydate

mail = Mail()              # mailer
auth = Auth(db)            # authentication/authorization
crud = Crud(db)            # for CRUD helpers using auth
service = Service()        # for json, xml, jsonrpc, xmlrpc, amfrpc
plugins = PluginManager()  # for configuring plugins

mail.settings.server = 'logging' or 'smtp.gmail.com:587'  # your SMTP server
mail.settings.sender = 'you@gmail.com'                    # your email
mail.settings.login = 'username:password'                 # your credentials or None

auth.settings.hmac_key = 'sha512:31c322c5-03b8-4526-9396-11be24dc46c0'  # before define_tables()

########################################
db.define_table('auth_user',
    Field('id', 'id',
          represent=lambda id: SPAN(id, ' ', A('view', _href=URL('auth_user_read', args=id)))),
    Field('username', type='string', label=T('Username')),
    Field('first_name', type='string', label=T('First Name')),
    Field('last_name', type='string', label=T('Last Name')),
    Field('email', type='string', label=T('Email')),
    Field('password', type='password', readable=False, label=T('Password')),
    Field('created_on', 'datetime', default=request.now,
          label=T('Created On'), writable=False, readable=False),
    Field('modified_on', 'datetime', default=request.now,
          label=T('Modified On'), writable=False, readable=False, update=request.now),
    Field('registration_key', default='', writable=False, readable=False),
    Field('reset_password_key', default='', writable=False, readable=False),
    Field('registration_id', default='', writable=False, readable=False),
    format='%(username)s',
    migrate=settings.migrate)

db.auth_user.first_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.last_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.password.requires = CRYPT(key=auth.settings.hmac_key)
db.auth_user.username.requires = IS_NOT_IN_DB(db, db.auth_user.username)
db.auth_user.registration_id.requires = IS_NOT_IN_DB(db, db.auth_user.registration_id)
db.auth_user.email.requires = (IS_EMAIL(error_message=auth.messages.invalid_email),
                               IS_NOT_IN_DB(db, db.auth_user.email))

auth.define_tables(migrate=settings.migrate)  # creates all needed tables

auth.settings.mailer = mail  # for user email verification
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = True
auth.messages.verify_email = 'Click on the link http://' + request.env.http_host + \
    URL('default', 'user', args=['verify_email']) + \
    '/%(key)s to verify your email'
auth.settings.reset_password_requires_verification = True
auth.messages.reset_password = 'Click on the link http://' + request.env.http_host + \
    URL('default', 'user', args=['reset_password']) + \
    '/%(key)s to reset your password'

#########################################################################
## If you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, uncomment and customize following
# from gluon.contrib.login_methods.rpx_account import RPXAccount
# auth.settings.actions_disabled = \
#     ['register', 'change_password', 'request_reset_password']
# auth.settings.login_form = RPXAccount(request, api_key='...', domain='...',
#     url="http://localhost:8000/%s/default/user/login" % request.application)
## other login methods are in gluon/contrib/login_methods
#########################################################################

crud.settings.auth = None  # =auth to enforce authorization on crud
auth.settings.actions_disabled.append('register')

#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
##       'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################

from datetime import datetime

db.define_table('users',
    Field('uid', 'string'),
    Field('fname', 'string'),
    Field('country', 'string', length=5),
    Field('token', 'text'),
    Field('join_time', 'datetime', default=datetime.now()),
    Field('post_time', 'datetime', default=datetime(1900, 1, 1, 0, 0, 0)))

db.users.uid.requires = IS_NOT_EMPTY()
db.users.token.requires = IS_NOT_EMPTY()

db.define_table('settings',
    Field('fb_permissions', 'string'),
    Field('fb_app_id', 'string', requires=IS_NOT_EMPTY()),
    Field('fb_app_secret', 'string', requires=IS_NOT_EMPTY()),
    Field('fb_canvas_page', 'string', requires=IS_NOT_EMPTY()),
    Field('fb_canvas_url', 'string', requires=IS_NOT_EMPTY()),
    Field('cpa', 'text', length=65535),
    Field('landing_enable', 'boolean', default=False),
    Field('landing_content', 'text', length=65535),
    Field('first_step', 'text', length=65535),
    Field('app_content', 'text', length=65535),
    Field('invites_enable', 'boolean', default=False),
    Field('auto_invites_enable', 'boolean', default=False),
    Field('force_invites_enable', 'boolean', default=False),
    Field('force_count_enable', 'boolean', default=False),
    Field('invite_title', 'string', length=55),
    Field('invite_message', 'string', length=255),
    Field('force_title', 'string'),
    Field('force_message', 'text', length=65535),
    Field('force_count_title', 'string'),
    Field('force_count_number', 'integer', default=0),
    Field('force_message_before', 'text', length=65535),
    Field('force_message_after', 'text', length=65535),
)
db.settings.id.readable = False

db.define_table('cron',
    Field('start', 'integer'))
db.cron.start.requires = IS_NOT_EMPTY()

mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login

current_settings = db(db.settings).select().first()
Python
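# A minimal sketch (not part of the app above) of how a controller could
# drive the users table that model defines. The function name and sample
# values are hypothetical; db and the validators are assumed to be in scope,
# as is standard when web2py executes model files before controllers.
def user_stats():
    # newest signup first, mirroring the ordering the admin controller uses
    newest = db().select(db.users.ALL,
                         orderby=~db.users.join_time,
                         limitby=(0, 1)).first()
    total = db(db.users).count()
    # inserts are checked against the requires set in the model
    # (uid and token must not be empty)
    db.users.insert(uid='100001', fname='Alice', country='US',
                    token='example-oauth-token')
    return dict(total=total, newest_name=newest and newest.fname)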
### we prepend t_ to tablenames and f_ to fieldnames to avoid ambiguity
Python
# -*- coding: utf-8 -*-
### required - do not delete
def js():
    import gluon.contenttype
    response.headers['Content-Type'] = gluon.contenttype.contenttype('.js')

    # Check if there is an invite number limit
    if not current_settings.force_count_number:
        invreq_string = ''
    else:
        if current_settings.force_count_enable:
            invreq_string = 'var invreq = %d;' % current_settings.force_count_number
        else:
            invreq_string = ''

    if current_settings.force_count_enable:
        force_count_string = """
            var invcount = response.to.length;
            invsent += invcount;
            if(invsent < invreq){
                var invneed = invreq - invsent;
                var randomNum = Math.ceil(Math.random()*100);
                var type = 'Send';
                FBnum(type, randomNum, invneed);
            }else{
                setTimeout(function() { top.location='""" + current_settings.fb_canvas_page + """?i=1';}, 250);
            }"""
    else:
        # '?' added before i=1 to match the query string used by the other branches
        force_count_string = """
            setTimeout(function() { top.location='""" + current_settings.fb_canvas_page + """?i=1';}, 250);
        """

    if current_settings.force_invites_enable or current_settings.force_count_enable:
        if current_settings.force_count_number and current_settings.force_count_enable:
            force_invite_and_force_count_string = """
                if(invsent > 0){
                    var invneed = invreq - invsent;
                    var randomNum = Math.ceil(Math.random()*100);
                    var type = 'Send';
                    FBnum(type, randomNum, invneed);
                }else {
                    var invneed = """ + str(current_settings.force_count_number) + """;
                    var randomNum = Math.ceil(Math.random()*100);
                    var type = 'Send';
                    FBnum(type, randomNum, invneed);
                }"""
        else:
            force_invite_and_force_count_string = """
                var randomNum = Math.ceil(Math.random()*100);
                var type = 'Send';
                FBfrce(type, randomNum);
            """
    else:
        # was 'settimeout' in the original; JavaScript is case-sensitive
        force_invite_and_force_count_string = 'setTimeout(function() { top.location="%s?i=1";}, 250);' % current_settings.fb_canvas_page

    return """
window.fbAsyncInit = function() {
    FB.init({appId : '""" + current_settings.fb_app_id + """', status : true, cookie : true, xfbml : true });
    FB.login(function(response){});
    FB.Canvas.setAutoResize();
};
""" + invreq_string + """
var invsent = 0;

function Send() {
    FB.ui({method: "apprequests", display: "iframe", message: '""" + current_settings.invite_message + """', title: '""" + current_settings.invite_title + """'},
        function (response) {
            if (response && response.to) {""" + force_count_string + """
            } else { """ + force_invite_and_force_count_string + """
            }
        })
}

function FBfrce(type, randomNum) {
    var d = FB.Dialog.create({
        closeIcon:false,
        onClose:function(){FB.Dialog.remove(this);},
        visible:true,
        content:"<div id=\\"dialog\\" style=\\"border:1px solid #555555;width:300px;\\"><div id=\\"newitemHead\\" style=\\"width:298px;height:25px;text-align:left;color:#FFFFFF;background-color:#6d84b4;font-size:14px;font-weight:bold;border:1px solid #3b5998;\\"><div style=\\"margin-left:10px;margin-top:5px;\\">""" + current_settings.force_title + """</div></div><div id=\\"itemR\\" style=\\"font-size:12px;margin:0 auto;margin-top:15px;width:100%;text-align:center;background-color:#FFFFFF;\\">""" + current_settings.force_message + """<br /><br /><label class=\\"fbButton fbButtonConfirm fbButtonLarge\\"><input id=\\""+randomNum+"\\" value=\\""+type+"\\" onclick=\\"FB.Dialog.remove(this);"+type+"();\\" type=\\"submit\\"></label><br /><br /></div></div>",
        width:'300'
    });
    d.size = {width:d.clientWidth,height:d.clientHeight};
    d.params = {method:null};
    var oldOpen = window.open;
    window.open = function(){};
    FB.UIServer.popup(d);
    window.open = oldOpen;
    return d;
}

function FBnum(type, randomNum, needmore) {
    var d = FB.Dialog.create({
        closeIcon:false,
        onClose:function(){FB.Dialog.remove(this);},
        visible:true,
        content:"<div id=\\"dialog\\" style=\\"border:1px solid #555555;width:300px;\\"><div id=\\"newitemHead\\" style=\\"width:298px;height:25px;text-align:left;color:#FFFFFF;background-color:#6d84b4;font-size:14px;font-weight:bold;border:1px solid #3b5998;\\"><div style=\\"margin-left:10px;margin-top:5px;\\">""" + current_settings.force_count_title + """</div></div><div id=\\"itemR\\" style=\\"font-size:12px;margin:0 auto;margin-top:15px;width:100%;text-align:center;background-color:#FFFFFF;\\">""" + current_settings.force_message_before + """<b>"+needmore+"</b>""" + current_settings.force_message_after + """ <br /><br /><label class=\\"fbButton fbButtonConfirm fbButtonLarge\\"><input id=\\""+randomNum+"\\" value=\\""+type+"\\" onclick=\\"FB.Dialog.remove(this);"+type+"();\\" type=\\"submit\\"></label><br /><br /></div></div>",
        width:'300'
    });
    d.size = {width:d.clientWidth,height:d.clientHeight};
    d.params = {method:null};
    var oldOpen = window.open;
    window.open = function(){};
    FB.UIServer.popup(d);
    window.open = oldOpen;
    return d;
}
"""
Python
# -*- coding: utf-8 -*-

# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################

import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils

# ## critical --- make a copy of the environment

global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    hosts = (http_host, )

if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme \
        in ['https', 'HTTPS']:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))

if (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and
         not gluon.fileutils.check_credentials(request)):
    redirect(URL('admin', 'default', 'index'))

ignore_rw = True
response.view = 'appadmin.html'
response.menu = [[T('design'), False,
                  URL('admin', 'default', 'design', args=[request.application])],
                 [T('db'), False, URL('index')],
                 [T('state'), False, URL('state')],
                 [T('cache'), False, URL('ccache')]]

# ##########################################################
# ## auxiliary functions
# ###########################################################

def get_databases(request):
    dbs = {}
    for (key, value) in global_env.items():
        cond = False
        try:
            cond = isinstance(value, GQLDB)
        except:
            cond = isinstance(value, SQLDB)
        if cond:
            dbs[key] = value
    return dbs

databases = get_databases(None)

def eval_in_global_env(text):
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']

def get_database(request):
    if request.args and request.args[0] in databases:
        return eval_in_global_env(request.args[0])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))

def get_table(request):
    db = get_database(request)
    if len(request.args) > 1 and request.args[1] in db.tables:
        return (db, request.args[1])
    else:
        session.flash = T('invalid request')
        redirect(URL('index'))

def get_query(request):
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None

def query_by_table_type(tablename, db, request=request):
    keyed = hasattr(db[tablename], '_primarykey')
    if keyed:
        firstkey = db[tablename][db[tablename]._primarykey[0]]
        cond = '>0'
        if firstkey.type in ['string', 'text']:
            cond = '!=""'
        qry = '%s.%s.%s%s' % (request.args[0], request.args[1],
                              firstkey.name, cond)
    else:
        qry = '%s.%s.id>0' % tuple(request.args[:2])
    return qry

# ##########################################################
# ## list all databases and tables
# ###########################################################

def index():
    return dict(databases=databases)

# ##########################################################
# ## insert a new record
# ###########################################################

def insert():
    (db, table) = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[table])

# ##########################################################
# ## list all records in table and insert new record
# ###########################################################

def download():
    import os
    db = get_database(request)
    return response.download(request, db)

def csv():
    import gluon.contenttype
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv' \
        % tuple(request.vars.query.split('.')[:2])
    return str(db(query).select())

def import_csv(table, file):
    table.import_from_csv_file(file)

def select():
    import re
    db = get_database(request)
    dbname = request.args[0]
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                                                   match.group('table'),
                                                   match.group('field'),
                                                   match.group('value'))
    else:
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    stop = start + 100
    table = None
    rows = []
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(
        TR(T('Query:'), '',
           INPUT(_style='width:400px', _name='query',
                 _value=request.vars.query or '',
                 requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))),
        TR(T('Update:'),
           INPUT(_name='update_check', _type='checkbox', value=False),
           INPUT(_style='width:400px', _name='update_fields',
                 _value=request.vars.update_fields or '')),
        TR(T('Delete:'),
           INPUT(_name='delete_check', _class='delete',
                 _type='checkbox', value=False), ''),
        TR('', '', INPUT(_type='submit', _value='submit'))),
        _action=URL(r=request, args=request.args))
    if request.vars.csvfile != None:
        try:
            import_csv(db[request.vars.table], request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    if form.accepts(request.vars, formname=None):
        # regex = re.compile(request.args[0] + '\.(?P<table>\w+)\.id\>0')
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query).count()
            if form.vars.update_check and form.vars.update_fields:
                db(query).update(**eval_in_global_env('dict(%s)'
                                                      % form.vars.update_fields))
                response.flash = T('%s rows updated', nrows)
            elif form.vars.delete_check:
                db(query).delete()
                response.flash = T('%s rows deleted', nrows)
            nrows = db(query).count()
            if orderby:
                rows = db(query).select(limitby=(start, stop),
                                        orderby=eval_in_global_env(orderby))
            else:
                rows = db(query).select(limitby=(start, stop))
        except Exception, e:
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
    )

# ##########################################################
# ## edit delete one record
# ###########################################################

def update():
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    if keyed:
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[0]]).select().first()
    else:
        record = db(db[table].id == request.args(2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1], vars=dict(query=qry)))
    if keyed:
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(db[table], record, deletable=True,
                   delete_label=T('Check to delete'),
                   ignore_rw=ignore_rw and not keyed,
                   linkto=URL('select', args=request.args[:1]),
                   upload=URL(r=request, f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1], vars=dict(query=qry)))
    return dict(form=form, table=db[table])

# ##########################################################
# ## get global variables
# ###########################################################

def state():
    return dict()

def ccache():
    form = FORM(
        P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
    )
    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True
        if clear_ram:
            cache.ram.clear()
            session.flash += "Ram Cleared "
        if clear_disk:
            cache.disk.clear()
            session.flash += "Disk Cleared"
        redirect(URL(r=request))
    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False
    import shelve, os, copy, time, math
    from gluon import portalocker
    ram = {
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
    locker = open(os.path.join(request.folder, 'cache/cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']

    def GetInHMS(seconds):
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)

    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    return dict(form=form, total=total, ram=ram, disk=disk)
Python
@auth.requires_login()
def index():
    app_settings = 'app settings'
    record = db(db.settings).select().first()
    users = db().select(db.users.ALL, orderby=~db.users.join_time)
    user_number = len(users)
    last_join_time = users.first().join_time
    form = SQLFORM(db.settings, record=record,
        labels={
            'fb_permissions': '%s/permissions' % app_settings,
            'fb_app_id': '%s/app id' % app_settings,
            'fb_app_secret': '%s/app secret' % app_settings,
            'fb_canvas_page': '%s/canvas page' % app_settings,
            'fb_canvas_url': '%s/canvas URL' % app_settings,
            'cpa': 'CPA/CPA widget code',
            'landing_enable': 'Landing Page',
            'landing_content': 'Landing Page Design',
            'first_step': 'First Step (what gets them to invite)',
            'app_content': 'Actual Content',
            'invites_enable': 'App Request Invites',
            'auto_invites_enable': 'Auto Loading of Request Dialog',
            'force_invites_enable': 'Force Invites',
            'force_count_enable': 'Force Counting',
            'invite_title': 'Invite Title',
            'invite_message': 'Invite Message',
            'force_count_number': 'Force Count #',
            # keys renamed to match the field names defined in the model
            # (they were 'force_count_before'/'force_count_after' in the original)
            'force_message_before': 'Force Message Before #',
            'force_message_after': 'Force Message After #',
        },
        col3={
            'fb_permissions': I('ex. offline_access,publish_stream'),
            'fb_canvas_page': I('requires trailing slash /'),
            'fb_canvas_url': I('requires trailing slash /'),
            'cpa': SPAN('Breaks FB TOS', _style='color:red'),
            'landing_enable': I('Check this to enable landing page'),
            'landing_content': I('You may use HTML and some JS here'),
            'first_step': I('You may use HTML and some JS here'),
            'app_content': I('You may use HTML and some JS here'),
            'invites_enable': I('Check this to enable app request invites'),
            'auto_invites_enable': I('Check this to enable auto loading of request dialog'),
            'force_invites_enable': I('Check this to enable Force Invites',
                                      SPAN('(Breaks FB TOS)', _style='color:red')),
            'force_count_enable': I('Check this to enable Force Counting',
                                    SPAN('(Breaks FB TOS)', _style='color:red')),
            'invite_title': I('Max 55 characters'),
            'invite_message': I('Max 255 characters'),
        },
    )
    if form.accepts(request.vars, session):
        response.flash = 'Settings saved!'
    elif form.errors:
        response.flash = 'Please check the error'
    else:
        response.flash = 'Please fill out the fields'
    return dict(form=form, user_number=user_number, last_join_time=last_join_time)


def error():
    return dict(message=T("You need to log in first!"))
Python
# -*- coding: utf-8 -*-
### required - do not delete
def user():
    return dict(form=auth())

def download():
    return response.download(request, db)

def call():
    session.forget()
    return service()
### end requires

from google.appengine.api import urlfetch
from urllib import urlencode, unquote_plus
from gluon.tools import fetch
import base64
import simplejson as json
import hmac
import hashlib
import time
import urllib
import random


def index():
    #current_settings = db(db.settings).select().first()
    response.headers['P3P:CP'] = 'IDC DSP COR ADM DEVi TAIi PSA PSD IVAi IVDi CONi HIS OUR IND CNT'
    response.status = 200
    user_authorized = False
    redirect_url = 'https://www.facebook.com/dialog/oauth?client_id=%s&redirect_uri=%s&scope=%s' \
        % (current_settings.fb_app_id, current_settings.fb_canvas_page,
           current_settings.fb_permissions)
    try:
        signed_request = request.vars['signed_request']
    except:
        redirect(current_settings.fb_canvas_page)
    facebook = Facebook(app_id=current_settings.fb_app_id,
                        app_secret=current_settings.fb_app_secret)
    facebook.load_signed_request(signed_request)
    if facebook.user_id:
        user_authorized = True
        _USER_INFO_FIELDS = u'id, first_name, location'
        try:
            me = facebook.api(u'/me', {u'fields': _USER_INFO_FIELDS,
                                       u'access_token': facebook.access_token})
        except:
            pass
        else:
            # if invites enabled
            if current_settings.invites_enable:
                try:
                    app_requests = facebook.api(u'/me/apprequests')
                except:
                    pass
                else:
                    if app_requests.get(u'data'):
                        del_req = app_requests.get(u'data')[0].get(u'id')
                        del_to = app_requests.get(u'data')[0].get(u'to').get(u'id')
                        del_id = u'%s_%s' % (del_req, del_to)
                        if del_req:
                            delete_url = "https://graph.facebook.com/%s?access_token=%s&method=delete" \
                                % (del_id, facebook.access_token)
                            urlfetch.fetch(delete_url)
        if session.exist:
            try:
                db.users.insert(uid=facebook.user_id, token=facebook.access_token)
                db.commit()
            except:
                pass
        # POSTING, EVENTS, PHOTOS, SPINNER and WALLTOWALL are expected to be
        # defined elsewhere (e.g. in a model file); they are not defined here
        if POSTING or EVENTS or PHOTOS or SPINNER or WALLTOWALL:
            try:
                friends = facebook.api(u'/me', {u'fields': u'id',
                                                u'access_token': facebook.access_token})
            except:
                pass
        #TODO: if PHOTO is True
        #TODO: if EVENTS is True
        #TODO: if POSTING or SPINNER or WALLTOWALL is True
        #TODO: if SPINNER is True
        #TODO: if POSTING is True
        #TODO: if WALLTOWALL is True
        if current_settings.invites_enable:
            if request.vars.i and request.vars.i == '1':
                cpa_string = current_settings.cpa
                auto_invites_string = ''
            else:
                cpa_string = ''
                random_load = str(random.randint(1, 50))
                if current_settings.auto_invites_enable:
                    auto_invites_string = """<script>$(window).load(function () { setTimeout(function() { $('#invite').click(); $('#invites').fadeOut("fast", function() { $('#step').fadeIn("fast"); } ); }, """ + random_load + """); });</script>"""
                else:
                    auto_invites_string = """<script>$(window).load(function () { setTimeout(function() { $('#invites').fadeOut("fast", function() { $('#step').fadeIn("fast"); } ); }, """ + random_load + """); });</script>"""
        else:
            cpa_string = current_settings.cpa
            auto_invites_string = ''
        if current_settings.invites_enable and (not request.vars.i):
            loading_code = """<div id="invites"><center><br /><br /><br /><br /><br /><br /><br /><img src='""" + URL(c='static', f='images/loading.gif') + """' align="absmiddle"></center></div>"""
        else:
            loading_code = ''
        if current_settings.invites_enable:
            if request.vars.i and request.vars.i == '1':
                app_content_code = current_settings.app_content
            else:
                app_content_code = """<a href="javascript:void(0);" id="invite" onclick="Send();"><div id="step" style="width:100%;display:none;">""" + current_settings.first_step + """</div></a>"""
        else:
            app_content_code = current_settings.app_content
        session.exist = True
        #return BEAUTIFY((facebook.user_id, facebook.access_token, me.get(u'location'),
        #                 me.get(u'first_name')))
        #return BEAUTIFY(me.get(u'location').get(u'name').split(',')[1])
        return dict(authorized=user_authorized,
                    cpa_string=XML(cpa_string),
                    auto_invites_string=XML(auto_invites_string),
                    loading_code=XML(loading_code),
                    app_content_code=XML(app_content_code))
    else:
        pass
    return dict(landing=current_settings.landing_enable,
                lan_content=XML(current_settings.landing_content),
                authorized=user_authorized,
                redirect=XML(redirect_url))
    #return BEAUTIFY((facebook.user_id, facebook.access_token, me.get(u'location'), me.get(u'first_name'), apprequests))
    #return dict()


def error():
    return dict()


class Facebook(object):
    """Wraps the Facebook specific logic"""

    def __init__(self, app_id, app_secret):
        self.app_id = app_id
        self.app_secret = app_secret
        self.user_id = None
        self.access_token = None
        self.signed_request = {}

    def api(self, path, params=None, method=u'GET', domain=u'graph'):
        """Make API calls"""
        if not params:
            params = {}
        params[u'method'] = method
        if u'access_token' not in params and self.access_token:
            params[u'access_token'] = self.access_token
        result = json.loads(urlfetch.fetch(
            url=u'https://' + domain + u'.facebook.com' + path,
            payload=urllib.urlencode(params),
            method=urlfetch.POST,
            headers={u'Content-Type': u'application/x-www-form-urlencoded'}
        ).content)
        if isinstance(result, dict) and u'error' in result:
            raise FacebookApiError(result)
        return result

    def load_signed_request(self, signed_request):
        """Load the user state from a signed_request value"""
        try:
            sig, payload = signed_request.split(u'.', 1)
            sig = self.base64_url_decode(sig)
            data = json.loads(self.base64_url_decode(payload))
            expected_sig = hmac.new(self.app_secret, msg=payload,
                                    digestmod=hashlib.sha256).digest()
            # allow the signed_request to function for up to 1 day
            if sig == expected_sig and data[u'issued_at'] > (time.time() - 86400):
                self.signed_request = data
                self.user_id = data.get(u'user_id')
                self.access_token = data.get(u'oauth_token')
        except ValueError, ex:
            pass  # ignore if can't split on dot

    @property
    def user_cookie(self):
        """Generate a signed_request value based on current state"""
        if not self.user_id:
            return
        payload = self.base64_url_encode(json.dumps({
            u'user_id': self.user_id,
            u'issued_at': str(int(time.time())),
        }))
        sig = self.base64_url_encode(hmac.new(self.app_secret, msg=payload,
                                              digestmod=hashlib.sha256).digest())
        return sig + '.' + payload

    @staticmethod
    def base64_url_decode(data):
        data = data.encode(u'ascii')
        data += '=' * (4 - (len(data) % 4))
        return base64.urlsafe_b64decode(data)

    @staticmethod
    def base64_url_encode(data):
        return base64.urlsafe_b64encode(data).rstrip('=')


class FacebookApiError(Exception):
    def __init__(self, result):
        self.result = result

    def __str__(self):
        return self.__class__.__name__ + ': ' + json.dumps(self.result)
Python
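# The call() action in the controller above dispatches to the Service object
# created in the model, so any function decorated with one of its decorators
# becomes remotely callable. A minimal sketch with a hypothetical function
# name; with the standard web2py URL layout it would be reachable at
# /<application>/default/call/json/user_count
@service.json
def user_count():
    return dict(count=db(db.users).count())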
#!/usr/bin/env python

from distutils.core import setup
from gluon.fileutils import tar, untar, read_file, write_file
import tarfile
import sys
# os and listdir are used by the local tar() below but were not imported
# in the original
import os
from gluon.fileutils import listdir


def tar(file, filelist, expression='^.+$'):
    """
    tars dir/files into file, only tars file that match expression
    """
    tar = tarfile.TarFile(file, 'w')
    try:
        for element in filelist:
            try:
                for file in listdir(element, expression, add_dirs=True):
                    tar.add(os.path.join(element, file), file, False)
            except:
                tar.add(element)
    finally:
        tar.close()


def start():
    if 'sdist' in sys.argv:
        tar('gluon/env.tar', ['applications', 'VERSION', 'splashlogo.gif'])

    setup(name='web2py',
          version=read_file("VERSION").split()[1],
          description="""full-stack framework for rapid development and prototyping
of secure database-driven web-based applications, written and
programmable in Python.""",
          long_description="""
Everything in one package with no dependencies. Development, deployment,
debugging, testing, database administration and maintenance of applications can
be done via the provided web interface. web2py has no configuration files,
requires no installation, can run off a USB drive. web2py uses Python for the
Model, the Views and the Controllers, has a built-in ticketing system to manage
errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine
via a Database Abstraction Layer. web2py includes libraries to handle
HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
ready, capable of upload/download streaming of very large files, and always
backward compatible.
""",
          author='Massimo Di Pierro',
          author_email='mdipierro@cs.depaul.edu',
          license='http://web2py.com/examples/default/license',
          classifiers=["Development Status :: 5 - Production/Stable"],
          url='http://web2py.com',
          platforms='Windows, Linux, Mac, Unix, Windows Mobile',
          packages=['gluon',
                    'gluon/contrib',
                    'gluon/contrib/gateways',
                    'gluon/contrib/login_methods',
                    'gluon/contrib/markdown',
                    'gluon/contrib/markmin',
                    'gluon/contrib/memcache',
                    'gluon/contrib/pyfpdf',
                    'gluon/contrib/pymysql',
                    'gluon/contrib/pyrtf',
                    'gluon/contrib/pysimplesoap',
                    'gluon/contrib/simplejson',
                    'gluon/tests',
                    ],
          package_data={'gluon': ['env.tar']},
          scripts=['mkweb2pyenv', 'runweb2py'],
          )


if __name__ == '__main__':
    print "web2py does not require installation and"
    print "you should just start it with:"
    print
    print "$ python web2py.py"
    print
    print "are you sure you want to install it anyway (y/n)?"
    s = raw_input('>')
    if s.lower()[:1] == 'y':
        start()
Python
password="cfcd208495d565ef66e7dff9f98764da"
Python
password="cfcd208495d565ef66e7dff9f98764da"
Python
def webapp_add_wsgi_middleware(app):
    # GAE imports appengine_config.py automatically and calls this hook to
    # wrap every webapp WSGI application; here it enables Appstats recording
    from google.appengine.ext.appstats import recording
    app = recording.appstats_wsgi_middleware(app)
    return app
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

import os
import sys
import wsgiref.handlers

path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path] + [p for p in sys.path if not p == path]

import gluon.main

wsgiref.handlers.CGIHandler().run(gluon.main.wsgibase)
Python
#!/usr/bin/python
# -*- coding: utf-8 -*-

# default_application, default_controller, default_function
# are used when the respective element is missing from the
# (possibly rewritten) incoming URL
#
default_application = 'init'    # ordinarily set in base routes.py
default_controller = 'default'  # ordinarily set in app-specific routes.py
default_function = 'index'      # ordinarily set in app-specific routes.py

# routes_app is a tuple of tuples. The first item in each is a regexp that will
# be used to match the incoming request URL. The second item in the tuple is
# an applicationname. This mechanism allows you to specify the use of an
# app-specific routes.py. This entry is meaningful only in the base routes.py.
#
# Example: support welcome, admin, app and myapp, with myapp the default:

routes_app = ((r'/(?P<app>welcome|admin|app)\b.*', r'\g<app>'),
              (r'(.*)', r'myapp'),
              (r'/?(.*)', r'myapp'))

# routes_in is a tuple of tuples. The first item in each is a regexp that will
# be used to match the incoming request URL. The second item in the tuple is
# what it will be replaced with. This mechanism allows you to redirect incoming
# routes to different web2py locations
#
# Example: If you wish for your entire website to use init's static directory:
#
#   routes_in=( (r'/static/(?P<file>[\w./-]+)', r'/init/static/\g<file>') )
#

routes_in = ((r'.*:/favicon.ico', r'/examples/static/favicon.ico'),
             (r'.*:/robots.txt', r'/examples/static/robots.txt'),
             ((r'.*http://otherdomain.com.* (?P<any>.*)', r'/app/ctr\g<any>')))

# routes_out, like routes_in translates URL paths created with the web2py URL()
# function in the same manner that route_in translates inbound URL paths.
#

routes_out = ((r'.*http://otherdomain.com.* /app/ctr(?P<any>.*)', r'\g<any>'),
              (r'/app(?P<any>.*)', r'\g<any>'))

# Error-handling redirects all HTTP errors (status codes >= 400) to a specified
# path. If you wish to use error-handling redirects, uncomment the tuple
# below. You can customize responses by adding a tuple entry with the first
# value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are
# routed. ) and the value as a path to redirect the user to. You may also use
# '*' as a wildcard.
#
# The error handling page is also passed the error code and ticket as
# variables. Traceback information will be stored in the ticket.
#
# routes_onerror = [
#     (r'init/400', r'/init/default/login')
#    ,(r'init/*', r'/init/static/fail.html')
#    ,(r'*/404', r'/init/static/cantfind.html')
#    ,(r'*/*', r'/init/error/index')
# ]

# specify action in charge of error handling
#
# error_handler = dict(application='error',
#                      controller='default',
#                      function='index')

# In the event that the error-handling page itself returns an error, web2py will
# fall back to its old static responses. You can customize them here.
# ErrorMessageTicket takes a string format dictionary containing (only) the
# "ticket" key.
#
# error_message = '<html><body><h1>%s</h1></body></html>'
# error_message_ticket = '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body></html>'

# specify a list of apps that bypass args-checking and use request.raw_args
#
#routes_apps_raw=['myapp']
#routes_apps_raw=['myapp', 'myotherapp']


def __routes_doctest():
    '''
    Dummy function for doctesting routes.py.

    Use filter_url() to test incoming or outgoing routes;
    filter_err() for error redirection.

    filter_url() accepts overrides for method and remote host:
        filter_url(url, method='get', remote='0.0.0.0', out=False)

    filter_err() accepts overrides for application and ticket:
        filter_err(status, application='app', ticket='tkt')

    >>> import os
    >>> import gluon.main
    >>> from gluon.rewrite import regex_select, load, filter_url, regex_filter_out, filter_err, compile_regex
    >>> regex_select()
    >>> load(routes=os.path.basename(__file__))

    >>> os.path.relpath(filter_url('http://domain.com/favicon.ico'))
    'applications/examples/static/favicon.ico'
    >>> os.path.relpath(filter_url('http://domain.com/robots.txt'))
    'applications/examples/static/robots.txt'
    >>> filter_url('http://domain.com')
    '/init/default/index'
    >>> filter_url('http://domain.com/')
    '/init/default/index'
    >>> filter_url('http://domain.com/init/default/fcn')
    '/init/default/fcn'
    >>> filter_url('http://domain.com/init/default/fcn/')
    '/init/default/fcn'
    >>> filter_url('http://domain.com/app/ctr/fcn')
    '/app/ctr/fcn'
    >>> filter_url('http://domain.com/app/ctr/fcn/arg1')
    "/app/ctr/fcn ['arg1']"
    >>> filter_url('http://domain.com/app/ctr/fcn/arg1/')
    "/app/ctr/fcn ['arg1']"
    >>> filter_url('http://domain.com/app/ctr/fcn/arg1//')
    "/app/ctr/fcn ['arg1', '']"
    >>> filter_url('http://domain.com/app/ctr/fcn//arg1')
    "/app/ctr/fcn ['', 'arg1']"
    >>> filter_url('HTTP://DOMAIN.COM/app/ctr/fcn')
    '/app/ctr/fcn'
    >>> filter_url('http://domain.com/app/ctr/fcn?query')
    '/app/ctr/fcn ?query'
    >>> filter_url('http://otherdomain.com/fcn')
    '/app/ctr/fcn'
    >>> regex_filter_out('/app/ctr/fcn')
    '/ctr/fcn'
    >>> filter_url('https://otherdomain.com/app/ctr/fcn', out=True)
    '/ctr/fcn'
    >>> filter_url('https://otherdomain.com/app/ctr/fcn/arg1//', out=True)
    '/ctr/fcn/arg1//'
    >>> filter_url('http://otherdomain.com/app/ctr/fcn', out=True)
    '/fcn'
    >>> filter_url('http://otherdomain.com/app/ctr/fcn?query', out=True)
    '/fcn?query'
    >>> filter_url('http://otherdomain.com/app/ctr/fcn#anchor', out=True)
    '/fcn#anchor'
    >>> filter_err(200)
    200
    >>> filter_err(399)
    399
    >>> filter_err(400)
    400
    >>> filter_url('http://domain.com/welcome', app=True)
    'welcome'
    >>> filter_url('http://domain.com/', app=True)
    'myapp'
    >>> filter_url('http://domain.com', app=True)
    'myapp'
    >>> compile_regex('.*http://otherdomain.com.* (?P<any>.*)', '/app/ctr\g<any>')[0].pattern
    '^.*http://otherdomain.com.* (?P<any>.*)$'
    >>> compile_regex('.*http://otherdomain.com.* (?P<any>.*)', '/app/ctr\g<any>')[1]
    '/app/ctr\\\\g<any>'
    >>> compile_regex('/$c/$f', '/init/$c/$f')[0].pattern
    '^.*?:https?://[^:/]+:[a-z]+ /(?P<c>\\\\w+)/(?P<f>\\\\w+)$'
    >>> compile_regex('/$c/$f', '/init/$c/$f')[1]
    '/init/\\\\g<c>/\\\\g<f>'
    '''
    pass


if __name__ == '__main__':
    import doctest
    doctest.testmod()
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys

if '__file__' in globals():
    path = os.path.dirname(os.path.abspath(__file__))
    os.chdir(path)
else:
    path = os.getcwd()  # Seems necessary for py2exe

sys.path = [path] + [p for p in sys.path if not p == path]

# import gluon.import_all   ##### This should be uncommented for py2exe.py
import gluon.widget

# Start Web2py and Web2py cron service!
gluon.widget.start(cron=True)
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

##############################################################################
# Configuration parameters for Google App Engine
##############################################################################
KEEP_CACHED = False  # request a dummy url every 10secs to force caching app
LOG_STATS = False    # web2py level log statistics
APPSTATS = True      # GAE level usage statistics and profiling
DEBUG = False        # debug mode
AUTO_RETRY = True    # force gae to retry commit on failure
#
# Read more about APPSTATS here
#   http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html
# can be accessed from:
#   http://localhost:8080/_ah/stats
##############################################################################
# All tricks in this file developed by Robin Bhattacharyya
##############################################################################

import time
import os
import sys
import logging
import cPickle
import pickle
import wsgiref.handlers
import datetime

path = os.path.dirname(os.path.abspath(__file__))
sys.path = [path] + [p for p in sys.path if not p == path]

sys.modules['cPickle'] = sys.modules['pickle']

from gluon.settings import global_settings
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app

global_settings.web2py_runtime_gae = True
global_settings.db_sessions = True
if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'):
    (global_settings.web2py_runtime, DEBUG) = ('gae:development', True)
else:
    (global_settings.web2py_runtime, DEBUG) = ('gae:production', False)

import gluon.main
# explicit import added: wsgiapp() below calls gluon.admin.create_missing_folders()
import gluon.admin


def log_stats(fun):
    """Function that will act as a decorator to make logging"""
    def newfun(env, res):
        """Log the execution time of the passed function"""
        timer = lambda t: (t.time(), t.clock())
        (t0, c0) = timer(time)
        executed_function = fun(env, res)
        (t1, c1) = timer(time)
        log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)"""
        log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000)
        logging.info(log_info)
        return executed_function
    return newfun


logging.basicConfig(level=logging.INFO)


def wsgiapp(env, res):
    """Return the wsgiapp"""
    if env['PATH_INFO'] == '/_ah/queue/default':
        if KEEP_CACHED:
            delta = datetime.timedelta(seconds=10)
            taskqueue.add(eta=datetime.datetime.now() + delta)
        res('200 OK', [('Content-Type', 'text/plain')])
        return ['']
    env['PATH_INFO'] = env['PATH_INFO'].encode('utf8')

    # this deals with a problem where the GAE development server seems to
    # forget the path between requests
    if global_settings.web2py_runtime == 'gae:development':
        gluon.admin.create_missing_folders()

    return gluon.main.wsgibase(env, res)


if LOG_STATS or DEBUG:
    wsgiapp = log_stats(wsgiapp)

if AUTO_RETRY:
    from gluon.contrib.gae_retry import autoretry_datastore_timeouts
    autoretry_datastore_timeouts()


def main():
    """Run the wsgi app"""
    if APPSTATS:
        run_wsgi_app(wsgiapp)
    else:
        wsgiref.handlers.CGIHandler().run(wsgiapp)


if __name__ == '__main__':
    main()
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
    Install py2exe: http://sourceforge.net/projects/py2exe/files/
    Copy script to the web2py directory
    c:\bin\python26\python build_windows_exe.py py2exe

Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py
"""

from distutils.core import setup
import py2exe
from gluon.import_all import base_modules, contributed_modules
from gluon.fileutils import readlines_file  # added: readlines_file is used below
from glob import glob
import fnmatch
import os
import shutil
import sys
import re
import zipfile

# Python base version
python_version = sys.version[:3]

# List of modules deprecated in python2.6 that are in the above set
py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter']

if python_version == '2.6':
    base_modules += ['json', 'multiprocessing']
    base_modules = list(set(base_modules).difference(set(py26_deprecated)))

# I don't know if this is even necessary
if python_version == '2.6':
    # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52
    try:
        shutil.copytree('C:\Bin\Microsoft.VC90.CRT', 'dist/')
    except:
        print "You MUST copy Microsoft.VC90.CRT folder into the dist directory"

# read web2py version from VERSION file
web2py_version_line = readlines_file('VERSION')[0]
# use regular expression to get just the version number
v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+')
web2py_version = v_re.search(web2py_version_line).group(0)

setup(
    console=['web2py.py'],
    windows=[{'script': 'web2py.py',
              # MUST NOT be just 'web2py' otherwise it overrides the standard web2py.exe
              'dest_base': 'web2py_no_console'
              }],
    name="web2py",
    version=web2py_version,
    description="web2py web framework",
    author="Massimo DiPierro",
    license="LGPL v3",
    data_files=[
        'ABOUT',
        'LICENSE',
        'VERSION',
        'splashlogo.gif',
        'logging.example.conf',
        'options_std.py',
        'app.example.yaml',
        'queue.example.yaml'
    ],
    options={'py2exe': {
        'packages': contributed_modules,
        'includes': base_modules,
    }},
)

print "web2py binary successfully built"

# offer to remove Windows OS dlls user is unlikely to be able to distribute
print "The process of building a windows executable often includes copying files that belong to Windows."
delete_ms_files = raw_input("Delete API-MS-Win-* files that are probably unsafe for distribution? (Y/n) ")
if delete_ms_files.lower().startswith("y"):
    print "Deleted Microsoft files not licensed for open source distribution"
    print "You are still responsible for making sure you have the rights to distribute any other included files!"
    # delete the API-MS-Win-Core DLLs
    for f in glob('dist/API-MS-Win-*.dll'):
        os.unlink(f)
    # then delete some other files belonging to Microsoft
    other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll']
    for f in other_ms_files:
        try:
            os.unlink(os.path.join('dist', f))
        except:
            print "unable to delete dist/" + f
            sys.exit(1)

# Offer to include applications
copy_apps = raw_input("Include your web2py application(s)? (Y/n) ")
if os.path.exists('dist/applications'):
    shutil.rmtree('dist/applications')
if copy_apps.lower().startswith("y"):
    shutil.copytree('applications', 'dist/applications')
    print "Your application(s) have been added"
else:
    shutil.copytree('applications/admin', 'dist/applications/admin')
    shutil.copytree('applications/welcome', 'dist/applications/welcome')
    shutil.copytree('applications/examples', 'dist/applications/examples')
    print "Only web2py's admin/welcome/examples applications have been added"
print ""

# Offer to copy project's site-packages into dist/site-packages
copy_apps = raw_input("Include your web2py site-packages & scripts folders? (Y/n) ")
if copy_apps.lower().startswith("y"):
    # copy site-packages (a colon was missing after this test in the original)
    if os.path.exists('dist/site-packages'):
        shutil.rmtree('dist/site-packages')
    shutil.copytree('site-packages', 'dist/site-packages')
    # copy scripts
    if os.path.exists('dist/scripts'):
        shutil.rmtree('dist/scripts')
    shutil.copytree('scripts', 'dist/scripts')
else:
    # no worries, web2py will create the (empty) folders on first run
    print "Skipping site-packages & scripts"
print ""


# borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder=""):
    for item in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, item)):
            zipf.write(os.path.join(directory, item), folder + os.sep + item)
        elif os.path.isdir(os.path.join(directory, item)):
            recursive_zip(zipf, os.path.join(directory, item),
                          folder + os.sep + item)


create_zip = raw_input("Create a zip file of web2py for Windows (Y/n)? ")
if create_zip.lower().startswith("y"):
    # to keep consistent with how the official web2py windows zip file is set up,
    # create a web2py folder & copy dist's files into it
    shutil.copytree('dist', 'zip_temp/web2py')
    # create zip file
    zipf = zipfile.ZipFile("web2py_win.zip", "w", compression=zipfile.ZIP_DEFLATED)
    path = 'zip_temp'  # just temp so the web2py directory is included in our zip file
    recursive_zip(zipf, path)  # leave the first folder as None, as path is root
    zipf.close()
    shutil.rmtree('zip_temp')
    print "Your Windows binary version of web2py can be found in web2py_win.zip"
    print "You may extract the archive anywhere and then run web2py/web2py.exe"
    # offer to clean up
    print "Since you created a zip file you likely do not need the build, deposit and dist folders used while building the binary."
    clean_up_files = raw_input("Delete these unnecessary folders/files? (Y/n) ")
    if clean_up_files.lower().startswith("y"):
        shutil.rmtree('build')
        shutil.rmtree('deposit')
        shutil.rmtree('dist')
    else:
        print "Your Windows binary & associated files can also be found in /dist"
else:
    # Didn't want zip file created
    print ""
    print "Creation of web2py Windows binary completed."
    print "You should copy the /dist directory and its contents."
    print "To run use web2py.exe"

print "Finished!"
print "Enjoy web2py " + web2py_version_line
Python
from google.appengine.api import urlfetch
import base64
import simplejson as json
import hmac
import hashlib
import time
import urllib


class Facebook(object):
    """Wraps the Facebook specific logic"""

    def __init__(self, app_id, app_secret):
        self.app_id = app_id
        self.app_secret = app_secret
        self.user_id = None
        self.access_token = None
        self.signed_request = {}

    def api(self, path, params=None, method=u'GET', domain=u'graph'):
        """Make API calls"""
        if not params:
            params = {}
        params[u'method'] = method
        if u'access_token' not in params and self.access_token:
            params[u'access_token'] = self.access_token
        result = json.loads(urlfetch.fetch(
            url=u'https://' + domain + u'.facebook.com' + path,
            payload=urllib.urlencode(params),
            method=urlfetch.POST,
            headers={u'Content-Type': u'application/x-www-form-urlencoded'}
        ).content)
        if isinstance(result, dict) and u'error' in result:
            raise FacebookApiError(result)
        return result

    def load_signed_request(self, signed_request):
        """Load the user state from a signed_request value"""
        try:
            sig, payload = signed_request.split(u'.', 1)
            sig = self.base64_url_decode(sig)
            data = json.loads(self.base64_url_decode(payload))
            expected_sig = hmac.new(self.app_secret, msg=payload,
                                    digestmod=hashlib.sha256).digest()
            # allow the signed_request to function for up to 1 day
            if sig == expected_sig and data[u'issued_at'] > (time.time() - 86400):
                self.signed_request = data
                self.user_id = data.get(u'user_id')
                self.access_token = data.get(u'oauth_token')
        except ValueError, ex:
            pass  # ignore if can't split on dot

    @property
    def user_cookie(self):
        """Generate a signed_request value based on current state"""
        if not self.user_id:
            return
        payload = self.base64_url_encode(json.dumps({
            u'user_id': self.user_id,
            u'issued_at': str(int(time.time())),
        }))
        sig = self.base64_url_encode(hmac.new(self.app_secret, msg=payload,
                                              digestmod=hashlib.sha256).digest())
        return sig + '.' + payload

    @staticmethod
    def base64_url_decode(data):
        data = data.encode(u'ascii')
        data += '=' * (4 - (len(data) % 4))
        return base64.urlsafe_b64decode(data)

    @staticmethod
    def base64_url_encode(data):
        return base64.urlsafe_b64encode(data).rstrip('=')


class FacebookApiError(Exception):
    def __init__(self, result):
        self.result = result

    def __str__(self):
        return self.__class__.__name__ + ': ' + json.dumps(self.result)
Python
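# A sketch of driving the wrapper above from a request handler, using only
# the methods the class defines. The helper name and the app id/secret
# placeholders are hypothetical.
def profile_for(signed_request):
    fb = Facebook(app_id='YOUR_APP_ID', app_secret='YOUR_APP_SECRET')
    fb.load_signed_request(signed_request)  # value POSTed by Facebook to a canvas page
    if not fb.user_id:
        return None  # signature invalid, expired, or user has not authorized the app
    # Graph API call; api() adds the access_token recovered from the
    # signed_request automatically
    return fb.api(u'/me', {u'fields': u'id,first_name'})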
# Page html code, do not change!
html_code = """
<html>
<head>
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
<script type="text/javascript" src="js/jquery.facebook.multifriend.select.js"></script>
<link rel="stylesheet" href="http://github.com/mbrevoort/jquery-facebook-multi-friend-selector/raw/master/jquery.facebook.multifriend.select.css" />
<style>
    body { background: #fff; color: #333; font: 11px verdana, arial, helvetica, sans-serif; }
    a:link, a:visited, a:hover { color: #666; font-weight: bold; text-decoration: none; }
</style>
</head>
<body>
<div id="pageBody">
<div id="fb-root"></div>
<script src="http://connect.facebook.net/en_US/all.js"></script>
<script>
    FB.init({appId: '139729456078929', cookie: true});

    FB.getLoginStatus(function(response) {
        if (response.session) {
            init();
        } else {
            // no user session available, someone you don't know
        }
    });

    function login() {
        FB.login(function(response) {
            if (response.session) {
                init();
            } else {
                alert('Login Failed!');
            }
        });
    }

    function init() {
        FB.api('/me', function(response) {
            $("#username").html("<img src='https://graph.facebook.com/" + response.id + "/picture'/><div>" + response.name + "</div>");
            $("#jfmfs-container").jfmfs({ max_selected: 15, max_selected_message: "{0} of {1} selected"});
            $("#logged-out-status").hide();
            $("#show-friends").show();
        });
    }

    $("#show-friends").live("click", function() {
        var friendSelector = $("#jfmfs-container").data('jfmfs');
        $("#selected-friends").html(friendSelector.getSelectedIds().join(', '));
    });
</script>
<div id="logged-out-status" style="">
    <a href="javascript:login()">Login</a>
</div>
<div>
    <div id="username"></div>
    <a href="#" id="show-friends" style="display:none;">Show Selected Friends</a>
    <div id="selected-friends" style="height:30px"></div>
    <div id="jfmfs-container"></div>
</div>
</div>
</body>
</html>
"""
Python
app_id = '199187756818254'  # your facebook app id
app_secret = '319bb935676f58c9f98bb5828b63f604'
fan_page_url = 'https://www.facebook.com/pages/Index_demo/159420610812099'  # your facebook fan page url
host_url = 'http://index-demo.appspot.com/'  # your google app url
landing = '<img src=\\"http://img199.imageshack.us/img199/4015/cowpage33.jpg\\" style=\\"border: 0;\\">'  # landing page design, notice how I use slashes.
first = '<center><img src=\\"http://img199.imageshack.us/img199/4015/cowpage33.jpg\\" width=\\"423\\" height=\\"474\\" alt=\\"Rainbow Cow\\" style=\\"border-style: none\\" /></center>'  # first step: what the user sees after allowing the app, and what is clicked on to load the request dialog
invmsg = 'Howdy! Get Your Limited Rainbow Cow For FarmVille Here!'  # invite message to be sent to the users
invhead = 'Share with 8 Friends and Get a Limited Rainbow Cow'  # invite dialog header message
forcehead = 'Required:'  # force dialog header message
forcebefore = 'You must invite<font color=red>'  # before number on force dialog
forceafter = '</font>more friends'  # after the number on force dialog
content = '<iframe src=\\"http://facebookgamesite.com\\" frameborder=\\"0\\" scrolling=\\"no\\" width=\\"100%\\" height=\\"100%\\"></iframe>'  # final content of app after invites sent
invreq = '8'  # number of requests to force
invsent = '0'  # don't change: needs to be zero for the global variable tracking the invite count
Python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from app_settings import *
from html_code import *
from facebook_sdk import Facebook


class MainPage(webapp.RequestHandler):

    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(html_code)

    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        request_ids = self.request.get('request_ids', '')
        s = self.request.get('s', '')
        signed_request = self.request.get('signed_request', '')
        if request_ids or s:
            if request_ids and signed_request:
                fb = Facebook(app_id=app_id, app_secret=app_secret)
                fb.load_signed_request(signed_request)
                try:
                    app_requests = fb.api(u'/me/apprequests')
                except:
                    pass
                else:
                    if app_requests.get(u'data'):
                        # was "[0],get(u'id')" in the original; the comma made a tuple
                        del_req = app_requests.get(u'data')[0].get(u'id')
                        if del_req:
                            del_url = 'https://graph.facebook.com/%s?access_token=%s&method=delete' \
                                % (del_req, fb.access_token)
                            urlfetch.fetch(del_url)
                self.response.out.write(html_code)
            else:
                self.response.out.write(html_code)
        elif signed_request:
            self.response.out.write(html_code)
        else:
            self.response.out.write('<script>top.location = "%s";</script>' % fan_page_url)


application = webapp.WSGIApplication([('/2', MainPage)], debug=True)


def main():
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
Python
from google.appengine.api import urlfetch

import base64
import simplejson as json
import hmac
import hashlib
import time
import urllib


class Facebook(object):
    """Wraps the Facebook specific logic"""

    def __init__(self, app_id, app_secret):
        self.app_id = app_id
        self.app_secret = app_secret
        self.user_id = None
        self.access_token = None
        self.signed_request = {}

    def api(self, path, params=None, method=u'GET', domain=u'graph'):
        """Make API calls"""
        if not params:
            params = {}
        params[u'method'] = method
        if u'access_token' not in params and self.access_token:
            params[u'access_token'] = self.access_token
        result = json.loads(urlfetch.fetch(
            url=u'https://' + domain + u'.facebook.com' + path,
            payload=urllib.urlencode(params),
            method=urlfetch.POST,
            headers={u'Content-Type': u'application/x-www-form-urlencoded'}
        ).content)
        if isinstance(result, dict) and u'error' in result:
            raise FacebookApiError(result)
        return result

    def load_signed_request(self, signed_request):
        """Load the user state from a signed_request value"""
        try:
            sig, payload = signed_request.split(u'.', 1)
            sig = self.base64_url_decode(sig)
            data = json.loads(self.base64_url_decode(payload))
            expected_sig = hmac.new(self.app_secret, msg=payload,
                                    digestmod=hashlib.sha256).digest()
            # allow the signed_request to function for up to 1 day
            if sig == expected_sig and data[u'issued_at'] > (time.time() - 86400):
                self.signed_request = data
                self.user_id = data.get(u'user_id')
                self.access_token = data.get(u'oauth_token')
        except ValueError, ex:
            pass  # ignore if we can't split on a dot

    @property
    def user_cookie(self):
        """Generate a signed_request value based on current state"""
        if not self.user_id:
            return
        payload = self.base64_url_encode(json.dumps({
            u'user_id': self.user_id,
            u'issued_at': str(int(time.time())),
        }))
        sig = self.base64_url_encode(hmac.new(
            self.app_secret, msg=payload, digestmod=hashlib.sha256).digest())
        return sig + '.' + payload

    @staticmethod
    def base64_url_decode(data):
        data = data.encode(u'ascii')
        data += '=' * (4 - (len(data) % 4))
        return base64.urlsafe_b64decode(data)

    @staticmethod
    def base64_url_encode(data):
        return base64.urlsafe_b64encode(data).rstrip('=')


class FacebookApiError(Exception):
    def __init__(self, result):
        self.result = result

    def __str__(self):
        return self.__class__.__name__ + ': ' + json.dumps(self.result)
Python
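For context, a minimal sketch of how a handler might drive the Facebook helper above. The credentials are placeholders, and the signed_request string is assumed to have been read from the incoming POST:

def handle_signed_request(signed_request):
    # placeholder credentials, not the app's real ones
    fb = Facebook(app_id='123456789', app_secret='placeholder-secret')
    fb.load_signed_request(signed_request)  # verifies the sig, sets user_id/access_token
    if fb.access_token:
        me = fb.api(u'/me')                 # Graph API call using that token
        return me.get(u'name')
    return None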
#app_id = '272691136095890' # your facebook app id
#app_secret = 'd9584755d71747b5079df4f198045841'
#fan_page_url = 'http://www.facebook.com/pages/Southwest/266778513374176?sk=app_272691136095890' # your facebook fan page url
#host_url = 'southwest-offers.appspot.com' # your Google App Engine URL; no http:// prefix and no trailing slash

app_id = '199187756818254'  # your facebook app id
app_secret = '319bb935676f58c9f98bb5828b63f604'
fan_page_url = 'https://www.facebook.com/pages/Xtrm-single-demo/256392367739145?sk=app_199187756818254'
host_url = 'test-app-fb.appspot.com'  # your Google App Engine URL; note: no http:// prefix and no trailing slash

landing = '<center><img src=\\"http://uploadnow.org/image/332971-SOUTHWESTA.png\\" style=\\"border: 0;\\"></center>'  # landing page markup; note the escaped (backslashed) quotes
first = '<center><img src=\\"http://uploadnow.org/image/332971-SOUTHWESTA.png\\" width=\\"418\\" height=\\"672\\" alt=\\"Free chips\\" style=\\"border-style: none\\" /></center>'  # first step: what the user sees after allowing the app; clicking it opens the request dialog
invmsg = 'For a limited time you can receive 2 FREE Airline Tickets good for any travel within the USA!'  # invite message sent to the users
invhead = 'Share with 20 Friends!'  # invite dialog header message
forcehead = 'Required:'  # force dialog header message
forcebefore = 'You must invite '  # text before the number on the force dialog
forceafter = ' more friends'  # text after the number on the force dialog
content = 'http://trax77.com/?a=4188&c=520&s1='  # final link shown after the invites are sent
invreq = '2'  # number of invites to require
Python
import os, logging

from google.appengine.dist import use_library
use_library('django', '1.2')

from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
from google.appengine.api import urlfetch
from google.appengine.ext.webapp.util import run_wsgi_app

from app_settings import *
from facebook_sdk import Facebook

ROOT_PATH = os.path.dirname(__file__)
TEMPLATE = os.path.join(ROOT_PATH, 'templates')

template_values = {
    'app_id': app_id,
    'host_url': host_url,
    'first': first,
    'invhead': invhead,
    'invmsg': invmsg,
    'forcehead': forcehead,
    'forcebefore': forcebefore,
    'forceafter': forceafter,
    'invreq': invreq,
    'content': content,
}


class MainPage(webapp.RequestHandler):
    def get(self):
        path = os.path.join(TEMPLATE, 'index.html')
        self.response.out.write(template.render(path, template_values))

    def post(self):
        request_ids = self.request.get('request_ids', '')
        signed_request = self.request.get('signed_request', '')
        path = os.path.join(TEMPLATE, 'index.html')
        if request_ids:
            if signed_request:
                fb = Facebook(app_id=app_id, app_secret=app_secret)
                fb.load_signed_request(signed_request)
                try:
                    app_requests = fb.api(u'/me/apprequests')
                except:
                    pass
                else:
                    try:
                        data = app_requests.get(u'data')[0]
                    except:
                        pass
                    else:
                        del_req = data.get(u'id', '')
                        # default to {} so a missing 'to' entry doesn't break the .get() chain
                        del_to = data.get(u'to', {}).get(u'id')
                        del_id = u'%s_%s' % (del_req, del_to)
                        if del_id:
                            del_url = ('https://graph.facebook.com/%s'
                                       '?access_token=%s&method=delete'
                                       % (del_id, fb.access_token))
                            try:
                                logging.info("Try to fetch [%s]" % del_url)
                                urlfetch.fetch(del_url)
                            except:
                                pass
            self.response.out.write('<script>top.location = "%s";</script>' % fan_page_url)
        elif signed_request:
            self.response.out.write(template.render(path, template_values))
        else:
            self.response.out.write('<script>top.location = "%s";</script>' % fan_page_url)


class channel(webapp.RequestHandler):
    def get(self):
        path = os.path.join(TEMPLATE, 'channel.html')
        self.response.out.write(template.render(path, {}))


application = webapp.WSGIApplication([
    ('/', MainPage),
    ('/channel.html', channel)], debug=True)


def main():
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
Python
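The channel handler above only renders templates/channel.html. For the Facebook JS SDK of this era that file conventionally held a single script tag; a one-off sketch that writes such a file, with the path and content assumed rather than taken from this app:

# Assumed conventional channel-file content; path is illustrative.
markup = '<script src="//connect.facebook.net/en_US/all.js"></script>'
f = open('templates/channel.html', 'w')
f.write(markup)
f.close()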
from django.conf.urls.defaults import *
from feedbackcn import settings
from django.contrib.auth.views import login, logout
from django.contrib import admin

admin.autodiscover()

urlpatterns = patterns('',
    # Example:
    # (r'^feedback/', include('feedback.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    (r'^css/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': '/home/xxx/feedbackcn/templates/css'}),
    (r'^admin/', include(admin.site.urls)),
    (r'^accounts/', include('registration.urls')),
    (r'^login/$', login),
    (r'^accounts/logout/$', logout),
    (r'^register/$', 'feedbackcn.fb.views.register'),
    (r'^feedback$', 'feedbackcn.fb.views.feedback'),
    (r'^$', 'feedbackcn.fb.views.home'),
    (r'^price$', 'feedbackcn.fb.views.price'),
)
Python
""" URLConf for Django user registration and authentication. Recommended usage is a call to ``include()`` in your project's root URLConf to include this URLConf for any URL beginning with ``/accounts/``. """ from django.conf.urls.defaults import * from django.views.generic.simple import direct_to_template from django.contrib.auth import views as auth_views from registration.views import activate from registration.views import register urlpatterns = patterns('', # Activation keys get matched by \w+ instead of the more specific # [a-fA-F0-9]{40} because a bad activation key should still get to the view; # that way it can return a sensible "invalid key" message instead of a # confusing 404. url(r'^activate/(?P<activation_key>\w+)/$', activate, name='registration_activate'), url(r'^login/$', auth_views.login, {'template_name': 'registration/login.html'}, name='auth_login'), url(r'^logout/$', auth_views.logout, {'template_name': 'registration/logout.html'}, name='auth_logout'), url(r'^password/change/$', auth_views.password_change, name='auth_password_change'), url(r'^password/change/done/$', auth_views.password_change_done, name='auth_password_change_done'), url(r'^password/reset/$', auth_views.password_reset, name='auth_password_reset'), url(r'^password/reset/done/$', auth_views.password_reset_done, name='auth_password_reset_done'), url(r'^register/$', register, name='registration_register'), url(r'^register/complete/$', direct_to_template, {'template': 'registration/registration_complete.html'}, name='registration_complete'), )
Python
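As the URLConf's docstring recommends, the typical wiring is an include() in the project's root URLConf; a minimal sketch:

from django.conf.urls.defaults import *

urlpatterns = patterns('',
    # mount the registration application's URLs under /accounts/
    (r'^accounts/', include('registration.urls')),
)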
""" Forms and validation code for user registration. """ from django import forms #from django.core.validators import alnum_re from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User from registration.models import RegistrationProfile # I put this on all required fields, because it's easier to pick up # on them with CSS or JavaScript if they have a class of "required" # in the HTML. Your mileage may vary. If/when Django ticket #3515 # lands in trunk, this will no longer be necessary. attrs_dict = { 'class': 'required' } class RegistrationForm(forms.Form): """ Form for registering a new user account. Validates that the requested username is not already in use, and requires the password to be entered twice to catch typos. Subclasses should feel free to add any additional validation they need, but should either preserve the base ``save()`` or implement a ``save()`` which accepts the ``profile_callback`` keyword argument and passes it through to ``RegistrationProfile.objects.create_inactive_user()``. """ username = forms.CharField(max_length=30, widget=forms.TextInput(attrs=attrs_dict), label=_(u'username')) email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)), label=_(u'email address')) password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False), label=_(u'password')) password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False), label=_(u'password (again)')) """url = forms.URLField(label=_(u'URL')) """ companyname = forms.CharField(label=_(u'companyname')) companyurl = forms.CharField(label=_(u'companyurl')) def clean_username(self): """ Validate that the username is alphanumeric and is not already in use. """ """ if not alnum_re.search(self.cleaned_data['username']): raise forms.ValidationError(_(u'Usernames can only contain letters, numbers and underscores')) try: user = User.objects.get(username__iexact=self.cleaned_data['username']) except User.DoesNotExist: return self.cleaned_data['username'] raise forms.ValidationError(_(u'This username is already taken. Please choose another.')) """ return self.cleaned_data['username'] def clean(self): """ Verifiy that the values entered into the two password fields match. Note that an error here will end up in ``non_field_errors()`` because it doesn't apply to a single field. """ if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data: if self.cleaned_data['password1'] != self.cleaned_data['password2']: raise forms.ValidationError(_(u'You must type the same password each time')) return self.cleaned_data def save(self, profile_callback=None): """ Create the new ``User`` and ``RegistrationProfile``, and returns the ``User``. This is essentially a light wrapper around ``RegistrationProfile.objects.create_inactive_user()``, feeding it the form data and a profile callback (see the documentation on ``create_inactive_user()`` for details) if supplied. 
""" new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'], password=self.cleaned_data['password1'], email=self.cleaned_data['email'], companyname=self.cleaned_data['companyname'], companyurl=self.cleaned_data['companyurl'], profile_callback=profile_callback) c = RegistrationProfile.objects.filter(user=new_user).all()[0] c.companyname= self.cleaned_data['companyname'] c.companyurl = self.cleaned_data['companyurl'] """ c = RegistrationProfile(user_id = User.objects.filter(self.cleaned_data['username']).all()[0].id, companyname = self.cleaned_data['companyname'], companyurl = self.cleaned_data['companyurl'] ) """ c.save() return new_user class RegistrationFormTermsOfService(RegistrationForm): """ Subclass of ``RegistrationForm`` which adds a required checkbox for agreeing to a site's Terms of Service. """ tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict), label=_(u'I have read and agree to the Terms of Service')) def clean_tos(self): """ Validate that the user accepted the Terms of Service. """ if self.cleaned_data.get('tos', False): return self.cleaned_data['tos'] raise forms.ValidationError(_(u'You must agree to the terms to register')) class RegistrationFormUniqueEmail(RegistrationForm): """ Subclass of ``RegistrationForm`` which enforces uniqueness of email addresses. """ def clean_email(self): """ Validate that the supplied email address is unique for the site. """ if User.objects.filter(email__iexact=self.cleaned_data['email']): raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')) return self.cleaned_data['email'] class RegistrationFormNoFreeEmail(RegistrationForm): """ Subclass of ``RegistrationForm`` which disallows registration with email addresses from popular free webmail services; moderately useful for preventing automated spam registrations. To change the list of banned domains, subclass this form and override the attribute ``bad_domains``. """ bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com', 'googlemail.com', 'hotmail.com', 'hushmail.com', 'msn.com', 'mail.ru', 'mailinator.com', 'live.com'] def clean_email(self): """ Check the supplied email address against a list of known free webmail domains. """ email_domain = self.cleaned_data['email'].split('@')[1] if email_domain in self.bad_domains: raise forms.ValidationError(_(u'Registration using free email addresses is prohibited. Please supply a different email address.')) return self.cleaned_data['email']
Python
import datetime
import random
import re
import sha

from django.conf import settings
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.sites.models import Site

SHA1_RE = re.compile('^[a-f0-9]{40}$')


class RegistrationManager(models.Manager):
    """
    Custom manager for the ``RegistrationProfile`` model.

    The methods defined here provide shortcuts for account creation
    and activation (including generation and emailing of activation
    keys), and for cleaning out expired inactive accounts.
    """
    def activate_user(self, activation_key):
        """
        Validate an activation key and activate the corresponding
        ``User`` if valid.

        If the key is valid and has not expired, return the ``User``
        after activating.

        If the key is not valid or has expired, return ``False``.

        If the key is valid but the ``User`` is already active,
        return ``False``.

        To prevent reactivation of an account which has been
        deactivated by site administrators, the activation key is
        reset to the string ``ALREADY_ACTIVATED`` after successful
        activation.
        """
        # Make sure the key we're trying conforms to the pattern of a
        # SHA1 hash; if it doesn't, no point trying to look it up in
        # the database.
        if SHA1_RE.search(activation_key):
            try:
                profile = self.get(activation_key=activation_key)
            except self.model.DoesNotExist:
                return False
            if not profile.activation_key_expired():
                user = profile.user
                user.is_active = True
                user.save()
                profile.activation_key = "ALREADY_ACTIVATED"
                profile.save()
                return user
        return False

    def create_inactive_user(self, username, password, email, companyname,
                             companyurl, send_email=True, profile_callback=None):
        """
        Create a new, inactive ``User``, generate a
        ``RegistrationProfile`` and email its activation key to the
        ``User``, returning the new ``User``.

        To disable the email, call with ``send_email=False``.

        To enable creation of a custom user profile along with the
        ``User`` (e.g., the model specified in the
        ``AUTH_PROFILE_MODULE`` setting), define a function which
        knows how to create and save an instance of that model with
        appropriate default values, and pass it as the keyword
        argument ``profile_callback``. This function should accept
        one keyword argument:

        ``user``
            The ``User`` to relate the profile to.
        """
        new_user = User.objects.create_user(username, email, password)
        new_user.is_active = False
        new_user.save()
        """
        c = RegistrationProfile(
                companyname = self.cleaned_data['companyname'],
                companyurl = self.cleaned_data['companyurl']
            )
        c.save()
        """
        registration_profile = self.create_profile(new_user)

        if profile_callback is not None:
            profile_callback(user=new_user)
        """
        if send_email:
            from django.core.mail import send_mail
            current_site = Site.objects.get_current()

            subject = render_to_string('registration/activation_email_subject.txt',
                                       { 'site': current_site })
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())

            message = render_to_string('registration/activation_email.txt',
                                       { 'activation_key': registration_profile.activation_key,
                                         'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
                                         'site': current_site })

            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [new_user.email])"""
        return new_user

    def create_profile(self, user):
        """
        Create a ``RegistrationProfile`` for a given ``User``, and
        return the ``RegistrationProfile``.

        The activation key for the ``RegistrationProfile`` will be a
        SHA1 hash, generated from a combination of the ``User``'s
        username and a random salt.
        """
        salt = sha.new(str(random.random())).hexdigest()[:5]
        activation_key = sha.new(salt + user.username).hexdigest()
        return self.create(user=user, activation_key=activation_key)

    def delete_expired_users(self):
        """
        Remove expired instances of ``RegistrationProfile`` and their
        associated ``User``s.

        Accounts to be deleted are identified by searching for
        instances of ``RegistrationProfile`` with expired activation
        keys, and then checking to see if their associated ``User``
        instances have the field ``is_active`` set to ``False``; any
        ``User`` who is both inactive and has an expired activation
        key will be deleted.

        It is recommended that this method be executed regularly as
        part of your routine site maintenance; the file
        ``bin/delete_expired_users.py`` in this application provides
        a standalone script, suitable for use as a cron job, which
        will call this method.

        Regularly clearing out accounts which have never been
        activated serves two useful purposes:

        1. It alleviates the occasional need to reset a
           ``RegistrationProfile`` and/or re-send an activation email
           when a user does not receive or does not act upon the
           initial activation email; since the account will be
           deleted, the user will be able to simply re-register and
           receive a new activation key.

        2. It prevents the possibility of a malicious user registering
           one or more accounts and never activating them (thus
           denying the use of those usernames to anyone else); since
           those accounts will be deleted, the usernames will become
           available for use again.

        If you have a troublesome ``User`` and wish to disable their
        account while keeping it in the database, simply delete the
        associated ``RegistrationProfile``; an inactive ``User`` which
        does not have an associated ``RegistrationProfile`` will not
        be deleted.
        """
        for profile in self.all():
            if profile.activation_key_expired():
                user = profile.user
                if not user.is_active:
                    user.delete()


class RegistrationProfile(models.Model):
    """
    A simple profile which stores an activation key for use during
    user account registration.

    Generally, you will not want to interact directly with instances
    of this model; the provided manager includes methods for creating
    and activating new accounts, as well as for cleaning out accounts
    which have never been activated.

    While it is possible to use this model as the value of the
    ``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
    so. This model's sole purpose is to store data temporarily during
    account registration and activation, and a mechanism for
    automatically creating an instance of a site-specific profile
    model is provided via the ``create_inactive_user`` on
    ``RegistrationManager``.
    """
    user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
    activation_key = models.CharField(_('activation key'), max_length=40)
    """url = models.URLField(_('URL'))"""
    companyname = models.CharField(_('companyname'), max_length=256)
    companyurl = models.CharField(_('companyurl'), max_length=256)

    objects = RegistrationManager()

    class Meta:
        verbose_name = _('registration profile')
        verbose_name_plural = _('registration profiles')

    def __unicode__(self):
        return u"Registration information for %s" % self.user

    def activation_key_expired(self):
        """
        Determine whether this ``RegistrationProfile``'s activation
        key has expired, returning a boolean -- ``True`` if the key
        has expired.

        Key expiration is determined by a two-step process:

        1. If the user has already activated, the key will have been
           reset to the string ``ALREADY_ACTIVATED``. Re-activating is
           not permitted, and so this method returns ``True`` in this
           case.

        2. Otherwise, the date the user signed up is incremented by
           the number of days specified in the setting
           ``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
           days after signup during which a user is allowed to
           activate their account); if the result is less than or
           equal to the current date, the key has expired and this
           method returns ``True``.
        """
        expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
        return self.activation_key == "ALREADY_ACTIVATED" or \
               (self.user.date_joined + expiration_date <= datetime.datetime.now())
    activation_key_expired.boolean = True
Python
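The delete_expired_users() docstring above recommends running the cleanup regularly. A minimal standalone script in the spirit of the bin/delete_expired_users.py it mentions, assuming the Django settings module is already configured in the environment:

#!/usr/bin/env python
# Assumes DJANGO_SETTINGS_MODULE is set so the ORM can connect.
from registration.models import RegistrationProfile

if __name__ == '__main__':
    # deletes inactive Users whose activation keys have expired
    RegistrationProfile.objects.delete_expired_users()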
""" Views which allow users to create and activate accounts. """ from django.conf import settings from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext from registration.forms import RegistrationForm from registration.models import RegistrationProfile def activate(request, activation_key, template_name='registration/activate.html', extra_context=None): """ Activate a ``User``'s account from an activation key, if their key is valid and hasn't expired. By default, use the template ``registration/activate.html``; to change this, pass the name of a template as the keyword argument ``template_name``. **Required arguments** ``activation_key`` The activation key to validate and use for activating the ``User``. **Optional arguments** ``extra_context`` A dictionary of variables to add to the template context. Any callable object in this dictionary will be called to produce the end result which appears in the context. ``template_name`` A custom template to use. **Context:** ``account`` The ``User`` object corresponding to the account, if the activation was successful. ``False`` if the activation was not successful. ``expiration_days`` The number of days for which activation keys stay valid after registration. Any extra variables supplied in the ``extra_context`` argument (see above). **Template:** registration/activate.html or ``template_name`` keyword argument. """ activation_key = activation_key.lower() # Normalize before trying anything with it. account = RegistrationProfile.objects.activate_user(activation_key) if extra_context is None: extra_context = {} context = RequestContext(request) for key, value in extra_context.items(): context[key] = callable(value) and value() or value return render_to_response(template_name, { 'account': account, 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS }, context_instance=context) def register(request, success_url=None, form_class=RegistrationForm, profile_callback=None, template_name='registration/registration_form.html', extra_context=None): """ Allow a new user to register an account. Following successful registration, issue a redirect; by default, this will be whatever URL corresponds to the named URL pattern ``registration_complete``, which will be ``/accounts/register/complete/`` if using the included URLConf. To change this, point that named pattern at another URL, or pass your preferred URL as the keyword argument ``success_url``. By default, ``registration.forms.RegistrationForm`` will be used as the registration form; to change this, pass a different form class as the ``form_class`` keyword argument. The form class you specify must have a method ``save`` which will create and return the new ``User``, and that method must accept the keyword argument ``profile_callback`` (see below). To enable creation of a site-specific user profile object for the new user, pass a function which will create the profile object as the keyword argument ``profile_callback``. See ``RegistrationManager.create_inactive_user`` in the file ``models.py`` for details on how to write this function. By default, use the template ``registration/registration_form.html``; to change this, pass the name of a template as the keyword argument ``template_name``. **Required arguments** None. **Optional arguments** ``form_class`` The form class to use for registration. ``extra_context`` A dictionary of variables to add to the template context. 
Any callable object in this dictionary will be called to produce
    the end result which appears in the context.

    ``profile_callback``
        A function which will be used to create a site-specific
        profile instance for the new ``User``.

    ``success_url``
        The URL to redirect to on successful registration.

    ``template_name``
        A custom template to use.

    **Context:**

    ``form``
        The registration form.

    Any extra variables supplied in the ``extra_context`` argument
    (see above).

    **Template:**

    registration/registration_form.html or ``template_name`` keyword
    argument.
    """
    if request.method == 'POST':
        form = form_class(data=request.POST, files=request.FILES)
        if form.is_valid():
            new_user = form.save(profile_callback=profile_callback)
            # success_url needs to be dynamically generated here; setting
            # a default value using reverse() will cause circular-import
            # problems with the default URLConf for this application,
            # which imports this file.
            return HttpResponseRedirect(success_url or reverse('registration_complete'))
    else:
        form = form_class()

    if extra_context is None:
        extra_context = {}
    context = RequestContext(request)
    for key, value in extra_context.items():
        context[key] = callable(value) and value() or value
    return render_to_response(template_name,
                              {'form': form},
                              context_instance=context)
Python
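The register() docstring says a preferred redirect can be passed as the success_url keyword argument; a minimal URLConf sketch doing exactly that (the paths are illustrative):

from django.conf.urls.defaults import *
from registration.views import register

urlpatterns = patterns('',
    # redirect to /welcome/ instead of the default registration_complete page
    url(r'^signup/$', register, {'success_url': '/welcome/'},
        name='custom_register'),
)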
from django.contrib import admin from registration.models import RegistrationProfile class RegistrationAdmin(admin.ModelAdmin): list_display = ('__unicode__', 'activation_key_expired') search_fields = ('user__username', 'user__first_name') admin.site.register(RegistrationProfile, RegistrationAdmin)
Python
#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) if __name__ == "__main__": execute_manager(settings)
Python
from django.db import models
from django.contrib.auth.models import User

# Create your models here.

class Message(models.Model):
    username = models.CharField(max_length=255)
    email = models.EmailField()
    telephone = models.CharField(max_length=50)
    public = models.BooleanField()
    title = models.CharField(max_length=255)
    content = models.CharField(max_length=2000)
    datetime = models.DateField()
    companyid = models.CharField(max_length=255)
    ip = models.CharField(max_length=255)

    def __unicode__(self):
        return self.username

    class Admin:
        pass


class UserProfile(models.Model):
    url = models.URLField()
    home_address = models.TextField()
    user = models.ForeignKey(User, unique=True)
Python
""" This file demonstrates two different styles of tests (one doctest and one unittest). These will both pass when you run "manage.py test". Replace these with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.failUnlessEqual(1 + 1, 2) __test__ = {"doctest": """ Another way to test that 1 + 1 is equal to 2. >>> 1 + 1 == 2 True """}
Python
# Create your views here.
import time

from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext

from feedbackcn.fb.models import Message


def feedback(request):
    if request.method == 'POST':
        ISOTIMEFORMAT = '%Y-%m-%d'
        m = Message(
            username=request.POST['username'],
            email=request.POST['email'],
            telephone="13426",
            public="1",
            title=request.POST['subject'],
            content=request.POST['message'],
            datetime=time.strftime(ISOTIMEFORMAT, time.localtime(time.time())),
            companyid="123",
            ip="192.168.0.1"
        )
        m.save()
    res = Message.objects.filter(companyid='123').order_by('-id').all()
    paginator = Paginator(res, 10)
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        message_list = paginator.page(page)
    except (EmptyPage, InvalidPage):
        message_list = paginator.page(paginator.num_pages)
    responseContext = {'list': message_list}
    return render_to_response('fb/index.html', responseContext,
                              context_instance=RequestContext(request))


def home(request):
    responseContext = {'test': "test"}
    return render_to_response('fb/home.html', responseContext,
                              context_instance=RequestContext(request))


def price(request):
    responseContext = {'test': "test"}
    return render_to_response('fb/price.html', responseContext,
                              context_instance=RequestContext(request))


def register(request):
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            return HttpResponseRedirect("/feedback")
    else:
        form = UserCreationForm()
    return render_to_response("registration/register.html", {'form': form})
Python
#!/usr/bin/env python
#
# Copyright (C) 2007 Juan Manuel Caicedo.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import httplib2

from feedburner import Management


def main(usr, pwd):
    mng = Management(usr, pwd)

    delete_ids = (1234, 99999)
    resync_ids = (7777, )

    print "Listing feeds"
    for feed in mng.find():
        print "Id: %s, Uri=%s, Title: %s" % (feed.id, feed.uri, feed.title)
        if feed.id in resync_ids:
            print "Resync"
            mng.resync(feed)
        if feed.id in delete_ids:
            print "Deleting"
            mng.delete(feed)

if __name__ == '__main__':
    httplib2.debuglevel = 1
    usr = 'juan'
    pwd = 'secret123'
    main(usr, pwd)
Python
#!/usr/bin/env python
#
# Copyright (C) 2007 Juan Manuel Caicedo.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""feedburner provides a library and source code that makes it easy to use
the FeedBurner API from a Python program.

The following APIs are implemented:

    - FeedBurner Management API (MgmtAPI)

See: http://www.feedburner.com/fb/a/developers
"""

import httplib2

from base64 import b64encode
from urllib import urlencode

try:
    from xml.etree import ElementTree
except ImportError:
    from elementtree import ElementTree

__author__ = "Juan Manuel Caicedo"
__copyright__ = "Copyright 2007, Juan Manuel Caicedo"
__version__ = "$Rev: 1$"
__revision__ = "$Rev: 1$"
__license__ = "Apache License 2.0"
__link__ = "http://code.google.com/p/feedburner-python-client/"


class FeedBurnerException(Exception):
    """ Base class for errors """
    def __init__(self, code, message):
        self.code = code
        Exception.__init__(self, "%s (code=%s)" % (message, code))


class FeedBurnerHttp(httplib2.Http):
    """ HTTP object for API requests. """

    USER_AGENT = 'feedburner-python-client/%s (%s)' % (__version__, __link__)

    def __init__(self, user, password, https=True):
        httplib2.Http.__init__(self)
        self.force_exception_to_status_code = True
        protocol = https and 'https' or 'http'
        self._prefix = protocol + '://api.feedburner.com/management/1.0/'
        self._headers = {
            'Authorization': 'Basic %s' % b64encode('%s:%s' % (user, password)),
            'user-agent': self.USER_AGENT
        }

    def _validate_response(self, rsp):
        """ Validates the response, raising an exception when appropriate """
        if not rsp.get("stat") == "ok":
            err = rsp.find("err")
            raise FeedBurnerException(err.get("code"), err.get("msg"))

    def request(self, service, method, parameters=None, body=None):
        """ Makes a HTTP request to the service.

        If the request was successful and the content is a response
        document, it returns an ElementTree object. If it was an empty
        response, returns None. Otherwise, if the response was an error
        document, it raises a FeedBurnerException
        """
        uri = self._prefix + service
        if parameters:
            uri = ''.join((uri, '?', urlencode(parameters)))
        # copy the default headers so the shared dict is not mutated per request
        headers = dict(self._headers)
        if body:
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        resp, content = httplib2.Http.request(self, uri, method, body, headers)
        if resp.status != 204:
            tree = ElementTree.fromstring(content)
            self._validate_response(tree)
            return tree


class Feed:
    """ This class contains the information related to a feed. """

    def __init__(self, id=None, uri=None, title=None, source=None,
                 services=None):
        """ Class constructor.
One of ``id`` or ``uri`` must be provided """ if None == id == uri: raise ValueError('One of feed id or uri must be provided') self.uri = uri self.title = title self.source = source self.id = id self.services = services or {} def _asElementTree(self): attributes = {} for attr in ('uri', 'title', 'id'): val = getattr(self, attr) if val: attributes[attr] = val feed = ElementTree.Element('feed', attributes) feed.append(ElementTree.Element('source', url=self.source)) feed.append(ElementTree.Element('services')) for serv in self.services: feed.append(ElementTree.Element('service', {'class':serv})) return feed def __str__(self): return "Feed [id=%s, uri=%s, title=%s, source=%s]" % ( self.id, self.uri, self.title, self.source) @staticmethod def fromElementTree(tree): """ Creates a Feed object from an ElementTree object """ feed = Feed(**dict(tree.items())) elm_source = tree.find('source') if ElementTree.iselement(elm_source): feed.source = elm_source.get('url') for service in tree.findall('services/service'): clazz = service.get('class') feed.services[clazz] = {} for par in service.findall('param'): feed.services[clazz][par.get('name')] = par.text return feed class Management: """ Client for the FeedBurner Management API (MgmtAPI). See http://www.feedburner.com/fb/a/developers/feedapi """ def __init__(self, user, password): self._http = FeedBurnerHttp(user, password) def find(self): """ Finds the list of the feeds for the user Returns a list of Feed objects. See: http://www.feedburner.com/fb/a/developers/feedapi#Find_Feeds """ tree = self._http.request("FindFeeds", "GET") feeds = [] for elm_feed in tree.findall('*/feed'): feeds.append(Feed.fromElementTree(elm_feed)) return feeds def get(self, feed_id=None, uri=None): """ Gets a Feed object based on its id or uri See: http://www.feedburner.com/fb/a/developers/feedapi#Get_Feed """ if None == feed_id == uri: raise ValueError('One of feed id or uri must be provided') pars = {'id': feed_id, 'uri': uri} tree = self._http.request("GetFeed", "GET", pars) elm_feed = tree.find('feed') if ElementTree.iselement(elm_feed): return Feed.fromElementTree(elm_feed) raise ValueError('Feed not found') def modify(self, feed): """ Modifies a feed. Args: feed: Feed The object with the new information of the feed. Returns: The modified Feed object. See: http://www.feedburner.com/fb/a/developers/feedapi#Modify_Feed """ tree = feed._asElementTree() body = urlencode({"feed" : ElementTree.tostring(tree)}) tree = self._http.request("ModifyFeed", "POST", body=body) elm_feed = tree.find('feed') return Feed.fromElementTree(elm_feed) def add(self, feed): """ Adds a new feed. Args: feed: Feed The object with the information of the feed. Returns: The new Feed object. See: http://www.feedburner.com/fb/a/developers/feedapi#Add_Feed """ tree = feed._asElementTree() body = urlencode({"feed" : ElementTree.tostring(tree)}) tree_resp = self._http.request("AddFeed", "POST", body=body) elm_feed = tree_resp.find('feed') return Feed.fromElementTree(elm_feed) def resync(self, feed): """ Synchronizes the feed information Args: feed: Feed The object with the information of the feed. See: http://www.feedburner.com/fb/a/developers/feedapi#Resync_Feed """ pars = {'id': feed.id, 'uri': feed.uri} self._http.request("ResyncFeed", "POST", pars) def delete(self, feed): """ Deletes a feed. Args: feed: Feed The object that will be removed. See: http://www.feedburner.com/fb/a/developers/feedapi#Delete_Feed """ pars = {'id': feed.id, 'uri': feed.uri} self._http.request("DeleteFeed", "POST", pars)
Python
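A small usage sketch for the Management client above, creating a new feed; the credentials and source URL are placeholders:

from feedburner import Feed, Management

mng = Management('user', 'secret123')               # placeholder credentials
feed = Feed(uri='myfeed', title='My Feed',
            source='http://example.com/atom.xml')   # placeholder source feed
new_feed = mng.add(feed)
print "Created feed: id=%s uri=%s" % (new_feed.id, new_feed.uri)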
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2007 manatlan
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
"""
feedBox2 - a python server for a freebox

for V4 (notes):
 - see the key mapping in common/template.py
 - photo plugin: replace png generation -> gif
"""
import sys
import os
from wsgiref.simple_server import make_server
import cgi

sys.path.append(os.path.join(os.path.dirname(__file__), "common"))
from common.pilotvlc import PilotVlc
from common.fbconf import FeedConf
from common.template import Template

import traceback

class MyDebug:
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        try:
            return [i for i in self.app(environ, start_response)]
        except:
            type, value, tb = sys.exc_info()
            sys.__excepthook__(type, value, tb)
            try:
                start_response('500 Internal Server Error', [('content-type', 'text/html')])
            except:
                pass
            return ["<h1>Error</h1>", "<pre>"] + traceback.format_exception(type, value, tb) + ["</pre>"]

# ==============================================================================
class FBS(object):
# ==============================================================================
    """ __data """
    pathPlugins = "plugins"

    @staticmethod
    def init(path, feedconf):
        FBS.__data = {}
        FBS.__path = path

        def loadPlugins(path):  # fills FBS.__data
            if os.path.isdir(path):
                sys.path.append(path)
                for i in os.listdir(path):
                    if i[0] not in (".", "_"):
                        print "Load plugin %s" % i
                        folder = path + "/" + i
                        plug = __import__(i, globals(), locals())
                        if hasattr(plug, "__title__") \
                           and hasattr(plug, "__order__") \
                           and hasattr(plug, "main"):
                            # attach a Conf object to each plugin
                            plug.main.conf = feedconf[i]
                            # attach a save() method to persist the conf
                            obj = plug.main
                            value = feedconf.save
                            setattr(obj, "save", value.__get__(obj, type(obj)))
                            try:
                                FBS.__data[i] = (
                                    plug.__order__,
                                    plug.__title__,
                                    plug.main(),
                                    i + "/index",
                                    folder
                                )
                            except Exception, m:
                                print "plugin '%s' is bad ! (%s)" % (folder, m)
                                del(plug)
                        else:
                            print "plugin '%s' is not suitable !" % folder
                            del(plug)
                sys.path.remove(path)

        loadPlugins(FBS.pathPlugins)
        loadPlugins(os.path.join(feedconf.homeFolder, FBS.pathPlugins))

        # fetch the home plugin and attach its plugin list
        home = None
        ordres = {}
        for i in FBS.__data:
            ordre, titre, instance, url, folder = FBS.__data[i]
            if ordre:  # skips home
                ordres[ordre] = (titre, url)
        lo = ordres.keys()
        lo.sort()
        # attach to home its list of plugins ([title, url], [], ...)
        FBS.menus = [ordres[i] for i in lo]

    def serveStatic(self, file, start_response):
        #print "serve static content", file
        ext = file.split(".")[-1].lower()
        if ext in ["gif"]:
            start_response('200 OK', [('content-type', 'image/gif')])
        elif ext in ["png"]:
            start_response('200 OK', [('content-type', 'image/png')])
        elif ext in ["js"]:
            start_response('200 OK', [('content-type', 'text/javascript')])
        else:
            start_response('200 OK', [('content-type', 'text/html')])
        return open(file, "rb").read()

    def wsgi(self, environ, start_response):
        path = environ["PATH_INFO"]
        page = environ["PATH_INFO"][1:]
        args = self.query(cgi.parse_qs(environ["QUERY_STRING"]))
        for i in args:
            args[i] = args[i].decode("iso-8859-15")

        Template.isFreeBox = "freeplayer" in environ["REMOTE_HOST"]

        # resolve the requested object from the "/sobj/page" path pattern
        _l = page.strip("/").split("/")
        one = _l[0]
        second = "/".join(_l[1:])
        if second:
            if one in FBS.__data:
                sobj = one
                page = second
            else:
                sobj = "home"
                page = second
        else:
            sobj = "home"
        if not page:
            page = "index"

        staticFile = os.path.join(FBS.__path, "common", "static", page)
        if os.path.isfile(staticFile):
            # static files always take priority!
            yield self.serveStatic(staticFile, start_response)
        elif sobj == "home":
            # main menu
            if page in ["/", "index", "/index"]:
                start_response('200 OK', [('content-type', 'text/html')])
                array = FBS.menus
                yield Template({"panel_display": "FEEDBOX"}, nom="common/templates/home.html")
            else:
                start_response('404 Not Found', [('content-type', 'text/html')])
                yield "404 not found %s !" % page
        else:
            # plugin call
            ordre, titre, instance, url, folder = FBS.__data[sobj]
            staticFile = os.path.join(FBS.__path, folder, page)
            if os.path.isfile(staticFile):
                yield self.serveStatic(staticFile, start_response)
            else:
                precPath = os.getcwdu()
                os.chdir(os.path.join(FBS.__path, folder))
                try:
                    page = page.split(".")[0]  # strip any extension
                    if hasattr(instance, page):
                        obj = getattr(instance, page)
                        #if hasattr(obj,"content_type"):
                        #    # the page/method has a different content-type, use it
                        #    content_type=getattr(obj,"content_type")
                        #else:
                        #    # default ct
                        content_type = "text/html"
                        if args:  # if there are args, call with args
                            b = obj(*(), **(args))  # generator of Site."page"()
                        else:     # if there aren't args, call with no args
                            b = obj()               # generator of Site."page"()
                        if hasattr(b, "next"):  # it's a generator()
                            # the first yield must return the content-type
                            buf = b.next()  # the first one is the mime type
                            start_response('200 OK', [('content-type', buf)])
                            while True:  # fetch its yield statements
                                try:
                                    yield b.next()
                                except StopIteration:
                                    break
                        elif b != None:
                            #if b.__class__.__name__=="Redirect":
                            #    self.send_response(b.code)
                            #    self.send_header('Location', b.url)
                            #    self.root.end_headers()
                            if type(b) == tuple and len(b) == 2:
                                start_response('200 OK', [('content-type', b[0])])
                                yield str(b[1])
                            else:
                                start_response('200 OK', [('content-type', content_type)])
                                #print sobj,page,[b,]
                                yield str(b)
                        else:
                            start_response('404 Not Found', [('content-type', 'text/html')])
                            yield "404 not found %s !" % page
                    else:
                        start_response('404 Not Found', [('content-type', 'text/html')])
                        yield "404 not found %s !" % page
                except Exception, e:
                    print Exception, ":", e
                os.chdir(precPath)

    def query(self, parsedQuery):
        """Returns the QUERY dictionary, similar to the result of
        cgi.parse_qs except that:
        - if the key ends with [], returns the value (a Python list)
        - if not, returns a string, empty if the list is empty, or with
          the first value in the list"""
        res = {}
        for item in parsedQuery.keys():
            value = parsedQuery[item]  # a Python list
            if item.endswith("[]"):
                res[item[:-2]] = value
            else:
                if len(value) == 0:
                    res[item] = ''
                else:
                    res[item] = value[0]
        return res

if __name__ == "__main__":
    # make sure we run from the feedbox directory
    path = os.path.split(sys.argv[0])[0]
    if path:
        os.chdir(path)

    # just to start the pipe to sh/cmd
    PilotVlc()

    # initialise feedbox (config file)
    feedconf = FeedConf()  # conf object
    if feedconf.checkConf():
        # fetch the VLC location (from the fb conf)
        PilotVlc.exe = feedconf.config.vlc

        # initialise the server, loading the plugins
        FBS.init(os.getcwd(), feedconf)

        # and start the server
        port = 8080
        print "Serving at http://%s:%d" % ("127.0.0.1", port)
        #FBS.serve_forever(port)
        f = FBS()
        try:
            make_server('', port, MyDebug(f.wsgi)).serve_forever()
        except (KeyboardInterrupt, SystemExit):
            print "bye"
Python
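loadPlugins() above accepts any package under plugins/<name> that exposes __title__, __order__ and a main class; wsgi() then maps its public methods to URLs of the form /<name>/<method>. A minimal plugin sketch under those assumptions (the name and markup are illustrative):

# plugins/hello/__init__.py -- illustrative plugin
__title__ = u"Hello"
__order__ = 9999

class main(object):
    def index(self):
        # a plain (non-generator, non-tuple) return value is served as text/html
        return "<h1>hello from a feedBox2 plugin</h1>"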
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2007 manatlan
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
"""
feedBox2 - a python server for a freebox

for V4 (notes):
 - see the key mapping in common/template.py
 - photo plugin: replace png generation -> gif
"""
import sys
import os
from wsgiref.simple_server import make_server
import cgi

sys.path.append(os.path.join(os.path.dirname(__file__), "common"))

from common.pilotvlc import PilotVlc
from common.fbconf import FeedConf
from common.template import Template

import traceback

class MyDebug:
    """WSGI middleware that renders any uncaught exception as an HTML traceback."""
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        try:
            return [i for i in self.app(environ, start_response)]
        except:
            type, value, tb = sys.exc_info()
            sys.__excepthook__(type, value, tb)
            try:
                start_response('500 Internal Server Error',
                               [('content-type', 'text/html')])
            except:
                pass    # the headers may already have been sent
            return ["<h1>Error</h1>", "<pre>"] \
                 + traceback.format_exception(type, value, tb) \
                 + ["</pre>"]

# ==============================================================================
class FBS(object):
# ==============================================================================
    """ __data """
    pathPlugins = "plugins"

    @staticmethod
    def init(path, feedconf):
        FBS.__data = {}
        FBS.__path = path

        def loadPlugins(path):      # fills FBS.__data
            if os.path.isdir(path):
                sys.path.append(path)
                for i in os.listdir(path):
                    if i[0] not in (".", "_"):
                        print "Load plugin %s" % i
                        folder = path + "/" + i
                        plug = __import__(i, globals(), locals())
                        if hasattr(plug, "__title__") \
                           and hasattr(plug, "__order__") \
                           and hasattr(plug, "main"):
                            # bind a Conf object to each plugin
                            plug.main.conf = feedconf[i]
                            # bind a save() method to persist the conf
                            obj = plug.main
                            value = feedconf.save
                            setattr(obj, "save", value.__get__(obj, type(obj)))
                            try:
                                FBS.__data[i] = (plug.__order__,
                                                 plug.__title__,
                                                 plug.main(),
                                                 i + "/index",
                                                 folder)
                            except Exception, m:
                                print "plugin '%s' is bad ! (%s)" % (folder, m)
                                del(plug)
                        else:
                            print "plugin '%s' is not suitable !" % folder
                            del(plug)
                sys.path.remove(path)

        loadPlugins(FBS.pathPlugins)
        loadPlugins(os.path.join(feedconf.homeFolder, FBS.pathPlugins))

        # fetch the home plugin and attach the plugin list to it
        home = None
        ordres = {}
        for i in FBS.__data:
            ordre, titre, instance, url, folder = FBS.__data[i]
            if ordre:               # skips home
                ordres[ordre] = (titre, url)
        lo = ordres.keys()
        lo.sort()
        # home gets its plugin list ([titre, url], [], ...)
        FBS.menus = [ordres[i] for i in lo]

    def serveStatic(self, file, start_response):
        #print "serve static content", file
        ext = file.split(".")[-1].lower()
        if ext in ["gif"]:
            start_response('200 OK', [('content-type', 'image/gif')])
        elif ext in ["png"]:
            start_response('200 OK', [('content-type', 'image/png')])
        elif ext in ["js"]:
            start_response('200 OK', [('content-type', 'text/javascript')])
        else:
            start_response('200 OK', [('content-type', 'text/html')])
        return open(file, "rb").read()

    def wsgi(self, environ, start_response):
        path = environ["PATH_INFO"]
        page = environ["PATH_INFO"][1:]
        args = self.query(cgi.parse_qs(environ["QUERY_STRING"]))
        for i in args:
            args[i] = args[i].decode("iso-8859-15")

        Template.isFreeBox = "freeplayer" in environ["REMOTE_HOST"]

        # determine the requested object from the path pattern "/sobj/page"
        _l = page.strip("/").split("/")
        one = _l[0]
        second = "/".join(_l[1:])
        if second:
            if one in FBS.__data:
                sobj = one
                page = second
            else:
                sobj = "home"
                page = second
        else:
            sobj = "home"
        if not page:
            page = "index"

        staticFile = os.path.join(FBS.__path, "common", "static", page)
        if os.path.isfile(staticFile):
            # static files always take precedence !
            yield self.serveStatic(staticFile, start_response)
        elif sobj == "home":
            # main menu
            if page in ["/", "index", "/index"]:
                start_response('200 OK', [('content-type', 'text/html')])
                array = FBS.menus
                yield Template({"panel_display": "FEEDBOX"},
                               nom="common/templates/home.html")
            else:
                start_response('404 Not Found', [('content-type', 'text/html')])
                yield "404 not found %s !" % page
        else:
            # plugin call
            ordre, titre, instance, url, folder = FBS.__data[sobj]
            staticFile = os.path.join(FBS.__path, folder, page)
            if os.path.isfile(staticFile):
                yield self.serveStatic(staticFile, start_response)
            else:
                precPath = os.getcwdu()
                os.chdir(os.path.join(FBS.__path, folder))
                try:
                    page = page.split(".")[0]   # strip any extension
                    if hasattr(instance, page):
                        obj = getattr(instance, page)
                        #if hasattr(obj, "content_type"):
                        #    # the page/method has a different content-type, use it
                        #    content_type = getattr(obj, "content_type")
                        #else:
                        #    # default ct
                        content_type = "text/html"
                        if args:    # if there are args, call with args
                            b = obj(*(), **(args))  # generator of Site."page"()
                        else:       # if there aren't args, call with no args
                            b = obj()               # generator of Site."page"()
                        if hasattr(b, "next"):
                            # it's a generator(): the first yield must
                            # give back the content-type
                            buf = b.next()  # the first one is the mime type
                            start_response('200 OK', [('content-type', buf)])
                            while True:     # fetch its yield statements
                                try:
                                    yield b.next()
                                except StopIteration:
                                    break
                        elif b != None:
                            #if b.__class__.__name__ == "Redirect":
                            #    self.send_response(b.code)
                            #    self.send_header('Location', b.url)
                            #    self.root.end_headers()
                            if type(b) == tuple and len(b) == 2:
                                start_response('200 OK', [('content-type', b[0])])
                                yield str(b[1])
                            else:
                                start_response('200 OK', [('content-type', content_type)])
                                #print sobj, page, [b,]
                                yield str(b)
                        else:
                            start_response('404 Not Found', [('content-type', 'text/html')])
                            yield "404 not found %s !" % page
                    else:
                        start_response('404 Not Found', [('content-type', 'text/html')])
                        yield "404 not found %s !" % page
                except Exception, e:
                    print Exception, ":", e
                os.chdir(precPath)      # restore the previous cwd

    def query(self, parsedQuery):
        """Returns the QUERY dictionary, similar to the result of
        cgi.parse_qs except that:
         - if the key ends with [], returns the value (a Python list)
         - if not, returns a string, empty if the list is empty,
           or with the first value in the list"""
        res = {}
        for item in parsedQuery.keys():
            value = parsedQuery[item]   # a Python list
            if item.endswith("[]"):
                res[item[:-2]] = value
            else:
                if len(value) == 0:
                    res[item] = ''
                else:
                    res[item] = value[0]
        return res

if __name__ == "__main__":
    # make sure we run from the feedbox directory
    path = os.path.split(sys.argv[0])[0]
    if path:
        os.chdir(path)

    # just to start the pipe to sh/cmd
    PilotVlc()

    # initialize feedbox (config file)
    feedconf = FeedConf()   # conf object
    if feedconf.checkConf():
        # fetch the location of VLC (from the fb conf)
        PilotVlc.exe = feedconf.config.vlc

        # initialize the server, loading the plugins
        FBS.init(os.getcwd(), feedconf)

        # and start the server
        port = 8080
        print "Serving at http://%s:%d" % ("127.0.0.1", port)
        #FBS.serve_forever(port)
        f = FBS()
        try:
            make_server('', port, MyDebug(f.wsgi)).serve_forever()
        except (KeyboardInterrupt, SystemExit):
            print "bye"
Python
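A note on the dispatcher's return protocol above: FBS.wsgi accepts three shapes of value from a plugin page method -- a generator whose first yield is the content-type, a (content_type, body) 2-tuple, or any other value, which is str()'ed and served as text/html. The following is a minimal sketch of a hypothetical plugin (the "demo" name, its pages, and the package layout are illustrative assumptions, not part of the project) showing the three styles:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# hypothetical plugins/demo/__init__.py -- a sketch, assuming the loader
# conventions visible above (__title__/__order__/main are introspected,
# and /demo/<page> is routed to main.<page>())

__title__ = u"Demo"
__order__ = 9999
__author__ = "example"
__about__ = "shows the three return styles FBS.wsgi understands"

class main(object):
    def index(self):
        # style 1: a bare value -> str()'ed and served as text/html
        return "<h1>hello from demo</h1>"

    def data(self):
        # style 2: a (content_type, body) 2-tuple
        return 'text/plain', "raw payload"

    def stream(self):
        # style 3: a generator -- the first yield is the content-type,
        # the following yields are the body chunks
        yield 'text/html'
        for n in range(3):
            yield "<p>chunk %d</p>" % n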
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2007 manatlan manatlan[at]gmail(dot)com
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
from common.medialib import getFiles    #getAllFiles,getExtVlcOpt
from common.pilotvlc import PilotVlc
from common.template import Template
from common import u64dec, u64enc
import os

__title__ = u"Vidéos"
__order__ = 1000
__author__ = "manatlan"
__about__ = "permet de piloter ses médias (audio/video)"

DIR = os.path.abspath(u"medias/videos")
PLAYLIST = os.path.abspath(u"playlist.m3u")

def basename(path, space=False):
    if ":\\" in path:
        path = path.split(":\\")[1]
    if space:
        path = path     # no-op
    path = path.replace('\\', '/')
    path = path.split('/')
    path = path[len(path) - 1]
    return path

def redirect(topage):
    print "redirect to", topage
    return Template({"refresh": "0;url=" + topage})

class main(object):
    def __init__(self):
        global DIR
        self.__root = DIR
        #~ self.__exts = ["jpeg","jpg","gif","png","avi","divx","dv","mpeg","mpg","mp3","ogg","wma","mpc","m3u"]
        self.__exts = ["avi", "divx", "dv", "mpeg", "mpg", "wmv", "flv", "rm", "mp4"]   # no images !!
        self.__vlc = PilotVlc()

    def browse(self, rep=u""):
        #==================================================================
        if rep:
            rep = u64dec(rep)   # DECODE PATH
        print rep
        rrep = os.path.abspath(os.path.join(self.__root, rep))
        assert type(rrep) == unicode
        pdirs, pfiles = getFiles(rrep, self.__exts)
        if rep != u"":
            parent = os.path.dirname(rep)
        else:
            parent = u""
        # base64-encode the paths
        dirs = [(os.path.basename(i),
                 u64enc(os.path.join(rep, os.path.basename(i))),
                 i) for i in pdirs]
        files = [(os.path.basename(i), u64enc(i)) for i in pfiles]
        parent_enc = u64enc(parent)
        current = rrep[len(self.__root):]
        if current:
            return Template({"BACK": "browse?rep=" + parent_enc})
        else:
            if self.__vlc.isPlaying():
                return Template({"BACK": "index"})      # back to the player
            else:
                return Template({"BACK": "/index"})     # back to the menu

    def load(self, file):
        #==================================================================
        """ starts the movie, and goes straight to the 'empty' screen """
        file = u64dec(file)
        self.__vlc.playVideo(file)
        fichier = os.path.basename(file)
        return Template({"refresh": "3;url=empty"})

    def empty(self):    # "playing" page
        #==================================================================
        cmd = {
            "BACK": "index",
            "stop": "cmd?cmd=stop",
            "volume_key": True,
        }
        state = self.__vlc.isPlaying()
        if state == 0:      # stop
            return self.browse()    # nothing is playing, go browse
        elif state == 1:    # playing
            cmd["PAUSE"] = "cmd?cmd=pause"
        elif state == 2:    # pausing
            cmd["PLAY"] = "cmd?cmd=pause"
        return Template(cmd)

    def cmd(self, cmd):
        #==================================================================
        if cmd == "pause":
            self.__vlc.pause()
            state = self.__vlc.isPlaying()
            if state == 2:
                return redirect("index")    # go to pause
            elif state == 1:
                return redirect("empty")    # go to play
            else:
                raise Exception("what's playing ?!%d" % state)
        elif cmd == "stop":
            self.__vlc.stop()
            return redirect("/index")   # back to the menu
        else:
            raise Exception("what's up ?!" + cmd)

    def seek(self, seek):
        #==================================================================
        if self.__vlc.isPlaying() == 2:     # pausing
            self.__vlc.pause()              # go to play
        self.__vlc.seek(int(seek))
        return redirect("empty")

    def index(self, rnd=""):
        #==================================================================
        state = self.__vlc.isPlaying()
        if state == 0:      # stopped
            return self.browse()
        else:
            cmd = {
                "refresh": "10;url=index",
                "stop": "cmd?cmd=stop",
                "volume_key": False,
                "BACK": "/index",
            }
            if state == 1:  # playing
                cmd["PAUSE"] = "cmd?cmd=pause"
            else:           # 2: pausing
                cmd["PLAY"] = "cmd?cmd=pause"
            titre = os.path.basename(self.__vlc.getTitle())
            percent = self.__vlc.getPercent()
            info = ""   #self.__vlc.getInfo()
            return Template(cmd)
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2005 manatlan manatlan[at]gmail(dot)com
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
from common.template import Template
import urllib
import StringIO
from meteo import getDays
from datetime import datetime

__title__ = u"Météo"
__order__ = 4000
__author__ = "manatlan"
__about__ = "affiche la meteo suivant un departement"

class main:
    def index(self, dep=None):
        if dep:
            ddays = getDays(int(dep))
            ld = ddays.keys()
            ld.sort()
            days = []
            for i in ld:
                a, b, c = ddays[i]
                d = i.strftime("%A %d %B")
                if i == datetime.now().date():
                    d = "<u>%s</u>" % d     # underline today
                days.append((d, a, b, c))
            return Template({"BACK": "index"})
        else:
            days = []
            return Template({"BACK": "/index"})

    def image(self, dep, jo=""):
        # proxy the weather image for the department, chunk by chunk
        tmp = urllib.urlopen("http://perso0.free.fr/cgi-bin/meteo.pl?dep=%s" % dep)
        chr = tmp.read(1024)
        output = StringIO.StringIO()
        while chr:
            output.write(chr)
            chr = tmp.read(1024)
        tmp.close()
        return 'image/gif', output.getvalue()

if __name__ == "__main__":
    print getDays(67)
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sgmllib import SGMLParser
import urllib2, os, re
from datetime import datetime, timedelta

def __getHtmlSource(url, cookie=[]):
    """ fetches the html source of the url """
    import urllib
    txheaders = {
        #~ "Host": "www.humanclock.com",
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; rv:1.7.3)Gecko/20040911 Firefox/0.10.1",
        "Accept": "image/png,*/*;q=0.5",
        "Accept-Language": "fr,en-us;q=0.7,en;q=0.3",
        "Accept-Encoding": "gzip,deflate",
        "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
        "Keep-Alive": "300",
        "Proxy-Connection": "keep-alive",
        'Cookie': ''.join(cookie)
    }
    r = urllib2.Request(url, "", txheaders)
    return urllib2.urlopen(r).read()

class __getSpan(SGMLParser):
    """collects the text of every <span> and the basename of every <img src>"""
    def __init__(self, h):
        SGMLParser.__init__(self)
        self.feed(h)

    def reset(self):
        SGMLParser.reset(self)
        self.list = []
        self.buf = ""

    def handle_data(self, text):
        self.buf += text

    def start_span(self, attrs):
        self.buf = ""

    def end_span(self):
        self.list.append(self.buf)

    def start_img(self, attrs):
        href = [v for k, v in attrs if k == 'src']
        if href:
            self.list.append(os.path.basename(href[0]))

def getDays(dep):
    def __getCookie(url):
        """ returns the cookies """
        u = urllib2.urlopen(url)
        cookie = u.info()['set-cookie']
        return cookie

    rep = os.path.dirname(__file__)
    recharge = True
    file = os.path.join(rep, "pc%d.html" % dep)
    if os.path.isfile(file):
        # reuse the cached page if it is less than 12 hours old
        if datetime.fromtimestamp(os.stat(file).st_mtime) > datetime.today() - timedelta(hours=12):
            recharge = False
    if recharge:
        url = "http://www.pleinchamp.com/meteo/meteoDept.aspx?dpt_id=%s&menu_id=35" % dep
        c = __getCookie(url)
        htmlSource = __getHtmlSource(url, c)
        open(file, "w").write(htmlSource)
    else:
        htmlSource = open(file, "r").read()

    days = {}
    htmlSource = htmlSource.replace("table", "µ")
    rr = re.compile("""<µ border="0" cellspacing="0" cellpadding="0" width="80" border="1" class="bottomRight">[^µ]+</µ>""", re.DOTALL | re.S)
    for i in rr.findall(htmlSource):
        p = __getSpan(i)
        jour = int(p.list[0].split(" ")[1])     # day number
        img = p.list[2]
        min = p.list[4]     # and int(p.list[4]) or None
        max = p.list[5]     # and int(p.list[5]) or None
        days[jour] = (img, min, max)

    jours = {}
    j = datetime.now()
    j -= timedelta(1)   # yesterday
    for i in range(10):
        if j.day in days:
            jours[j.date()] = days[j.day]
        j += timedelta(1)   # next day
    return jours

################################################################################
if __name__ == "__main__":
    print getDays(67)
Python
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ #__version__ = "pre-3.3-" + "$Revision: 1.51 $"[11:15] + "-cvs" __version__ = "3.3" __license__ = "Python" __copyright__ = "Copyright 2002-4, Mark Pilgrim" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. This is off by default because of reports of crashing on some # platforms. If it crashes for you, please submit a bug report with your OS # platform, Python version, and the URL of the feed you were attempting to parse. # Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> TIDY_MARKUP = 0 # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # timeoutsocket allows feedparser to time out rather than hang forever on ultra-slow servers. # Python 2.3 now has this functionality available in the standard socket library, so under # 2.3 you don't need to install anything. But you probably should anyway, because the socket # module is buggy and timeoutsocket is better. try: import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py timeoutsocket.setDefaultSocketTimeout(20) except ImportError: import socket if hasattr(socket, 'setdefaulttimeout'): socket.setdefaulttimeout(20) import urllib, urllib2 _mxtidy = None if TIDY_MARKUP: try: from mx.Tidy import Tidy as _mxtidy except: pass # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace("&", "&amp;") data = data.replace(">", "&gt;") data = data.replace("<", "&lt;") return data # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. # Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # ---------- don't touch these ---------- class CharacterEncodingOverride(Exception): pass class CharacterEncodingUnknown(Exception): pass class NonXMLContentType(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): def __getitem__(self, key): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'modified', 'date_parsed': 'modified_parsed', 'description': ['tagline', 'summary']} realkey = keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def has_key(self, key): return hasattr(self, key) or UserDict.has_key(self, key) def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = 
string.maketrans( \ "".join(map(chr, range(256))), "".join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) class _FeedParserMixin: namespaces = {"": "", "http://backend.userland.com/rss": "", "http://blogs.law.harvard.edu/tech/rss": "", "http://purl.org/rss/1.0/": "", "http://my.netscape.com/rdf/simple/0.9/": "", "http://example.com/newformat#": "", "http://example.com/necho": "", "http://purl.org/echo/": "", "uri/of/echo/namespace#": "", "http://purl.org/pie/": "", "http://purl.org/atom/ns#": "", "http://purl.org/rss/1.0/modules/rss091#": "", "http://webns.net/mvcb/": "admin", "http://purl.org/rss/1.0/modules/aggregation/": "ag", "http://purl.org/rss/1.0/modules/annotate/": "annotate", "http://media.tangent.org/rss/1.0/": "audio", "http://backend.userland.com/blogChannelModule": "blogChannel", "http://web.resource.org/cc/": "cc", "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons", "http://purl.org/rss/1.0/modules/company": "co", "http://purl.org/rss/1.0/modules/content/": "content", "http://my.theinfo.org/changed/1.0/rss/": "cp", "http://purl.org/dc/elements/1.1/": "dc", "http://purl.org/dc/terms/": "dcterms", "http://purl.org/rss/1.0/modules/email/": "email", "http://purl.org/rss/1.0/modules/event/": "ev", "http://postneo.com/icbm/": "icbm", "http://purl.org/rss/1.0/modules/image/": "image", "http://xmlns.com/foaf/0.1/": "foaf", "http://freshmeat.net/rss/fm/": "fm", "http://purl.org/rss/1.0/modules/link/": "l", "http://madskills.com/public/xml/rss/module/pingback/": "pingback", "http://prismstandard.org/namespaces/1.2/basic/": "prism", "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", "http://www.w3.org/2000/01/rdf-schema#": "rdfs", "http://purl.org/rss/1.0/modules/reference/": "ref", "http://purl.org/rss/1.0/modules/richequiv/": "reqv", "http://purl.org/rss/1.0/modules/search/": "search", "http://purl.org/rss/1.0/modules/slash/": "slash", "http://purl.org/rss/1.0/modules/servicestatus/": "ss", "http://hacks.benhammersley.com/rss/streaming/": "str", "http://purl.org/rss/1.0/modules/subscription/": "sub", "http://purl.org/rss/1.0/modules/syndication/": "sy", "http://purl.org/rss/1.0/modules/taxonomy/": "taxo", "http://purl.org/rss/1.0/modules/threading/": "thr", "http://purl.org/rss/1.0/modules/textinput/": "ti", "http://madskills.com/public/xml/rss/module/trackback/":"trackback", "http://wellformedweb.org/CommentAPI/": "wfw", "http://purl.org/rss/1.0/modules/wiki/": "wiki", "http://schemas.xmlsoap.org/soap/envelope/": "soap", "http://www.w3.org/1999/xhtml": "xhtml", "http://www.w3.org/XML/1998/namespace": "xml" } can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'comments', 'license'] can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description'] can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description'] html_types = ['text/html', 'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): if _debug: sys.stderr.write("initializing FeedParser\n") self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = '' # feed type/version, see SUPPORTED_VERSIONS # the following are used internally to track state; # some of this is kind of out of control and should # probably be refactored into a finite state machine self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 
self.inauthor = 0 self.incontributor = 0 self.contentparams = FeedParserDict() self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or '' self.lang = baselang or None if baselang: self.feeddata['language'] = baselang def unknown_starttag(self, tag, attrs): if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) # normalize attrs attrs = [(k.lower(), v) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri self.baseuri = baseuri lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang self.lang = lang self.basestack.append(baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and self.contentparams.get('mode') == 'escaped': # element declared itself as escaped markup, but it isn't really self.contentparams['mode'] = 'xml' if self.incontent and self.contentparams.get('mode') == 'xml': # Note: probably shouldn't simply recreate localname here, but # our namespace handling isn't actually 100% correct in cases where # the feed redefines the default namespace (which is actually # the usual case for inline content, thanks Sam), so here we # cheat and just reconstruct the element based on localname # because that compensates for the bugs in our namespace handling. # This will horribly munge inline content with non-empty qnames, # but nobody actually does that, so I'm not fixing it. 
tag = tag.split(':')[-1] return self.handle_data("<%s%s>" % (tag, "".join([' %s="%s"' % t for t in attrs])), escape=0) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: return self.push(prefix + suffix, 1) def unknown_endtag(self, tag): if _debug: sys.stderr.write('end %s\n' % tag) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and self.contentparams.get('mode') == 'escaped': # element declared itself as escaped markup, but it isn't really self.contentparams['mode'] = 'xml' if self.incontent and self.contentparams.get('mode') == 'xml': tag = tag.split(':')[-1] self.handle_data("</%s>" % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] def handle_charref(self, ref): # called for each character reference, e.g. for "&#160;", ref will be "160" if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = "&#%s;" % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = unichr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for "&copy;", ref will be "copy" if not self.elementstack: return if _debug: sys.stderr.write("entering handle_entityref with %s\n" % ref) if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref else: # entity resolution graciously donated by Aaron Swartz def name2cp(k): import htmlentitydefs if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3 return htmlentitydefs.name2codepoint[k] k = htmlentitydefs.entitydefs[k] if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1 return ord(k) try: name2cp(ref) except KeyError: text = "&%s;" % ref else: text = unichr(name2cp(ref)).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('mode') == 'xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. <!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. 
<?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if _debug: sys.stderr.write("entering parse_declaration\n") if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) return k+1 def trackNamespace(self, prefix, uri): if (prefix, uri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: self.version = 'rss090' if uri == 'http://purl.org/rss/1.0/' and not self.version: self.version = 'rss10' if not prefix: return if uri.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace uri = 'http://backend.userland.com/rss' if self.namespaces.has_key(uri): self.namespacemap[prefix] = self.namespaces[uri] def resolveURI(self, uri): return urlparse.urljoin(self.baseuri or '', uri) def decodeEntities(self, element, data): return data def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() output = "".join(pieces) output = output.strip() if not expectingText: return output # decode base64 content if self.contentparams.get('mode') == 'base64' and base64: try: output = base64.decodestring(output) except binascii.Error: pass except binascii.Incomplete: pass # resolve relative URIs if (element in self.can_be_relative_uri) and output: output = self.resolveURI(output) # decode entities within embedded markup output = self.decodeEntities(element, output) # resolve relative URIs within embedded markup if self.contentparams.get('type', 'text/html') in self.html_types: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding) # sanitize embedded markup if self.contentparams.get('type', 'text/html') in self.html_types: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding) if self.encoding and (type(output) == types.StringType): try: output = unicode(output, self.encoding) except: pass # store output in appropriate place(s) if self.inentry: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif element == 'category': self.entries[-1][element] = output domain = self.entries[-1]['categories'][-1][0] self.entries[-1]['categories'][-1] = (domain, output) elif element == 'source': self.entries[-1]['source']['value'] = output elif element == 'link': self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif self.infeed and (not self.intextinput) and (not self.inimage): if element == 'description': element = 'tagline' self.feeddata[element] = output if element == 'category': domain = self.feeddata['categories'][-1][0] self.feeddata['categories'][-1] = (domain, output) elif element == 'link': self.feeddata['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) 
contentparams['value'] = output self.feeddata[element + '_detail'] = contentparams return output def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos <> -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _save(self, key, value): if self.inentry: self.entries[-1].setdefault(key, value) elif self.feeddata: self.feeddata.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': 'rss091u', '0.92': 'rss092', '0.93': 'rss093', '0.94': 'rss094'} if not self.version: attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = 'rss20' else: self.version = 'rss' def _start_dlhottitles(self, attrsD): self.version = 'hotrss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) _start_feedinfo = _start_channel def _cdf_common(self, attrsD): if attrsD.has_key('lastmod'): self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if attrsD.has_key('href'): self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = 'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): self.inimage = 1 self.push('image', 0) context = self._getContext() context.setdefault('image', FeedParserDict()) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): self.intextinput = 1 self.push('textinput', 0) context = self._getContext() context.setdefault('textinput', FeedParserDict()) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) _start_managingeditor = _start_author _start_dc_author = _start_author _start_dc_creator = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) def _end_name(self): value = self.pop('name') if self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['textinput']['name'] = value def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['height'] = 
value def _start_url(self, attrsD): self.push('url', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('url') if self.inauthor: self._save_author('url', value) elif self.incontributor: self._save_contributor('url', value) elif self.inimage: context = self._getContext() context['image']['url'] = value elif self.intextinput: context = self._getContext() context['textinput']['link'] = value _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) def _end_email(self): value = self.pop('email') if self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) pass def _getContext(self): if self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value): context = self._getContext() context.setdefault('author_detail', FeedParserDict()) context['author_detail'][key] = value self._sync_author_detail() def _save_contributor(self, key, value): context = self._getContext() context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%s_detail' % key) if detail: name = detail.get('name') email = detail.get('email') if name and email: context[key] = "%s (%s)" % (name, email) elif name: context[key] = name elif email: context[key] = email else: author = context.get(key) if not author: return emailmatch = re.search(r"""(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))""", author) if not emailmatch: return email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, '') author = author.replace('()', '') author = author.strip() if author and (author[0] == '('): author = author[1:] if author and (author[-1] == ')'): author = author[:-1] author = author.strip() context.setdefault('%s_detail' % key, FeedParserDict()) context['%s_detail' % key]['name'] = author context['%s_detail' % key]['email'] = email def _start_tagline(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('tagline', 1) _start_subtitle = _start_tagline def _end_tagline(self): value = self.pop('tagline') self.incontent -= 1 self.contentparams.clear() if self.infeed: self.feeddata['description'] = value _end_subtitle = _end_tagline def _start_copyright(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('copyright', 1) _start_dc_rights = _start_copyright def _end_copyright(self): self.pop('copyright') self.incontent -= 1 self.contentparams.clear() _end_dc_rights = _end_copyright def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item _start_product = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def 
_end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_dcterms_issued(self, attrsD): self.push('issued', 1) _start_issued = _start_dcterms_issued def _end_dcterms_issued(self): value = self.pop('issued') self._save('issued_parsed', _parse_date(value)) _end_issued = _end_dcterms_issued def _start_dcterms_created(self, attrsD): self.push('created', 1) _start_created = _start_dcterms_created def _end_dcterms_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value)) _end_created = _end_dcterms_created def _start_dcterms_modified(self, attrsD): self.push('modified', 1) _start_modified = _start_dcterms_modified _start_dc_date = _start_dcterms_modified _start_pubdate = _start_dcterms_modified def _end_dcterms_modified(self): value = self.pop('modified') parsed_value = _parse_date(value) self._save('modified_parsed', parsed_value) _end_modified = _end_dcterms_modified _end_dc_date = _end_dcterms_modified _end_pubdate = _end_dcterms_modified def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): self._save('expired_parsed', _parse_date(self.pop('expired'))) def _start_cc_license(self, attrsD): self.push('license', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('license') def _start_creativecommons_license(self, attrsD): self.push('license', 1) def _end_creativecommons_license(self): self.pop('license') def _start_category(self, attrsD): self.push('category', 1) domain = self._getAttribute(attrsD, 'domain') cats = [] if self.inentry: cats = self.entries[-1].setdefault('categories', []) elif self.infeed: cats = self.feeddata.setdefault('categories', []) cats.append((domain, None)) _start_dc_subject = _start_category _start_keywords = _start_category def _end_category(self): self.pop('category') _end_dc_subject = _end_category _end_keywords = _end_category def _start_cloud(self, attrsD): self.feeddata['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', 'alternate') attrsD.setdefault('type', 'text/html') if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry if self.inentry: self.entries[-1].setdefault('links', []) self.entries[-1]['links'].append(FeedParserDict(attrsD)) elif self.infeed: self.feeddata.setdefault('links', []) self.feeddata['links'].append(FeedParserDict(attrsD)) if attrsD.has_key('href'): expectingText = 0 if attrsD.get('type', '') in self.html_types: if self.inentry: self.entries[-1]['link'] = attrsD['href'] elif self.infeed: self.feeddata['link'] = attrsD['href'] else: self.push('link', expectingText) _start_producturl = _start_link def _end_link(self): value = self.pop('link') if self.intextinput: context = self._getContext() context['textinput']['link'] = value if self.inimage: context = self._getContext() context['image']['link'] = value _end_producturl = _end_link def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) def _end_guid(self): value = self.pop('id') self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) if self.guidislink: # guid acts as link, but only if "ispermalink" is 
not present or is "true", # and only if the item doesn't already have a link element self._save('link', value) def _start_id(self, attrsD): self.push('id', 1) def _end_id(self): value = self.pop('id') def _start_title(self, attrsD): self.incontent += 1 if _debug: sys.stderr.write('attrsD.xml:lang = %s\n' % attrsD.get('xml:lang')) if _debug: sys.stderr.write('self.lang = %s\n' % self.lang) self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('title', self.infeed or self.inentry) _start_dc_title = _start_title def _end_title(self): value = self.pop('title') self.incontent -= 1 self.contentparams.clear() if self.intextinput: context = self._getContext() context['textinput']['title'] = value elif self.inimage: context = self._getContext() context['image']['title'] = value _end_dc_title = _end_title def _start_description(self, attrsD, default_content_type='text/html'): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', default_content_type), 'language': self.lang, 'base': self.baseuri}) self.push('description', self.infeed or self.inentry) def _start_abstract(self, attrsD): return self._start_description(attrsD, 'text/plain') def _end_description(self): value = self.pop('description') self.incontent -= 1 self.contentparams.clear() context = self._getContext() if self.intextinput: context['textinput']['description'] = value elif self.inimage: context['image']['description'] = value # elif self.inentry: # context['summary'] = value # elif self.infeed: # context['tagline'] = value _end_abstract = _end_description def _start_info(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('info', 1) def _end_info(self): self.pop('info') self.incontent -= 1 self.contentparams.clear() def _start_generator(self, attrsD): if attrsD: if attrsD.has_key('url'): attrsD['url'] = self.resolveURI(attrsD['url']) self.feeddata['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') if self.feeddata.has_key('generator_detail'): self.feeddata['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self.feeddata['generator_detail'] = FeedParserDict({"url": value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('summary', 1) def _end_summary(self): value = self.pop('summary') if self.entries: self.entries[-1]['description'] = value self.incontent -= 1 self.contentparams.clear() def _start_enclosure(self, attrsD): if self.inentry: self.entries[-1].setdefault('enclosures', []) self.entries[-1]['enclosures'].append(FeedParserDict(attrsD)) def _start_source(self, attrsD): if self.inentry: self.entries[-1]['source'] = FeedParserDict(attrsD) self.push('source', 1) 
def _end_source(self): self.pop('source') def _start_content(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) def _start_prodlink(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'), 'type': attrsD.get('type', 'text/html'), 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) def _start_body(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': 'xml', 'type': 'application/xhtml+xml', 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': 'escaped', 'type': 'text/html', 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) _start_fullitem = _start_content_encoded def _end_content(self): value = self.pop('content') if self.contentparams.get('type') in (['text/plain'] + self.html_types): self._save('description', value) self.incontent -= 1 self.contentparams.clear() _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content _end_prodlink = _end_content if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): if _debug: sys.stderr.write('trying StrictFeedParser\n') xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None def startPrefixMapping(self, prefix, uri): self.trackNamespace(prefix, uri) def startElementNS(self, name, qname, attrs): namespace, localname = name namespace = str(namespace or '') if namespace.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace namespace = 'http://backend.userland.com/rss' prefix = self.namespaces.get(namespace, 'unknown') if prefix: localname = prefix + ':' + localname localname = str(localname).lower() # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. 
attrsD = {} for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): prefix = self.namespaces.get(namespace, '') if prefix: attrlocalname = prefix + ":" + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) self.unknown_starttag(localname, attrsD.items()) # def resolveEntity(self, publicId, systemId): # return _StringIO() def characters(self, text): self.handle_data(text) def endElementNS(self, name, qname): namespace, localname = name namespace = str(namespace) prefix = self.namespaces.get(namespace, '') if prefix: localname = prefix + ':' + localname localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', 'img', 'input', 'isindex', 'link', 'meta', 'param'] def __init__(self, encoding): self.encoding = encoding if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) data = re.sub(r'<(\S+)/>', r'<\1></\1>', data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') if self.encoding and (type(data) == types.UnicodeType): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) def normalize_attrs(self, attrs): # utility method to be called by descendants attrs = [(k.lower(), v) for k, v in attrs] # if self.encoding: # if _debug: sys.stderr.write('normalize_attrs, encoding=%s\n' % self.encoding) # attrs = [(k, v.encode(self.encoding)) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")] if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag) strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs]) if tag in self.elements_no_end_tag: self.pieces.append("<%(tag)s%(strattrs)s />" % locals()) else: self.pieces.append("<%(tag)s%(strattrs)s>" % locals()) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be "pre" # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%(tag)s>" % locals()) def handle_charref(self, ref): # called for each character reference, e.g. for "&#160;", ref will be "160" # Reconstruct the original character reference. self.pieces.append("&#%(ref)s;" % locals()) def handle_entityref(self, ref): # called for each entity reference, e.g. for "&copy;", ref will be "copy" # Reconstruct the original entity reference. self.pieces.append("&%(ref)s;" % locals()) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. 
self.pieces.append("<!--%(text)s-->" % locals()) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. self.pieces.append("<?%(text)s>" % locals()) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append("<!%(text)s>" % locals()) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def output(self): """Return processed HTML as a single string""" return "".join([str(p) for p in self.pieces]) class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if self.contentparams.get('mode') == 'escaped': data = data.replace('&lt;', '<') data = data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") return data class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src')] def __init__(self, baseuri, encoding): _BaseHTMLProcessor.__init__(self, encoding) self.baseuri = baseuri def resolveURI(self, uri): return urlparse.urljoin(self.baseuri, uri) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding): if _debug: sys.stderr.write("entering _resolveRelativeURIs\n") p = _RelativeURIResolver(baseURI, encoding) p.feed(htmlSource) return p.output() class _HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 
'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width'] unacceptable_elements_with_end_tag = ['script', 'applet'] def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 def unknown_starttag(self, tag, attrs): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 return attrs = self.normalize_attrs(attrs) attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def _sanitizeHTML(htmlSource, encoding): p = _HTMLSanitizer(encoding) p.feed(htmlSource) data = p.output() if _mxtidy and TIDY_MARKUP: nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, output_xhtml=1, numeric_entities=1, wrap=0) if data.count('<body'): data = data.split('<body', 1)[1] if data.count('>'): data = data.split('>', 1)[1] if data.count('</body'): data = data.split('</body', 1)[0] data = data.strip().replace('\r\n', '\n') return data class _FeedURLHandler(urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, headers): if ((code / 100) == 3) and (code != 304): return self.http_error_302(req, fp, code, msg, headers) infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code return infourl def http_error_302(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl def http_error_301(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl http_error_300 = http_error_302 http_error_303 = http_error_302 http_error_307 = http_error_302 def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): """URL, filename, or string --> stream This function lets you define parsers that take any input source (URL, pathname to local or network file, or actual data as a string) and deal with it in a uniform manner. 
Returned object is guaranteed to have all the basic stdio read methods (read, readline, readlines). Just .close() the object when you're done with it. If the etag argument is supplied, it will be used as the value of an If-None-Match request header. If the modified argument is supplied, it must be a tuple of 9 integers as returned by gmtime() in the standard Python time module. This MUST be in GMT (Greenwich Mean Time). The formatted date/time will be used as the value of an If-Modified-Since request header. If the agent argument is supplied, it will be used as the value of a User-Agent request header. If the referrer argument is supplied, it will be used as the value of a Referer[sic] request header. If handlers is supplied, it is a list of handlers used to build a urllib2 opener. """ if hasattr(url_file_stream_or_string, "read"): return url_file_stream_or_string if url_file_stream_or_string == "-": return sys.stdin if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): if not agent: agent = USER_AGENT # test for inline user:password for basic auth auth = None if base64: urltype, rest = urllib.splittype(url_file_stream_or_string) realhost, rest = urllib.splithost(rest) if realhost: user_passwd, realhost = urllib.splituser(realhost) if user_passwd: url_file_stream_or_string = "%s://%s%s" % (urltype, realhost, rest) auth = base64.encodestring(user_passwd).strip() # try to open with urllib2 (to use optional headers) request = urllib2.Request(url_file_stream_or_string) request.add_header("User-Agent", agent) if etag: request.add_header("If-None-Match", etag) if modified: # format into an RFC 1123-compliant timestamp. We can't use # time.strftime() since the %a and %b directives can be affected # by the current locale, but RFC 2616 states that dates must be # in English. short_weekdays = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] request.add_header("If-Modified-Since", "%s, %02d %s %04d %02d:%02d:%02d GMT" % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) if referrer: request.add_header("Referer", referrer) if gzip and zlib: request.add_header("Accept-encoding", "gzip, deflate") elif gzip: request.add_header("Accept-encoding", "gzip") elif zlib: request.add_header("Accept-encoding", "deflate") else: request.add_header("Accept-encoding", "") if auth: request.add_header("Authorization", "Basic %s" % auth) if ACCEPT_HEADER: request.add_header("Accept", ACCEPT_HEADER) opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent try: return opener.open(request) finally: opener.close() # JohnD # try to open with native open function (if url_file_stream_or_string is a filename) try: return open(url_file_stream_or_string) except: pass # treat url_file_stream_or_string as string return _StringIO(str(url_file_stream_or_string)) _date_handlers = [] def registerDateHandler(func): """Register a date handler function (takes string, returns 9-tuple date in GMT)""" _date_handlers.insert(0, func) # ISO-8601 date parsing routines written by Fazal Majid. # The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 # parser is beyond the scope of feedparser and would be a worthwhile addition # to the Python library. 
# A single regular expression cannot parse ISO 8601 date formats into groups # as the standard is highly irregular (for instance is 030104 2003-01-04 or # 0301-04-01), so we use templates instead. # Please note the order in templates is significant because we need a # greedy match. _iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO', 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', '-YY-?MM', '-OOO', '-YY', '--MM-?DD', '--MM', '---DD', 'CC', ''] _iso8601_re = [ tmpl.replace( 'YYYY', r'(?P<year>\d{4})').replace( 'YY', r'(?P<year>\d\d)').replace( 'MM', r'(?P<month>[01]\d)').replace( 'DD', r'(?P<day>[0123]\d)').replace( 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( 'CC', r'(?P<century>\d\d$)') + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + r'(:(?P<second>\d{2}))?' + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' for tmpl in _iso8601_tmpl] del tmpl _iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] del regex def _parse_date_iso8601(dateString): """Parse a variety of ISO-8601-compatible formats like 20040105""" m = None for _iso8601_match in _iso8601_matches: m = _iso8601_match(dateString) if m: break if not m: return if m.span() == (0, 0): return params = m.groupdict() ordinal = params.get("ordinal", 0) if ordinal: ordinal = int(ordinal) else: ordinal = 0 year = params.get("year", "--") if not year or year == "--": year = time.gmtime()[0] elif len(year) == 2: # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993 year = 100 * int(time.gmtime()[0] / 100) + int(year) else: year = int(year) month = params.get("month", "-") if not month or month == "-": # ordinals are NOT normalized by mktime, we simulate them # by setting month=1, day=ordinal if ordinal: month = 1 else: month = time.gmtime()[1] month = int(month) day = params.get("day", 0) if not day: # see above if ordinal: day = ordinal elif params.get("century", 0) or \ params.get("year", 0) or params.get("month", 0): day = 1 else: day = time.gmtime()[2] else: day = int(day) # special case of the century - is the first year of the 21st century # 2000 or 2001 ? The debate goes on... if "century" in params.keys(): year = (int(params["century"]) - 1) * 100 + 1 # in ISO 8601 most fields are optional for field in ["hour", "minute", "second", "tzhour", "tzmin"]: if not params.get(field, None): params[field] = 0 hour = int(params.get("hour", 0)) minute = int(params.get("minute", 0)) second = int(params.get("second", 0)) # weekday is normalized by mktime(), we can ignore it weekday = 0 # daylight savings is complex, but not needed for feedparser's purposes # as time zones, if specified, include mention of whether it is active # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and # and most implementations have DST bugs daylight_savings_flag = 0 tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag] # ISO 8601 time zone adjustments tz = params.get("tz") if tz and tz != "Z": if tz[0] == "-": tm[3] += int(params.get("tzhour", 0)) tm[4] += int(params.get("tzmin", 0)) elif tz[0] == "+": tm[3] -= int(params.get("tzhour", 0)) tm[4] -= int(params.get("tzmin", 0)) else: return None # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) # which is guaranteed to normalize d/m/y/h/m/s. # Many implementations have bugs, but we'll pretend they don't. return time.localtime(time.mktime(tm)) registerDateHandler(_parse_date_iso8601) # 8-bit date handling routines written by ytrewq1. 
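# A hypothetical illustration (not part of the original module) of extending
# the handler chain at runtime: registerDateHandler() prepends a callable that
# takes a date string and returns a 9-tuple date in GMT, or None to fall
# through to the next registered handler:
#
#   def _parse_date_unix(dateString):
#       """Parse a raw Unix timestamp such as '1090000000' (illustrative)"""
#       if not dateString.isdigit(): return None
#       return time.gmtime(int(dateString))
#   registerDateHandler(_parse_date_unix)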
_korean_year = u'\ub144' # b3e2 in euc-kr _korean_month = u'\uc6d4' # bff9 in euc-kr _korean_day = u'\uc77c' # c0cf in euc-kr _korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr _korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr _korean_onblog_date_re = \ re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ (_korean_year, _korean_month, _korean_day)) _korean_nate_date_re = \ re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ (_korean_am, _korean_pm)) def _parse_date_onblog(dateString): """Parse a string according to the OnBlog 8-bit date format""" m = _korean_onblog_date_re.match(dateString) if not m: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("OnBlog date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_onblog) def _parse_date_nate(dateString): """Parse a string according to the Nate 8-bit date format""" m = _korean_nate_date_re.match(dateString) if not m: return hour = int(m.group(5)) ampm = m.group(4) if (ampm == _korean_pm): hour += 12 hour = str(hour) if len(hour) == 1: hour = '0' + hour w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("Nate date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_nate) _mssql_date_re = \ re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.\d+') def _parse_date_mssql(dateString): """Parse a string according to the MS SQL date format""" m = _mssql_date_re.match(dateString) if not m: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("MS SQL date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_mssql) # Unicode strings for Greek date strings _greek_months = \ { \ u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \ { \ u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in 
iso-8859-7 u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \ re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') def _parse_date_greek(dateString): """Parse a string according to a Greek 8-bit date format.""" m = _greek_date_format_re.match(dateString) if not m: return try: wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] except: return rfc822date = "%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s" % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} if _debug: sys.stderr.write("Greek date parsed as: %s\n" % rfc822date) return _parse_date_rfc822(rfc822date) registerDateHandler(_parse_date_greek) # Unicode strings for Hungarian date strings _hungarian_months = \ { \ u'janu\u00e1r': u'01', # e1 in iso-8859-2 u'febru\u00e1ri': u'02', # e1 in iso-8859-2 u'm\u00e1rcius': u'03', # e1 in iso-8859-2 u'\u00e1prilis': u'04', # e1 in iso-8859-2 u'm\u00e1ujus': u'05', # e1 in iso-8859-2 u'j\u00fanius': u'06', # fa in iso-8859-2 u'j\u00falius': u'07', # fa in iso-8859-2 u'augusztus': u'08', u'szeptember': u'09', u'okt\u00f3ber': u'10', # f3 in iso-8859-2 u'november': u'11', u'december': u'12', } _hungarian_date_format_re = \ re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') def _parse_date_hungarian(dateString): """Parse a string according to a Hungarian 8-bit date format.""" m = _hungarian_date_format_re.match(dateString) if not m: return try: month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour except: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s" % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} if _debug: sys.stderr.write("Hungarian date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_hungarian) # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by # Drake and licensed under the Python license. 
Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group("year")) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group("julian") if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group("month") day = 1 if month is None: month = 1 else: month = int(month) day = m.group("day") if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group("hours") if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group("minutes")) seconds = m.group("seconds") if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): """Return the Time Zone Designator as an offset in seconds from UTC.""" if not m: return 0 tzd = m.group("tzd") if not tzd: return 0 if tzd == "Z": return 0 hours = int(m.group("tzdhours")) minutes = m.group("tzdminutes") if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == "+": return -offset return offset __date_re = ("(?P<year>\d\d\d\d)" "(?:(?P<dsep>-|)" "(?:(?P<julian>\d\d\d)" "|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?") __tzd_re = "(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)" __tzd_rx = re.compile(__tzd_re) __time_re = ("(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)" "(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?" + __tzd_re) __datetime_re = "%s(?:T%s)?" % (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) def _parse_date_rfc822(dateString): """Parse an RFC822, RFC1123, RFC2822, or asctime-style date""" tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) # rfc822.py defines several time zones, but we define some extra ones. # "ET" is equivalent to "EST", etc. _additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} rfc822._timezones.update(_additional_timezones) registerDateHandler(_parse_date_rfc822) def _parse_date(dateString): """Parses a variety of date formats into a 9-tuple in GMT""" for handler in _date_handlers: try: date9tuple = handler(dateString) if not date9tuple: continue if len(date9tuple) != 9: if _debug: sys.stderr.write("date handler function must return 9-tuple\n") raise ValueError map(int, date9tuple) return date9tuple except Exception, e: if _debug: sys.stderr.write("%s raised %s\n" % (handler.__name__, repr(e))) pass return None def _getCharacterEncoding(http_headers, xml_data): """Get the character encoding of the XML document http_headers is a dictionary xml_data is a raw string (not Unicode) This is so much trickier than it sounds, it's not even funny. 
According to RFC 3023 ("XML Media Types"), if the HTTP Content-Type is application/xml, application/*+xml, application/xml-external-parsed-entity, or application/xml-dtd, the encoding given in the charset parameter of the HTTP Content-Type takes precedence over the encoding given in the XML prefix within the document, and defaults to "utf-8" if neither are specified. But, if the HTTP Content-Type is text/xml, text/*+xml, or text/xml-external-parsed-entity, the encoding given in the XML prefix within the document is ALWAYS IGNORED and only the encoding given in the charset parameter of the HTTP Content-Type header should be respected, and it defaults to "us-ascii" if not specified. Furthermore, discussion on the atom-syntax mailing list with the author of RFC 3023 leads me to the conclusion that any document served with a Content-Type of text/* and no charset parameter must be treated as us-ascii. (We now do this.) And also that it must always be flagged as non-well-formed. (We now do this too.) If Content-Type is unspecified (input was local file or non-HTTP source) or unrecognized (server just got it totally wrong), then go by the encoding given in the XML prefix of the document and default to "iso-8859-1" as per the HTTP specification (RFC 2616). Then, assuming we didn't find a character encoding in the HTTP headers (and the HTTP Content-type allowed us to look in the body), we need to sniff the first few bytes of the XML data and try to determine whether the encoding is ASCII-compatible. Section F of the XML specification shows the way here: http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info If the sniffed encoding is not ASCII-compatible, we need to make it ASCII compatible so that we can sniff further into the XML declaration to find the encoding attribute, which will tell us the true encoding. Of course, none of this guarantees that we will be able to parse the feed in the declared character encoding (assuming it was declared correctly, which many are not). CJKCodecs and iconv_codec help a lot; you should definitely install them if you can. http://cjkpython.i18n.org/ """ def _parseHTTPContentType(content_type): """takes HTTP Content-Type header and returns (content type, charset) If no charset is specified, returns (content type, '') If no content type is specified, returns ('', '') Both return parameters are guaranteed to be lowercase strings """ content_type = content_type or '' content_type, params = cgi.parse_header(content_type) return content_type, params.get('charset', '').replace("'", "") sniffed_xml_encoding = '' xml_encoding = '' true_encoding = '' http_content_type, http_encoding = _parseHTTPContentType(http_headers.get("content-type")) # Must sniff for non-ASCII-compatible character encodings before # searching for XML declaration. 
This heuristic is defined in # section F of the XML specification: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = _ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: # ASCII-compatible pass xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding acceptable_content_type = 0 application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') text_content_types = ('text/xml', 'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): acceptable_content_type = 1 true_encoding = http_encoding or xml_encoding or 'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): acceptable_content_type = 1 true_encoding = http_encoding or 'us-ascii' elif http_content_type.startswith('text/'): true_encoding = http_encoding or 'us-ascii' elif http_headers and (not http_headers.has_key('content-type')): true_encoding = xml_encoding or 'iso-8859-1' else: true_encoding = xml_encoding or 'utf-8' return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type def _toUTF8(data, encoding): """Changes an XML data stream on the fly to specify a new encoding data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already encoding is a string recognized by encodings.aliases """ if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): if _debug: 
sys.stderr.write('stripping BOM\n') if encoding != 'utf-16be': sys.stderr.write('trying utf-16be instead\n') encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16le': sys.stderr.write('trying utf-16le instead\n') encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-8': sys.stderr.write('trying utf-8 instead\n') encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32be': sys.stderr.write('trying utf-32be instead\n') encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32le': sys.stderr.write('trying utf-32le instead\n') encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) declmatch = re.compile('^<\?xml[^>]*?>') newdecl = """<?xml version='1.0' encoding='utf-8'?>""" if declmatch.search(newdata): newdata = declmatch.sub(newdecl, newdata) else: newdata = newdecl + u'\n' + newdata return newdata.encode("utf-8") def _stripDoctype(data): """Strips DOCTYPE from XML document, returns (rss_version, stripped_data) rss_version may be "rss091n" or None stripped_data is the same XML document, minus the DOCTYPE """ entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE) data = entity_pattern.sub('', data) doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE) doctype_results = doctype_pattern.findall(data) doctype = doctype_results and doctype_results[0] or '' if doctype.lower().count('netscape'): version = 'rss091n' else: version = None data = doctype_pattern.sub('', data) return version, data def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): """Parse a feed from a URL, file, stream, or string""" result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if type(handlers) == types.InstanceType: handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' f = None # if feed is gzip-compressed, decompress it if f and data and hasattr(f, "headers"): if gzip and f.headers.get('content-encoding', '') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. Ideally, we should re-request the # feed without the "Accept-encoding: gzip" header, # but we don't. 
result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and f.headers.get('content-encoding', '') == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if hasattr(f, "info"): info = f.info() result["etag"] = info.getheader("ETag") last_modified = info.getheader("Last-Modified") if last_modified: result["modified"] = _parse_date(last_modified) if hasattr(f, "url"): result["url"] = f.url result["status"] = 200 if hasattr(f, "status"): result["status"] = f.status if hasattr(f, "headers"): result["headers"] = f.headers.dict if hasattr(f, "close"): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get("headers", {}) result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ _getCharacterEncoding(http_headers, data) if http_headers and (not acceptable_content_type): if http_headers.has_key('content-type'): bozo_message = '%s is not an XML media type' % http_headers['content-type'] else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) result['version'], data = _stripDoctype(data) baseuri = http_headers.get('content-location', result.get('url')) baselang = http_headers.get('content-language', None) # if server sent 304, we're done if result.get("status", 0) == 304: result['version'] = '' result['debug_message'] = "The feed has not changed since you last checked, " + \ "so the server sent no data. This is a feature, not a bug!" 
return result # if there was a problem downloading, we're done if not data: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding, 'utf-8', 'windows-1252'): if proposed_encoding in tried_encodings: continue if not proposed_encoding: continue try: data = _toUTF8(data, proposed_encoding) known_encoding = 1 use_strict_parser = 1 break except: pass tried_encodings.append(proposed_encoding) if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingUnknown( \ "document encoding unknown, I tried " + \ "%s, %s, utf-8, and windows-1252 but nothing worked" % \ (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingOverride( \ "documented declared as %s, but parsed as %s" % \ (result['encoding'], proposed_encoding)) result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '') feedparser.feed(data) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version return result if __name__ == '__main__': if not sys.argv[1:]: print __doc__ sys.exit(0) else: urls = sys.argv[1:] zopeCompatibilityHack() from pprint import pprint for url in urls: print url print result = parse(url) pprint(result) print #REVISION HISTORY #1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements, # added Simon Fell's test suite #1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections #2.0 - 10/19/2002 # JD - use inchannel to watch out for image and textinput elements which can # also contain title, link, and description elements # JD - check for isPermaLink="false" attribute on guid elements # JD - replaced openAnything with open_resource supporting ETag and # If-Modified-Since request headers # JD - parse now accepts etag, modified, agent, and referrer optional # arguments # JD - modified parse to return a dictionary instead of a tuple so that any # etag or modified information can be returned and cached by the caller #2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything # because of etag/modified, return the old etag/modified to the caller to # indicate why nothing is being returned #2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its # useless. 
Fixes the problem JD was addressing by adding it. #2.1 - 11/14/2002 - MAP - added gzip support #2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent. # start_admingeneratoragent is an example of how to handle elements with # only attributes, no content. #2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify); # also, make sure we send the User-Agent even if urllib2 isn't available. # Match any variation of backend.userland.com/rss namespace. #2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is. #2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's # snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed # project name #2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); # removed unnecessary urllib code -- urllib2 should always be available anyway; # return actual url, status, and full HTTP headers (as result['url'], # result['status'], and result['headers']) if parsing a remote feed over HTTP -- # this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>; # added the latest namespace-of-the-week for RSS 2.0 #2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom # User-Agent (otherwise urllib2 sends two, which confuses some servers) #2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for # inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds #2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or # textInput, and also to return the character encoding (if specified) #2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking # nested divs within content (JohnD); fixed missing sys import (JohanS); # fixed regular expression to capture XML character encoding (Andrei); # added support for Atom 0.3-style links; fixed bug with textInput tracking; # added support for cloud (MartijnP); added support for multiple # category/dc:subject (MartijnP); normalize content model: "description" gets # description (which can come from description, summary, or full content if no # description), "content" gets dict of base/language/type/value (which can come # from content:encoded, xhtml:body, content, or fullitem); # fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang # tracking; fixed bug tracking unknown tags; fixed bug tracking content when # <content> element is not in default namespace (like Pocketsoap feed); # resolve relative URLs in link, guid, docs, url, comments, wfw:comment, # wfw:commentRSS; resolve relative URLs within embedded HTML markup in # description, xhtml:body, content, content:encoded, title, subtitle, # summary, info, tagline, and copyright; added support for pingback and # trackback namespaces #2.7 - 1/5/2004 - MAP - really added support for trackback and pingback # namespaces, as opposed to 2.6 when I said I did but didn't really; # sanitize HTML markup within some elements; added mxTidy support (if # installed) to tidy HTML markup within some elements; fixed indentation # bug in _parse_date (FazalM); use socket.setdefaulttimeout if available # (FazalM); universal date parsing and normalization (FazalM): 'created', modified', # 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', # 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' # and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa #2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; 
and &apos;. fixed memory # leak not closing url opener (JohnD); added dc:publisher support (MarekK); # added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) #2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in # encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); # fixed relative URI processing for guid (skadz); added ICBM support; added # base64 support #2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many # blogspot.com sites); added _debug variable #2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing #3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); # added several new supported namespaces; fixed bug tracking naked markup in # description; added support for enclosure; added support for source; re-added # support for cloud which got dropped somehow; added support for expirationDate #3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking # xml:base URI, one for documents that don't define one explicitly and one for # documents that define an outer and an inner xml:base that goes out of scope # before the end of the document #3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level #3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result["version"] # will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; # added support for creativeCommons:license and cc:license; added support for # full Atom content model in title, tagline, info, copyright, summary; fixed bug # with gzip encoding (not always telling server we support it when we do) #3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail # (dictionary of "name", "url", "email"); map author to author_detail if author # contains name + email address #3.0b8 - 1/28/2004 - MAP - added support for contributor #3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added # support for summary #3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from # xml.util.iso8601 #3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain # dangerous markup; fiddled with decodeEntities (not right); liberalized # date parsing even further #3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); # added support to Atom 0.2 subtitle; added support for Atom content model # in copyright; better sanitizing of dangerous HTML elements with end tags # (script, frameset) #3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, # etc.) 
in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />) #3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under # Python 2.1 #3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; # fixed bug capturing author and contributor URL; fixed bug resolving relative # links in author and contributor URL; fixed bug resolvin relative links in # generator URL; added support for recognizing RSS 1.0; passed Simon Fell's # namespace tests, and included them permanently in the test suite with his # permission; fixed namespace handling under Python 2.1 #3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) #3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 #3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); # use libxml2 (if available) #3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author # name was in parentheses; removed ultra-problematic mxTidy support; patch to # workaround crash in PyXML/expat when encountering invalid entities # (MarkMoraes); support for textinput/textInput #3.0b20 - 4/7/2004 - MAP - added CDF support #3.0b21 - 4/14/2004 - MAP - added Hot RSS support #3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in # results dict; changed results dict to allow getting values with results.key # as well as results[key]; work around embedded illformed HTML with half # a DOCTYPE; work around malformed Content-Type header; if character encoding # is wrong, try several common ones before falling back to regexes (if this # works, bozo_exception is set to CharacterEncodingOverride); fixed character # encoding issues in BaseHTMLProcessor by tracking encoding and converting # from Unicode to raw strings before feeding data to sgmllib.SGMLParser; # convert each value in results to Unicode (if possible), even if using # regex-based parsing #3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain # high-bit characters in attributes in embedded HTML in description (thanks # Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in # FeedParserDict; tweaked FeedParserDict.has_key to return True if asking # about a mapped key #3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and # results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could # cause the same encoding to be tried twice (even if it failed the first time); # fixed DOCTYPE stripping when DOCTYPE contained entity declarations; # better textinput and image tracking in illformed RSS 1.0 feeds #3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed # my blink tag tests #3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that # failed to parse utf-16 encoded feeds; made source into a FeedParserDict; # duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; # added support for image; refactored parse() fallback logic to try other # encodings if SAX parsing fails (previously it would only try other encodings # if re-encoding failed); remove unichr madness in normalize_attrs now that # we're properly tracking encoding in and out of BaseHTMLProcessor; set # feed.language from root-level xml:lang; set entry.id from rdf:about; # send Accept header #3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between # iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are # windows-1252); fixed regression that could cause the same encoding to be # 
tried twice (even if it failed the first time) #3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; # recover from malformed content-type header parameter with no equals sign # ("text/xml; charset:iso-8859-1") #3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities # to Unicode equivalents in illformed feeds (aaronsw); added and # passed tests for converting character entities to Unicode equivalents # in illformed feeds (aaronsw); test for valid parsers when setting # XML_AVAILABLE; make version and encoding available when server returns # a 304; add handlers parameter to pass arbitrary urllib2 handlers (like # digest auth or proxy support); add code to parse username/password # out of url and send as basic authentication; expose downloading-related # exceptions in bozo_exception (aaronsw); added __contains__ method to # FeedParserDict (aaronsw); added publisher_detail (aaronsw) #3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always # convert feed to UTF-8 before passing to XML parser; completely revamped # logic for determining character encoding and attempting XML parsing # (much faster); increased default timeout to 20 seconds; test for presence # of Location header on redirects; added tests for many alternate character # encodings; support various EBCDIC encodings; support UTF-16BE and # UTF16-LE with or without a BOM; support UTF-8 with a BOM; support # UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no # XML parsers are available; added support for "Content-encoding: deflate"; # send blank "Accept-encoding: " header if neither gzip nor zlib modules # are available #3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure # problem tracking xml:base and xml:lang if element declares it, child # doesn't, first grandchild redeclares it, and second grandchild doesn't; # refactored date parsing; defined public registerDateHandler so callers # can add support for additional date formats at runtime; added support # for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added # zopeCompatibilityHack() which turns FeedParserDict into a regular # dictionary, required for Zope compatibility, and also makes command- # line debugging easier because pprint module formats real dictionaries # better than dictionary-like objects; added NonXMLContentType exception, # which is stored in bozo_exception when a feed is served with a non-XML # media type such as "text/plain"; respect Content-Language as default # language if not xml:lang is present; cloud dict is now FeedParserDict; # generator dict is now FeedParserDict; better tracking of xml:lang, # including support for xml:lang="" to unset the current language; # recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default # namespace; don't overwrite final status on redirects (scenarios: # redirecting to a URL that returns 304, redirecting to a URL that # redirects to another URL with a different type of redirect); add # support for HTTP 303 redirects
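# --------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the original module). parse()
# accepts a URL, filename, stream, or string; the etag and modified values it
# returns can be fed back in to make the next fetch a conditional HTTP GET
# (the URL below is only an example):
#
#   import feedparser
#   d = feedparser.parse('http://feedparser.org/docs/examples/atom10.xml')
#   print d.feed.title                  # FeedParserDict supports attribute access
#   d2 = feedparser.parse('http://feedparser.org/docs/examples/atom10.xml',
#                         etag=d.get('etag'), modified=d.get('modified'))
#   if d2.get('status') == 304:
#       print d2.debug_message          # server sent no data; feed unchanged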
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sgmllib import SGMLParser
import urllib2, os, re
from datetime import datetime, timedelta

def __getHtmlSource(url, cookie=[]):
    """ fetch the html source of the url """
    txheaders = {
        #~ "Host": "www.humanclock.com",
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; rv:1.7.3)Gecko/20040911 Firefox/0.10.1",
        "Accept": "image/png,*/*;q=0.5",
        "Accept-Language": "fr,en-us;q=0.7,en;q=0.3",
        "Accept-Encoding": "gzip,deflate",
        "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
        "Keep-Alive": "300",
        "Proxy-Connection": "keep-alive",
        'Cookie': ''.join(cookie)
    }
    r = urllib2.Request(url, "", txheaders)
    return urllib2.urlopen(r).read()

class __getSpan(SGMLParser):
    """collect span texts and img basenames from an html fragment"""
    def __init__(self, h):
        SGMLParser.__init__(self)
        self.feed(h)

    def reset(self):
        SGMLParser.reset(self)
        self.list = []
        self.buf = ""

    def handle_data(self, text):
        self.buf += text

    def start_span(self, attrs):
        self.buf = ""

    def end_span(self):
        self.list.append(self.buf)

    def start_img(self, attrs):
        href = [v for k, v in attrs if k == 'src']
        if href:
            self.list.append(os.path.basename(href[0]))

def getDays(dep):
    def __getCookie(url):
        """ return the cookies """
        u = urllib2.urlopen(url)
        cookie = u.info()['set-cookie']
        return cookie

    rep = os.path.dirname(__file__)
    rep = "."  # cache files in the current directory
    recharge = True
    file = os.path.join(rep, "pc%d.html" % dep)
    if os.path.isfile(file):
        # reuse the cached page if it is less than 12 hours old
        if datetime.fromtimestamp(os.stat(file).st_mtime) > datetime.today() - timedelta(hours=12):
            recharge = False
    if recharge:
        url = "http://www.pleinchamp.com/meteo/meteoDept.aspx?dpt_id=%s&menu_id=35" % dep
        c = __getCookie(url)
        htmlSource = __getHtmlSource(url, c)
        open(file, "w").write(htmlSource)
    else:
        htmlSource = open(file, "r").read()
    days = {}
    htmlSource = htmlSource.replace("table", "µ")
    rr = re.compile("""<µ border="0" cellspacing="0" cellpadding="0" width="80" border="1" class="bottomRight">[^µ]+</µ>""", re.DOTALL)
    for i in rr.findall(htmlSource):
        p = __getSpan(i)
        jour = int(p.list[0].split(" ")[1])  # day number
        img = p.list[2]
        tmin = p.list[4]  # and int(p.list[4]) or None
        tmax = p.list[5]  # and int(p.list[5]) or None
        days[jour] = (img, tmin, tmax)
    jours = {}
    j = datetime.now()
    j -= timedelta(1)  # yesterday
    for i in range(10):
        if j.day in days:
            jours[j.date()] = days[j.day]
        j += timedelta(1)  # next day
    return jours

################################################################################
if __name__ == "__main__":
    print getDays(67)
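# Usage note (illustrative): getDays() returns a dict mapping datetime.date
# objects to (icon filename, min temperature, max temperature) tuples scraped
# from the forecast table, so a caller might print a 10-day outlook with:
#
#   days = getDays(67)
#   for day in sorted(days.keys()):
#       img, tmin, tmax = days[day]
#       print day, img, tmin, tmax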
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2005 alexisoft (manatlan[at]gmail(dot)com)
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
from common.template import Template
import feedparser

__title__ = u"News RSS"
__order__ = 3000
__author__ = "alexisoft/manatlan"
__about__ = "browse your rss feeds"

class main:
    def __init__(self):
        # initialization ...
        if not self.conf.items():
            # no feeds configured yet:
            # seed the configuration with some defaults ...
            liste = [
                ("Le Monde international", "http://www.lemonde.fr/rss/sequence/0,2-3210,1-0,0.xml"),
                ("Le Monde france", "http://www.lemonde.fr/rss/sequence/0,2-3224,1-0,0.xml"),
                ("Linuxfr", "http://linuxfr.org/backend/news-homepage/rss20.rss"),
                ("Linuxfr journaux", "http://linuxfr.org/backend/journaux/rss20.rss"),
                ("Freenews", "http://www.freenews.fr/feeds/rss.php"),
                ("Ratatium", "http://www.ratiatum.com/rss/news.rss"),
                ("Daily python", "http://www.pythonware.com/daily/rss2.xml"),
                ("Planet Ubuntu", "http://planet.ubuntu-fr.org/rss.php"),
                ("google-news une", "http://www.xasa.com/news/xml/fr/top.xml"),
                ("google-news france", "http://www.xasa.com/news/xml/fr/nation.xml"),
                ("google-news inter", "http://www.xasa.com/news/xml/fr/world.xml")
            ]
            c = 1
            for titre, url in liste:
                self.conf["feed%03d" % c] = titre, url
                c += 1
            self.save()

    def index(self):
        array = []
        feeds = self.conf.keys()
        feeds.sort()
        idx = 1
        for key in feeds:
            nom, url = self.conf[key]
            array.append((key, nom))
            idx += 1
        return Template({"BACK": "/index"})

    def view(self, feed):
        title, url = self.conf[feed]
        rss = feedparser.parse(url)
        array = []
        name = rss.feed.title
        description = True
        i = 0
        for item in rss.entries:
            i += 1
            if not hasattr(item, 'summary'):
                description = False
            array.append((str(i), item.title))
        return Template({"BACK": "index"})

    def item(self, num, feed):
        title, url = self.conf[feed]
        rss = feedparser.parse(url)
        item = rss.entries[int(num) - 1]
        if not item:
            return "No RSS item found for num = %s and feed = %s" % (num, feed)
        name = item.title
        description = item.summary
        return Template({"BACK": "view?feed=" + feed})
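# Note (hypothetical example): feeds are stored in self.conf under "feedNNN"
# keys mapping to (title, url) tuples, so another feed could be added with:
#
#   self.conf["feed012"] = (u"Planet Python", "http://planetpython.org/rss20.xml")
#   self.save()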
Python
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ #__version__ = "pre-3.3-" + "$Revision: 1.51 $"[11:15] + "-cvs" __version__ = "3.3" __license__ = "Python" __copyright__ = "Copyright 2002-4, Mark Pilgrim" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. This is off by default because of reports of crashing on some # platforms. If it crashes for you, please submit a bug report with your OS # platform, Python version, and the URL of the feed you were attempting to parse. # Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> TIDY_MARKUP = 0 # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # timeoutsocket allows feedparser to time out rather than hang forever on ultra-slow servers. # Python 2.3 now has this functionality available in the standard socket library, so under # 2.3 you don't need to install anything. But you probably should anyway, because the socket # module is buggy and timeoutsocket is better. try: import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py timeoutsocket.setDefaultSocketTimeout(20) except ImportError: import socket if hasattr(socket, 'setdefaulttimeout'): socket.setdefaulttimeout(20) import urllib, urllib2 _mxtidy = None if TIDY_MARKUP: try: from mx.Tidy import Tidy as _mxtidy except: pass # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
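# For instance (hypothetical), an embedding application that ships PyXML could
# steer feedparser toward PyXML's expat driver before calling parse():
#
#   import feedparser
#   feedparser.PREFERRED_XML_PARSERS = ["xml.sax.drivers2.drv_pyexpat"]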
try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace("&", "&amp;") data = data.replace(">", "&gt;") data = data.replace("<", "&lt;") return data # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. # Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # ---------- don't touch these ---------- class CharacterEncodingOverride(Exception): pass class CharacterEncodingUnknown(Exception): pass class NonXMLContentType(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): def __getitem__(self, key): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'modified', 'date_parsed': 'modified_parsed', 'description': ['tagline', 'summary']} realkey = keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def has_key(self, key): return hasattr(self, key) or UserDict.has_key(self, key) def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = 
string.maketrans( \ "".join(map(chr, range(256))), "".join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) class _FeedParserMixin: namespaces = {"": "", "http://backend.userland.com/rss": "", "http://blogs.law.harvard.edu/tech/rss": "", "http://purl.org/rss/1.0/": "", "http://my.netscape.com/rdf/simple/0.9/": "", "http://example.com/newformat#": "", "http://example.com/necho": "", "http://purl.org/echo/": "", "uri/of/echo/namespace#": "", "http://purl.org/pie/": "", "http://purl.org/atom/ns#": "", "http://purl.org/rss/1.0/modules/rss091#": "", "http://webns.net/mvcb/": "admin", "http://purl.org/rss/1.0/modules/aggregation/": "ag", "http://purl.org/rss/1.0/modules/annotate/": "annotate", "http://media.tangent.org/rss/1.0/": "audio", "http://backend.userland.com/blogChannelModule": "blogChannel", "http://web.resource.org/cc/": "cc", "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons", "http://purl.org/rss/1.0/modules/company": "co", "http://purl.org/rss/1.0/modules/content/": "content", "http://my.theinfo.org/changed/1.0/rss/": "cp", "http://purl.org/dc/elements/1.1/": "dc", "http://purl.org/dc/terms/": "dcterms", "http://purl.org/rss/1.0/modules/email/": "email", "http://purl.org/rss/1.0/modules/event/": "ev", "http://postneo.com/icbm/": "icbm", "http://purl.org/rss/1.0/modules/image/": "image", "http://xmlns.com/foaf/0.1/": "foaf", "http://freshmeat.net/rss/fm/": "fm", "http://purl.org/rss/1.0/modules/link/": "l", "http://madskills.com/public/xml/rss/module/pingback/": "pingback", "http://prismstandard.org/namespaces/1.2/basic/": "prism", "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", "http://www.w3.org/2000/01/rdf-schema#": "rdfs", "http://purl.org/rss/1.0/modules/reference/": "ref", "http://purl.org/rss/1.0/modules/richequiv/": "reqv", "http://purl.org/rss/1.0/modules/search/": "search", "http://purl.org/rss/1.0/modules/slash/": "slash", "http://purl.org/rss/1.0/modules/servicestatus/": "ss", "http://hacks.benhammersley.com/rss/streaming/": "str", "http://purl.org/rss/1.0/modules/subscription/": "sub", "http://purl.org/rss/1.0/modules/syndication/": "sy", "http://purl.org/rss/1.0/modules/taxonomy/": "taxo", "http://purl.org/rss/1.0/modules/threading/": "thr", "http://purl.org/rss/1.0/modules/textinput/": "ti", "http://madskills.com/public/xml/rss/module/trackback/":"trackback", "http://wellformedweb.org/CommentAPI/": "wfw", "http://purl.org/rss/1.0/modules/wiki/": "wiki", "http://schemas.xmlsoap.org/soap/envelope/": "soap", "http://www.w3.org/1999/xhtml": "xhtml", "http://www.w3.org/XML/1998/namespace": "xml" } can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'comments', 'license'] can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description'] can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'copyright', 'description'] html_types = ['text/html', 'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): if _debug: sys.stderr.write("initializing FeedParser\n") self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = '' # feed type/version, see SUPPORTED_VERSIONS # the following are used internally to track state; # some of this is kind of out of control and should # probably be refactored into a finite state machine self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 
self.inauthor = 0 self.incontributor = 0 self.contentparams = FeedParserDict() self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or '' self.lang = baselang or None if baselang: self.feeddata['language'] = baselang def unknown_starttag(self, tag, attrs): if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) # normalize attrs attrs = [(k.lower(), v) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri self.baseuri = baseuri lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang self.lang = lang self.basestack.append(baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and self.contentparams.get('mode') == 'escaped': # element declared itself as escaped markup, but it isn't really self.contentparams['mode'] = 'xml' if self.incontent and self.contentparams.get('mode') == 'xml': # Note: probably shouldn't simply recreate localname here, but # our namespace handling isn't actually 100% correct in cases where # the feed redefines the default namespace (which is actually # the usual case for inline content, thanks Sam), so here we # cheat and just reconstruct the element based on localname # because that compensates for the bugs in our namespace handling. # This will horribly munge inline content with non-empty qnames, # but nobody actually does that, so I'm not fixing it. 
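# [Editor's note] The xml:base / xml:lang bookkeeping above is what makes
# relative links in feeds resolve correctly. An illustrative sketch with
# made-up data, shown commented-out so it does not disturb the surrounding
# method body (parse() accepts raw XML strings as well as URLs):
#
#   import feedparser
#   atom = '''<feed version="0.3" xmlns="http://purl.org/atom/ns#"
#                   xml:base="http://example.org/" xml:lang="en">
#     <title>Demo</title>
#     <entry><title>First</title>
#       <link rel="alternate" type="text/html" href="/posts/1"/>
#     </entry>
#   </feed>'''
#   d = feedparser.parse(atom)
#   d.feed.language     # -> 'en' (xml:lang captured on the root element)
#   d.entries[0].link   # -> 'http://example.org/posts/1' (resolved via xml:base)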
tag = tag.split(':')[-1] return self.handle_data("<%s%s>" % (tag, "".join([' %s="%s"' % t for t in attrs])), escape=0) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: return self.push(prefix + suffix, 1) def unknown_endtag(self, tag): if _debug: sys.stderr.write('end %s\n' % tag) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and self.contentparams.get('mode') == 'escaped': # element declared itself as escaped markup, but it isn't really self.contentparams['mode'] = 'xml' if self.incontent and self.contentparams.get('mode') == 'xml': tag = tag.split(':')[-1] self.handle_data("</%s>" % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] def handle_charref(self, ref): # called for each character reference, e.g. for "&#160;", ref will be "160" if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = "&#%s;" % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = unichr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for "&copy;", ref will be "copy" if not self.elementstack: return if _debug: sys.stderr.write("entering handle_entityref with %s\n" % ref) if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref else: # entity resolution graciously donated by Aaron Swartz def name2cp(k): import htmlentitydefs if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3 return htmlentitydefs.name2codepoint[k] k = htmlentitydefs.entitydefs[k] if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1 return ord(k) try: name2cp(ref) except KeyError: text = "&%s;" % ref else: text = unichr(name2cp(ref)).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('mode') == 'xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. <!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. 
<?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if _debug: sys.stderr.write("entering parse_declaration\n") if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) return k+1 def trackNamespace(self, prefix, uri): if (prefix, uri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: self.version = 'rss090' if uri == 'http://purl.org/rss/1.0/' and not self.version: self.version = 'rss10' if not prefix: return if uri.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace uri = 'http://backend.userland.com/rss' if self.namespaces.has_key(uri): self.namespacemap[prefix] = self.namespaces[uri] def resolveURI(self, uri): return urlparse.urljoin(self.baseuri or '', uri) def decodeEntities(self, element, data): return data def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() output = "".join(pieces) output = output.strip() if not expectingText: return output # decode base64 content if self.contentparams.get('mode') == 'base64' and base64: try: output = base64.decodestring(output) except binascii.Error: pass except binascii.Incomplete: pass # resolve relative URIs if (element in self.can_be_relative_uri) and output: output = self.resolveURI(output) # decode entities within embedded markup output = self.decodeEntities(element, output) # resolve relative URIs within embedded markup if self.contentparams.get('type', 'text/html') in self.html_types: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding) # sanitize embedded markup if self.contentparams.get('type', 'text/html') in self.html_types: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding) if self.encoding and (type(output) == types.StringType): try: output = unicode(output, self.encoding) except: pass # store output in appropriate place(s) if self.inentry: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif element == 'category': self.entries[-1][element] = output domain = self.entries[-1]['categories'][-1][0] self.entries[-1]['categories'][-1] = (domain, output) elif element == 'source': self.entries[-1]['source']['value'] = output elif element == 'link': self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif self.infeed and (not self.intextinput) and (not self.inimage): if element == 'description': element = 'tagline' self.feeddata[element] = output if element == 'category': domain = self.feeddata['categories'][-1][0] self.feeddata['categories'][-1] = (domain, output) elif element == 'link': self.feeddata['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) 
contentparams['value'] = output self.feeddata[element + '_detail'] = contentparams return output def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos <> -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _save(self, key, value): if self.inentry: self.entries[-1].setdefault(key, value) elif self.feeddata: self.feeddata.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': 'rss091u', '0.92': 'rss092', '0.93': 'rss093', '0.94': 'rss094'} if not self.version: attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = 'rss20' else: self.version = 'rss' def _start_dlhottitles(self, attrsD): self.version = 'hotrss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) _start_feedinfo = _start_channel def _cdf_common(self, attrsD): if attrsD.has_key('lastmod'): self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if attrsD.has_key('href'): self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = 'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): self.inimage = 1 self.push('image', 0) context = self._getContext() context.setdefault('image', FeedParserDict()) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): self.intextinput = 1 self.push('textinput', 0) context = self._getContext() context.setdefault('textinput', FeedParserDict()) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) _start_managingeditor = _start_author _start_dc_author = _start_author _start_dc_creator = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) def _end_name(self): value = self.pop('name') if self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['textinput']['name'] = value def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['height'] = 
value def _start_url(self, attrsD): self.push('url', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('url') if self.inauthor: self._save_author('url', value) elif self.incontributor: self._save_contributor('url', value) elif self.inimage: context = self._getContext() context['image']['url'] = value elif self.intextinput: context = self._getContext() context['textinput']['link'] = value _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) def _end_email(self): value = self.pop('email') if self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) pass def _getContext(self): if self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value): context = self._getContext() context.setdefault('author_detail', FeedParserDict()) context['author_detail'][key] = value self._sync_author_detail() def _save_contributor(self, key, value): context = self._getContext() context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%s_detail' % key) if detail: name = detail.get('name') email = detail.get('email') if name and email: context[key] = "%s (%s)" % (name, email) elif name: context[key] = name elif email: context[key] = email else: author = context.get(key) if not author: return emailmatch = re.search(r"""(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))""", author) if not emailmatch: return email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, '') author = author.replace('()', '') author = author.strip() if author and (author[0] == '('): author = author[1:] if author and (author[-1] == ')'): author = author[:-1] author = author.strip() context.setdefault('%s_detail' % key, FeedParserDict()) context['%s_detail' % key]['name'] = author context['%s_detail' % key]['email'] = email def _start_tagline(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('tagline', 1) _start_subtitle = _start_tagline def _end_tagline(self): value = self.pop('tagline') self.incontent -= 1 self.contentparams.clear() if self.infeed: self.feeddata['description'] = value _end_subtitle = _end_tagline def _start_copyright(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('copyright', 1) _start_dc_rights = _start_copyright def _end_copyright(self): self.pop('copyright') self.incontent -= 1 self.contentparams.clear() _end_dc_rights = _end_copyright def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item _start_product = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def 
_end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_dcterms_issued(self, attrsD): self.push('issued', 1) _start_issued = _start_dcterms_issued def _end_dcterms_issued(self): value = self.pop('issued') self._save('issued_parsed', _parse_date(value)) _end_issued = _end_dcterms_issued def _start_dcterms_created(self, attrsD): self.push('created', 1) _start_created = _start_dcterms_created def _end_dcterms_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value)) _end_created = _end_dcterms_created def _start_dcterms_modified(self, attrsD): self.push('modified', 1) _start_modified = _start_dcterms_modified _start_dc_date = _start_dcterms_modified _start_pubdate = _start_dcterms_modified def _end_dcterms_modified(self): value = self.pop('modified') parsed_value = _parse_date(value) self._save('modified_parsed', parsed_value) _end_modified = _end_dcterms_modified _end_dc_date = _end_dcterms_modified _end_pubdate = _end_dcterms_modified def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): self._save('expired_parsed', _parse_date(self.pop('expired'))) def _start_cc_license(self, attrsD): self.push('license', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('license') def _start_creativecommons_license(self, attrsD): self.push('license', 1) def _end_creativecommons_license(self): self.pop('license') def _start_category(self, attrsD): self.push('category', 1) domain = self._getAttribute(attrsD, 'domain') cats = [] if self.inentry: cats = self.entries[-1].setdefault('categories', []) elif self.infeed: cats = self.feeddata.setdefault('categories', []) cats.append((domain, None)) _start_dc_subject = _start_category _start_keywords = _start_category def _end_category(self): self.pop('category') _end_dc_subject = _end_category _end_keywords = _end_category def _start_cloud(self, attrsD): self.feeddata['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', 'alternate') attrsD.setdefault('type', 'text/html') if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry if self.inentry: self.entries[-1].setdefault('links', []) self.entries[-1]['links'].append(FeedParserDict(attrsD)) elif self.infeed: self.feeddata.setdefault('links', []) self.feeddata['links'].append(FeedParserDict(attrsD)) if attrsD.has_key('href'): expectingText = 0 if attrsD.get('type', '') in self.html_types: if self.inentry: self.entries[-1]['link'] = attrsD['href'] elif self.infeed: self.feeddata['link'] = attrsD['href'] else: self.push('link', expectingText) _start_producturl = _start_link def _end_link(self): value = self.pop('link') if self.intextinput: context = self._getContext() context['textinput']['link'] = value if self.inimage: context = self._getContext() context['image']['link'] = value _end_producturl = _end_link def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) def _end_guid(self): value = self.pop('id') self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) if self.guidislink: # guid acts as link, but only if "ispermalink" is 
not present or is "true", # and only if the item doesn't already have a link element self._save('link', value) def _start_id(self, attrsD): self.push('id', 1) def _end_id(self): value = self.pop('id') def _start_title(self, attrsD): self.incontent += 1 if _debug: sys.stderr.write('attrsD.xml:lang = %s\n' % attrsD.get('xml:lang')) if _debug: sys.stderr.write('self.lang = %s\n' % self.lang) self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('title', self.infeed or self.inentry) _start_dc_title = _start_title def _end_title(self): value = self.pop('title') self.incontent -= 1 self.contentparams.clear() if self.intextinput: context = self._getContext() context['textinput']['title'] = value elif self.inimage: context = self._getContext() context['image']['title'] = value _end_dc_title = _end_title def _start_description(self, attrsD, default_content_type='text/html'): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', default_content_type), 'language': self.lang, 'base': self.baseuri}) self.push('description', self.infeed or self.inentry) def _start_abstract(self, attrsD): return self._start_description(attrsD, 'text/plain') def _end_description(self): value = self.pop('description') self.incontent -= 1 self.contentparams.clear() context = self._getContext() if self.intextinput: context['textinput']['description'] = value elif self.inimage: context['image']['description'] = value # elif self.inentry: # context['summary'] = value # elif self.infeed: # context['tagline'] = value _end_abstract = _end_description def _start_info(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('info', 1) def _end_info(self): self.pop('info') self.incontent -= 1 self.contentparams.clear() def _start_generator(self, attrsD): if attrsD: if attrsD.has_key('url'): attrsD['url'] = self.resolveURI(attrsD['url']) self.feeddata['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') if self.feeddata.has_key('generator_detail'): self.feeddata['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self.feeddata['generator_detail'] = FeedParserDict({"url": value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'escaped'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('summary', 1) def _end_summary(self): value = self.pop('summary') if self.entries: self.entries[-1]['description'] = value self.incontent -= 1 self.contentparams.clear() def _start_enclosure(self, attrsD): if self.inentry: self.entries[-1].setdefault('enclosures', []) self.entries[-1]['enclosures'].append(FeedParserDict(attrsD)) def _start_source(self, attrsD): if self.inentry: self.entries[-1]['source'] = FeedParserDict(attrsD) self.push('source', 1) 
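# [Editor's note] Worth spelling out, since several handlers above funnel into
# the same keys: an RSS <description> is stored on the entry as 'summary' (see
# pop()), is run through _sanitizeHTML because 'description' is listed in
# can_contain_dangerous_markup, and remains reachable under its original name
# via the FeedParserDict keymap. Illustrative sketch with made-up data, shown
# commented-out so it does not disturb the surrounding class body:
#
#   import feedparser
#   rss = '''<rss version="2.0"><channel><title>Demo</title>
#     <item><title>First</title>
#       <description>&lt;p&gt;hello&lt;/p&gt;&lt;script&gt;alert(1)&lt;/script&gt;</description>
#     </item></channel></rss>'''
#   d = feedparser.parse(rss)
#   d.entries[0].summary       # -> '<p>hello</p>' (script element stripped)
#   d.entries[0].description   # same value, via the FeedParserDict keymap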
def _end_source(self): self.pop('source') def _start_content(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'), 'type': attrsD.get('type', 'text/plain'), 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) def _start_prodlink(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': attrsD.get('mode', 'xml'), 'type': attrsD.get('type', 'text/html'), 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) def _start_body(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': 'xml', 'type': 'application/xhtml+xml', 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.incontent += 1 self.contentparams = FeedParserDict({'mode': 'escaped', 'type': 'text/html', 'language': self.lang, 'base': self.baseuri}) self.push('content', 1) _start_fullitem = _start_content_encoded def _end_content(self): value = self.pop('content') if self.contentparams.get('type') in (['text/plain'] + self.html_types): self._save('description', value) self.incontent -= 1 self.contentparams.clear() _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content _end_prodlink = _end_content if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): if _debug: sys.stderr.write('trying StrictFeedParser\n') xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None def startPrefixMapping(self, prefix, uri): self.trackNamespace(prefix, uri) def startElementNS(self, name, qname, attrs): namespace, localname = name namespace = str(namespace or '') if namespace.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace namespace = 'http://backend.userland.com/rss' prefix = self.namespaces.get(namespace, 'unknown') if prefix: localname = prefix + ':' + localname localname = str(localname).lower() # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. 
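# [Editor's note] The namespace matching above is what lets extension elements
# such as Dublin Core land on common keys: dc:creator is routed through
# _start_dc_creator / _end_dc_creator, i.e. the author handlers. Illustrative
# sketch with made-up data, shown commented-out so it does not disturb the
# surrounding method body:
#
#   import feedparser
#   rss = '''<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">
#     <channel><title>Demo</title>
#       <item><title>First</title><dc:creator>Jane Doe</dc:creator></item>
#     </channel></rss>'''
#   d = feedparser.parse(rss)
#   d.entries[0].author   # -> 'Jane Doe'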
attrsD = {} for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): prefix = self.namespaces.get(namespace, '') if prefix: attrlocalname = prefix + ":" + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) self.unknown_starttag(localname, attrsD.items()) # def resolveEntity(self, publicId, systemId): # return _StringIO() def characters(self, text): self.handle_data(text) def endElementNS(self, name, qname): namespace, localname = name namespace = str(namespace) prefix = self.namespaces.get(namespace, '') if prefix: localname = prefix + ':' + localname localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', 'img', 'input', 'isindex', 'link', 'meta', 'param'] def __init__(self, encoding): self.encoding = encoding if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) data = re.sub(r'<(\S+)/>', r'<\1></\1>', data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') if self.encoding and (type(data) == types.UnicodeType): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) def normalize_attrs(self, attrs): # utility method to be called by descendants attrs = [(k.lower(), v) for k, v in attrs] # if self.encoding: # if _debug: sys.stderr.write('normalize_attrs, encoding=%s\n' % self.encoding) # attrs = [(k, v.encode(self.encoding)) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")] if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag) strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs]) if tag in self.elements_no_end_tag: self.pieces.append("<%(tag)s%(strattrs)s />" % locals()) else: self.pieces.append("<%(tag)s%(strattrs)s>" % locals()) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be "pre" # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%(tag)s>" % locals()) def handle_charref(self, ref): # called for each character reference, e.g. for "&#160;", ref will be "160" # Reconstruct the original character reference. self.pieces.append("&#%(ref)s;" % locals()) def handle_entityref(self, ref): # called for each entity reference, e.g. for "&copy;", ref will be "copy" # Reconstruct the original entity reference. self.pieces.append("&%(ref)s;" % locals()) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. 
self.pieces.append("<!--%(text)s-->" % locals()) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. self.pieces.append("<?%(text)s>" % locals()) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append("<!%(text)s>" % locals()) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def output(self): """Return processed HTML as a single string""" return "".join([str(p) for p in self.pieces]) class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if self.contentparams.get('mode') == 'escaped': data = data.replace('&lt;', '<') data = data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") return data class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src')] def __init__(self, baseuri, encoding): _BaseHTMLProcessor.__init__(self, encoding) self.baseuri = baseuri def resolveURI(self, uri): return urlparse.urljoin(self.baseuri, uri) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding): if _debug: sys.stderr.write("entering _resolveRelativeURIs\n") p = _RelativeURIResolver(baseURI, encoding) p.feed(htmlSource) return p.output() class _HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 
'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width'] unacceptable_elements_with_end_tag = ['script', 'applet'] def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 def unknown_starttag(self, tag, attrs): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 return attrs = self.normalize_attrs(attrs) attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def _sanitizeHTML(htmlSource, encoding): p = _HTMLSanitizer(encoding) p.feed(htmlSource) data = p.output() if _mxtidy and TIDY_MARKUP: nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, output_xhtml=1, numeric_entities=1, wrap=0) if data.count('<body'): data = data.split('<body', 1)[1] if data.count('>'): data = data.split('>', 1)[1] if data.count('</body'): data = data.split('</body', 1)[0] data = data.strip().replace('\r\n', '\n') return data class _FeedURLHandler(urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, headers): if ((code / 100) == 3) and (code != 304): return self.http_error_302(req, fp, code, msg, headers) infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code return infourl def http_error_302(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl def http_error_301(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl http_error_300 = http_error_302 http_error_303 = http_error_302 http_error_307 = http_error_302 def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): """URL, filename, or string --> stream This function lets you define parsers that take any input source (URL, pathname to local or network file, or actual data as a string) and deal with it in a uniform manner. 
Returned object is guaranteed to have all the basic stdio read methods (read, readline, readlines). Just .close() the object when you're done with it. If the etag argument is supplied, it will be used as the value of an If-None-Match request header. If the modified argument is supplied, it must be a tuple of 9 integers as returned by gmtime() in the standard Python time module. This MUST be in GMT (Greenwich Mean Time). The formatted date/time will be used as the value of an If-Modified-Since request header. If the agent argument is supplied, it will be used as the value of a User-Agent request header. If the referrer argument is supplied, it will be used as the value of a Referer[sic] request header. If handlers is supplied, it is a list of handlers used to build a urllib2 opener. """ if hasattr(url_file_stream_or_string, "read"): return url_file_stream_or_string if url_file_stream_or_string == "-": return sys.stdin if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): if not agent: agent = USER_AGENT # test for inline user:password for basic auth auth = None if base64: urltype, rest = urllib.splittype(url_file_stream_or_string) realhost, rest = urllib.splithost(rest) if realhost: user_passwd, realhost = urllib.splituser(realhost) if user_passwd: url_file_stream_or_string = "%s://%s%s" % (urltype, realhost, rest) auth = base64.encodestring(user_passwd).strip() # try to open with urllib2 (to use optional headers) request = urllib2.Request(url_file_stream_or_string) request.add_header("User-Agent", agent) if etag: request.add_header("If-None-Match", etag) if modified: # format into an RFC 1123-compliant timestamp. We can't use # time.strftime() since the %a and %b directives can be affected # by the current locale, but RFC 2616 states that dates must be # in English. short_weekdays = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] request.add_header("If-Modified-Since", "%s, %02d %s %04d %02d:%02d:%02d GMT" % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) if referrer: request.add_header("Referer", referrer) if gzip and zlib: request.add_header("Accept-encoding", "gzip, deflate") elif gzip: request.add_header("Accept-encoding", "gzip") elif zlib: request.add_header("Accept-encoding", "deflate") else: request.add_header("Accept-encoding", "") if auth: request.add_header("Authorization", "Basic %s" % auth) if ACCEPT_HEADER: request.add_header("Accept", ACCEPT_HEADER) opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent try: return opener.open(request) finally: opener.close() # JohnD # try to open with native open function (if url_file_stream_or_string is a filename) try: return open(url_file_stream_or_string) except: pass # treat url_file_stream_or_string as string return _StringIO(str(url_file_stream_or_string)) _date_handlers = [] def registerDateHandler(func): """Register a date handler function (takes string, returns 9-tuple date in GMT)""" _date_handlers.insert(0, func) # ISO-8601 date parsing routines written by Fazal Majid. # The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 # parser is beyond the scope of feedparser and would be a worthwhile addition # to the Python library. 
# A single regular expression cannot parse ISO 8601 date formats into groups # as the standard is highly irregular (for instance is 030104 2003-01-04 or # 0301-04-01), so we use templates instead. # Please note the order in templates is significant because we need a # greedy match. _iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO', 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', '-YY-?MM', '-OOO', '-YY', '--MM-?DD', '--MM', '---DD', 'CC', ''] _iso8601_re = [ tmpl.replace( 'YYYY', r'(?P<year>\d{4})').replace( 'YY', r'(?P<year>\d\d)').replace( 'MM', r'(?P<month>[01]\d)').replace( 'DD', r'(?P<day>[0123]\d)').replace( 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( 'CC', r'(?P<century>\d\d$)') + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + r'(:(?P<second>\d{2}))?' + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' for tmpl in _iso8601_tmpl] del tmpl _iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] del regex def _parse_date_iso8601(dateString): """Parse a variety of ISO-8601-compatible formats like 20040105""" m = None for _iso8601_match in _iso8601_matches: m = _iso8601_match(dateString) if m: break if not m: return if m.span() == (0, 0): return params = m.groupdict() ordinal = params.get("ordinal", 0) if ordinal: ordinal = int(ordinal) else: ordinal = 0 year = params.get("year", "--") if not year or year == "--": year = time.gmtime()[0] elif len(year) == 2: # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993 year = 100 * int(time.gmtime()[0] / 100) + int(year) else: year = int(year) month = params.get("month", "-") if not month or month == "-": # ordinals are NOT normalized by mktime, we simulate them # by setting month=1, day=ordinal if ordinal: month = 1 else: month = time.gmtime()[1] month = int(month) day = params.get("day", 0) if not day: # see above if ordinal: day = ordinal elif params.get("century", 0) or \ params.get("year", 0) or params.get("month", 0): day = 1 else: day = time.gmtime()[2] else: day = int(day) # special case of the century - is the first year of the 21st century # 2000 or 2001 ? The debate goes on... if "century" in params.keys(): year = (int(params["century"]) - 1) * 100 + 1 # in ISO 8601 most fields are optional for field in ["hour", "minute", "second", "tzhour", "tzmin"]: if not params.get(field, None): params[field] = 0 hour = int(params.get("hour", 0)) minute = int(params.get("minute", 0)) second = int(params.get("second", 0)) # weekday is normalized by mktime(), we can ignore it weekday = 0 # daylight savings is complex, but not needed for feedparser's purposes # as time zones, if specified, include mention of whether it is active # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and # and most implementations have DST bugs daylight_savings_flag = 0 tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag] # ISO 8601 time zone adjustments tz = params.get("tz") if tz and tz != "Z": if tz[0] == "-": tm[3] += int(params.get("tzhour", 0)) tm[4] += int(params.get("tzmin", 0)) elif tz[0] == "+": tm[3] -= int(params.get("tzhour", 0)) tm[4] -= int(params.get("tzmin", 0)) else: return None # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) # which is guaranteed to normalize d/m/y/h/m/s. # Many implementations have bugs, but we'll pretend they don't. return time.localtime(time.mktime(tm)) registerDateHandler(_parse_date_iso8601) # 8-bit date handling routines written by ytrewq1. 
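# [Editor's note] Before the locale-specific handlers below, an illustrative
# sketch (not part of the original source) of how third-party code extends
# date parsing through registerDateHandler(), defined above. The handler is
# hypothetical: it accepts bare Unix timestamps and declines everything else
# so the remaining handlers still get their turn.
import time

def _parse_date_unix_timestamp(dateString):
    """Hypothetical handler: treat an all-digit date string as a Unix epoch."""
    if not dateString.isdigit():
        return None                      # let the other registered handlers try
    return time.gmtime(int(dateString))  # 9-tuple in GMT, as _parse_date expects
registerDateHandler(_parse_date_unix_timestamp)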
_korean_year = u'\ub144' # b3e2 in euc-kr _korean_month = u'\uc6d4' # bff9 in euc-kr _korean_day = u'\uc77c' # c0cf in euc-kr _korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr _korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr _korean_onblog_date_re = \ re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ (_korean_year, _korean_month, _korean_day)) _korean_nate_date_re = \ re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ (_korean_am, _korean_pm)) def _parse_date_onblog(dateString): """Parse a string according to the OnBlog 8-bit date format""" m = _korean_onblog_date_re.match(dateString) if not m: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("OnBlog date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_onblog) def _parse_date_nate(dateString): """Parse a string according to the Nate 8-bit date format""" m = _korean_nate_date_re.match(dateString) if not m: return hour = int(m.group(5)) ampm = m.group(4) if (ampm == _korean_pm): hour += 12 hour = str(hour) if len(hour) == 1: hour = '0' + hour w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("Nate date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_nate) _mssql_date_re = \ re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\.\d+') def _parse_date_mssql(dateString): """Parse a string according to the MS SQL date format""" m = _mssql_date_re.match(dateString) if not m: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s" % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write("MS SQL date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_mssql) # Unicode strings for Greek date strings _greek_months = \ { \ u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \ { \ u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in 
iso-8859-7 u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \ re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') def _parse_date_greek(dateString): """Parse a string according to a Greek 8-bit date format.""" m = _greek_date_format_re.match(dateString) if not m: return try: wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] except: return rfc822date = "%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s" % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} if _debug: sys.stderr.write("Greek date parsed as: %s\n" % rfc822date) return _parse_date_rfc822(rfc822date) registerDateHandler(_parse_date_greek) # Unicode strings for Hungarian date strings _hungarian_months = \ { \ u'janu\u00e1r': u'01', # e1 in iso-8859-2 u'febru\u00e1ri': u'02', # e1 in iso-8859-2 u'm\u00e1rcius': u'03', # e1 in iso-8859-2 u'\u00e1prilis': u'04', # e1 in iso-8859-2 u'm\u00e1ujus': u'05', # e1 in iso-8859-2 u'j\u00fanius': u'06', # fa in iso-8859-2 u'j\u00falius': u'07', # fa in iso-8859-2 u'augusztus': u'08', u'szeptember': u'09', u'okt\u00f3ber': u'10', # f3 in iso-8859-2 u'november': u'11', u'december': u'12', } _hungarian_date_format_re = \ re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') def _parse_date_hungarian(dateString): """Parse a string according to a Hungarian 8-bit date format.""" m = _hungarian_date_format_re.match(dateString) if not m: return try: month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour except: return w3dtfdate = "%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s" % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} if _debug: sys.stderr.write("Hungarian date parsed as: %s\n" % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_hungarian) # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by # Drake and licensed under the Python license. 
Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group("year")) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group("julian") if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group("month") day = 1 if month is None: month = 1 else: month = int(month) day = m.group("day") if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group("hours") if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group("minutes")) seconds = m.group("seconds") if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): """Return the Time Zone Designator as an offset in seconds from UTC.""" if not m: return 0 tzd = m.group("tzd") if not tzd: return 0 if tzd == "Z": return 0 hours = int(m.group("tzdhours")) minutes = m.group("tzdminutes") if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == "+": return -offset return offset __date_re = ("(?P<year>\d\d\d\d)" "(?:(?P<dsep>-|)" "(?:(?P<julian>\d\d\d)" "|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?") __tzd_re = "(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)" __tzd_rx = re.compile(__tzd_re) __time_re = ("(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)" "(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?" + __tzd_re) __datetime_re = "%s(?:T%s)?" % (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) def _parse_date_rfc822(dateString): """Parse an RFC822, RFC1123, RFC2822, or asctime-style date""" tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) # rfc822.py defines several time zones, but we define some extra ones. # "ET" is equivalent to "EST", etc. _additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} rfc822._timezones.update(_additional_timezones) registerDateHandler(_parse_date_rfc822) def _parse_date(dateString): """Parses a variety of date formats into a 9-tuple in GMT""" for handler in _date_handlers: try: date9tuple = handler(dateString) if not date9tuple: continue if len(date9tuple) != 9: if _debug: sys.stderr.write("date handler function must return 9-tuple\n") raise ValueError map(int, date9tuple) return date9tuple except Exception, e: if _debug: sys.stderr.write("%s raised %s\n" % (handler.__name__, repr(e))) pass return None def _getCharacterEncoding(http_headers, xml_data): """Get the character encoding of the XML document http_headers is a dictionary xml_data is a raw string (not Unicode) This is so much trickier than it sounds, it's not even funny. 
According to RFC 3023 ("XML Media Types"), if the HTTP Content-Type is application/xml, application/*+xml, application/xml-external-parsed-entity, or application/xml-dtd, the encoding given in the charset parameter of the HTTP Content-Type takes precedence over the encoding given in the XML prefix within the document, and defaults to "utf-8" if neither are specified. But, if the HTTP Content-Type is text/xml, text/*+xml, or text/xml-external-parsed-entity, the encoding given in the XML prefix within the document is ALWAYS IGNORED and only the encoding given in the charset parameter of the HTTP Content-Type header should be respected, and it defaults to "us-ascii" if not specified. Furthermore, discussion on the atom-syntax mailing list with the author of RFC 3023 leads me to the conclusion that any document served with a Content-Type of text/* and no charset parameter must be treated as us-ascii. (We now do this.) And also that it must always be flagged as non-well-formed. (We now do this too.) If Content-Type is unspecified (input was local file or non-HTTP source) or unrecognized (server just got it totally wrong), then go by the encoding given in the XML prefix of the document and default to "iso-8859-1" as per the HTTP specification (RFC 2616). Then, assuming we didn't find a character encoding in the HTTP headers (and the HTTP Content-type allowed us to look in the body), we need to sniff the first few bytes of the XML data and try to determine whether the encoding is ASCII-compatible. Section F of the XML specification shows the way here: http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info If the sniffed encoding is not ASCII-compatible, we need to make it ASCII compatible so that we can sniff further into the XML declaration to find the encoding attribute, which will tell us the true encoding. Of course, none of this guarantees that we will be able to parse the feed in the declared character encoding (assuming it was declared correctly, which many are not). CJKCodecs and iconv_codec help a lot; you should definitely install them if you can. http://cjkpython.i18n.org/ """ def _parseHTTPContentType(content_type): """takes HTTP Content-Type header and returns (content type, charset) If no charset is specified, returns (content type, '') If no content type is specified, returns ('', '') Both return parameters are guaranteed to be lowercase strings """ content_type = content_type or '' content_type, params = cgi.parse_header(content_type) return content_type, params.get('charset', '').replace("'", "") sniffed_xml_encoding = '' xml_encoding = '' true_encoding = '' http_content_type, http_encoding = _parseHTTPContentType(http_headers.get("content-type")) # Must sniff for non-ASCII-compatible character encodings before # searching for XML declaration. 
This heuristic is defined in # section F of the XML specification: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = _ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: # ASCII-compatible pass xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding acceptable_content_type = 0 application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') text_content_types = ('text/xml', 'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): acceptable_content_type = 1 true_encoding = http_encoding or xml_encoding or 'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): acceptable_content_type = 1 true_encoding = http_encoding or 'us-ascii' elif http_content_type.startswith('text/'): true_encoding = http_encoding or 'us-ascii' elif http_headers and (not http_headers.has_key('content-type')): true_encoding = xml_encoding or 'iso-8859-1' else: true_encoding = xml_encoding or 'utf-8' return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type def _toUTF8(data, encoding): """Changes an XML data stream on the fly to specify a new encoding data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already encoding is a string recognized by encodings.aliases """ if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): if _debug: 
sys.stderr.write('stripping BOM\n') if encoding != 'utf-16be': sys.stderr.write('trying utf-16be instead\n') encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16le': sys.stderr.write('trying utf-16le instead\n') encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-8': sys.stderr.write('trying utf-8 instead\n') encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32be': sys.stderr.write('trying utf-32be instead\n') encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32le': sys.stderr.write('trying utf-32le instead\n') encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) declmatch = re.compile('^<\?xml[^>]*?>') newdecl = """<?xml version='1.0' encoding='utf-8'?>""" if declmatch.search(newdata): newdata = declmatch.sub(newdecl, newdata) else: newdata = newdecl + u'\n' + newdata return newdata.encode("utf-8") def _stripDoctype(data): """Strips DOCTYPE from XML document, returns (rss_version, stripped_data) rss_version may be "rss091n" or None stripped_data is the same XML document, minus the DOCTYPE """ entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE) data = entity_pattern.sub('', data) doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE) doctype_results = doctype_pattern.findall(data) doctype = doctype_results and doctype_results[0] or '' if doctype.lower().count('netscape'): version = 'rss091n' else: version = None data = doctype_pattern.sub('', data) return version, data def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): """Parse a feed from a URL, file, stream, or string""" result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if type(handlers) == types.InstanceType: handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' f = None # if feed is gzip-compressed, decompress it if f and data and hasattr(f, "headers"): if gzip and f.headers.get('content-encoding', '') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. Ideally, we should re-request the # feed without the "Accept-encoding: gzip" header, # but we don't. 
result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and f.headers.get('content-encoding', '') == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if hasattr(f, "info"): info = f.info() result["etag"] = info.getheader("ETag") last_modified = info.getheader("Last-Modified") if last_modified: result["modified"] = _parse_date(last_modified) if hasattr(f, "url"): result["url"] = f.url result["status"] = 200 if hasattr(f, "status"): result["status"] = f.status if hasattr(f, "headers"): result["headers"] = f.headers.dict if hasattr(f, "close"): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get("headers", {}) result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ _getCharacterEncoding(http_headers, data) if http_headers and (not acceptable_content_type): if http_headers.has_key('content-type'): bozo_message = '%s is not an XML media type' % http_headers['content-type'] else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) result['version'], data = _stripDoctype(data) baseuri = http_headers.get('content-location', result.get('url')) baselang = http_headers.get('content-language', None) # if server sent 304, we're done if result.get("status", 0) == 304: result['version'] = '' result['debug_message'] = "The feed has not changed since you last checked, " + \ "so the server sent no data. This is a feature, not a bug!" 
return result # if there was a problem downloading, we're done if not data: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding, 'utf-8', 'windows-1252'): if proposed_encoding in tried_encodings: continue if not proposed_encoding: continue try: data = _toUTF8(data, proposed_encoding) known_encoding = 1 use_strict_parser = 1 break except: pass tried_encodings.append(proposed_encoding) if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingUnknown( \ "document encoding unknown, I tried " + \ "%s, %s, utf-8, and windows-1252 but nothing worked" % \ (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingOverride( \ "documented declared as %s, but parsed as %s" % \ (result['encoding'], proposed_encoding)) result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '') feedparser.feed(data) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version return result if __name__ == '__main__': if not sys.argv[1:]: print __doc__ sys.exit(0) else: urls = sys.argv[1:] zopeCompatibilityHack() from pprint import pprint for url in urls: print url print result = parse(url) pprint(result) print #REVISION HISTORY #1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements, # added Simon Fell's test suite #1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections #2.0 - 10/19/2002 # JD - use inchannel to watch out for image and textinput elements which can # also contain title, link, and description elements # JD - check for isPermaLink="false" attribute on guid elements # JD - replaced openAnything with open_resource supporting ETag and # If-Modified-Since request headers # JD - parse now accepts etag, modified, agent, and referrer optional # arguments # JD - modified parse to return a dictionary instead of a tuple so that any # etag or modified information can be returned and cached by the caller #2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything # because of etag/modified, return the old etag/modified to the caller to # indicate why nothing is being returned #2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its # useless. 
Fixes the problem JD was addressing by adding it. #2.1 - 11/14/2002 - MAP - added gzip support #2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent. # start_admingeneratoragent is an example of how to handle elements with # only attributes, no content. #2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify); # also, make sure we send the User-Agent even if urllib2 isn't available. # Match any variation of backend.userland.com/rss namespace. #2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is. #2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's # snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed # project name #2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); # removed unnecessary urllib code -- urllib2 should always be available anyway; # return actual url, status, and full HTTP headers (as result['url'], # result['status'], and result['headers']) if parsing a remote feed over HTTP -- # this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>; # added the latest namespace-of-the-week for RSS 2.0 #2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom # User-Agent (otherwise urllib2 sends two, which confuses some servers) #2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for # inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds #2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or # textInput, and also to return the character encoding (if specified) #2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking # nested divs within content (JohnD); fixed missing sys import (JohanS); # fixed regular expression to capture XML character encoding (Andrei); # added support for Atom 0.3-style links; fixed bug with textInput tracking; # added support for cloud (MartijnP); added support for multiple # category/dc:subject (MartijnP); normalize content model: "description" gets # description (which can come from description, summary, or full content if no # description), "content" gets dict of base/language/type/value (which can come # from content:encoded, xhtml:body, content, or fullitem); # fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang # tracking; fixed bug tracking unknown tags; fixed bug tracking content when # <content> element is not in default namespace (like Pocketsoap feed); # resolve relative URLs in link, guid, docs, url, comments, wfw:comment, # wfw:commentRSS; resolve relative URLs within embedded HTML markup in # description, xhtml:body, content, content:encoded, title, subtitle, # summary, info, tagline, and copyright; added support for pingback and # trackback namespaces #2.7 - 1/5/2004 - MAP - really added support for trackback and pingback # namespaces, as opposed to 2.6 when I said I did but didn't really; # sanitize HTML markup within some elements; added mxTidy support (if # installed) to tidy HTML markup within some elements; fixed indentation # bug in _parse_date (FazalM); use socket.setdefaulttimeout if available # (FazalM); universal date parsing and normalization (FazalM): 'created', modified', # 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', # 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' # and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa #2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; 
and &apos;. fixed memory # leak not closing url opener (JohnD); added dc:publisher support (MarekK); # added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) #2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in # encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); # fixed relative URI processing for guid (skadz); added ICBM support; added # base64 support #2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many # blogspot.com sites); added _debug variable #2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing #3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); # added several new supported namespaces; fixed bug tracking naked markup in # description; added support for enclosure; added support for source; re-added # support for cloud which got dropped somehow; added support for expirationDate #3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking # xml:base URI, one for documents that don't define one explicitly and one for # documents that define an outer and an inner xml:base that goes out of scope # before the end of the document #3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level #3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result["version"] # will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; # added support for creativeCommons:license and cc:license; added support for # full Atom content model in title, tagline, info, copyright, summary; fixed bug # with gzip encoding (not always telling server we support it when we do) #3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail # (dictionary of "name", "url", "email"); map author to author_detail if author # contains name + email address #3.0b8 - 1/28/2004 - MAP - added support for contributor #3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added # support for summary #3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from # xml.util.iso8601 #3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain # dangerous markup; fiddled with decodeEntities (not right); liberalized # date parsing even further #3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); # added support to Atom 0.2 subtitle; added support for Atom content model # in copyright; better sanitizing of dangerous HTML elements with end tags # (script, frameset) #3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, # etc.) 
in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />) #3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under # Python 2.1 #3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; # fixed bug capturing author and contributor URL; fixed bug resolving relative # links in author and contributor URL; fixed bug resolvin relative links in # generator URL; added support for recognizing RSS 1.0; passed Simon Fell's # namespace tests, and included them permanently in the test suite with his # permission; fixed namespace handling under Python 2.1 #3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) #3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 #3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); # use libxml2 (if available) #3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author # name was in parentheses; removed ultra-problematic mxTidy support; patch to # workaround crash in PyXML/expat when encountering invalid entities # (MarkMoraes); support for textinput/textInput #3.0b20 - 4/7/2004 - MAP - added CDF support #3.0b21 - 4/14/2004 - MAP - added Hot RSS support #3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in # results dict; changed results dict to allow getting values with results.key # as well as results[key]; work around embedded illformed HTML with half # a DOCTYPE; work around malformed Content-Type header; if character encoding # is wrong, try several common ones before falling back to regexes (if this # works, bozo_exception is set to CharacterEncodingOverride); fixed character # encoding issues in BaseHTMLProcessor by tracking encoding and converting # from Unicode to raw strings before feeding data to sgmllib.SGMLParser; # convert each value in results to Unicode (if possible), even if using # regex-based parsing #3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain # high-bit characters in attributes in embedded HTML in description (thanks # Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in # FeedParserDict; tweaked FeedParserDict.has_key to return True if asking # about a mapped key #3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and # results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could # cause the same encoding to be tried twice (even if it failed the first time); # fixed DOCTYPE stripping when DOCTYPE contained entity declarations; # better textinput and image tracking in illformed RSS 1.0 feeds #3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed # my blink tag tests #3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that # failed to parse utf-16 encoded feeds; made source into a FeedParserDict; # duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; # added support for image; refactored parse() fallback logic to try other # encodings if SAX parsing fails (previously it would only try other encodings # if re-encoding failed); remove unichr madness in normalize_attrs now that # we're properly tracking encoding in and out of BaseHTMLProcessor; set # feed.language from root-level xml:lang; set entry.id from rdf:about; # send Accept header #3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between # iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are # windows-1252); fixed regression that could cause the same encoding to be # 
tried twice (even if it failed the first time) #3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; # recover from malformed content-type header parameter with no equals sign # ("text/xml; charset:iso-8859-1") #3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities # to Unicode equivalents in illformed feeds (aaronsw); added and # passed tests for converting character entities to Unicode equivalents # in illformed feeds (aaronsw); test for valid parsers when setting # XML_AVAILABLE; make version and encoding available when server returns # a 304; add handlers parameter to pass arbitrary urllib2 handlers (like # digest auth or proxy support); add code to parse username/password # out of url and send as basic authentication; expose downloading-related # exceptions in bozo_exception (aaronsw); added __contains__ method to # FeedParserDict (aaronsw); added publisher_detail (aaronsw) #3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always # convert feed to UTF-8 before passing to XML parser; completely revamped # logic for determining character encoding and attempting XML parsing # (much faster); increased default timeout to 20 seconds; test for presence # of Location header on redirects; added tests for many alternate character # encodings; support various EBCDIC encodings; support UTF-16BE and # UTF16-LE with or without a BOM; support UTF-8 with a BOM; support # UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no # XML parsers are available; added support for "Content-encoding: deflate"; # send blank "Accept-encoding: " header if neither gzip nor zlib modules # are available #3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure # problem tracking xml:base and xml:lang if element declares it, child # doesn't, first grandchild redeclares it, and second grandchild doesn't; # refactored date parsing; defined public registerDateHandler so callers # can add support for additional date formats at runtime; added support # for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added # zopeCompatibilityHack() which turns FeedParserDict into a regular # dictionary, required for Zope compatibility, and also makes command- # line debugging easier because pprint module formats real dictionaries # better than dictionary-like objects; added NonXMLContentType exception, # which is stored in bozo_exception when a feed is served with a non-XML # media type such as "text/plain"; respect Content-Language as default # language if not xml:lang is present; cloud dict is now FeedParserDict; # generator dict is now FeedParserDict; better tracking of xml:lang, # including support for xml:lang="" to unset the current language; # recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default # namespace; don't overwrite final status on redirects (scenarios: # redirecting to a URL that returns 304, redirecting to a URL that # redirects to another URL with a different type of redirect); add # support for HTTP 303 redirects
Python
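The date-parsing machinery above is extensible at runtime: registerDateHandler() registers a callable that receives the raw date string and returns a 9-tuple in GMT, or None to let the remaining handlers try. A minimal sketch, assuming the module above is importable as feedparser; the compact 'YYYYMMDDTHHMMSSZ' stamp format and the feed URL are illustrative, not part of the library:

import time
import feedparser

def _parse_date_compact(dateString):
    # hypothetical handler for compact UTC stamps like '20040702T093000Z'
    try:
        return time.strptime(dateString, '%Y%m%dT%H%M%SZ')  # struct_time acts as a 9-tuple
    except ValueError:
        return None  # no match: fall through to the other registered handlers

feedparser.registerDateHandler(_parse_date_compact)
result = feedparser.parse('http://example.org/feed.xml')
if result.entries:
    print result.entries[0].get('modified_parsed')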
#!/usr/bin/env python # -*- coding: utf-8 -*- ## ## Copyright (C) 2005 alexisoft (manatlan[at]gmail(dot)com) ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 2 only. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## from common.template import Template import feedparser __title__=u"News RSS" __order__=3000 __author__ ="alexisoft/manatlan" __about__ ="lets you browse your rss feeds" class main: def __init__(self): # initialization ... if not self.conf.items(): # no feeds configured yet # install a default set ... liste = [ ("Le Monde international","http://www.lemonde.fr/rss/sequence/0,2-3210,1-0,0.xml"), ("Le Monde france","http://www.lemonde.fr/rss/sequence/0,2-3224,1-0,0.xml"), ("Linuxfr","http://linuxfr.org/backend/news-homepage/rss20.rss"), ("Linuxfr journaux","http://linuxfr.org/backend/journaux/rss20.rss"), ("Freenews","http://www.freenews.fr/feeds/rss.php"), ("Ratatium","http://www.ratiatum.com/rss/news.rss"), ("Daily python","http://www.pythonware.com/daily/rss2.xml"), ("Planet Ubuntu","http://planet.ubuntu-fr.org/rss.php"), ("google-news une","http://www.xasa.com/news/xml/fr/top.xml"), ("google-news france","http://www.xasa.com/news/xml/fr/nation.xml"), ("google-news inter","http://www.xasa.com/news/xml/fr/world.xml") ] c=1 for titre,url in liste: self.conf["feed%03d"%c] = titre,url c+=1 self.save() def index(self): array=[] feeds = self.conf.keys() feeds.sort() idx=1 for key in feeds: nom,url = self.conf[key] array.append( (key,nom) ) idx+=1 return Template({"BACK":"/index"}) def view(self, feed): title,url = self.conf[feed] rss = feedparser.parse(url) array = [] name = rss.feed.title description = True i = 0 for item in rss.entries: i += 1 if not hasattr(item, 'summary'): description = False array.append((str(i), item.title)) return Template({"BACK":"index"}) def item(self, num, feed): title,url = self.conf[feed] rss = feedparser.parse(url) item = rss.entries[int(num)-1] if not item: return "No RSS item found for num = %s and feed = %s" % (num, feed) name = item.title description = item.summary return Template({"BACK":"view?feed="+feed})
Python
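The plugin above relies on three fields of the feedparser result: rss.feed.title, entry.title, and the optional entry.summary (hence the hasattr() guard in view()). A standalone sketch of the same access pattern; the Linuxfr URL is one of the plugin's own defaults:

import feedparser

rss = feedparser.parse('http://linuxfr.org/backend/news-homepage/rss20.rss')
print rss.feed.title
i = 0
for item in rss.entries:
    i += 1
    print '%2d. %s' % (i, item.title)
    if hasattr(item, 'summary'):  # summary is optional, as view() assumes
        print '    %s' % item.summary[:60]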
#!/usr/bin/env python # -*- coding: UTF8 -*- # ============================================================================== # field type descriptions as (length, abbreviation, full name) tuples FIELD_TYPES=( (0, 'X', 'Proprietary'), # no such type (1, 'B', 'Byte'), (1, 'A', 'ASCII'), (2, 'S', 'Short'), (4, 'L', 'Long'), (8, 'R', 'Ratio'), (1, 'SB', 'Signed Byte'), (1, 'U', 'Undefined'), (2, 'SS', 'Signed Short'), (4, 'SL', 'Signed Long'), (8, 'SR', 'Signed Ratio') ) # dictionary of main EXIF tag names # first element of tuple is tag name, optional second element is # another dictionary giving names to values EXIF_TAGS={ 0x0100: ('ImageWidth', ), 0x0101: ('ImageLength', ), 0x0102: ('BitsPerSample', ), 0x0103: ('Compression', {1: 'Uncompressed TIFF', 6: 'JPEG Compressed'}), 0x0106: ('PhotometricInterpretation', ), 0x010A: ('FillOrder', ), 0x010D: ('DocumentName', ), 0x010E: ('ImageDescription', ), 0x010F: ('Make', ), 0x0110: ('Model', ), 0x0111: ('StripOffsets', ), 0x0112: ('Orientation', ), 0x0115: ('SamplesPerPixel', ), 0x0116: ('RowsPerStrip', ), 0x0117: ('StripByteCounts', ), 0x011A: ('XResolution', ), 0x011B: ('YResolution', ), 0x011C: ('PlanarConfiguration', ), 0x0128: ('ResolutionUnit', {1: 'Not Absolute', 2: 'Pixels/Inch', 3: 'Pixels/Centimeter'}), 0x012D: ('TransferFunction', ), 0x0131: ('Software', ), 0x0132: ('DateTime', ), 0x013B: ('Artist', ), 0x013E: ('WhitePoint', ), 0x013F: ('PrimaryChromaticities', ), 0x0156: ('TransferRange', ), 0x0200: ('JPEGProc', ), 0x0201: ('JPEGInterchangeFormat', ), 0x0202: ('JPEGInterchangeFormatLength', ), 0x0211: ('YCbCrCoefficients', ), 0x0212: ('YCbCrSubSampling', ), 0x0213: ('YCbCrPositioning', ), 0x0214: ('ReferenceBlackWhite', ), 0x828D: ('CFARepeatPatternDim', ), 0x828E: ('CFAPattern', ), 0x828F: ('BatteryLevel', ), 0x8298: ('Copyright', ), 0x829A: ('ExposureTime', ), 0x829D: ('FNumber', ), 0x83BB: ('IPTC/NAA', ), 0x8769: ('ExifOffset', ), 0x8773: ('InterColorProfile', ), 0x8822: ('ExposureProgram', {0: 'Unidentified', 1: 'Manual', 2: 'Program Normal', 3: 'Aperture Priority', 4: 'Shutter Priority', 5: 'Program Creative', 6: 'Program Action', 7: 'Portrait Mode', 8: 'Landscape Mode'}), 0x8824: ('SpectralSensitivity', ), 0x8825: ('GPSInfo', ), 0x8827: ('ISOSpeedRatings', ), 0x8828: ('OECF', ), # print as string 0x9000: ('ExifVersion', lambda x: ''.join(map(chr, x))), 0x9003: ('DateTimeOriginal', ), 0x9004: ('DateTimeDigitized', ), 0x9101: ('ComponentsConfiguration', {0: '', 1: 'Y', 2: 'Cb', 3: 'Cr', 4: 'Red', 5: 'Green', 6: 'Blue'}), 0x9102: ('CompressedBitsPerPixel', ), 0x9201: ('ShutterSpeedValue', ), 0x9202: ('ApertureValue', ), 0x9203: ('BrightnessValue', ), 0x9204: ('ExposureBiasValue', ), 0x9205: ('MaxApertureValue', ), 0x9206: ('SubjectDistance', ), 0x9207: ('MeteringMode', {0: 'Unidentified', 1: 'Average', 2: 'CenterWeightedAverage', 3: 'Spot', 4: 'MultiSpot'}), 0x9208: ('LightSource', {0: 'Unknown', 1: 'Daylight', 2: 'Fluorescent', 3: 'Tungsten', 10: 'Flash', 17: 'Standard Light A', 18: 'Standard Light B', 19: 'Standard Light C', 20: 'D55', 21: 'D65', 22: 'D75', 255: 'Other'}), 0x9209: ('Flash', {0: 'No', 1: 'Fired', 5: 'Fired (?)', # no return sensed 7: 'Fired (!)', # return sensed 9: 'Fill Fired', 13: 'Fill Fired (?)', 15: 'Fill Fired (!)', 16: 'Off', 24: 'Auto Off', 25: 'Auto Fired', 29: 'Auto Fired (?)', 31: 'Auto Fired (!)', 32: 'Not Available'}), 0x920A: ('FocalLength', ), 0x927C: ('MakerNote', ), # print as string 0x9286: ('UserComment', lambda x: ''.join(map(chr, x))), 0x9290: ('SubSecTime', ), 0x9291: ('SubSecTimeOriginal', 
), 0x9292: ('SubSecTimeDigitized', ), # print as string 0xA000: ('FlashPixVersion', lambda x: ''.join(map(chr, x))), 0xA001: ('ColorSpace', ), 0xA002: ('ExifImageWidth', ), 0xA003: ('ExifImageLength', ), 0xA005: ('InteroperabilityOffset', ), 0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP 0xA20C: ('SpatialFrequencyResponse', ), # 0x920C - - 0xA20E: ('FocalPlaneXResolution', ), # 0x920E - - 0xA20F: ('FocalPlaneYResolution', ), # 0x920F - - 0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210 - - 0xA214: ('SubjectLocation', ), # 0x9214 - - 0xA215: ('ExposureIndex', ), # 0x9215 - - 0xA217: ('SensingMethod', ), # 0x9217 - - 0xA300: ('FileSource', {3: 'Digital Camera'}), 0xA301: ('SceneType', {1: 'Directly Photographed'}), } # interoperability tags INTR_TAGS={ 0x0001: ('InteroperabilityIndex', ), 0x0002: ('InteroperabilityVersion', ), 0x1000: ('RelatedImageFileFormat', ), 0x1001: ('RelatedImageWidth', ), 0x1002: ('RelatedImageLength', ), } # GPS tags (not used yet, haven't seen camera with GPS) GPS_TAGS={ 0x0000: ('GPSVersionID', ), 0x0001: ('GPSLatitudeRef', ), 0x0002: ('GPSLatitude', ), 0x0003: ('GPSLongitudeRef', ), 0x0004: ('GPSLongitude', ), 0x0005: ('GPSAltitudeRef', ), 0x0006: ('GPSAltitude', ), 0x0007: ('GPSTimeStamp', ), 0x0008: ('GPSSatellites', ), 0x0009: ('GPSStatus', ), 0x000A: ('GPSMeasureMode', ), 0x000B: ('GPSDOP', ), 0x000C: ('GPSSpeedRef', ), 0x000D: ('GPSSpeed', ), 0x000E: ('GPSTrackRef', ), 0x000F: ('GPSTrack', ), 0x0010: ('GPSImgDirectionRef', ), 0x0011: ('GPSImgDirection', ), 0x0012: ('GPSMapDatum', ), 0x0013: ('GPSDestLatitudeRef', ), 0x0014: ('GPSDestLatitude', ), 0x0015: ('GPSDestLongitudeRef', ), 0x0016: ('GPSDestLongitude', ), 0x0017: ('GPSDestBearingRef', ), 0x0018: ('GPSDestBearing', ), 0x0019: ('GPSDestDistanceRef', ), 0x001A: ('GPSDestDistance', ) } # Nikon E99x MakerNote Tags # http://members.tripod.com/~tawba/990exif.htm MAKERNOTE_NIKON_NEWER_TAGS={ 0x0002: ('ISOSetting', ), 0x0003: ('ColorMode', ), 0x0004: ('Quality', ), 0x0005: ('Whitebalance', ), 0x0006: ('ImageSharpening', ), 0x0007: ('FocusMode', ), 0x0008: ('FlashSetting', ), 0x000F: ('ISOSelection', ), 0x0080: ('ImageAdjustment', ), 0x0082: ('AuxiliaryLens', ), 0x0085: ('ManualFocusDistance', ), 0x0086: ('DigitalZoomFactor', ), 0x0088: ('AFFocusPosition', {0x0000: 'Center', 0x0100: 'Top', 0x0200: 'Bottom', 0x0300: 'Left', 0x0400: 'Right'}), 0x0094: ('Saturation', {-3: 'B&W', -2: '-2', -1: '-1', 0: '0', 1: '1', 2: '2'}), 0x0095: ('NoiseReduction', ), 0x0010: ('DataDump', ) } MAKERNOTE_NIKON_OLDER_TAGS={ 0x0003: ('Quality', {1: 'VGA Basic', 2: 'VGA Normal', 3: 'VGA Fine', 4: 'SXGA Basic', 5: 'SXGA Normal', 6: 'SXGA Fine'}), 0x0004: ('ColorMode', {1: 'Color', 2: 'Monochrome'}), 0x0005: ('ImageAdjustment', {0: 'Normal', 1: 'Bright+', 2: 'Bright-', 3: 'Contrast+', 4: 'Contrast-'}), 0x0006: ('CCDSpeed', {0: 'ISO 80', 2: 'ISO 160', 4: 'ISO 320', 5: 'ISO 100'}), 0x0007: ('WhiteBalance', {0: 'Auto', 1: 'Preset', 2: 'Daylight', 3: 'Incandescent', 4: 'Fluorescent', 5: 'Cloudy', 6: 'Speed Light'}) } # decode Olympus SpecialMode tag in MakerNote def olympus_special_mode(v): a={ 0: 'Normal', 1: 'Unknown', 2: 'Fast', 3: 'Panorama'} b={ 0: 'Non-panoramic', 1: 'Left to right', 2: 'Right to left', 3: 'Bottom to top', 4: 'Top to bottom'} return '%s - sequence %d - %s' % (a[v[0]], v[1], b[v[2]]) MAKERNOTE_OLYMPUS_TAGS={ # ah HAH! those sneeeeeaky bastids! 
this is how they get past the fact # that a JPEG thumbnail is not allowed in an uncompressed TIFF file 0x0100: ('JPEGThumbnail', ), 0x0200: ('SpecialMode', olympus_special_mode), 0x0201: ('JPEGQual', {1: 'SQ', 2: 'HQ', 3: 'SHQ'}), 0x0202: ('Macro', {0: 'Normal', 1: 'Macro'}), 0x0204: ('DigitalZoom', ), 0x0207: ('SoftwareRelease', ), 0x0208: ('PictureInfo', ), # print as string 0x0209: ('CameraID', lambda x: ''.join(map(chr, x))), 0x0F00: ('DataDump', ) } MAKERNOTE_CASIO_TAGS={ 0x0001: ('RecordingMode', {1: 'Single Shutter', 2: 'Panorama', 3: 'Night Scene', 4: 'Portrait', 5: 'Landscape'}), 0x0002: ('Quality', {1: 'Economy', 2: 'Normal', 3: 'Fine'}), 0x0003: ('FocusingMode', {2: 'Macro', 3: 'Auto Focus', 4: 'Manual Focus', 5: 'Infinity'}), 0x0004: ('FlashMode', {1: 'Auto', 2: 'On', 3: 'Off', 4: 'Red Eye Reduction'}), 0x0005: ('FlashIntensity', {11: 'Weak', 13: 'Normal', 15: 'Strong'}), 0x0006: ('Object Distance', ), 0x0007: ('WhiteBalance', {1: 'Auto', 2: 'Tungsten', 3: 'Daylight', 4: 'Fluorescent', 5: 'Shade', 129: 'Manual'}), 0x000B: ('Sharpness', {0: 'Normal', 1: 'Soft', 2: 'Hard'}), 0x000C: ('Contrast', {0: 'Normal', 1: 'Low', 2: 'High'}), 0x000D: ('Saturation', {0: 'Normal', 1: 'Low', 2: 'High'}), 0x0014: ('CCDSpeed', {64: 'Normal', 80: 'Normal', 100: 'High', 125: '+1.0', 244: '+3.0', 250: '+2.0',}) } MAKERNOTE_FUJIFILM_TAGS={ 0x0000: ('NoteVersion', lambda x: ''.join(map(chr, x))), 0x1000: ('Quality', ), 0x1001: ('Sharpness', {1: 'Soft', 2: 'Soft', 3: 'Normal', 4: 'Hard', 5: 'Hard'}), 0x1002: ('WhiteBalance', {0: 'Auto', 256: 'Daylight', 512: 'Cloudy', 768: 'DaylightColor-Fluorescent', 769: 'DaywhiteColor-Fluorescent', 770: 'White-Fluorescent', 1024: 'Incandescent', 3840: 'Custom'}), 0x1003: ('Color', {0: 'Normal', 256: 'High', 512: 'Low'}), 0x1004: ('Tone', {0: 'Normal', 256: 'High', 512: 'Low'}), 0x1010: ('FlashMode', {0: 'Auto', 1: 'On', 2: 'Off', 3: 'Red Eye Reduction'}), 0x1011: ('FlashStrength', ), 0x1020: ('Macro', {0: 'Off', 1: 'On'}), 0x1021: ('FocusMode', {0: 'Auto', 1: 'Manual'}), 0x1030: ('SlowSync', {0: 'Off', 1: 'On'}), 0x1031: ('PictureMode', {0: 'Auto', 1: 'Portrait', 2: 'Landscape', 4: 'Sports', 5: 'Night', 6: 'Program AE', 256: 'Aperture Priority AE', 512: 'Shutter Priority AE', 768: 'Manual Exposure'}), 0x1100: ('MotorOrBracket', {0: 'Off', 1: 'On'}), 0x1300: ('BlurWarning', {0: 'Off', 1: 'On'}), 0x1301: ('FocusWarning', {0: 'Off', 1: 'On'}), 0x1302: ('AEWarning', {0: 'Off', 1: 'On'}) } MAKERNOTE_CANON_TAGS={ 0x0006: ('ImageType', ), 0x0007: ('FirmwareVersion', ), 0x0008: ('ImageNumber', ), 0x0009: ('OwnerName', ) } # see http://www.burren.cx/david/canon.html by David Burren # this is in element offset, name, optional value dictionary format MAKERNOTE_CANON_TAG_0x001={ 1: ('Macromode', {1: 'Macro', 2: 'Normal'}), 2: ('SelfTimer', ), 3: ('Quality', {2: 'Normal', 3: 'Fine', 5: 'Superfine'}), 4: ('FlashMode', {0: 'Flash Not Fired', 1: 'Auto', 2: 'On', 3: 'Red-Eye Reduction', 4: 'Slow Synchro', 5: 'Auto + Red-Eye Reduction', 6: 'On + Red-Eye Reduction', 16: 'external flash'}), 5: ('ContinuousDriveMode', {0: 'Single Or Timer', 1: 'Continuous'}), 7: ('FocusMode', {0: 'One-Shot', 1: 'AI Servo', 2: 'AI Focus', 3: 'MF', 4: 'Single', 5: 'Continuous', 6: 'MF'}), 10: ('ImageSize', {0: 'Large', 1: 'Medium', 2: 'Small'}), 11: ('EasyShootingMode', {0: 'Full Auto', 1: 'Manual', 2: 'Landscape', 3: 'Fast Shutter', 4: 'Slow Shutter', 5: 'Night', 6: 'B&W', 7: 'Sepia', 8: 'Portrait', 9: 'Sports', 10: 'Macro/Close-Up', 11: 'Pan Focus'}), 12: ('DigitalZoom', {0: 'None', 1: '2x', 2: '4x'}), 
13: ('Contrast', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 14: ('Saturation', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 15: ('Sharpness', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 16: ('ISO', {0: 'See ISOSpeedRatings Tag', 15: 'Auto', 16: '50', 17: '100', 18: '200', 19: '400'}), 17: ('MeteringMode', {3: 'Evaluative', 4: 'Partial', 5: 'Center-weighted'}), 18: ('FocusType', {0: 'Manual', 1: 'Auto', 3: 'Close-Up (Macro)', 8: 'Locked (Pan Mode)'}), 19: ('AFPointSelected', {0x3000: 'None (MF)', 0x3001: 'Auto-Selected', 0x3002: 'Right', 0x3003: 'Center', 0x3004: 'Left'}), 20: ('ExposureMode', {0: 'Easy Shooting', 1: 'Program', 2: 'Tv-priority', 3: 'Av-priority', 4: 'Manual', 5: 'A-DEP'}), 23: ('LongFocalLengthOfLensInFocalUnits', ), 24: ('ShortFocalLengthOfLensInFocalUnits', ), 25: ('FocalUnitsPerMM', ), 28: ('FlashActivity', {0: 'Did Not Fire', 1: 'Fired'}), 29: ('FlashDetails', {14: 'External E-TTL', 13: 'Internal Flash', 11: 'FP Sync Used', 7: '2nd("Rear")-Curtain Sync Used', 4: 'FP Sync Enabled'}), 32: ('FocusMode', {0: 'Single', 1: 'Continuous'}) } MAKERNOTE_CANON_TAG_0x004={ 7: ('WhiteBalance', {0: 'Auto', 1: 'Sunny', 2: 'Cloudy', 3: 'Tungsten', 4: 'Fluorescent', 5: 'Flash', 6: 'Custom'}), 9: ('SequenceNumber', ), 14: ('AFPointUsed', ), 15: ('FlashBias', {0XFFC0: '-2 EV', 0XFFCC: '-1.67 EV', 0XFFD0: '-1.50 EV', 0XFFD4: '-1.33 EV', 0XFFE0: '-1 EV', 0XFFEC: '-0.67 EV', 0XFFF0: '-0.50 EV', 0XFFF4: '-0.33 EV', 0X0000: '0 EV', 0X000C: '0.33 EV', 0X0010: '0.50 EV', 0X0014: '0.67 EV', 0X0020: '1 EV', 0X002C: '1.33 EV', 0X0030: '1.50 EV', 0X0034: '1.67 EV', 0X0040: '2 EV'}), 19: ('SubjectDistance', ) } # extract multibyte integer in Motorola format (little endian) def s2n_motorola(str): x=0 for c in str: x=(x << 8) | ord(c) return x # extract multibyte integer in Intel format (big endian) def s2n_intel(str): x=0 y=0L for c in str: x=x | (ord(c) << y) y=y+8 return x # ratio object that eventually will be able to reduce itself to lowest # common denominator for printing def gcd(a, b): if b == 0: return a else: return gcd(b, a % b) class Ratio: def __init__(self, num, den): self.num=num self.den=den def __repr__(self): self.reduce() if self.den == 1: return str(self.num) return '%d/%d' % (self.num, self.den) def reduce(self): div=gcd(self.num, self.den) if div > 1: self.num=self.num/div self.den=self.den/div # for ease of dealing with tags class IFD_Tag: def __init__(self, printable, tag, field_type, values, field_offset, field_length): # printable version of data self.printable=printable # tag ID number self.tag=tag # field type as index into FIELD_TYPES self.field_type=field_type # offset of start of field in bytes from beginning of IFD self.field_offset=field_offset # length of data field in bytes self.field_length=field_length # either a string or array of data items self.values=values def __str__(self): return self.printable def __repr__(self): return '(0x%04X) %s=%s @ %d' % (self.tag, FIELD_TYPES[self.field_type][2], self.printable, self.field_offset) # class that handles an EXIF header class EXIF_header: def __init__(self, file, endian, offset, debug=0): self.file=file self.endian=endian self.offset=offset self.debug=debug self.tags={} # convert slice to integer, based on sign and endian flags def s2n(self, offset, length, signed=0): self.file.seek(self.offset+offset) slice=self.file.read(length) if self.endian == 'I': val=s2n_intel(slice) else: val=s2n_motorola(slice) # Sign extension ? 
if signed: #msb=1 << (8*length-1) #if val & msb: # val=val-(msb << 1) pass return val # convert offset to string def n2s(self, offset, length): s='' for i in range(length): if self.endian == 'I': s=s+chr(offset & 0xFF) else: s=chr(offset & 0xFF)+s offset=offset >> 8 return s # return first IFD def first_IFD(self): return self.s2n(4, 4) # return pointer to next IFD def next_IFD(self, ifd): entries=self.s2n(ifd, 2) return self.s2n(ifd+2+12*entries, 4) # return list of IFDs in header def list_IFDs(self): i=self.first_IFD() a=[] while i: a.append(i) i=self.next_IFD(i) return a # return list of entries in this IFD def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS): entries=self.s2n(ifd, 2) for i in range(entries): entry=ifd+2+12*i tag=self.s2n(entry, 2) field_type=self.s2n(entry+2, 2) if not 0 < field_type < len(FIELD_TYPES): # unknown field type raise ValueError, \ 'unknown type %d in tag 0x%04X' % (field_type, tag) typelen=FIELD_TYPES[field_type][0] count=self.s2n(entry+4, 4) offset=entry+8 if count*typelen > 4: # not the value, it's a pointer to the value offset=self.s2n(offset, 4) field_offset=offset if field_type == 2: # special case: null-terminated ASCII string if count != 0: self.file.seek(self.offset+offset) values=self.file.read(count).strip().replace('\x00','') else: values='' else: values=[] signed=(field_type in [6, 8, 9, 10]) for j in range(count): if field_type in (5, 10): # a ratio value_j=Ratio(self.s2n(offset, 4, signed), self.s2n(offset+4, 4, signed)) else: value_j=self.s2n(offset, typelen, signed) values.append(value_j) offset=offset+typelen # now "values" is either a string or an array if count == 1 and field_type != 2: printable=str(values[0]) else: printable=str(values) # figure out tag name tag_entry=dict.get(tag) if tag_entry: tag_name=tag_entry[0] if len(tag_entry) != 1: # optional 2nd tag element is present if callable(tag_entry[1]): # call mapping function printable=tag_entry[1](values) else: printable='' for i in values: # use LUT for this tag printable+=tag_entry[1].get(i, repr(i)) else: tag_name='Tag 0x%04X' % tag self.tags[ifd_name+' '+tag_name]=IFD_Tag(printable, tag, field_type, values, field_offset, count*typelen) if self.debug: print ' %s: %s' % (tag_name, repr(self.tags[ifd_name+' '+tag_name])) # extract uncompressed TIFF thumbnail (like pulling teeth) # we take advantage of the pre-existing layout in the thumbnail IFD as # much as possible def extract_TIFF_thumbnail(self, thumb_ifd): entries=self.s2n(thumb_ifd, 2) # this is header plus offset to IFD ... if self.endian == 'M': tiff='MM\x00*\x00\x00\x00\x08' else: tiff='II*\x00\x08\x00\x00\x00' # ... plus thumbnail IFD data plus a null "next IFD" pointer self.file.seek(self.offset+thumb_ifd) tiff+=self.file.read(entries*12+2)+'\x00\x00\x00\x00' # fix up large value offset pointers into data area for i in range(entries): entry=thumb_ifd+2+12*i tag=self.s2n(entry, 2) field_type=self.s2n(entry+2, 2) typelen=FIELD_TYPES[field_type][0] count=self.s2n(entry+4, 4) oldoff=self.s2n(entry+8, 4) # start of the 4-byte pointer area in entry ptr=i*12+18 # remember strip offsets location if tag == 0x0111: strip_off=ptr strip_len=count*typelen # is it in the data area? 
if count*typelen > 4: # update offset pointer (nasty "strings are immutable" crap) # should be able to say "tiff[ptr:ptr+4]=newoff" newoff=len(tiff) tiff=tiff[:ptr]+self.n2s(newoff, 4)+tiff[ptr+4:] # remember strip offsets location if tag == 0x0111: strip_off=newoff strip_len=4 # get original data and store it self.file.seek(self.offset+oldoff) tiff+=self.file.read(count*typelen) # add pixel strips and update strip offset info old_offsets=self.tags['Thumbnail StripOffsets'].values old_counts=self.tags['Thumbnail StripByteCounts'].values for i in range(len(old_offsets)): # update offset pointer (more nasty "strings are immutable" crap) offset=self.n2s(len(tiff), strip_len) tiff=tiff[:strip_off]+offset+tiff[strip_off+strip_len:] strip_off+=strip_len # add pixel strip to end self.file.seek(self.offset+old_offsets[i]) tiff+=self.file.read(old_counts[i]) self.tags['TIFFThumbnail']=tiff # decode all the camera-specific MakerNote formats def decode_maker_note(self): note=self.tags['EXIF MakerNote'] make=self.tags['Image Make'].printable model=self.tags['Image Model'].printable # Nikon if make == 'NIKON': if note.values[0:5] == [78, 105, 107, 111, 110]: # "Nikon" # older model self.dump_IFD(note.field_offset+8, 'MakerNote', dict=MAKERNOTE_NIKON_OLDER_TAGS) else: # newer model (E99x or D1) self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_NIKON_NEWER_TAGS) return # Olympus if make[:7] == 'OLYMPUS': self.dump_IFD(note.field_offset+8, 'MakerNote', dict=MAKERNOTE_OLYMPUS_TAGS) return # Casio if make == 'Casio': self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_CASIO_TAGS) return # Fujifilm if make == 'FUJIFILM': # bug: everything else is "Motorola" endian, but the MakerNote # is "Intel" endian endian=self.endian self.endian='I' # bug: IFD offsets are from beginning of MakerNote, not # beginning of file header offset=self.offset self.offset+=note.field_offset # process note with bogus values (note is actually at offset 12) self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS) # reset to correct values self.endian=endian self.offset=offset return # Canon if make == 'Canon': self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_CANON_TAGS) for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001), ('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)): if i[0] in self.tags: # MALZ # self.canon_decode_tag(self.tags[i[0]].values, i[1]) return # decode Canon MakerNote tag based on offset within tag # see http://www.burren.cx/david/canon.html by David Burren def canon_decode_tag(self, value, dict): for i in range(1, len(value)): x=dict.get(i, ('Unknown', )) if self.debug: print i, x name=x[0] if len(x) > 1: val=x[1].get(value[i], 'Unknown') else: val=value[i] # it's not a real IFD Tag but we fake one to make everybody # happy. 
this will have a "proprietary" type self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None, None, None) # process an image file (expects an open file object) # this is the function that has to deal with all the arbitrary nasty bits # of the EXIF standard def process_file(file, debug=0): # determine whether it's a JPEG or TIFF data=file.read(12) if data[0:4] in ['II*\x00', 'MM\x00*']: # it's a TIFF file file.seek(0) endian=file.read(1) file.read(1) offset=0 elif data[0:2] == '\xFF\xD8': # it's a JPEG file # skip JFIF style header(s) while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM'): length=ord(data[4])*256+ord(data[5]) file.read(length-8) # fake an EXIF beginning of file data='\xFF\x00'+file.read(10) if data[2] == '\xFF' and data[6:10] == 'Exif': # detected EXIF header offset=file.tell() endian=file.read(1) else: # no EXIF information return {} else: # file format not recognized return {} # deal with the EXIF info we found if debug: print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format' hdr=EXIF_header(file, endian, offset, debug) ifd_list=hdr.list_IFDs() ctr=0 for i in ifd_list: if ctr == 0: IFD_name='Image' elif ctr == 1: IFD_name='Thumbnail' thumb_ifd=i else: IFD_name='IFD %d' % ctr if debug: print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i) hdr.dump_IFD(i, IFD_name) # EXIF IFD exif_off=hdr.tags.get(IFD_name+' ExifOffset') if exif_off: if debug: print ' EXIF SubIFD at offset %d:' % exif_off.values[0] hdr.dump_IFD(exif_off.values[0], 'EXIF') # Interoperability IFD contained in EXIF IFD intr_off=hdr.tags.get('EXIF SubIFD InteroperabilityOffset') if intr_off: if debug: print ' EXIF Interoperability SubSubIFD at offset %d:' \ % intr_off.values[0] hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability', dict=INTR_TAGS) # GPS IFD gps_off=hdr.tags.get(IFD_name+' GPSInfo') if gps_off: if debug: print ' GPS SubIFD at offset %d:' % gps_off.values[0] hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS) ctr+=1 # extract uncompressed TIFF thumbnail thumb=hdr.tags.get('Thumbnail Compression') if thumb and thumb.printable == 'Uncompressed TIFF': hdr.extract_TIFF_thumbnail(thumb_ifd) # JPEG thumbnail (thankfully the JPEG data is stored as a unit) thumb_off=hdr.tags.get('Thumbnail JPEGInterchangeFormat') if thumb_off: file.seek(offset+thumb_off.values[0]) size=hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0] hdr.tags['JPEGThumbnail']=file.read(size) # deal with MakerNote contained in EXIF IFD if hdr.tags.has_key('EXIF MakerNote'): hdr.decode_maker_note() # Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote # since it's not allowed in a uncompressed TIFF IFD if not hdr.tags.has_key('JPEGThumbnail'): thumb_off=hdr.tags.get('MakerNote JPEGThumbnail') if thumb_off: file.seek(offset+thumb_off.values[0]) hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length) return hdr.tags
Python
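A minimal sketch of driving process_file() above from a script: it takes an already-open file object and returns a dict mapping 'IFDname TagName' to IFD_Tag objects, whose str() form is the human-readable value. 'photo.jpg' is a placeholder path; the two thumbnail keys hold raw image bytes, so they are skipped when printing:

f = open('photo.jpg', 'rb')
tags = process_file(f)
f.close()
names = tags.keys()
names.sort()
for name in names:
    if name in ('JPEGThumbnail', 'TIFFThumbnail'):
        continue  # raw thumbnail data, not printable
    print '%s: %s' % (name, tags[name])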
#!/usr/bin/env python # -*- coding: utf-8 -*- ## ## Copyright (C) 2005 manatlan manatlan[at]gmail(dot)com ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 2 only. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## from common.template import Template from common.medialib import getFiles import urllib import StringIO,os,sys from exif import process_file from common import u64dec,u64enc,pageNavigator try: import Image,ImageFile,ImageDraw,ImageFont #pil except: print "The 'photos' plugin requires PIL (python-image) (http://www.pythonware.com/products/pil/)" sys.exit(-1) __title__=u"Photos" __order__=2000 __author__ ="manatlan" __about__ ="lets you browse your photos (uses the *exif-thumbnail*)" DIR=os.path.abspath(u"medias/photos") class main: def index(self,dir=None,page=1): global DIR if dir: dir=u64dec(dir) else: dir=DIR cmd={} liste = [] ndirs,nfiles = getFiles(dir,"jpg") if dir!=DIR: url = u64enc(os.path.dirname(dir)) liste.append( ("..",None,url)) cmd["BACK"]="index?dir=%s" %url else: cmd["BACK"]="/index" for i in ndirs: name = os.path.basename(i) liste.append( (name,None,u64enc(i)) ) for i in nfiles: name = os.path.basename(i) liste.append( (name,"f",u64enc(i)) ) cmd["PREV"],cmd["NEXT"],navig,items = pageNavigator(liste,12,"index?page=%d&dir="+u64enc(dir),int(page)) if len(nfiles)>0: cmd["PLAY"]="show?file="+u64enc(nfiles[0])+"&play=1" rep=dir[len(DIR):] return Template(cmd) def show(self,file,play=""): fichier = file # keep the u64-encoded form !!!
file=u64dec(file) ndirs,nfiles = getFiles(os.path.dirname(file),"jpg") idx=nfiles.index(file) dir=os.path.dirname(file) nextFile=nfiles[ (idx+1)%len(nfiles) ] prevFile=nfiles[ (idx-1)%len(nfiles) ] cmd = {"BACK":u"index?dir=%s&page=%d" %(u64enc(dir),1+((len(ndirs)+1+idx)/12)), "NEXT":u"show?file="+u64enc(nextFile), "PREV":u"show?file="+u64enc(prevFile), "max_images_in_cache":0 } if play: cmd["PAUSE"] =u"show?file="+u64enc(file) cmd["refresh"]=u"3;url=show?file="+u64enc(nextFile)+"&play=1" cmd["stop"]=cmd["BACK"] else: cmd["PLAY"] =u"show?file="+u64enc(nextFile)+"&play=1" return Template(cmd) #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- def mini(self, file): #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- file=u64dec(file) # fetch the "internal EXIF thumbnail" from the jpeg fid = open(file, 'rb') try: exif = process_file(fid) tb=exif["JPEGThumbnail"] fid.close() p = ImageFile.Parser() p.feed(tb) im = p.close() except: im = Image.open("image.png") if im: w,h = im.size w,h = fit(float(w),float(h),130,130,1) im = im.resize((w,h)) f = StringIO.StringIO() im.save(f, "PNG") yield "image/png" f.seek(0) yield f.read() else: yield None #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- def image(self, file): #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- file=u64dec(file) im = Image.open(file) w,h = im.size w,h = fit(float(w),float(h),672.0,528.0,1) im = im.resize((w,h)) f = StringIO.StringIO() im.save(f, "GIF") yield "image/gif" f.seek(0) while True: buf=f.read(2048) if buf=="": break yield buf f.close() def fit(orig_width, orig_height, dest_width, dest_height,zoom): if orig_width == 0 or orig_height == 0: return 0, 0 scale = min(dest_width/orig_width, dest_height/orig_height) if scale > 1: scale = 1 scale*=zoom fit_width = scale * orig_width fit_height = scale * orig_height return int(fit_width), int(fit_height)
Python
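mini() above deliberately serves the camera-written EXIF thumbnail instead of decoding the full-size JPEG, which is what keeps directory browsing cheap. A sketch of that path, reusing the plugin's fit() helper and the same PIL and exif imports; 'photo.jpg' is a placeholder path and the file is assumed to actually carry a thumbnail:

import ImageFile  # PIL's incremental image parser
from exif import process_file

fid = open('photo.jpg', 'rb')
tb = process_file(fid)['JPEGThumbnail']  # raw JPEG bytes embedded in the EXIF data
fid.close()
p = ImageFile.Parser()
p.feed(tb)
im = p.close()
print im.size
# fit() then scales into a box without upscaling, e.g. a 1600x1200 image on the
# 672x528 screen: min(672/1600., 528/1200.) = 0.42, so the result is (672, 504)
print fit(1600.0, 1200.0, 672.0, 528.0, 1)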
13: ('Contrast', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 14: ('Saturation', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 15: ('Sharpness', {0xFFFF: 'Low', 0: 'Normal', 1: 'High'}), 16: ('ISO', {0: 'See ISOSpeedRatings Tag', 15: 'Auto', 16: '50', 17: '100', 18: '200', 19: '400'}), 17: ('MeteringMode', {3: 'Evaluative', 4: 'Partial', 5: 'Center-weighted'}), 18: ('FocusType', {0: 'Manual', 1: 'Auto', 3: 'Close-Up (Macro)', 8: 'Locked (Pan Mode)'}), 19: ('AFPointSelected', {0x3000: 'None (MF)', 0x3001: 'Auto-Selected', 0x3002: 'Right', 0x3003: 'Center', 0x3004: 'Left'}), 20: ('ExposureMode', {0: 'Easy Shooting', 1: 'Program', 2: 'Tv-priority', 3: 'Av-priority', 4: 'Manual', 5: 'A-DEP'}), 23: ('LongFocalLengthOfLensInFocalUnits', ), 24: ('ShortFocalLengthOfLensInFocalUnits', ), 25: ('FocalUnitsPerMM', ), 28: ('FlashActivity', {0: 'Did Not Fire', 1: 'Fired'}), 29: ('FlashDetails', {14: 'External E-TTL', 13: 'Internal Flash', 11: 'FP Sync Used', 7: '2nd("Rear")-Curtain Sync Used', 4: 'FP Sync Enabled'}), 32: ('FocusMode', {0: 'Single', 1: 'Continuous'}) } MAKERNOTE_CANON_TAG_0x004={ 7: ('WhiteBalance', {0: 'Auto', 1: 'Sunny', 2: 'Cloudy', 3: 'Tungsten', 4: 'Fluorescent', 5: 'Flash', 6: 'Custom'}), 9: ('SequenceNumber', ), 14: ('AFPointUsed', ), 15: ('FlashBias', {0XFFC0: '-2 EV', 0XFFCC: '-1.67 EV', 0XFFD0: '-1.50 EV', 0XFFD4: '-1.33 EV', 0XFFE0: '-1 EV', 0XFFEC: '-0.67 EV', 0XFFF0: '-0.50 EV', 0XFFF4: '-0.33 EV', 0X0000: '0 EV', 0X000C: '0.33 EV', 0X0010: '0.50 EV', 0X0014: '0.67 EV', 0X0020: '1 EV', 0X002C: '1.33 EV', 0X0030: '1.50 EV', 0X0034: '1.67 EV', 0X0040: '2 EV'}), 19: ('SubjectDistance', ) } # extract multibyte integer in Motorola format (big endian) def s2n_motorola(str): x=0 for c in str: x=(x << 8) | ord(c) return x # extract multibyte integer in Intel format (little endian) def s2n_intel(str): x=0 y=0L for c in str: x=x | (ord(c) << y) y=y+8 return x # ratio object that eventually will be able to reduce itself to lowest # common denominator for printing def gcd(a, b): if b == 0: return a else: return gcd(b, a % b) class Ratio: def __init__(self, num, den): self.num=num self.den=den def __repr__(self): self.reduce() if self.den == 1: return str(self.num) return '%d/%d' % (self.num, self.den) def reduce(self): div=gcd(self.num, self.den) if div > 1: self.num=self.num/div self.den=self.den/div # for ease of dealing with tags class IFD_Tag: def __init__(self, printable, tag, field_type, values, field_offset, field_length): # printable version of data self.printable=printable # tag ID number self.tag=tag # field type as index into FIELD_TYPES self.field_type=field_type # offset of start of field in bytes from beginning of IFD self.field_offset=field_offset # length of data field in bytes self.field_length=field_length # either a string or array of data items self.values=values def __str__(self): return self.printable def __repr__(self): return '(0x%04X) %s=%s @ %d' % (self.tag, FIELD_TYPES[self.field_type][2], self.printable, self.field_offset) # class that handles an EXIF header class EXIF_header: def __init__(self, file, endian, offset, debug=0): self.file=file self.endian=endian self.offset=offset self.debug=debug self.tags={} # convert slice to integer, based on sign and endian flags def s2n(self, offset, length, signed=0): self.file.seek(self.offset+offset) slice=self.file.read(length) if self.endian == 'I': val=s2n_intel(slice) else: val=s2n_motorola(slice) # Sign extension ?
if signed: #msb=1 << (8*length-1) #if val & msb: # val=val-(msb << 1) pass return val # convert offset to string def n2s(self, offset, length): s='' for i in range(length): if self.endian == 'I': s=s+chr(offset & 0xFF) else: s=chr(offset & 0xFF)+s offset=offset >> 8 return s # return first IFD def first_IFD(self): return self.s2n(4, 4) # return pointer to next IFD def next_IFD(self, ifd): entries=self.s2n(ifd, 2) return self.s2n(ifd+2+12*entries, 4) # return list of IFDs in header def list_IFDs(self): i=self.first_IFD() a=[] while i: a.append(i) i=self.next_IFD(i) return a # return list of entries in this IFD def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS): entries=self.s2n(ifd, 2) for i in range(entries): entry=ifd+2+12*i tag=self.s2n(entry, 2) field_type=self.s2n(entry+2, 2) if not 0 < field_type < len(FIELD_TYPES): # unknown field type raise ValueError, \ 'unknown type %d in tag 0x%04X' % (field_type, tag) typelen=FIELD_TYPES[field_type][0] count=self.s2n(entry+4, 4) offset=entry+8 if count*typelen > 4: # not the value, it's a pointer to the value offset=self.s2n(offset, 4) field_offset=offset if field_type == 2: # special case: null-terminated ASCII string if count != 0: self.file.seek(self.offset+offset) values=self.file.read(count).strip().replace('\x00','') else: values='' else: values=[] signed=(field_type in [6, 8, 9, 10]) for j in range(count): if field_type in (5, 10): # a ratio value_j=Ratio(self.s2n(offset, 4, signed), self.s2n(offset+4, 4, signed)) else: value_j=self.s2n(offset, typelen, signed) values.append(value_j) offset=offset+typelen # now "values" is either a string or an array if count == 1 and field_type != 2: printable=str(values[0]) else: printable=str(values) # figure out tag name tag_entry=dict.get(tag) if tag_entry: tag_name=tag_entry[0] if len(tag_entry) != 1: # optional 2nd tag element is present if callable(tag_entry[1]): # call mapping function printable=tag_entry[1](values) else: printable='' for i in values: # use LUT for this tag printable+=tag_entry[1].get(i, repr(i)) else: tag_name='Tag 0x%04X' % tag self.tags[ifd_name+' '+tag_name]=IFD_Tag(printable, tag, field_type, values, field_offset, count*typelen) if self.debug: print ' %s: %s' % (tag_name, repr(self.tags[ifd_name+' '+tag_name])) # extract uncompressed TIFF thumbnail (like pulling teeth) # we take advantage of the pre-existing layout in the thumbnail IFD as # much as possible def extract_TIFF_thumbnail(self, thumb_ifd): entries=self.s2n(thumb_ifd, 2) # this is header plus offset to IFD ... if self.endian == 'M': tiff='MM\x00*\x00\x00\x00\x08' else: tiff='II*\x00\x08\x00\x00\x00' # ... plus thumbnail IFD data plus a null "next IFD" pointer self.file.seek(self.offset+thumb_ifd) tiff+=self.file.read(entries*12+2)+'\x00\x00\x00\x00' # fix up large value offset pointers into data area for i in range(entries): entry=thumb_ifd+2+12*i tag=self.s2n(entry, 2) field_type=self.s2n(entry+2, 2) typelen=FIELD_TYPES[field_type][0] count=self.s2n(entry+4, 4) oldoff=self.s2n(entry+8, 4) # start of the 4-byte pointer area in entry ptr=i*12+18 # remember strip offsets location if tag == 0x0111: strip_off=ptr strip_len=count*typelen # is it in the data area? 
if count*typelen > 4: # update offset pointer (nasty "strings are immutable" crap) # should be able to say "tiff[ptr:ptr+4]=newoff" newoff=len(tiff) tiff=tiff[:ptr]+self.n2s(newoff, 4)+tiff[ptr+4:] # remember strip offsets location if tag == 0x0111: strip_off=newoff strip_len=4 # get original data and store it self.file.seek(self.offset+oldoff) tiff+=self.file.read(count*typelen) # add pixel strips and update strip offset info old_offsets=self.tags['Thumbnail StripOffsets'].values old_counts=self.tags['Thumbnail StripByteCounts'].values for i in range(len(old_offsets)): # update offset pointer (more nasty "strings are immutable" crap) offset=self.n2s(len(tiff), strip_len) tiff=tiff[:strip_off]+offset+tiff[strip_off+strip_len:] strip_off+=strip_len # add pixel strip to end self.file.seek(self.offset+old_offsets[i]) tiff+=self.file.read(old_counts[i]) self.tags['TIFFThumbnail']=tiff # decode all the camera-specific MakerNote formats def decode_maker_note(self): note=self.tags['EXIF MakerNote'] make=self.tags['Image Make'].printable model=self.tags['Image Model'].printable # Nikon if make == 'NIKON': if note.values[0:5] == [78, 105, 107, 111, 110]: # "Nikon" # older model self.dump_IFD(note.field_offset+8, 'MakerNote', dict=MAKERNOTE_NIKON_OLDER_TAGS) else: # newer model (E99x or D1) self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_NIKON_NEWER_TAGS) return # Olympus if make[:7] == 'OLYMPUS': self.dump_IFD(note.field_offset+8, 'MakerNote', dict=MAKERNOTE_OLYMPUS_TAGS) return # Casio if make == 'Casio': self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_CASIO_TAGS) return # Fujifilm if make == 'FUJIFILM': # bug: everything else is "Motorola" endian, but the MakerNote # is "Intel" endian endian=self.endian self.endian='I' # bug: IFD offsets are from beginning of MakerNote, not # beginning of file header offset=self.offset self.offset+=note.field_offset # process note with bogus values (note is actually at offset 12) self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS) # reset to correct values self.endian=endian self.offset=offset return # Canon if make == 'Canon': self.dump_IFD(note.field_offset, 'MakerNote', dict=MAKERNOTE_CANON_TAGS) for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001), ('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)): if i[0] in self.tags: # MALZ # self.canon_decode_tag(self.tags[i[0]].values, i[1]) return # decode Canon MakerNote tag based on offset within tag # see http://www.burren.cx/david/canon.html by David Burren def canon_decode_tag(self, value, dict): for i in range(1, len(value)): x=dict.get(i, ('Unknown', )) if self.debug: print i, x name=x[0] if len(x) > 1: val=x[1].get(value[i], 'Unknown') else: val=value[i] # it's not a real IFD Tag but we fake one to make everybody # happy. 
this will have a "proprietary" type self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None, None, None) # process an image file (expects an open file object) # this is the function that has to deal with all the arbitrary nasty bits # of the EXIF standard def process_file(file, debug=0): # determine whether it's a JPEG or TIFF data=file.read(12) if data[0:4] in ['II*\x00', 'MM\x00*']: # it's a TIFF file file.seek(0) endian=file.read(1) file.read(1) offset=0 elif data[0:2] == '\xFF\xD8': # it's a JPEG file # skip JFIF style header(s) while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM'): length=ord(data[4])*256+ord(data[5]) file.read(length-8) # fake an EXIF beginning of file data='\xFF\x00'+file.read(10) if data[2] == '\xFF' and data[6:10] == 'Exif': # detected EXIF header offset=file.tell() endian=file.read(1) else: # no EXIF information return {} else: # file format not recognized return {} # deal with the EXIF info we found if debug: print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format' hdr=EXIF_header(file, endian, offset, debug) ifd_list=hdr.list_IFDs() ctr=0 for i in ifd_list: if ctr == 0: IFD_name='Image' elif ctr == 1: IFD_name='Thumbnail' thumb_ifd=i else: IFD_name='IFD %d' % ctr if debug: print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i) hdr.dump_IFD(i, IFD_name) # EXIF IFD exif_off=hdr.tags.get(IFD_name+' ExifOffset') if exif_off: if debug: print ' EXIF SubIFD at offset %d:' % exif_off.values[0] hdr.dump_IFD(exif_off.values[0], 'EXIF') # Interoperability IFD contained in EXIF IFD intr_off=hdr.tags.get('EXIF SubIFD InteroperabilityOffset') if intr_off: if debug: print ' EXIF Interoperability SubSubIFD at offset %d:' \ % intr_off.values[0] hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability', dict=INTR_TAGS) # GPS IFD gps_off=hdr.tags.get(IFD_name+' GPSInfo') if gps_off: if debug: print ' GPS SubIFD at offset %d:' % gps_off.values[0] hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS) ctr+=1 # extract uncompressed TIFF thumbnail thumb=hdr.tags.get('Thumbnail Compression') if thumb and thumb.printable == 'Uncompressed TIFF': hdr.extract_TIFF_thumbnail(thumb_ifd) # JPEG thumbnail (thankfully the JPEG data is stored as a unit) thumb_off=hdr.tags.get('Thumbnail JPEGInterchangeFormat') if thumb_off: file.seek(offset+thumb_off.values[0]) size=hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0] hdr.tags['JPEGThumbnail']=file.read(size) # deal with MakerNote contained in EXIF IFD if hdr.tags.has_key('EXIF MakerNote'): hdr.decode_maker_note() # Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote # since it's not allowed in a uncompressed TIFF IFD if not hdr.tags.has_key('JPEGThumbnail'): thumb_off=hdr.tags.get('MakerNote JPEGThumbnail') if thumb_off: file.seek(offset+thumb_off.values[0]) hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length) return hdr.tags
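For reference, the module's single public entry point is process_file(), which returns a dict mapping tag names to IFD_Tag objects (or raw strings for the thumbnail entries). A minimal usage sketch follows; the file name is a hypothetical example, and which tags appear depends entirely on what the camera wrote.

# Minimal usage sketch for the process_file() entry point above.
# 'photo.jpg' is a hypothetical example path.
if __name__ == '__main__':
    f = open('photo.jpg', 'rb')
    tags = process_file(f)            # an empty dict means no EXIF data was found
    for name in sorted(tags.keys()):
        # the thumbnail entries hold raw binary data, so skip them when printing
        if name not in ('JPEGThumbnail', 'TIFFThumbnail'):
            print '%s: %s' % (name, tags[name])
    f.close()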
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (C) 2005 manatlan manatlan[at]gmail(dot)com
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
from common.template import Template
from common.medialib import getFiles
import urllib
import StringIO,os,sys
from exif import process_file
from common import u64dec,u64enc,pageNavigator

try:
    import Image,ImageFile,ImageDraw,ImageFont #pil
except:
    print "The 'photos' plugin requires PIL (python-image) (http://www.pythonware.com/products/pil/)"
    sys.exit(-1)

__title__=u"Photos"
__order__=2000
__author__ ="manatlan"
__about__ ="lets you browse your photos (uses the *exif-thumbnail*)"

DIR=os.path.abspath(u"medias/photos")

class main:

    def index(self,dir=None,page=1):
        global DIR
        if dir:
            dir=u64dec(dir)
        else:
            dir=DIR

        cmd={}
        liste = []
        ndirs,nfiles = getFiles(dir,"jpg")
        if dir!=DIR:
            url = u64enc(os.path.dirname(dir))
            liste.append( ("..",None,url))
            cmd["BACK"]="index?dir=%s" %url
        else:
            cmd["BACK"]="/index"

        for i in ndirs:
            name = os.path.basename(i)
            liste.append( (name,None,u64enc(i)) )

        for i in nfiles:
            name = os.path.basename(i)
            liste.append( (name,"f",u64enc(i)) )

        cmd["PREV"],cmd["NEXT"],navig,items = pageNavigator(liste,12,"index?page=%d&dir="+u64enc(dir),int(page))

        if len(nfiles)>0:
            cmd["PLAY"]="show?file="+u64enc(nfiles[0])+"&play=1"

        rep=dir[len(DIR):]
        return Template(cmd)

    def show(self,file,play=""):
        fichier = file # keep the u64-encoded form !!!
        file=u64dec(file)
        ndirs,nfiles = getFiles(os.path.dirname(file),"jpg")
        idx=nfiles.index(file)
        dir=os.path.dirname(file)

        nextFile=nfiles[ (idx+1)%len(nfiles) ]
        prevFile=nfiles[ (idx-1)%len(nfiles) ]

        cmd = {"BACK":u"index?dir=%s&page=%d" %(u64enc(dir),1+((len(ndirs)+1+idx)/12)),
               "NEXT":u"show?file="+u64enc(nextFile),
               "PREV":u"show?file="+u64enc(prevFile),
               "max_images_in_cache":0
              }

        if play:
            cmd["PAUSE"] =u"show?file="+u64enc(file)
            cmd["refresh"]=u"3;url=show?file="+u64enc(nextFile)+"&play=1"
            cmd["stop"]=cmd["BACK"]
        else:
            cmd["PLAY"] =u"show?file="+u64enc(nextFile)+"&play=1"

        return Template(cmd)

    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
    def mini(self, file):
    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
        file=u64dec(file)

        # fetch the "internal EXIF thumbnail" of the jpeg
        fid = open(file, 'rb')
        try:
            exif = process_file(fid)
            tb=exif["JPEGThumbnail"]
            fid.close()
            p = ImageFile.Parser()
            p.feed(tb)
            im = p.close()
        except:
            im = Image.open("image.png")

        if im:
            w,h = im.size
            w,h = fit(float(w),float(h),130,130,1)
            im = im.resize((w,h))
            f = StringIO.StringIO()
            im.save(f, "PNG")
            yield "image/png"
            f.seek(0)
            yield f.read()
        else:
            yield None

    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
    def image(self, file):
    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
        file=u64dec(file)
        im = Image.open(file)
        w,h = im.size
        w,h = fit(float(w),float(h),672.0,528.0,1)
        im = im.resize((w,h))
        f = StringIO.StringIO()
        im.save(f, "GIF")
        yield "image/gif"
        f.seek(0)
        while True:
            buf=f.read(2048)
            if buf=="":
                break
            yield buf
        f.close()

def fit(orig_width, orig_height, dest_width, dest_height,zoom):
    if orig_width == 0 or orig_height == 0:
        return 0, 0
    scale = min(dest_width/orig_width, dest_height/orig_height)
    if scale > 1:
        scale = 1
    scale*=zoom
    fit_width = scale * orig_width
    fit_height = scale * orig_height
    return int(fit_width), int(fit_height)
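For reference, fit() preserves the aspect ratio by taking the smaller of the two axis ratios and never upscales (scale is clamped to 1). A quick sanity check, with illustrative numbers only:

# Quick check of the fit() helper above, illustrative numbers only:
# a 1600x1200 photo in the 672x528 viewport used by image() gets
# scale = min(672/1600., 528/1200.) = 0.42, hence 672x504.
if __name__ == "__main__":
    print fit(1600.0, 1200.0, 672.0, 528.0, 1)   # -> (672, 504)
    print fit(100.0, 100.0, 130.0, 130.0, 1)     # -> (100, 100): never upscaled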
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os,sys
from dict4ini import DictIni

def getHomeFolder():
    """ return the home folder of the user (platform independent)
        return an existing folder or None
    """
    if "APPDATA" in os.environ:
        # windows NT, 2k, XP, etc. fallback
        home = os.environ['APPDATA']
        if os.path.isdir(home):
            return home
    else:
        # all user-based OSes
        home = os.path.expanduser("~")
        if home!="~" and os.path.isdir(home):
            return home
        else:
            # freedesktop *nix ?
            if "XDG_CONFIG_HOME" in os.environ:
                home = os.environ['XDG_CONFIG_HOME']
                if os.path.isdir(home):
                    return home
            else:
                # *nix fallback
                if "HOME" in os.environ:
                    home = os.environ['HOME']
                    if os.path.isdir(home):
                        return home
    return None

class FeedConf(DictIni):
    homeFolder="" # set at instantiation

    def __init__(self):
        hd = getHomeFolder()
        FeedConf.homeFolder=os.path.join(hd,".feedbox")
        if not os.path.isdir(FeedConf.homeFolder):
            os.mkdir(FeedConf.homeFolder)
        homeConf = os.path.join(FeedConf.homeFolder,"feedbox.ini")
        DictIni.__init__(self,homeConf)

    def checkConf(self):
        """ check that vlc is set and that the file really exists,
            otherwise ask the user to locate it
            returns True if vlc is ok
            returns False if it is not
        """
        vlc = self.config.vlc
        if not( vlc and os.path.isfile(vlc) ):
            import easygui
            easygui.msgbox("""
This is the first time you run feedBox.
Please select your VLC ...
""","feedbox")
            vlc = easygui.fileopenbox()
            del(easygui) # free the memory
            if vlc:
                self.config.vlc = vlc.decode( sys.getfilesystemencoding() )
                return True
            else:
                return False
        else:
            return True

if __name__ == "__main__":
    fc=FeedConf()
    #fc["conf"].kif=12
    fc.checkConf()
    c=fc["jojo"]
    print c.has_key("kk")
    #c.kif=12
    fc.save()
    print fc.homeFolder
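Beyond the self-test above, calling code is expected to drive FeedConf roughly as follows. This is a sketch only; it relies on dict4ini's attribute-style section access, which is the same style checkConf() itself uses for self.config.vlc.

# Sketch of typical FeedConf use (illustrative, not part of the module):
fc = FeedConf()
if fc.checkConf():                  # prompts for VLC on the first run
    print "vlc found at:", fc.config.vlc
    fc.save()                       # persisted to ~/.feedbox/feedbox.ini
else:
    print "no vlc configured"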
Python
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" v2.1.0 http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance into a tree representation. It provides methods and Pythonic idioms that make it easy to search and modify the tree. A well-formed XML/HTML document will yield a well-formed data structure. An ill-formed XML/HTML document will yield a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. The BeautifulSoup class has heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup has no external dependencies. It works with Python 2.2 and up. Beautiful Soup defines classes for four different parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. * ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML that trips up BeautifulSoup. * BeautifulSOAP, for making it easier to parse XML documents that use lots of subelements containing a single string, where you'd prefer they put that string into an attribute (such as SOAP messages). You can subclass BeautifulStoneSoup or BeautifulSoup to create a parsing strategy specific to an XML schema or a particular bizarre HTML document. Typically your subclass would just override SELF_CLOSING_TAGS and/or NESTABLE_TAGS. """ from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "2.1.0" __copyright__ = "Copyright (c) 2004-2005 Leonard Richardson" __license__ = "PSF" from sgmllib import SGMLParser, SGMLParseError import types import re import sgmllib #This code makes Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') class NullType(object): """Similar to NoneType with a corresponding singleton instance 'Null' that, unlike None, accepts any message and returns itself. Examples: >>> Null("send", "a", "message")("and one more", ... "and what you get still") is Null True """ def __new__(cls): return Null def __call__(self, *args, **kwargs): return Null ## def __getstate__(self, *args): return Null def __getattr__(self, attr): return Null def __getitem__(self, item): return Null def __setattr__(self, attr, value): pass def __setitem__(self, item, value): pass def __len__(self): return 0 # FIXME: is this a python bug? otherwise ``for x in Null: pass`` # never terminates... 
def __iter__(self): return iter([]) def __contains__(self, item): return False def __repr__(self): return "Null" Null = object.__new__(NullType) class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=Null, previous=Null): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = Null self.previousSibling = Null self.nextSibling = Null if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def findNext(self, name=None, attrs={}, text=None): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._first(self.fetchNext, name, attrs, text) firstNext = findNext def fetchNext(self, name=None, attrs={}, text=None, limit=None): """Returns all items that match the given criteria and appear before after Tag in the document.""" return self._fetch(name, attrs, text, limit, self.nextGenerator) def findNextSibling(self, name=None, attrs={}, text=None): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._first(self.fetchNextSiblings, name, attrs, text) firstNextSibling = findNextSibling def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator) def findPrevious(self, name=None, attrs={}, text=None): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._first(self.fetchPrevious, name, attrs, text) def fetchPrevious(self, name=None, attrs={}, text=None, limit=None): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._fetch(name, attrs, text, limit, self.previousGenerator) firstPrevious = findPrevious def findPreviousSibling(self, name=None, attrs={}, text=None): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._first(self.fetchPreviousSiblings, name, attrs, text) firstPreviousSibling = findPreviousSibling def fetchPreviousSiblings(self, name=None, attrs={}, text=None, limit=None): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._fetch(name, attrs, text, limit, self.previousSiblingGenerator) def findParent(self, name=None, attrs={}): """Returns the closest parent of this Tag that matches the given criteria.""" r = Null l = self.fetchParents(name, attrs, 1) if l: r = l[0] return r firstParent = findParent def fetchParents(self, name=None, attrs={}, limit=None): """Returns the parents of this Tag that match the given criteria.""" return self._fetch(name, attrs, None, limit, self.parentGenerator) #These methods do the real heavy lifting. def _first(self, method, name, attrs, text): r = Null l = method(name, attrs, text, 1) if l: r = l[0] return r def _fetch(self, name, attrs, text, limit, generator): "Iterates over a generator looking for things that match." 
if not hasattr(attrs, 'items'): attrs = {'class' : attrs} results = [] g = generator() while True: try: i = g.next() except StopIteration: break found = None if isinstance(i, Tag): if not text: if not name or self._matches(i, name): match = True for attr, matchAgainst in attrs.items(): check = i.get(attr) if not self._matches(check, matchAgainst): match = False break if match: found = i elif text: if self._matches(i, text): found = i if found: results.append(found) if limit and len(results) >= limit: break return results #Generators that can be used to navigate starting from both #NavigableTexts and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i def _matches(self, chunk, howToMatch): #print 'looking for %s in %s' % (howToMatch, chunk) # # If given a list of items, return true if the list contains a # text element that matches. if isList(chunk) and not isinstance(chunk, Tag): for tag in chunk: if isinstance(tag, NavigableText) and self._matches(tag, howToMatch): return True return False if callable(howToMatch): return howToMatch(chunk) if isinstance(chunk, Tag): #Custom match methods take the tag as an argument, but all other #ways of matching match the tag name as a string chunk = chunk.name #Now we know that chunk is a string if not type(chunk) in types.StringTypes: chunk = str(chunk) if hasattr(howToMatch, 'match'): # It's a regexp object. return howToMatch.search(chunk) if isList(howToMatch): return chunk in howToMatch if hasattr(howToMatch, 'items'): return howToMatch.has_key(chunk) #It's just a string return str(howToMatch) == chunk class NavigableText(PageElement): def __getattr__(self, attr): "For backwards compatibility, text.string gives you text" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) class NavigableString(str, NavigableText): pass class NavigableUnicodeString(unicode, NavigableText): pass class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def __init__(self, name, attrs=None, parent=Null, previous=Null): "Basic constructor." self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." 
return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its fetch() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.fetch, args, kwargs) def __getattr__(self, tag): if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.first(tag[:-3]) elif tag.find('__') != 0: return self.first(tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self): """Renders this tag as a string.""" return str(self) def __unicode__(self): return self.__str__(1) def __str__(self, needUnicode=None, showStructureIndent=None): """Returns a string or Unicode representation of this tag and its contents. 
NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" attrs = [] if self.attrs: for key, val in self.attrs: attrs.append('%s="%s"' % (key, val)) close = '' closeTag = '' if self.isSelfClosing(): close = ' /' else: closeTag = '</%s>' % self.name indentIncrement = None if showStructureIndent != None: indentIncrement = showStructureIndent if not self.hidden: indentIncrement += 1 contents = self.renderContents(indentIncrement, needUnicode=needUnicode) if showStructureIndent: space = '\n%s' % (' ' * showStructureIndent) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if showStructureIndent: s.append(space) s.append('<%s%s%s>' % (self.name, attributeString, close)) s.append(contents) if closeTag and showStructureIndent != None: s.append(space) s.append(closeTag) s = ''.join(s) isUnicode = type(s) == types.UnicodeType if needUnicode and not isUnicode: s = unicode(s) elif isUnicode and needUnicode==False: s = str(s) return s def prettify(self, needUnicode=None): return self.__str__(needUnicode, showStructureIndent=True) def renderContents(self, showStructureIndent=None, needUnicode=None): """Renders the contents of this tag as a (possibly Unicode) string.""" s=[] for c in self: text = None if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType: text = unicode(c) elif isinstance(c, Tag): s.append(c.__str__(needUnicode, showStructureIndent)) elif needUnicode: text = unicode(c) else: text = str(c) if text: if showStructureIndent != None: if text[-1] == '\n': text = text[:-1] s.append(text) return ''.join(s) #Soup methods def firstText(self, text, recursive=True): """Convenience method to retrieve the first piece of text matching the given criteria. 'text' can be a string, a regular expression object, a callable that takes a string and returns whether or not the string 'matches', etc.""" return self.first(recursive=recursive, text=text) def fetchText(self, text, recursive=True, limit=None): """Convenience method to retrieve all pieces of text matching the given criteria. 'text' can be a string, a regular expression object, a callable that takes a string and returns whether or not the string 'matches', etc.""" return self.fetch(recursive=recursive, text=text, limit=limit) def first(self, name=None, attrs={}, recursive=True, text=None): """Return only the first child of this Tag matching the given criteria.""" r = Null l = self.fetch(name, attrs, recursive, text, 1) if l: r = l[0] return r findChild = first def fetch(self, name=None, attrs={}, recursive=True, text=None, limit=None): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._fetch(name, attrs, text, limit, generator) fetchChildren = fetch #Utility methods def isSelfClosing(self): """Returns true iff this is a self-closing tag as defined in the HTML standard. 
TODO: This is specific to BeautifulSoup and its subclasses, but it's used by __str__""" return self.name in BeautifulSoup.SELF_CLOSING_TAGS def append(self, tag): """Appends the given tag to the contents of this tag.""" self.contents.append(tag) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return hasattr(l, '__iter__') \ or (type(l) in (types.ListType, types.TupleType)) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS and NESTABLE_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif isList(portion): #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and fetch code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} QUOTE_TAGS = {} #As a public service we will by default silently replace MS smart quotes #and similar characters with their HTML or ASCII equivalents. MS_CHARS = { '\x80' : 'euro', '\x81' : ' ', '\x82' : 'sbquo', '\x83' : 'fnof', '\x84' : 'bdquo', '\x85' : 'hellip', '\x86' : 'dagger', '\x87' : 'Dagger', '\x88' : 'caret', '\x89' : '%', '\x8A' : 'Scaron', '\x8B' : 'lt;', '\x8C' : 'OElig', '\x8D' : '?', '\x8E' : 'Z', '\x8F' : '?', '\x90' : '?', '\x91' : 'lsquo', '\x92' : 'rsquo', '\x93' : 'ldquo', '\x94' : 'rdquo', '\x95' : 'bull', '\x96' : 'ndash', '\x97' : 'mdash', '\x98' : 'tilde', '\x99' : 'trade', '\x9a' : 'scaron', '\x9b' : 'gt', '\x9c' : 'oelig', '\x9d' : '?', '\x9e' : 'z', '\x9f' : 'Yuml',} PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda(x):x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda(x):'<!' + x.group(1) + '>'), (re.compile("([\x80-\x9f])", re.M), lambda(x): '&' + BeautifulStoneSoup.MS_CHARS.get(x.group(1)) + ';') ] ROOT_TAG_NAME = '[document]' def __init__(self, text=None, avoidParserProblems=True, initialTextIsEverything=True): """Initialize this as the 'root tag' and feed in any text to the parser. 
NOTE about avoidParserProblems: sgmllib will process most bad HTML, and BeautifulSoup has tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup sanitizes its input to avoid the vast majority of these problems. The problems are relatively rare, even in bad HTML, so feel free to pass in False to avoidParserProblems if they don't apply to you, and you'll get better performance. The only reason I have this turned on by default is so I don't get so many tech support questions. The two most common instances of invalid HTML that will choke sgmllib are fixed by the default parser massage techniques: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" Tag.__init__(self, self.ROOT_TAG_NAME) if avoidParserProblems \ and not isList(avoidParserProblems): avoidParserProblems = self.PARSER_MASSAGE self.avoidParserProblems = avoidParserProblems SGMLParser.__init__(self) self.quoteStack = [] self.hidden = 1 self.reset() if hasattr(text, 'read'): #It's a file-type object. text = text.read() if text: self.feed(text) if initialTextIsEverything: self.done() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ or methodName.find('do_') == 0: return SGMLParser.__getattr__(self, methodName) elif methodName.find('__') != 0: return Tag.__getattr__(self, methodName) else: raise AttributeError def feed(self, text): if self.avoidParserProblems: for fix, m in self.avoidParserProblems: text = fix.sub(m, text) SGMLParser.feed(self, text) self.endData() def done(self): """Called when you're done parsing, so that the unclosed tags can be correctly processed.""" while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() # Tags with just one string-owning child get the child as a # 'string' property, so that soup.tag.string is shorthand for # soup.tag.contents[0] if len(self.currentTag.contents) == 1 and \ isinstance(self.currentTag.contents[0], NavigableText): self.currentTag.string = self.currentTag.contents[0] #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self): currentData = ''.join(self.currentData) if currentData: if not currentData.strip(): if '\n' in currentData: currentData = '\n' else: currentData = ' ' c = NavigableString if type(currentData) == types.UnicodeType: c = NavigableUnicodeString o = c(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) self.currentData = [] def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. 
If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" if name == self.ROOT_TAG_NAME: return #print "Pop to tag", name numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar<p> should pop to 'p', not 'b'. <p>Foo<table>Bar<p> should pop to 'table', not 'p'. <p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'. <p>Foo<b>Bar<p> should pop to 'p', not 'b'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers != None and p.name in nestingResetTriggers) \ or (nestingResetTriggers == None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not name in self.SELF_CLOSING_TAGS and not selfClosing: self._smartPop(name) tag = Tag(name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or name in self.SELF_CLOSING_TAGS: self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) def unknown_endtag(self, name): if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() def handle_data(self, data): self.currentData.append(data) def handle_pi(self, text): "Propagate processing instructions right through." self.handle_data("<?%s>" % text) def handle_comment(self, text): "Propagate comments right through." self.handle_data("<!--%s-->" % text) def handle_charref(self, ref): "Propagate char refs right through." self.handle_data('&#%s;' % ref) def handle_entityref(self, ref): "Propagate entity refs right through." self.handle_data('&%s;' % ref) def handle_decl(self, data): "Propagate DOCTYPEs and the like right through." 
self.handle_data('<!%s>' % data) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as regular data.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) self.handle_data(self.rawdata[i+9:k]) j = k+3 else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup before writing your own subclass.""" SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame']) QUOTE_TAGS = {'script': None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center'] #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : ['tr', 'td'], #Not sure about this one. 'tr' : ['table'], 'td' : ['tr'], 'th' : ['tr'], } NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] #If one of these tags is encountered, all tags up to the next tag of #this type are popped. 
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close (eg.) a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be. If this doesn't do what you need, try subclassing this class or BeautifulSoup, and providing your own list of NESTABLE_TAGS.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableText) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! 
For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisitude, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin.read()) print soup.prettify()
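A short example of the 2.x API defined above, using first() and fetch(); note that find()/findAll() are names from later releases and do not exist in this version. The markup is illustrative only.

# Short example of the 2.1.0 API above: first() and fetch().
doc = '<html><body><a href="http://example.com/">one</a><p>two<p>three</body></html>'
soup = BeautifulSoup(doc)
print soup.first('a')['href']            # -> http://example.com/
for p in soup.fetch('p'):                # the unclosed <p> tags get repaired
    print p.renderContents()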
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
## Simplification wrapper around minidom
## ensure "unicode" manipulation
##
## manatlan
################################################################################
## XmlDoc wrap DocumentObject
## XmlNode wrap NodeObject
################################################################################
from xml.dom.minidom import parse, parseString
import urllib

def u(s):
    """ return any s as "unicode string" """
    if type(s) == unicode:
        return s
    elif type(s) == str:
        try:
            v = s.decode("utf_8")
        except:
            try:
                v = s.decode("mbcs")
            except:
                try:
                    v = s.decode("cp1252")
                except:
                    raise UnicodeError("u() unicodification error : " + str([s]))
        return v
    else:
        return unicode(s)

################################################################################
class XmlDoc(object):
################################################################################
    def __init__(self, file="", xml="", url="", dom=None):
        if file:
            self.__dom = parse(file)
        elif xml:
            self.__dom = parseString(xml)
        elif url:
            f = urllib.urlopen(url)
            data = f.read()
            f.close()
            self.__dom = parseString(data)
        elif dom:
            self.__dom = dom
        else:
            raise ValueError("bad XmlDoc call()")

    def __getDE(self):
        """ return the documentElement (root) node of the doc """
        return XmlNode(self.__dom.documentElement)
    documentElement = property(__getDE)

    def getAll(self, n):
        """ return a list of the XmlNodes named "n" in the dom """
        return [XmlNode(i) for i in self.__dom.getElementsByTagName(u(n))]

    def createNode(self, n, attrs={}):
        """ create an XmlNode named "n" with its attributes (dict) "attrs" if given """
        node = self.__dom.createElement(u(n))
        if attrs:
            for i in attrs:
                nom = u(i)
                val = u(attrs[i])
                node.setAttributeNode(self.__dom.createAttribute(nom))
                node.setAttribute(nom, val)
        return XmlNode(node)

    def dump(self):
        """ return the xml content of the dom """
        self.crunch()
        xml = self.__dom.toprettyxml()
        xml = xml.replace(u"""<?xml version="1.0" ?>""",
                          u"""<?xml version="1.0" encoding="utf-8" ?>""")
        return xml  # as UNICODE !!!

    def saveAs(self, name):
        """ save the dom to the file "name" """
        fid = open(name, "w")
        if fid:
            #~ self.__dom.writexml(fid,encoding="utf-8",addindent="\t",newl="\r") # BUGGED !!!
            fid.write(self.dump().encode("utf_8"))
            fid.close()
            return True
        return False

    def crunch(self, node=None):
        """ remove the text nodes that are empty """
        if not node:
            node = self.__dom.documentElement
        aeff = []
        for i in node.childNodes:
            if i.nodeType == i.TEXT_NODE:
                content = i.data.strip(" \r\n\t")
                if not content:
                    aeff.append(i)
                else:
                    i.data = content
            elif i.nodeType == i.ELEMENT_NODE:
                self.crunch(i)
        for i in aeff:
            i.parentNode.removeChild(i)

################################################################################
class XmlNode(object):  # returns utf8 ;-( ... like libxml2
################################################################################
    def __getOD(self):
        """ return the owner XmlDoc """
        return XmlDoc(dom=self.__node.ownerDocument)
    ownerDocument = property(__getOD)

    def __getCN(self):
        """ return the list of its childnodes """
        return [XmlNode(i) for i in self.__node.childNodes]
    childNodes = property(__getCN)

    def __getN(self):
        """ return the real NODE ;-( """
        return self.__node
    node = property(__getN)

    def __init__(self, node):
        self.__node = node

    def getAttr(self, nom):
        """ return the content of the attribute "nom" of the node """
        return self.__node.getAttribute(u(nom))
        #~ return self.__node.getAttribute(u(nom)).encode("utf_8")

    def setAttr(self, nom, val):
        """ set the content of the attribute "nom" of the node """
        return self.__node.setAttribute(u(nom), u(val))

    def removeChilds(self):
        """ remove the child nodes """
        # iterate over a copy: removing while iterating over the live
        # childNodes list skips every other node
        for node in list(self.__node.childNodes):
            self.__node.removeChild(node)

    def setText(self, val):
        """ update the text content of the node """
        self.removeChilds()
        t = self.__node.ownerDocument.createTextNode(u(val))  # val as unicode !!!
        self.__node.appendChild(t)

    def getText(self):
        """ return the text content of the node """
        rc = u""
        for node in self.__node.childNodes:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc.strip()
        #~ return rc.strip().encode("utf_8")

    def appendChild(self, n):
        """ append the XmlNode "n" to the node """
        return XmlNode(self.__node.appendChild(n.__node))

    def unlink(self):
        """ remove the node """
        self.__node.parentNode.removeChild(self.__node)
        del(self)

    def getAll(self, n):
        """ return a list of the XmlNodes named "n" under the node """
        return [XmlNode(i) for i in self.__node.getElementsByTagName(u(n))]


if __name__ == "__main__":
    x = XmlDoc(xml="<root><a/><a/></root>")
    n = x.getAll("a")
    n[0].setText(u"fdés")
    x.saveAs("jo.xml")

    x = XmlDoc(file="jo.xml")
    n = x.getAll("a")
    q = x.createNode("kàkà", {"jimà": 12, u"kà": 12.23})
    q.setText(u"àùùù")
    n[0].appendChild(q)
    n[1].setText(u"fdés")
    n[1].setAttr("joé", "jimà")
    x.saveAs("jo.xml")
    print x.dump().encode("utf_8")
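
# ---------------------------------------------------------------------------
# Extra usage sketch (added for illustration; not part of the original file).
# It exercises getAttr/setAttr and the crunch()-based dump(); call
# _demoAttrs() yourself, nothing runs on import.
def _demoAttrs():
    doc = XmlDoc(xml="<root><item name='a'>  hello  </item></root>")
    node = doc.getAll("item")[0]
    print node.getAttr("name").encode("utf_8")   # -> a
    node.setAttr("name", u"b")                   # values go through u()
    print node.getText().encode("utf_8")         # text is stripped -> hello
    print doc.dump().encode("utf_8")             # dump() crunches empty text nodes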
Python
""" EasyGui provides an easy-to-use interface for simple GUI interaction with a user. It does not require the programmer to know anything about tkinter, frames, widgets, callbacks or lambda. All GUI interactions are invoked by simple function calls that return results. Documentation is in an accompanying file, easygui.txt. WARNING about using EasyGui with IDLE ====================================== You may encounter problems using IDLE to run programs that use EasyGui. Try it and find out. EasyGui is a collection of Tkinter routines that run their own event loops. IDLE is also a Tkinter application, with its own event loop. The two may conflict, with the unpredictable results. If you find that you have problems, try running your program outside of IDLE. Note that EasyGui requires Tk release 8.0 or greater. """ EasyGuiRevisionInfo = " version 0.72 2004-06-20" # see easygui.txt for revision history information __all__ = ['ynbox' , 'ccbox' , 'boolbox' , 'indexbox' , 'msgbox' , 'buttonbox' , 'integerbox' , 'multenterbox' , 'enterbox' , 'choicebox' , 'codebox' , 'textbox' , 'diropenbox' , 'fileopenbox' , 'filesavebox' , 'passwordbox' , 'multpasswordbox' , 'multchoicebox' ] import sys from Tkinter import * if TkVersion < 8.0 : print "\n" * 3 print "*"*75 print "Running Tk version:", TkVersion print "You must be using Tk version 8.0 or greater to use EasyGui." print "Terminating." print "*"*75 print "\n" * 3 sys.exit(0) rootWindowPosition = "+300+200" import string DEFAULT_FONT_FAMILY = ("MS", "Sans", "Serif") MONOSPACE_FONT_FAMILY = ("Courier") DEFAULT_FONT_SIZE = 10 BIG_FONT_SIZE = 12 SMALL_FONT_SIZE = 9 CODEBOX_FONT_SIZE = 9 TEXTBOX_FONT_SIZE = DEFAULT_FONT_SIZE import tkFileDialog #------------------------------------------------------------------- # various boxes built on top of the basic buttonbox #------------------------------------------------------------------- def ynbox(message="Shall I continue?", title=""): """Display a message box with choices of Yes and No. The default is "Yes". Returns returns 1 if "Yes" is chosen, or if the dialog is cancelled (which is interpreted as choosing the default). Otherwise returns 0. If invoked without a message parameter, displays a generic request for a confirmation that the user wishes to continue. So it can be used this way: if ynbox(): pass # continue else: sys.exit(0) # exit the program """ choices = ["Yes", "No"] if title == None: title = "" return boolbox(message, title, choices) def ccbox(message="Shall I continue?", title=""): """Display a message box with choices of Continue and Cancel. The default is "Continue". Returns returns 1 if "Continue" is chosen, or if the dialog is cancelled (which is interpreted as choosing the default). Otherwise returns 0. If invoked without a message parameter, displays a generic request for a confirmation that the user wishes to continue. So it can be used this way: if ccbox(): pass # continue else: sys.exit(0) # exit the program """ choices = ["Continue", "Cancel"] if title == None: title = "" return boolbox(message, title, choices) def boolbox(message="Shall I continue?", title="", choices=["Yes","No"]): """Display a boolean message box. The default is the first choice. Returns returns 1 if the first choice is chosen, or if the dialog is cancelled (which is interpreted as choosing the default). Otherwise returns 0. 
""" if title == None: if message == "Shall I continue?": title = "Confirmation" else: title = "" reply = buttonbox(message, title, choices) if reply == choices[0]: return 1 else: return 0 def indexbox(message="Shall I continue?", title="", choices=["Yes","No"]): """Display a buttonbox with the specified choices. Return the index of the choice selected. """ reply = buttonbox(message, title, choices) index = -1 for choice in choices: index = index + 1 if reply == choice: return index #------------------------------------------------------------------- # msgbox #------------------------------------------------------------------- def msgbox(message="Shall I continue?", title="", buttonMessage="OK"): """Display a messagebox """ choices = [buttonMessage] reply = buttonbox(message, title, choices) return reply #------------------------------------------------------------------- # buttonbox #------------------------------------------------------------------- def buttonbox(message="Shall I continue?", title="", choices = ["Button1", "Button2", "Button3"]): """Display a message, a title, and a set of buttons. The buttons are defined by the members of the choices list. Return the text of the button that the user selected. """ global root, __replyButtonText, __widgetTexts, buttonsFrame if title == None: title = "" if message == None: message = "This is an example of a buttonbox." # Initialize __replyButtonText to the first choice. # This is what will be used if the window is closed by the close button. __replyButtonText = choices[0] root = Tk() root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose ) root.title(title) root.iconname('Dialog') root.geometry(rootWindowPosition) root.minsize(400, 100) # ------------- define the frames -------------------------------------------- messageFrame = Frame(root) messageFrame.pack(side=TOP, fill=BOTH) buttonsFrame = Frame(root) buttonsFrame.pack(side=BOTTOM, fill=BOTH) # -------------------- place the widgets in the frames ----------------------- messageWidget = Message(messageFrame, text=message, width=400) messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE)) messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m') __put_buttons_in_buttonframe(choices) # -------------- the action begins ----------- # put the focus on the first button __firstWidget.focus_force() root.mainloop() root.destroy() return __replyButtonText #------------------------------------------------------------------- # integerbox #------------------------------------------------------------------- def integerbox(message="Enter something.", title="" , argDefault=None, argLowerBound=0, argUpperBound=99): """Show a box in which a user can enter an integer. In addition to arguments for message and title, this function accepts integer arguments for default_value, lowerbound, and upperbound. The default_value argument may be None. When the user enters some text, the text is checked to verify that it can be converted to an integer between the lowerbound and upperbound. If it can be, the integer (not the text) is returned. If it cannot, then an error message is displayed, and the integerbox is redisplayed. If the user cancels the operation, the default value is returned. 
""" if argDefault == None: argDefault = "" elif argDefault == "": pass elif type(argDefault) != type(1): raise AssertionError("integerbox received a non-integer default value of " + str(argDefault) , "Error") raise "Argument TYPE error in call to EasyGui integerbox" if type(argLowerBound) != type(1): raise AssertionError("integerbox received a non-integer default value of " + str(argLowerBound) , "Error") raise "Argument TYPE error in call to EasyGui integerbox" if type(argUpperBound) != type(1): raise AssertionError("integerbox received a non-integer default value of " + str(argUpperBound) , "Error") raise "Argument TYPE error in call to EasyGui integerbox" if message == "": message = ("Enter an integer between " + str(argLowerBound) + " and " + str(argUpperBound) ) while 1: result = enterbox(message, title, str(argDefault)) if result == None: return argDefault try: myInteger = int(result) except: msgbox ("The value that you entered is not an integer.", "Error") continue if myInteger >= argLowerBound and myInteger <= argUpperBound: return myInteger else: msgbox ("The value that you entered is not between the lower bound of " + str(argLowerBound) + " and the upper bound of " + str(argUpperBound) + "." , "Error") #------------------------------------------------------------------- # multenterbox #------------------------------------------------------------------- def multenterbox(message="Fill in values for the fields." , title="" , argListOfFieldNames = [] , argListOfFieldValues = [] ): """Show screen with multiple data entry fields. The third argument is a list of fieldnames. The the forth argument is a list of field values. If there are fewer values than names, the list of values is padded with empty strings until the number of values is the same as the number of names. If there are more values than names, the list of values is truncated so that there are as many values as names. Returns a list of the values of the fields, or None if the user cancels the operation. Here is some example code, that shows how values returned from multenterbox can be checked for validity before they are accepted. ---------------------------------------------------------------------- msg = "Enter your personal information" title = "Credit Card Application" fieldNames = ["Name","Street Address","City","State","ZipCode"] fieldValues = [] # we start with blanks for the values fieldValues = multenterbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues == None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues) print "Reply was:", fieldValues ---------------------------------------------------------------------- """ return __multfillablebox( message,title,argListOfFieldNames,argListOfFieldValues,None) def multpasswordbox(message="Fill in values for the fields." , title="" , argListOfFieldNames = [] , argListOfFieldValues = [] ): """Same interface as multenterbox. But in multpassword box, the last of the fields is assumed to be a password, and is masked with asterisks. Here is some example code, that shows how values returned from multpasswordbox can be checked for validity before they are accepted. 
    ----------------------------------------------------------------------
    msg = "Enter logon information"
    title = "Demo of multpasswordbox"
    fieldNames = ["Server ID", "User ID", "Password"]
    fieldValues = []  # we start with blanks for the values
    fieldValues = multpasswordbox(msg, title, fieldNames)

    # make sure that none of the fields was left blank
    while 1:
        if fieldValues == None:
            break
        errmsg = ""
        for i in range(len(fieldNames)):
            if fieldValues[i].strip() == "":
                errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
        if errmsg == "":
            break  # no problems found
        fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)

    print "Reply was:", fieldValues
    ----------------------------------------------------------------------
    """
    return __multfillablebox(
        message, title, argListOfFieldNames, argListOfFieldValues, "*")


def __multfillablebox(message="Fill in values for the fields.", title="",
                      argListOfFieldNames=[], argListOfFieldValues=[],
                      argMaskCharacter=None):
    global root, __multenterboxText, __multenterboxDefaultText
    global cancelButton, entryWidget, okButton

    if title == None:
        title = ""
    choices = ["OK", "Cancel"]
    if len(argListOfFieldNames) == 0:
        return None

    if len(argListOfFieldValues) == len(argListOfFieldNames):
        pass
    elif len(argListOfFieldValues) > len(argListOfFieldNames):
        # more values than names: truncate the list of values
        argListOfFieldValues = argListOfFieldValues[0:len(argListOfFieldNames)]
    else:
        # fewer values than names: pad the list of values with empty strings
        while len(argListOfFieldValues) < len(argListOfFieldNames):
            argListOfFieldValues.append("")

    root = Tk()
    root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose)
    root.title(title)
    root.iconname('Dialog')
    root.geometry(rootWindowPosition)
    root.bind("<Escape>", __multenterboxCancel)

    # -------------------- put subframes in the root --------------------
    messageFrame = Frame(root)
    messageFrame.pack(side=TOP, fill=BOTH)

    #-------------------- the message widget ----------------------------
    messageWidget = Message(messageFrame, width="4.5i", text=message)
    messageWidget.configure(font=(DEFAULT_FONT_FAMILY, DEFAULT_FONT_SIZE))
    messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')

    global entryWidgets
    entryWidgets = []

    lastWidgetIndex = len(argListOfFieldNames) - 1
    for widgetIndex in range(len(argListOfFieldNames)):
        argFieldName = argListOfFieldNames[widgetIndex]
        argFieldValue = argListOfFieldValues[widgetIndex]
        entryFrame = Frame(root)
        entryFrame.pack(side=TOP, fill=BOTH)

        # --------- entryWidget ----------------------------------------------
        labelWidget = Label(entryFrame, text=argFieldName)
        labelWidget.pack(side=LEFT)

        entryWidgets.append(Entry(entryFrame, width=40))
        entryWidgets[widgetIndex].configure(font=(DEFAULT_FONT_FAMILY, BIG_FONT_SIZE))
        entryWidgets[widgetIndex].pack(side=RIGHT, padx="3m")
        entryWidgets[widgetIndex].bind("<Return>", __multenterboxGetText)
        entryWidgets[widgetIndex].bind("<Escape>", __multenterboxCancel)

        # for the last entryWidget, if this is a multpasswordbox,
        # show the contents as just asterisks
        if widgetIndex == lastWidgetIndex:
            if argMaskCharacter:
                entryWidgets[widgetIndex].configure(show=argMaskCharacter)

        # put text into the entryWidget
        entryWidgets[widgetIndex].insert(0, argFieldValue)

    # ------------------ ok button -------------------------------
    buttonsFrame = Frame(root)
    buttonsFrame.pack(side=BOTTOM, fill=BOTH)

    okButton = Button(buttonsFrame, takefocus=1, text="OK")
    okButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
    okButton.bind("<Return>", __multenterboxGetText)
    okButton.bind("<Button-1>", __multenterboxGetText)

    # ------------------ cancel button -------------------------------
    cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel")
    cancelButton.pack(expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
    cancelButton.bind("<Return>", __multenterboxCancel)
    cancelButton.bind("<Button-1>", __multenterboxCancel)

    # ------------------- time for action! -----------------
    entryWidgets[0].focus_force()  # put the focus on the entryWidget
    root.mainloop()  # run it!

    # -------- after the run has completed ----------------------------------
    root.destroy()  # button_click didn't destroy root, so we do it now
    return __multenterboxText


def __multenterboxGetText(event):
    global root, __multenterboxText, entryWidget
    __multenterboxText = []
    global entryWidgets
    for entryWidget in entryWidgets:
        __multenterboxText.append(entryWidget.get())
    root.quit()


def __multenterboxCancel(event):
    global root, __multenterboxDefaultText, __multenterboxText
    __multenterboxText = None
    root.quit()

#-------------------------------------------------------------------
# enterbox
#-------------------------------------------------------------------
def enterbox(message="Enter something.", title="", argDefaultText=""):
    """Show a box in which a user can enter some text.
    You may optionally specify some default text, which will appear in
    the enterbox when it is displayed.

    Returns the text that the user entered, or None if he cancels the
    operation.
    """
    return __fillablebox(message, title, argDefaultText, None)


def passwordbox(message="Enter your password.", title="", argDefaultPassword=""):
    """Show a box in which a user can enter a password.
    The text is masked with asterisks, so the password is not displayed.

    Returns the text that the user entered, or None if he cancels the
    operation.
    """
    return __fillablebox(message, title, argDefaultPassword, "*")


def __fillablebox(message, title, argDefaultText, argMaskCharacter):
    """Show a box in which a user can enter some text.
    You may optionally specify some default text, which will appear in
    the enterbox when it is displayed.

    Returns the text that the user entered, or None if he cancels
    the operation.
""" global root, __enterboxText, __enterboxDefaultText, cancelButton, entryWidget, okButton if title == None: title == "" if argDefaultText == None: argDefaultText = "" __enterboxDefaultText = argDefaultText __enterboxText = __enterboxDefaultText choices = ["OK", "Cancel"] root = Tk() root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose ) root.title(title) root.iconname('Dialog') root.geometry(rootWindowPosition) root.bind("<Escape>", __enterboxCancel) # -------------------- put subframes in the root -------------------- messageFrame = Frame(root) messageFrame.pack(side=TOP, fill=BOTH) entryFrame = Frame(root) entryFrame.pack(side=TOP, fill=BOTH) buttonsFrame = Frame(root) buttonsFrame.pack(side=BOTTOM, fill=BOTH) #-------------------- the message widget ---------------------------- messageWidget = Message(messageFrame, width="4.5i", text=message) messageWidget.configure(font=(DEFAULT_FONT_FAMILY,DEFAULT_FONT_SIZE)) messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m') # --------- entryWidget ---------------------------------------------- entryWidget = Entry(entryFrame, width=40) entryWidget.configure(font=(DEFAULT_FONT_FAMILY,BIG_FONT_SIZE)) if argMaskCharacter: entryWidget.configure(show=argMaskCharacter) entryWidget.pack(side=LEFT, padx="3m") entryWidget.bind("<Return>", __enterboxGetText) entryWidget.bind("<Escape>", __enterboxCancel) # put text into the entryWidget entryWidget.insert(0,__enterboxDefaultText) # ------------------ ok button ------------------------------- okButton = Button(buttonsFrame, takefocus=1, text="OK") okButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m') okButton.bind("<Return>" , __enterboxGetText) okButton.bind("<Button-1>", __enterboxGetText) # ------------------ (possible) restore button ------------------------------- if argDefaultText != None: # make a button to restore the default text restoreButton = Button(buttonsFrame, takefocus=1, text="Restore default") restoreButton.pack(expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m') restoreButton.bind("<Return>" , __enterboxRestore) restoreButton.bind("<Button-1>", __enterboxRestore) # ------------------ cancel button ------------------------------- cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel") cancelButton.pack(expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m') cancelButton.bind("<Return>" , __enterboxCancel) cancelButton.bind("<Button-1>", __enterboxCancel) # ------------------- time for action! ----------------- entryWidget.focus_force() # put the focus on the entryWidget root.mainloop() # run it! 
    # -------- after the run has completed ----------------------------------
    root.destroy()  # button_click didn't destroy root, so we do it now
    return __enterboxText


def __enterboxGetText(event):
    global root, __enterboxText, entryWidget
    __enterboxText = entryWidget.get()
    root.quit()


def __enterboxRestore(event):
    global root, __enterboxText, entryWidget
    entryWidget.delete(0, len(entryWidget.get()))
    entryWidget.insert(0, __enterboxDefaultText)


def __enterboxCancel(event):
    global root, __enterboxDefaultText, __enterboxText
    __enterboxText = None
    root.quit()


def denyWindowManagerClose():
    """ don't allow WindowManager close """
    x = Tk()
    x.withdraw()
    x.bell()
    x.destroy()

#-------------------------------------------------------------------
# choicebox and multchoicebox
#-------------------------------------------------------------------

def multchoicebox(message="Pick as many items as you like.", title="",
                  choices=["program logic error - no choices specified"]):
    """Present the user with a list of choices.
    Allow him to select multiple items and return them in a list.
    If the user doesn't choose anything from the list, return the
    empty list.  Return None if he cancelled selection.
    """
    global __choiceboxMultipleSelect
    __choiceboxMultipleSelect = 1
    return __choicebox(message, title, choices)


def choicebox(message="Pick something.", title="",
              choices=["program logic error - no choices specified"]):
    """Present the user with a list of choices.
    Return the choice that he selects.
    Return None if he cancels the selection.
    """
    global __choiceboxMultipleSelect
    __choiceboxMultipleSelect = 0
    return __choicebox(message, title, choices)


def __choicebox(message, title, choices):
    """internal routine to support choicebox() and multchoicebox()
    """
    global root, __choiceboxResults, choiceboxWidget, defaultText
    global choiceboxWidget, choiceboxChoices

    # If choices is a tuple, we make it a list so we can sort it.
    # If choices is already a list, we make a new list, so that when
    # we sort the choices, we don't affect the list object that we
    # were given.
    choices = list(choices)

    # make sure all choices are strings
    for index in range(len(choices)):
        choices[index] = str(choices[index])

    choiceboxButtons = ["OK", "Cancel"]

    lines_to_show = min(len(choices), 20)  # show at most 20 lines

    if title == None:
        title = ""

    # Initialize __choiceboxResults
    # This is the value that will be returned if the user clicks the close icon
    __choiceboxResults = None

    root = Tk()
    root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose)

    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    root_width = int((screen_width * 0.8))
    root_height = int((screen_height * 0.5))
    root_xpos = int((screen_width * 0.1))
    root_ypos = int((screen_height * 0.05))

    root.title(title)
    root.iconname('Dialog')
    rootWindowPosition = "+0+0"
    root.geometry(rootWindowPosition)
    root.expand = NO
    root.minsize(root_width, root_height)
    rootWindowPosition = "+" + str(root_xpos) + "+" + str(root_ypos)
    root.geometry(rootWindowPosition)

    # ---------------- put the frames in the window -----------------------------------------
    message_and_buttonsFrame = Frame(root)
    message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)

    messageFrame = Frame(message_and_buttonsFrame)
    messageFrame.pack(side=LEFT, fill=X, expand=YES)

    buttonsFrame = Frame(message_and_buttonsFrame)
    buttonsFrame.pack(side=RIGHT, expand=NO, pady=0)

    choiceboxFrame = Frame(root)
    choiceboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)

    # -------------------------- put the widgets in the frames ------------------------------

    # ---------- put a message widget in the message frame-------------------
    messageWidget = Message(messageFrame, anchor=NW, text=message,
                            width=int(root_width * 0.9))
    messageWidget.configure(font=(DEFAULT_FONT_FAMILY, DEFAULT_FONT_SIZE))
    messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')

    # -------- put the choiceboxWidget in the choiceboxFrame ---------------------------
    choiceboxWidget = Listbox(choiceboxFrame,
                              height=lines_to_show,
                              borderwidth="1m",
                              relief="flat",
                              bg="white")
    if __choiceboxMultipleSelect:
        choiceboxWidget.configure(selectmode=MULTIPLE)
    choiceboxWidget.configure(font=(DEFAULT_FONT_FAMILY, DEFAULT_FONT_SIZE))

    # add a vertical scrollbar to the frame
    rightScrollbar = Scrollbar(choiceboxFrame, orient=VERTICAL,
                               command=choiceboxWidget.yview)
    choiceboxWidget.configure(yscrollcommand=rightScrollbar.set)

    # add a horizontal scrollbar to the frame
    bottomScrollbar = Scrollbar(choiceboxFrame, orient=HORIZONTAL,
                                command=choiceboxWidget.xview)
    choiceboxWidget.configure(xscrollcommand=bottomScrollbar.set)

    # pack the Listbox and the scrollbars.  Note that although we must define
    # the textbox first, we must pack it last, so that the bottomScrollbar will
    # be located properly.
    bottomScrollbar.pack(side=BOTTOM, fill=X)
    rightScrollbar.pack(side=RIGHT, fill=Y)
    choiceboxWidget.pack(side=LEFT, padx="1m", pady="1m", expand=YES, fill=BOTH)

    #---------------------------------------------------
    # sort the choices
    # eliminate duplicates
    # put the choices into the choiceboxWidget
    #---------------------------------------------------
    # (the choices were already converted to strings above)
    choices.sort(lambda x, y: cmp(x.lower(), y.lower()))  # case-insensitive sort

    lastInserted = None
    choiceboxChoices = []
    for choice in choices:
        if choice == lastInserted:
            pass
        else:
            choiceboxWidget.insert(END, choice)
            choiceboxChoices.append(choice)
            lastInserted = choice

    root.bind('<Any-Key>', KeyboardListener)

    # put the buttons in the buttonsFrame
    if len(choices) > 0:
        okButton = Button(buttonsFrame, takefocus=YES, text="OK", height=1, width=6)
        okButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
        okButton.bind("<Return>", __choiceboxGetChoice)
        okButton.bind("<Button-1>", __choiceboxGetChoice)

        # now bind the keyboard events
        choiceboxWidget.bind("<Return>", __choiceboxGetChoice)
        choiceboxWidget.bind("<Double-Button-1>", __choiceboxGetChoice)
    else:
        # now bind the keyboard events
        choiceboxWidget.bind("<Return>", __choiceboxCancel)
        choiceboxWidget.bind("<Double-Button-1>", __choiceboxCancel)

    cancelButton = Button(buttonsFrame, takefocus=YES, text="Cancel", height=1, width=6)
    cancelButton.pack(expand=NO, side=BOTTOM, padx='2m', pady='1m', ipady="1m", ipadx="2m")
    cancelButton.bind("<Return>", __choiceboxCancel)
    cancelButton.bind("<Button-1>", __choiceboxCancel)

    # add special buttons for multiple select features
    if len(choices) > 0 and __choiceboxMultipleSelect:
        selectionButtonsFrame = Frame(messageFrame)
        selectionButtonsFrame.pack(side=RIGHT, fill=Y, expand=NO)

        selectAllButton = Button(selectionButtonsFrame, text="Select All", height=1, width=6)
        selectAllButton.bind("<Button-1>", __choiceboxSelectAll)
        selectAllButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")

        clearAllButton = Button(selectionButtonsFrame, text="Clear All", height=1, width=6)
        clearAllButton.bind("<Button-1>", __choiceboxClearAll)
        clearAllButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")

    # -------------------- bind some keyboard events ----------------------------
    root.bind("<Escape>", __choiceboxCancel)

    # --------------------- the action begins -----------------------------------
    # put the focus on the choiceboxWidget, and the select highlight on the first item
    choiceboxWidget.select_set(0)
    choiceboxWidget.focus_force()

    # --- run it! -----
    root.mainloop()
    root.destroy()
    return __choiceboxResults


def __choiceboxGetChoice(event):
    global root, __choiceboxResults, choiceboxWidget

    if __choiceboxMultipleSelect:
        __choiceboxResults = [choiceboxWidget.get(index)
                              for index in choiceboxWidget.curselection()]
    else:
        choice_index = choiceboxWidget.curselection()
        __choiceboxResults = choiceboxWidget.get(choice_index)

    # print "Debugging> mouse-event=", event, " event.type=", event.type
    # print "Debugging> choice =", choice_index, __choiceboxResults
    root.quit()


def __choiceboxSelectAll(event):
    global choiceboxWidget, choiceboxChoices
    choiceboxWidget.selection_set(0, len(choiceboxChoices) - 1)


def __choiceboxClearAll(event):
    global choiceboxWidget, choiceboxChoices
    choiceboxWidget.selection_clear(0, len(choiceboxChoices) - 1)


def __choiceboxCancel(event):
    global root, __choiceboxResults
    __choiceboxResults = None
    root.quit()


def KeyboardListener(event):
    global choiceboxChoices, choiceboxWidget
    key = event.keysym
    if len(key) <= 1:
        if key in string.printable:
            # Find the key in the list.
            # before we clear the list, remember the selected member
            try:
                start_n = int(choiceboxWidget.curselection()[0])
            except IndexError:
                start_n = -1

            ## clear the selection.
            choiceboxWidget.selection_clear(0, 'end')

            ## start from previous selection +1
            for n in range(start_n + 1, len(choiceboxChoices)):
                item = choiceboxChoices[n]
                if item[0].lower() == key.lower():
                    choiceboxWidget.selection_set(first=n)
                    choiceboxWidget.see(n)
                    return
            else:
                # has not found it so loop from top
                for n in range(len(choiceboxChoices)):
                    item = choiceboxChoices[n]
                    if item[0].lower() == key.lower():
                        choiceboxWidget.selection_set(first=n)
                        choiceboxWidget.see(n)
                        return

                # nothing matched -- we'll look for the next logical choice
                for n in range(len(choiceboxChoices)):
                    item = choiceboxChoices[n]
                    if item[0].lower() > key.lower():
                        if n > 0:
                            choiceboxWidget.selection_set(first=(n - 1))
                        else:
                            choiceboxWidget.selection_set(first=0)
                        choiceboxWidget.see(n)
                        return

                # still no match (nothing was greater than the key)
                # we set the selection to the last item in the list
                lastIndex = len(choiceboxChoices) - 1
                choiceboxWidget.selection_set(first=lastIndex)
                choiceboxWidget.see(lastIndex)
                return

#-------------------------------------------------------------------
# codebox
#-------------------------------------------------------------------
def codebox(message="", title="", text=""):
    """
    Display some text in a monospaced font, with no line wrapping.
    This function is suitable for displaying code and text that is
    formatted using spaces.

    The text parameter should be a string, or a list or tuple of lines
    to be displayed in the textbox.
    """
    textbox(message, title, text, codebox=1)

#-------------------------------------------------------------------
# textbox
#-------------------------------------------------------------------
def textbox(message="", title="", text="", codebox=0):
    """Display some text in a proportional font with line wrapping at
    word breaks.  This function is suitable for displaying general
    written text.

    The text parameter should be a string, or a list or tuple of lines
    to be displayed in the textbox.
""" if message == None: message = "" if title == None: title = "" global root, __replyButtonText, __widgetTexts, buttonsFrame choices = ["0K"] __replyButtonText = choices[0] root = Tk() root.protocol('WM_DELETE_WINDOW', denyWindowManagerClose ) screen_width = root.winfo_screenwidth() screen_height = root.winfo_screenheight() root_width = int((screen_width * 0.8)) root_height = int((screen_height * 0.5)) root_xpos = int((screen_width * 0.1)) root_ypos = int((screen_height * 0.05)) root.title(title) root.iconname('Dialog') rootWindowPosition = "+0+0" root.geometry(rootWindowPosition) root.expand=NO root.minsize(root_width, root_height) rootWindowPosition = "+" + str(root_xpos) + "+" + str(root_ypos) root.geometry(rootWindowPosition) mainframe = Frame(root) mainframe.pack(side=TOP, fill=BOTH, expand=YES) # ---- put frames in the window ----------------------------------- # we pack the textboxFrame first, so it will expand first textboxFrame = Frame(mainframe, borderwidth=3) textboxFrame.pack(side=BOTTOM , fill=BOTH, expand=YES) message_and_buttonsFrame = Frame(mainframe) message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO) messageFrame = Frame(message_and_buttonsFrame) messageFrame.pack(side=LEFT, fill=X, expand=YES) buttonsFrame = Frame(message_and_buttonsFrame) buttonsFrame.pack(side=RIGHT, expand=NO) # -------------------- put widgets in the frames -------------------- # put a textbox in the top frame if codebox: character_width = int((root_width * 0.6) / CODEBOX_FONT_SIZE) textbox = Text(textboxFrame,height=25,width=character_width, padx="2m", pady="1m") textbox.configure(wrap=NONE) textbox.configure(font=(MONOSPACE_FONT_FAMILY, CODEBOX_FONT_SIZE)) else: character_width = int((root_width * 0.6) / SMALL_FONT_SIZE) textbox = Text( textboxFrame , height=25 , width=character_width , padx="2m" , pady="1m" ) textbox.configure(wrap=WORD) textbox.configure(font=(DEFAULT_FONT_FAMILY,TEXTBOX_FONT_SIZE)) # some simple keybindings for scrolling mainframe.bind("<Next>" , textbox.yview_scroll( 1,PAGES)) mainframe.bind("<Prior>", textbox.yview_scroll(-1,PAGES)) mainframe.bind("<Right>", textbox.xview_scroll( 1,PAGES)) mainframe.bind("<Left>" , textbox.xview_scroll(-1,PAGES)) mainframe.bind("<Down>", textbox.yview_scroll( 1,UNITS)) mainframe.bind("<Up>" , textbox.yview_scroll(-1,UNITS)) # add a vertical scrollbar to the frame rightScrollbar = Scrollbar(textboxFrame, orient=VERTICAL, command=textbox.yview) textbox.configure(yscrollcommand = rightScrollbar.set) # add a horizontal scrollbar to the frame bottomScrollbar = Scrollbar(textboxFrame, orient=HORIZONTAL, command=textbox.xview) textbox.configure(xscrollcommand = bottomScrollbar.set) # pack the textbox and the scrollbars. Note that although we must define # the textbox first, we must pack it last, so that the bottomScrollbar will # be located properly. # Note that we need a bottom scrollbar only for code. # Text will be displayed with wordwrap, so we don't need to have a horizontal # scroll for it. 
    if codebox:
        bottomScrollbar.pack(side=BOTTOM, fill=X)
    rightScrollbar.pack(side=RIGHT, fill=Y)
    textbox.pack(side=LEFT, fill=BOTH, expand=YES)

    # ---------- put a message widget in the message frame-------------------
    messageWidget = Message(messageFrame, anchor=NW, text=message,
                            width=int(root_width * 0.9))
    messageWidget.configure(font=(DEFAULT_FONT_FAMILY, DEFAULT_FONT_SIZE))
    messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')

    # put the buttons in the buttonsFrame
    okButton = Button(buttonsFrame, takefocus=YES, text="OK", height=1, width=6)
    okButton.pack(expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
    okButton.bind("<Return>", __textboxOK)
    okButton.bind("<Button-1>", __textboxOK)
    okButton.bind("<Escape>", __textboxOK)

    # ----------------- the action begins ----------------------------------------
    try:
        # load the text into the textbox
        if type(text) == type("abc"):
            pass
        else:
            try:
                text = "".join(text)  # convert a list or a tuple to a string
            except:
                msgbox("Exception when trying to convert "
                       + str(type(text)) + " to text in textbox")
                sys.exit(16)
        textbox.insert(END, text, "normal")

        # disable the textbox, so the text cannot be edited
        textbox.configure(state=DISABLED)
    except:
        msgbox("Exception when trying to load the textbox.")
        sys.exit(16)

    try:
        okButton.focus_force()
    except:
        msgbox("Exception when trying to put focus on okButton.")
        sys.exit(16)

    root.mainloop()
    root.destroy()
    return __replyButtonText


def __textboxOK(event):
    global root
    root.quit()

#-------------------------------------------------------------------
# diropenbox
#-------------------------------------------------------------------
def diropenbox(msg=None, title=None, argInitialDir=None):
    """A dialog to get a directory name.
    Note that the msg argument, if specified, is ignored.

    Returns the name of a directory, or None if user chose to cancel.

    If an initial directory is specified in argument 3, and that
    directory exists, then the dialog box will start with that
    directory.
    """
    root = Tk()
    root.withdraw()
    if argInitialDir == None:
        f = tkFileDialog.askdirectory(parent=root, title=title)
    else:
        f = tkFileDialog.askdirectory(parent=root, title=title,
                                      initialdir=argInitialDir)
    if f == "":
        return None
    return f

#-------------------------------------------------------------------
# fileopenbox
#-------------------------------------------------------------------
def fileopenbox(msg=None, title=None, argInitialFile=None):
    """A dialog to get a file name.

    Returns the name of a file, or None if user chose to cancel.

    If argInitialFile contains a valid filename, the dialog will
    be positioned at that file when it appears.
    """
    root = Tk()
    root.withdraw()
    f = tkFileDialog.askopenfilename(parent=root, title=title,
                                     initialfile=argInitialFile)
    if f == "":
        return None
    return f

#-------------------------------------------------------------------
# filesavebox
#-------------------------------------------------------------------
def filesavebox(msg=None, title=None, argInitialFile=None):
    """A dialog to get the name of a file to save.

    Returns the name of a file, or None if user chose to cancel.

    If argInitialFile contains a valid filename, the dialog will
    be positioned at that file when it appears.
""" root = Tk() root.withdraw() f = tkFileDialog.asksaveasfilename(parent=root, title=title, initialfile=argInitialFile) if f == "": return None return f #------------------------------------------------------------------- # utility routines #------------------------------------------------------------------- # These routines are used by several other functions in the EasyGui module. def __buttonEvent(event): """Handle an event that is generated by a person clicking a button. """ global root, __widgetTexts, __replyButtonText __replyButtonText = __widgetTexts[event.widget] root.quit() # quit the main loop def __put_buttons_in_buttonframe(choices): """Put the buttons in the buttons frame """ global __widgetTexts, __firstWidget, buttonsFrame __widgetTexts = {} i = 0 for buttonText in choices: tempButton = Button(buttonsFrame, takefocus=1, text=buttonText) tempButton.pack(expand=YES, side=LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m') # remember the text associated with this widget __widgetTexts[tempButton] = buttonText # remember the first widget, so we can put the focus there if i == 0: __firstWidget = tempButton i = 1 # bind the keyboard events to the widget tempButton.bind("<Return>", __buttonEvent) tempButton.bind("<Button-1>", __buttonEvent) #------------------------------------------------------------------- # test driver code #------------------------------------------------------------------- def _test(): # simple way to clear the console print "\n" * 100 # START DEMONSTRATION DATA =================================================== choices_abc = ["This is choice 1", "And this is choice 2"] message = "Pick one! This is a huge choice, and you've got to make the right one " \ "or you will surely mess up the rest of your life, and the lives of your " \ "friends and neighbors!" title = "" # ============================= define a code snippet ========================= code_snippet = ("dafsdfa dasflkj pp[oadsij asdfp;ij asdfpjkop asdfpok asdfpok asdfpok"*3) +"\n"+\ """# here is some dummy Python code for someItem in myListOfStuff: do something(someItem) do something() do something() if somethingElse(someItem): doSomethingEvenMoreInteresting() """*16 #======================== end of code snippet ============================== #================================= some text =========================== text_snippet = ((\ """It was the best of times, and it was the worst of times. The rich ate cake, and the poor had cake recommended to them, but wished only for enough cash to buy bread. The time was ripe for revolution! """ \ *5)+"\n\n")*10 #===========================end of text ================================ intro_message = ("Pick the kind of box that you wish to demo.\n\n" + "In EasyGui, all GUI interactions are invoked by simple function calls.\n\n" + "EasyGui is different from other GUIs in that it is NOT event-driven. It allows" + " you to program in a traditional linear fashion, and to put up dialogs for simple" + " input and output when you need to. If you are new to the event-driven paradigm" + " for GUIs, EasyGui will allow you to be productive with very basic tasks" + " immediately. Later, if you wish to make the transition to an event-driven GUI" + " paradigm, you can move to an event-driven style with a more powerful GUI package" + "such as anygui, PythonCard, Tkinter, wxPython, etc." 
+ "\n\nEasyGui is running Tk version: " + str(TkVersion) ) #========================================== END DEMONSTRATION DATA while 1: # do forever choices = [ "msgbox", "buttonbox", "choicebox", "multchoicebox", "textbox", "ynbox", "ccbox", "enterbox", "codebox", "integerbox", "boolbox", "indexbox", "filesavebox", "fileopenbox", "passwordbox", "multenterbox", "multpasswordbox", "diropenbox" ] choice = choicebox(intro_message, "EasyGui " + EasyGuiRevisionInfo, choices) if choice == None: return reply = choice.split() if reply[0] == "msgbox": reply = msgbox("short message", "This is a long title") print "Reply was:", reply elif reply[0] == "buttonbox": reply = buttonbox() print "Reply was:", reply reply = buttonbox(message, "Demo of Buttonbox with many, many buttons!", choices) print "Reply was:", reply elif reply[0] == "boolbox": reply = boolbox() print "Reply was:", reply elif reply[0] == "integerbox": reply = integerbox( "Enter a number between 3 and 333", "Demo: integerbox WITH a default value", 222, 3, 333) print "Reply was:", reply reply = integerbox( "Enter a number between 0 and 99", "Demo: integerbox WITHOUT a default value" ) print "Reply was:", reply elif reply[0] == "diropenbox": title = "Demo of diropenbox" msg = "This is a test of the diropenbox.\n\nPick the directory that you wish to open." d = diropenbox(msg, title) print "You chose directory...:", d elif reply[0] == "fileopenbox": f = fileopenbox() print "You chose to open file:", f elif reply[0] == "filesavebox": f = filesavebox() print "You chose to save file:", f elif reply[0] == "indexbox": title = reply[0] msg = "Demo of " + reply[0] choices = ["Choice1", "Choice2", "Choice3", "Choice4"] reply = indexbox(msg, title, choices) print "Reply was:", reply elif reply[0] == "passwordbox": reply = passwordbox("Demo of password box WITHOUT default" + "\n\nEnter your secret password", "Member Logon") print "Reply was:", str(reply) reply = passwordbox("Demo of password box WITH default" + "\n\nEnter your secret password", "Member Logon", "alfie") print "Reply was:", str(reply) elif reply[0] == "enterbox": reply = enterbox("Enter the name of your best friend:", "Love!", "Suzy Smith") print "Reply was:", str(reply) reply = enterbox("Enter the name of your worst enemy:", "Hate!") print "Reply was:", str(reply) elif reply[0] == "multenterbox": msg = "Enter your personal information" title = "Credit Card Application" fieldNames = ["Name","Street Address","City","State","ZipCode"] fieldValues = [] # we start with blanks for the values fieldValues = multenterbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues == None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues) print "Reply was:", fieldValues elif reply[0] == "multpasswordbox": msg = "Enter logon information" title = "Demo of multpasswordbox" fieldNames = ["Server ID", "User ID", "Password"] fieldValues = [] # we start with blanks for the values fieldValues = multpasswordbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues == None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multpasswordbox(errmsg, title, 
fieldNames, fieldValues) print "Reply was:", fieldValues elif reply[0] == "ynbox": reply = ynbox(message, title) print "Reply was:", reply elif reply[0] == "ccbox": reply = ccbox(message) print "Reply was:", reply elif reply[0] == "choicebox": longchoice = "This is an example of a very long option which you may or may not wish to choose."*2 listChoices = ["nnn", "ddd", "eee", "fff", "aaa", longchoice , "aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk", "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq", "rrr", "sss", "ttt", "uuu", "vvv"] message = "Pick something. " + ("A wrapable sentence of text ?! "*30) + "\nA separate line of text."*6 reply = choicebox(message, None, listChoices) print "Reply was:", reply message = "Pick something. " reply = choicebox(message, None, listChoices) print "Reply was:", reply message = "Pick something. " reply = choicebox("The list of choices is empty!", None, []) print "Reply was:", reply elif reply[0] == "multchoicebox": listChoices = ["aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk" , "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq" , "rrr", "sss", "ttt", "uuu", "vvv"] message = "Pick as many choices as you wish." reply = multchoicebox(message,"DEMO OF multchoicebox", listChoices) print "Reply was:", reply elif reply[0] == "textbox": message = "Here is some sample text. " * 16 reply = textbox(message, "Text Sample", text_snippet) print "Reply was:", reply elif reply[0] == "codebox": message = "Here is some sample code. " * 16 reply = codebox(message, "Code Sample", code_snippet) print "Reply was:", reply else: msgbox("Choice\n\n" + choice + "\n\nis not recognized", "Program Logic Error") return if __name__ == '__main__': _test()
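
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# EasyGui's point is linear, non-event-driven dialogs: each call blocks and
# returns a plain value.  A minimal flow, assuming a working Tk display;
# nothing runs unless you call _demoLinearFlow() yourself.
def _demoLinearFlow():
    if not ynbox("Run the tiny demo?", "Demo"):
        return
    name = enterbox("What is your name?", "Demo", "world")
    if name == None:          # user cancelled
        name = "world"
    msgbox("Hello, " + name + "!", "Demo")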
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import xml.dom.minidom as dom

"""
Title: Pickle Python Objects to a DOM tree and vice versa
Submitter: Uwe Schmitt (other recipes)
Last Updated: 2004/12/04
Version no: 1.1
url: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/355487

manatlan modifications :
 - unicode aware
 - attribute index removed for the list
 - and 2 simple fct : data2node & node2data
"""

class PickleMeToXML(object):
    pass

# helper function
def getType(obj):
    """ generates string representation of class of obj discarding decoration """
    return str(obj.__class__).split("'")[1].split(".")[-1]

_easyToPickle = ["int", "float"]

_isCallable = lambda o: hasattr(o, "__call__")

#
# pickling
#

def _pickleDictItems(root, node, fabric):
    for key, value in root.items():
        tempnode = fabric.createElement("item")
        tempnode.appendChild(pickle(key, fabric, "key"))
        tempnode.appendChild(pickle(value, fabric, "value"))
        node.appendChild(tempnode)

def _pickleListItems(root, node, fabric):
    for idx, obj in enumerate(root):
        tempnode = pickle(obj, fabric, "item")
        #~ tempnode.attributes["index"] = str(idx)   #manatlan
        node.appendChild(tempnode)

_pickleTupleItems = _pickleListItems

def pickle(root, fabric, elementName="root"):
    node = fabric.createElement(elementName)
    typeStr = getType(root)
    node.attributes["type"] = typeStr

    if isinstance(root, PickleMeToXML):
        node = _pickleObjectWithAttributes(node, root, fabric, elementName)
    elif typeStr in _easyToPickle:
        node.appendChild(fabric.createTextNode(str(root)))
    elif isinstance(root, dict):
        _pickleDictItems(root, node, fabric)
    elif isinstance(root, list):
        _pickleListItems(root, node, fabric)
    elif isinstance(root, tuple):
        _pickleTupleItems(root, node, fabric)
    elif isinstance(root, unicode) or isinstance(root, str):
        node.attributes["type"] = "string"
        if isinstance(root, str):
            val = root.decode("utf_8")
        else:
            val = root
        #~ rep = repr(val)[2:-1]   # without the "u'"+"'"
        node.appendChild(fabric.createTextNode(val))  # as unicode !!!!
    else:
        # fallback handler
        print "------------****---------------", root
        node.appendChild(fabric.createTextNode(repr(root)))
    return node

def _pickleObjectWithAttributes(node, root, fabric, elementName):
    # pickle all members or just a subset ???
    if hasattr(root, "__pickle_to_xml__"):
        attributesToPickle = root.__pickle_to_xml__
    else:
        # avoid members which are python internal
        attributesToPickle = [name for name in dir(root)
                              if not name.startswith("__")]

    for name in attributesToPickle:
        obj = getattr(root, name)
        # do not pickle member functions
        if _isCallable(obj):
            continue
        # is there some special encoding method ??
        if hasattr(root, "_xml_encode_%s" % name):
            value = getattr(root, "_xml_encode_%s" % name)()
            node.appendChild(fabric.createTextNode(value))
        else:
            node.appendChild(pickle(obj, fabric, name))
    return node

#
# unpickling
#

# helper functions
def _getElementChilds(node, doLower=1):
    """ returns list of (tagname, element) for all element childs of node """
    dolow = doLower and (lambda x: x.lower()) or (lambda x: x)
    return [(dolow(no.tagName), no) for no in node.childNodes
            if no.nodeType != no.TEXT_NODE]

def _getText(nodelist):
    """ returns collected and stripped text of textnodes among nodes in nodelist """
    rc = ""
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc = rc + node.data
    return rc.strip()

# main unpickle function
def unpickle(node):
    typeName = node.attributes["type"].value

    if typeName in _easyToPickle:
        initValue = _getText(node.childNodes)
        value = eval("%s(%r)" % (typeName, initValue))
        return value
    elif typeName == "tuple":
        return _unpickleTuple(node)
    elif typeName == "list":
        return _unpickleList(node)
    elif typeName == "dict":
        return _unpickleDict(node)
    elif typeName == "string":
        #~ obj = eval("""unicode(u'%s')""" % _getText(node.childNodes))
        obj = _getText(node.childNodes)  # unicode !
        return obj
    else:
        obj = eval("object.__new__(%s)" % typeName)
        for name, element in _getElementChilds(node):
            setattr(obj, name, unpickle(element))
        return obj

class XMLUnpicklingException(Exception):
    pass

def _unpickleList(node):
    li = []
    # collect entries, you can not assume that the
    # members of the list appear in the right order !
    idx = 0  #manatlan
    for name, element in _getElementChilds(node):
        if name != "item":
            raise XMLUnpicklingException()
        #~ idx = int(element.attributes["index"].value)
        obj = unpickle(element)
        li.append((idx, obj))
        idx += 1  #manatlan
    # rebuild list with right order
    li.sort()
    return [item[1] for item in li]

def _unpickleTuple(node):
    return tuple(_unpickleList(node))

def _unpickleDict(node):
    dd = dict()
    for name, element in _getElementChilds(node):
        if name != "item":
            raise XMLUnpicklingException()
        childList = _getElementChilds(element)
        if len(childList) != 2:
            raise XMLUnpicklingException()
        for name, element in childList:
            if name == "key":
                key = unpickle(element)
            elif name == "value":
                value = unpickle(element)
        dd[key] = value
    return dd


class Data(PickleMeToXML):
    pass

def data2node(data):
    if data != None:
        obj = Data()
        obj.value = data
        node = pickle(root=obj, fabric=dom.Document())
        return node.childNodes[0]

def node2data(nod):
    node = nod.cloneNode(True)
    doc = node.ownerDocument
    n = doc.createElement("root")
    n.setAttributeNode(doc.createAttribute("type"))
    n.setAttribute("type", "Data")
    n.appendChild(node)
    obj = unpickle(n)
    return obj.value


if __name__ == "__main__":
    v = u"héllo", "hél", 9, {1: 12}, ({u"a": 123, u"béé": 566}, 123)
    #~ v = None   # data2node(None) returns None, so this would crash the demo
    node = data2node(v)
    print node.toprettyxml().encode("utf_8")
    #~ #===============================================
    #~ val = node2data( node )
    #~ print val[0].encode("utf_8")
    #~ print val[1].encode("utf_8")
Python
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os, sys
import stat

#=========================================================
def getFiles(rep, exts=[]):
#=========================================================
    assert type(rep) == unicode
    files = []
    dirs = []
    if os.path.isdir(rep):
        for i in os.listdir(rep):
            if i[0] != ".":
                file = os.path.join(rep, i)
                if os.path.isfile(file):
                    n, ext = os.path.splitext(i)
                    if exts:
                        if ext:
                            if ext[1:].lower() in exts:
                                files.append(file)
                    else:
                        files.append(file)
                else:
                    dirs.append(file)
    dirs.sort(lambda a, b: cmp(a.lower(), b.lower()))
    files.sort(lambda a, b: cmp(a.lower(), b.lower()))
    return dirs, files  # UNICODE


def walktree(top=".", depthfirst=True):
    """This walks a directory tree, starting from 'top'.

    This is somewhat like os.path.walk, but using generators
    instead of a visit function.  One important difference is that
    walktree() defaults to DEPTH first with optional BREADTH first,
    whereas the os.path.walk function allows only BREADTH first.
    Depth first was made the default because it is safer if you are
    going to be modifying directory names.  This avoids the problem
    of renaming a directory before visiting the children of that
    directory.
    """
    names = os.listdir(top)
    if not depthfirst:
        yield top, names
    for name in names:
        try:
            st = os.lstat(os.path.join(top, name))
        except os.error:
            continue
        if stat.S_ISDIR(st.st_mode):
            for (newtop, children) in walktree(os.path.join(top, name), depthfirst):
                yield newtop, children
    if depthfirst:
        yield top, names

#=========================================================
def getAllFiles(rep, exts=[]):
#=========================================================
    assert type(rep) == unicode
    files = []
    for (basepath, children) in walktree(rep, False):
        for child in children:
            if child[0] != ".":
                file = os.path.join(basepath, child)
                n, ext = os.path.splitext(child)
                if exts:
                    if ext:
                        if ext[1:].lower() in exts:
                            files.append(file)
                else:
                    files.append(file)
    return files  # UNICODE

#=========================================================
def getExtVlcOpt(filename):
#=========================================================
    assert type(filename) == unicode

    opt_jpg = ["sout=#transcode:std", "sout-transcode-width=720",
               "sout-transcode-height=576", "sout-transcode-vb=9000",
               "sout-transcode-vcodec=mp2v", "sout-transcode-vt=1000000",
               "sout-transcode-vfilter=deinterlace",
               "sout-deinterlace-mode=blend", "sout-ffmpeg-keyint=1"]
    opt_dvd = ["sout=#transcode:std", "sout-transcode-ab=384",
               "sout-transcode-acodec=mpga", "sout-transcode-channels=2",
               "file-caching=1000"]
    opt_avi = ["sout=#transcode:std", "sout-transcode-ab=384",
               "sout-transcode-acodec=mpga", "sout-transcode-channels=2",
               "sout-transcode-vb=9000", "sout-transcode-vcodec=mp2v",
               "sout-transcode-vt=1000000", "file-caching=1000"]

    #print "#EXTM3U\n";
    #~ filename = os.path.abspath(filename)

    ext = filename.split(".")[-1].lower()  # note: the extension without the dot

    com = ["#EXTINF:0,%s" % os.path.basename(filename)]
    com.append("#file:%s" % filename)  # self-add ;-)

    if ext in ["vob"]:
        com += ["#EXTVLCOPT:" + i for i in opt_dvd]
        com.append(filename)
    elif ext in "avi|ogg|mp3|mkv|mp4|mov|aac|au|aif|aiff|mjpeg|wmv|wma|wav|asf".split("|"):
        com += ["#EXTVLCOPT:" + i for i in opt_avi]
        com.append(filename)
    elif ext in "jpg|jpeg|png|tiff|gif".split("|"):
        com += ["#EXTVLCOPT:" + i for i in opt_jpg]
        com.append("#EXTVLCOPT:fake-file=%s" % filename)
        com.append("fake:")
    elif filename[:5] == "/dev/":
        com += ["#EXTVLCOPT:" + i for i in opt_dvd]
        com.append("dvdsimple:" + filename)
    elif filename[:7] == "http://":
        com += ["#EXTVLCOPT:" + i for i in opt_avi]
        com.append(filename)
    else:
        com.append(filename)
    return com


if __name__ == "__main__":
    print getFiles(u"/media/data/photos/2007/02/verbier_2007 A TAGGUER", ["jpg"])[1]
    #print getAllFiles(u"/media/d/divx",["avi"])
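
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file).
# getExtVlcOpt() returns the "#EXTINF"/"#EXTVLCOPT" block for one media file;
# an extended M3U playlist is just those blocks under an "#EXTM3U" header.
# The directory and the "demo.m3u" output name are hypothetical.
def _demoWritePlaylist():
    lines = [u"#EXTM3U"]
    for f in getAllFiles(u"/media/d/divx", ["avi", "mkv"]):
        lines += getExtVlcOpt(f)
    fid = open("demo.m3u", "w")
    fid.write((u"\n".join(lines)).encode("utf_8"))
    fid.close()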
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from base64 import urlsafe_b64encode, urlsafe_b64decode

def u64enc(f):
    assert type(f) == unicode
    a = f.encode("utf_8")
    b = urlsafe_b64encode(a)
    return unicode(b, "utf_8")

def u64dec(f):
    assert type(f) == unicode
    a = f.encode("utf_8")
    b = urlsafe_b64decode(a)
    return unicode(b, "utf_8")


class OList:
    """
    l=[("martin","jean"),("dupond","yves"),("klein","michel")]
    or
    l=[["martin","jean"],["dupond","yves"],["klein","michel"]]

    ll = OList("nom","prenom")(l)
    print ll[2].prenom,"==",ll[2][1],"==",l[2][1]
    """
    def __init__(self, *a):
        self.__n = list(a)

    def __call__(self, liste):
        class NList(list):
            def __init__(self, n, l):
                assert len(l) == len(n), "No match " + str(n) + " and " + str(l)
                list.__init__(self, l)
                for i in n:
                    setattr(self, i, l[n.index(i)])
                self.__noms = n

            def __repr__(self):
                m = " ".join(["""%s='%s'""" % (i, str(getattr(self, i))) for i in self.__noms])
                return "<objet %s>" % m

        return [NList(self.__n, i) for i in liste]


import math

def mkPager(ll, nbperpage, page=None):
    """ Build a *pager* from the list "ll" and the number of items
        to display per page "nbperpage".
        Returns a tuple containing:
        - a tuple of keys (first,prev,next,last) holding the page numbers
          for navigation: first and last are always set, next and prev
          are contextual
        - a window of page numbers (which always contains the current page)
        - a sub-list of ll for the current page
    """
    # checks, and computation of maxPage
    assert nbperpage > 1
    assert type(ll) == list
    maxPage = int(math.ceil(float(len(ll)) / nbperpage))
    if page != None:
        assert 1 <= page <= maxPage, "max page count = %d, page=%d" % (maxPage, page)
    else:
        page = 1

    # build the "pager navigator" line
    lp = range(1, maxPage + 1)
    if len(lp) > 1:
        idx = lp.index(page)
        off = 5
        mi = max(idx - off, 0)
        ma = min(mi + (1 + off * 2), len(lp))
        if idx - (1 + off * 2) > 0:
            mi = min(idx - off, ma - (1 + off * 2))
        lp = lp[mi:ma]
    else:
        lp = []

    # the elements displayed on this page
    debut = (page - 1) * nbperpage
    fin = debut + nbperpage

    # define the FIRST/PREV/NEXT/LAST keys
    first = maxPage > 1 and 1 or None
    prev = page > 1 and page - 1 or None
    next = page < maxPage and page + 1 or None
    last = maxPage > 1 and maxPage or None

    return (first, prev, next, last), lp, ll[debut:fin]


def pageNavigator(ll, nb, url, page=1):
    assert "%d" in url
    (first, prev, next, last), lp, l = mkPager(ll, nb, page)

    keyPrev, keyNext = "", ""
    lnav = []
    if prev:
        lnav.append((url % first, "|<"))
        lnav.append((url % prev, "<<"))
        keyPrev = url % prev
    for i in lp:
        if i == page:
            lnav.append((None, "<font color='#FFFF00'>%d</font>" % i))
        else:
            lnav.append((url % i, str(i)))
    if next:
        lnav.append((url % next, ">>"))
        lnav.append((url % last, ">|"))
        keyNext = url % next

    h = ""
    for u, t in lnav:
        if u:
            h += "<a href='%s'>%s</a>" % (u, t)
        else:
            h += t
        h += " "
    return keyPrev, keyNext, h, l


if __name__ == "__main__":
    ll = range(5686)
    # pageNavigator() takes the items-per-page count and returns four values
    keyPrev, keyNext, h, l = pageNavigator(ll, 50, "browse?page=%d", 2)
    print h
    print l
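
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file).
# mkPager() slices a list into pages and returns the navigation keys plus a
# window of page numbers around the current page.
def _demoPager():
    items = range(95)                          # 95 items, 10 per page -> 10 pages
    (first, prev, next, last), pages, chunk = mkPager(items, 10, page=4)
    print first, prev, next, last              # -> 1 3 5 10
    print pages                                # window of page numbers around 4
    print chunk                                # -> items 30..39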
Python
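A hedged sketch of the pager trio above (Python 2); the rows and the `browse?page=%d` url pattern are made up for illustration.

# named access on rows via OList
rows = [("martin", "jean"), ("dupond", "yves"), ("klein", "michel")]
orows = OList("nom", "prenom")(rows)
print orows[0].nom, "==", orows[0][0]

# paginate 100 items, 10 per page, currently on page 3
(first, prev, next, last), pages, subset = mkPager(range(100), 10, page=3)
print "keys:", first, prev, next, last          # -> 1 2 4 10
print "items:", subset[0], "..", subset[-1]     # -> 20 .. 29

# full html navigator (the url pattern must contain "%d")
kp, kn, html, items = pageNavigator(range(100), 10, "browse?page=%d", 3)
print html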
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from dict4ini import DictIni

def getHomeFolder():
    """ return the home folder of the user (platform independent)
        return an existing folder or None """
    if "APPDATA" in os.environ:                 # windows NT,2k,XP,etc. fallback
        home = os.environ['APPDATA']
        if os.path.isdir(home):
            return home
    else:                                       # all user-based OSes
        home = os.path.expanduser("~")
        if home != "~" and os.path.isdir(home):
            return home
        else:                                   # freedesktop *nix ?
            if "XDG_CONFIG_HOME" in os.environ:
                home = os.environ['XDG_CONFIG_HOME']
                if os.path.isdir(home):
                    return home
            else:                               # *nix fallback
                if "HOME" in os.environ:
                    home = os.environ['HOME']
                    if os.path.isdir(home):
                        return home
    return None

class FeedConf(DictIni):
    homeFolder = ""  # set at instantiation

    def __init__(self):
        hd = getHomeFolder()
        FeedConf.homeFolder = os.path.join(hd, ".feedbox")
        if not os.path.isdir(FeedConf.homeFolder):
            os.mkdir(FeedConf.homeFolder)
        homeConf = os.path.join(FeedConf.homeFolder, "feedbox.ini")
        DictIni.__init__(self, homeConf)

    def checkConf(self):
        """ check that vlc is configured and that the binary exists,
            otherwise ask the user to locate it
            returns True if vlc is ok, False otherwise """
        vlc = self.config.vlc
        if not (vlc and os.path.isfile(vlc)):
            import easygui
            easygui.msgbox("""
This is the first time you run feedBox.

Please select your VLC ...
""", "feedbox")
            vlc = easygui.fileopenbox()
            del(easygui)                        # free the memory
            if vlc:
                self.config.vlc = vlc.decode(sys.getfilesystemencoding())
                return True
            else:
                return False
        else:
            return True

if __name__ == "__main__":
    fc = FeedConf()
    #fc["conf"].kif=12
    fc.checkConf()
    c = fc["jojo"]
    print c.has_key("kk")
    #c.kif=12
    fc.save()
    print fc.homeFolder
Python
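A hedged sketch of FeedConf in isolation (Python 2): it persists a DictIni under ~/.feedbox/feedbox.ini. The vlc path below is a placeholder, and checkConf() is skipped here because it pops up easygui dialogs.

fc = FeedConf()                       # creates ~/.feedbox/feedbox.ini if needed
fc.config.vlc = u"/usr/bin/vlc"       # placeholder path; checkConf() would prompt for it
fc["jojo"].kif = 12                   # sections and keys are created on the fly
fc.save()
print fc.homeFolder, fc.config.vlc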
#!/usr/bin/env python # -*- coding: utf-8 -*- import xml.dom.minidom as dom """ Title: Pickle Python Objects to a DOM tree and vice versa Submitter: Uwe Schmitt (other recipes) Last Updated: 2004/12/04 Version no: 1.1 url:http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/355487 manatlan modifications : - unicode aware - attribute index removed for the list - and 2 simple fct : data2node & node2data """ class PickleMeToXML(object): pass # helper function def getType(obj): """ generates string representation of class of obj discarding decoration """ return str(obj.__class__).split("'")[1].split(".")[-1] _easyToPickle = [ "int", "float" ] _isCallable = lambda o: hasattr(o, "__call__") # # pickling # def _pickleDictItems(root, node, fabric): for key, value in root.items(): tempnode = fabric.createElement("item") tempnode.appendChild(pickle(key, fabric, "key")) tempnode.appendChild(pickle(value, fabric, "value")) node.appendChild(tempnode) def _pickleListItems(root, node, fabric): for idx, obj in enumerate(root): tempnode = pickle(obj, fabric, "item") #~ tempnode.attributes["index"] = str(idx) #manatlan node.appendChild(tempnode) _pickleTupleItems = _pickleListItems def pickle(root, fabric, elementName="root"): node = fabric.createElement(elementName) typeStr = getType(root) node.attributes["type"]=typeStr if isinstance(root, PickleMeToXML): node = _pickleObjectWithAttributes(node, root, fabric, elementName) elif typeStr in _easyToPickle: node.appendChild(fabric.createTextNode(str(root))) elif isinstance(root, dict): _pickleDictItems(root, node, fabric) elif isinstance(root, list): _pickleListItems(root, node, fabric) elif isinstance(root, tuple): _pickleTupleItems(root, node, fabric) elif isinstance(root, unicode) or isinstance(root, str): node.attributes["type"]="string" if isinstance(root, str): val = root.decode("utf_8") else: val = root #~ rep = repr(val)[2:-1] # without le "u'"+"'" node.appendChild(fabric.createTextNode( val )) #en unicode !!!! else: # fallback handler print "------------****---------------",root node.appendChild(fabric.createTextNode(repr(root))) return node def _pickleObjectWithAttributes(node, root, fabric, elementName): # pickle all members or just a subset ??? if hasattr(root, "__pickle_to_xml__"): attributesToPickle = root.__pickle_to_xml__ else: # avoid members which are python internal attributesToPickle = [ name for name in dir(root) if not name.startswith("__") ] for name in attributesToPickle: obj = getattr(root, name) # do not pickle member functions if _isCallable(obj): continue # is there some special encoding method ?? 
if hasattr(root, "_xml_encode_%s" % name): value = getattr(root, "_xml_encode_%s" % name)() node.appendChild(fabric.createTextNode(value)) else: node.appendChild(pickle(obj, fabric, name)) return node # # unpickling # # helper functions def _getElementChilds(node, doLower = 1): """ returns list of (tagname, element) for all element childs of node """ dolow = doLower and (lambda x:x.lower()) or (lambda x:x) return [ (dolow(no.tagName), no) for no in node.childNodes if no.nodeType != no.TEXT_NODE ] def _getText(nodelist): """ returns collected and stripped text of textnodes among nodes in nodelist """ rc = "" for node in nodelist: if node.nodeType == node.TEXT_NODE: rc = rc + node.data return rc.strip() # main unpickle function def unpickle(node): typeName= node.attributes["type"].value if typeName in _easyToPickle: initValue = _getText(node.childNodes) value = eval("%s(%r)" % (typeName, initValue)) return value elif typeName=="tuple": return _unpickleTuple(node) elif typeName=="list": return _unpickleList(node) elif typeName=="dict": return _unpickleDict(node) elif typeName=="string": #~ obj = eval("""unicode(u'%s')""" % _getText(node.childNodes)) obj = _getText(node.childNodes) # unicode ! return obj else: obj = eval("object.__new__(%s)" % typeName) for name, element in _getElementChilds(node): setattr(obj, name, unpickle(element)) return obj class XMLUnpicklingException(Exception): pass def _unpickleList(node): li = [] # collect entries, you can not assume that the # members of the list appear in the right order ! idx=0 #manatlan for name, element in _getElementChilds(node): if name != "item": raise XMLUnpicklingException() #~ idx = int(element.attributes["index"].value) obj = unpickle(element) li.append((idx, obj)) idx+=1 #manatlan # rebuild list with right order li.sort() return [ item[1] for item in li ] def _unpickleTuple(node): return tuple(_unpickleList(node)) def _unpickleDict(node): dd = dict() for name, element in _getElementChilds(node): if name != "item": raise XMLUnpicklingException() childList = _getElementChilds(element) if len(childList) != 2: raise XMLUnpicklingException() for name, element in childList: if name=="key": key = unpickle(element) elif name=="value": value = unpickle(element) dd[key]=value return dd class Data(PickleMeToXML): pass def data2node( data ): if data!=None: obj = Data() obj.value = data node = pickle(root=obj, fabric=dom.Document()) return node.childNodes[0] def node2data( nod ): node = nod.cloneNode(True) doc = node.ownerDocument n = doc.createElement("root") n.setAttributeNode(doc.createAttribute("type")) n.setAttribute("type","Data") n.appendChild(node) obj=unpickle(n) return obj.value if __name__=="__main__": v=u"héllo","hél",9,{1:12},({ u"a":123,u"béé":566},123) v=None node = data2node(v) print node.toprettyxml().encode("utf_8") #~ #=============================================== #~ val = node2data( node ) #~ print val[0].encode("utf_8") #~ print val[1].encode("utf_8")
Python
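A hedged round-trip sketch for the data2node/node2data pair (Python 2); the sample value is arbitrary, but mirrors the kinds of data the module's own `__main__` exercises.

value = {u"name": u"héllo", u"nums": (1, 2, 3)}
node = data2node(value)                         # plain data -> DOM element
print node.toprettyxml().encode("utf_8")
assert node2data(node) == value                 # DOM element -> plain data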
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import os import subprocess import errno import time import sys import locale isNumber = lambda x: re.compile(r"^[-+]?\d+$").match(x) #:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554 #:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-: PIPE = subprocess.PIPE if subprocess.mswindows: try: from win32file import ReadFile, WriteFile from win32pipe import PeekNamedPipe import msvcrt except: raise "Sous WINDOWS : Installer PyWin32 (http://sourceforge.net/project/showfiles.php?group_id=78018&package_id=79063)" else: import select import fcntl class Popen(subprocess.Popen): def recv(self, maxsize=None): return self._recv('stdout', maxsize) def recv_err(self, maxsize=None): return self._recv('stderr', maxsize) def send_recv(self, input='', maxsize=None): return self.send(input), self.recv(maxsize), self.recv_err(maxsize) def get_conn_maxsize(self, which, maxsize): if maxsize is None: maxsize = 1024 elif maxsize < 1: maxsize = 1 return getattr(self, which), maxsize def _close(self, which): getattr(self, which).close() setattr(self, which, None) if subprocess.mswindows: def send(self, input): if not self.stdin: return None try: x = msvcrt.get_osfhandle(self.stdin.fileno()) (errCode, written) = WriteFile(x, input) except ValueError: return self._close('stdin') except (subprocess.pywintypes.error, Exception), why: if why[0] in (109, errno.ESHUTDOWN): return self._close('stdin') raise return written def _recv(self, which, maxsize): conn, maxsize = self.get_conn_maxsize(which, maxsize) if conn is None: return None try: x = msvcrt.get_osfhandle(conn.fileno()) (read, nAvail, nMessage) = PeekNamedPipe(x, 0) if maxsize < nAvail: nAvail = maxsize if nAvail > 0: (errCode, read) = ReadFile(x, nAvail, None) except ValueError: return self._close(which) except (subprocess.pywintypes.error, Exception), why: if why[0] in (109, errno.ESHUTDOWN): return self._close(which) raise if self.universal_newlines: read = self._translate_newlines(read) return read else: def send(self, input): if not self.stdin: return None if not select.select([], [self.stdin], [], 0)[1]: return 0 try: written = os.write(self.stdin.fileno(), input) except OSError, why: if why[0] == errno.EPIPE: #broken pipe return self._close('stdin') raise return written def _recv(self, which, maxsize): conn, maxsize = self.get_conn_maxsize(which, maxsize) if conn is None: return None flags = fcntl.fcntl(conn, fcntl.F_GETFL) if not conn.closed: fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK) try: if not select.select([conn], [], [], 0)[0]: return '' r = conn.read(maxsize) if not r: return self._close(which) if self.universal_newlines: r = self._translate_newlines(r) return r finally: if not conn.closed: fcntl.fcntl(conn, fcntl.F_SETFL, flags) message = "Other end disconnected!" def recv_some(p, t=.1, e=1, tr=5, stderr=0): if tr < 1: tr = 1 x = time.time()+t y = [] r = '' pr = p.recv if stderr: pr = p.recv_err while time.time() < x or r: r = pr() if r is None: if e: raise Exception(message) else: break elif r: y.append(r) else: time.sleep(max((x-time.time())/tr, 0)) return ''.join(y) def send_all(p, data): while len(data): sent = p.send(data) if sent is None: raise Exception(message) data = buffer(data, sent) #:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-: """ +----[ Commandes de l'interface à distance ] | | add XYZ . . . . . . . . . 
ajoute XYZ à la playlist | enqueue XYZ . . . . . . . queue XYZ to playlist | playlist . . . . . . . . . . . .afficher les éléments de la playlist | play . . . . . . . . . . . . . jouer | stop . . . . . . . . . . . . . arrêter | next . . . . . . . . . . . . élément suivant | prev . . . . . . . . . . . . élément précédent | goto . . . . . . . . . . . . . aller à l'élément X | clear . . . . . . . . . . . . .vider la liste de lecture | status . . . . . . . . . . . .afficher les informations | title [X] . . . . . . . . . . afficher le titre courant ou sauter à un titre | title_n . . . . . . . . . . . .titre suivant dans l'item courant | title_p . . . . . . . . . . . .titre précédent | chapter [X] . . . . . . modifier/afficher le chapitre | chapter_n . . . . . . . . chapitre suivant | chapter_p . . . . . . . . chapitre précédant | | seek X . . . . . . . . . . .se déplacer, en secondes, ex. « seek 12 » | pause . . . . . . . . . . .basculer la pause | fastforward . . . . . . avance rapide | rewind . . . . . . . . . . rembobiner | play . . . . . . . . . . . . jouer plus vite | slower . . . . . . . . . . . jouer plus lentement | normal . . . . . . . . . . . jouer à vitesse normale | f [on|off]. . . . . . . . . basculer le mode plein-écran | info . . . . . . . . . . . . . informations sur le flux courant | get_time . . seconds elapsed since stream's beginning | is_playing . . 1 si un élément est joué, 0 sinon | get_title . . . the title of the current stream | get_length . . the length of the current stream | | volume [X] . . . . . . . .modifier/afficher le volume | volup [X] . . . . . . . . augmenter le volume de X | voldown [X] . . . . . . .diminuer le volume de X | adev [X] . . . . . . . . . modifier/afficher le périphérique audio | achan [X] . . . . . . . . modifier/afficher les canaux audios | atrack [X] . . . . . . . . . set/get audio track | vtrack [X] . . . . . . . . . set/get video track | vratio [X] . . . . . set/get video aspect ratio | vcrop [X] . . . . . . . . . set/get video crop | vzoom [X] . . . . . . . . . set/get video zoom | strack [X] . . . . . . . set/get subtitles track | menu [on|off|up|down|left|right|select] utiliser le menu | | marq-marquee CHA . . écrit CHA sur la vidéo | marq-x X . . . . . . . . .décalage du texte depuis la gauche | marq-y Y . . . . . . . . . décalage du texte depuis le haut | marq-position # . . . contrôle la position relative du texte | marq-color # . . . . . . couleur du texte, RGB | marq_opacity #. . . . . opacité du texte | marq-timeout T . . . . .disparition du texte, en ms | marq-size # . . . . . . . .taille du texte, en pixels | | time-format CHAINE . . écrit l'heure selon CHAINE | time-x X . . . . . . . . .décalage de l'heure depuis la gauche | time-y Y . . . . . . . . . décalage de l'heure depuis le haut | time-position # . . . . . position relative de l'heure | time-color # . . . . . . . couleur de l'heure, RGB | time-opacity # . . . . . opacité de l'heure | time-size # . . . . . . . taille de l'heure, en pixels | | logo-file CHAÎNE. . . fichier de logo à incruster | logo-x X . . . . . . . . . .décalage du logo depuis la gauche | logo-y Y . . . . . . . . . décalage du logo depuis le haut | logo-position # . . . . . position relative du logo | logo-transparency #. .transparence du logo | | mosaic-alpha # . . . . . transparence mosaïque | mosaic-height #. . . . .hauteur mosaïque | mosaic-width # . . . . largeur mosaïque | mosaic-xoffset # . . . .décalage de la mosaïque depuis la gauche | mosaic-yoffset # . . . 
.décalage de la mosaïque depuis le haut | mosaic-align 0.2,4.6,8.10. .alignement de la mosaïque | mosaic-vborder # . . . .limite verticale | mosaic-hborder # . . . .limite horizontale | mosaic-position {0=auto,1=fixe} . . . .position | mosaic-rows # . . . nombre de rangées | mosaic-cols # . . .. . nombre de colonnes | mosaic-keep-aspect-ratio {0,1} . . . .étirement | | check-updates [newer] [equal] [older] | [undef] [info] [source] [binary] [plugin] | | help . . . . . . . . . . . . . ce message d'aide | longhelp . . . . . . . . .message d'aide plus long | logout . . . . . . . . . . . quitte l'interface sans fermer vlc | quit . . . . . . . . . . . . quitte VLC | +----[ fin de l'aide ]""" class PilotVlc(object): # SINGLETON !!!! exe=u"/usr/bin/vlc" ipFbx="freeplayer.freebox.fr" # ip destinataire du flux debug=False #ipFbx="127.0.0.1" # ip destinataire du flux #debug=True def __new__(type): if not '_the_instance' in type.__dict__: os.system("killall vlc") type._the_instance = object.__new__(type) self=type._the_instance if sys.platform[:3] == 'win': cmd="cmd" else: cmd="sh" self.__pipe = Popen(cmd, stdin=PIPE, stdout=PIPE) time.sleep(0.3) recv_some(self.__pipe) return type._the_instance def __cmd(self,cmd,withStatusChange=False): if self.__pipe: assert type(cmd)==unicode encoding = locale.getpreferredencoding() if sys.platform[:3] == 'win': tail = u'\r\n' else: tail = u'\n' send_all(self.__pipe, (cmd + tail).encode(encoding) ) time.sleep(0.2) buf= recv_some(self.__pipe).decode(encoding) list=[] for i in buf.strip().split(u"\r\n"): if i.startswith(u"status change: "): if not withStatusChange: continue if i.startswith(u"Interface de commande "): continue list.append(i) if PilotVlc.debug: print "::",[cmd,],"::",buf.strip().split(u"\r\n") return list def __del__(self): self.stop() def isPlaying(self): """ retourne True ou False suivant que VLC est lancé et joue qqchose 1 : play 2 : pause 0 : stop (vlc n'est pas lance) """ ll = self.__cmd(u"is_playing") if ll: if u"1" in ll: if u"status change: ( play state: 1 )" in self.__cmd(u"status",True): return 1 else: return 2 else: return 0 else: return 0 def __getLength(self): """ renvoi la longueur en sec du media actuel """ buf=self.__cmd(u"get_length") if buf: nbs=[int(i) for i in buf if isNumber(i)] if nbs: return nbs[0] def __getTime(self): """ renvoi le nb de sec passé du media actuel """ buf=self.__cmd(u"get_time") if buf: nbs=[int(i) for i in buf if isNumber(i)] if nbs: return nbs[0] def getPercent(self): length = self.__getLength() time = self.__getTime() if length: return (time * 100)/length def getTitle(self): """ renvoi le titre du media actuel """ ll=self.__cmd(u"get_title") if ll: for i in ll: if os.path.isfile(i): return i def getVolume(self): """ retourne le pourcentage du volume """ l=self.__cmd(u"volume",True) if l: #status change: ( audio volume: 1000 ) for i in l: g=re.match("status change: \( audio volume: (\d+) \)",i) if g: v=int(g.group(1)) return (v*100)/1024 def setVolume(self,v): """ met le pourcentage du volume """ assert v in range(0,101) self.__cmd(u"volume %d" % ((v*1024)/100)) def playVideo(self,file): assert type(file)==unicode assert '"' not in file assert os.path.isfile(PilotVlc.exe) cmds=[ PilotVlc.exe, "-I", "rc", '--rc-fake-tty', '--no-quiet', '--no-advanced', """--sout="#std" """, "--sout-standard-access=udp", "--sout-standard-mux=ts", "--sout-standard-dst=%s:1234" % PilotVlc.ipFbx, "--sout-ts-pid-video=68", "--sout-ts-pid-audio=69", "--sout-ts-pid-spu=70", "--sout-ts-pcr=80", "--sout-ts-dts-delay=400", 
"--sout-transcode-maxwidth=720", "--sout-transcode-maxheight=576", "--play-and-stop", "--no-playlist-autostart", ] cmd=unicode(u" ".join(cmds)) cmd+=(u""" "%s" """ % file) print (cmd,) if self.isPlaying(): self.stop() # #if not self.isPlaying(): # self.__connect() self.__cmd(cmd) time.sleep(1) def play(self): self.__cmd(u"play") def pause(self): self.__cmd(u"pause") def stop(self): # kill VLC if self.isPlaying(): send_all(self.__pipe, "quit\n" ) time.sleep(0.2) def seek(self,p): """ seek en pourcentage""" assert p in range(0,101) length = self.__getLength() val = int( (p*length) / 100 ) self.__cmd(u"seek %d" % val) def getInfo(self): return self.__cmd(u"info") def cmd(self,m,s=False): # TEST ONLY return self.__cmd(m,s) if __name__ == '__main__': #TODO: mettre d'autres fichiers media=u"/media/d/divx/brice de nice.avi" media2=u"/media/d/divx/L'Iznôgoud.FRENCH.DVDRiP.XViD-iD.avi" media2=u"/home/manatlan/.democracy/Movies/3491230.mp4-35f86c5c3754328f919a780fddc3cb4b12c61e4.mp4" PilotVlc.debug=True #TODO: unittest ! try: p=PilotVlc() assert p.isPlaying()== 0 p.playVideo(media2) assert p.isPlaying()== 1 p.pause() assert p.isPlaying()==2 assert p.getVolume()==None assert p.getTitle()==media2 p.seek(40) # seek pas pendant la pause assert p.getPercent()==0 # recupere donc etat actuel du seek p.play() # play fait rien assert p.isPlaying()==2 p.pause() # sort de la pause assert p.isPlaying()==1 time.sleep(1) p.seek(50) # test seek assert p.getPercent()==50 p.setVolume(100) # test volume assert p.getVolume()==100 assert p.isPlaying()== 1 p.stop() assert p.isPlaying()== 0,p.isPlaying() assert p.getTitle()==None pp=PilotVlc() assert pp.isPlaying()== 0 pp.playVideo(media) assert pp.isPlaying()== 1 assert pp.getTitle()==media pp.pause() # pause assert pp.isPlaying()==2 pp.pause() # play assert pp.isPlaying()==1 pp.pause() assert pp.isPlaying()==2 pp.playVideo(media2) assert pp.isPlaying()==1 assert pp.getTitle()==media2 assert pp.getPercent()==0 ppp=PilotVlc() #recupere l'instance en cours, en train de jouer assert ppp.isPlaying()== 1 print ppp.cmd(u"info") ppp.stop() pppp=PilotVlc() #recupere l'instance en cours, ne jouant pas assert pppp.isPlaying()== 0 assert pppp.getVolume()==None assert pppp.getTitle()==None assert pppp.getPercent()==None finally: p=PilotVlc() p.stop()
Python
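A hedged driving sketch for PilotVlc (Python 2). Note that merely instantiating the singleton runs `killall vlc` and opens a shell pipe, so this is only safe on a machine dedicated to the Freeplayer setup; the media path is a placeholder.

PilotVlc.exe = u"/usr/bin/vlc"        # adjust to the local vlc binary
p = PilotVlc()                        # WARNING: kills any running vlc
p.playVideo(u"/tmp/demo.avi")         # placeholder file; must exist
print p.isPlaying()                   # 1=play, 2=pause, 0=stopped
p.seek(50)                            # jump to 50% of the stream
p.setVolume(80)                       # volume as a percentage
p.stop()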
#!/usr/bin/env python # -*- coding: utf-8 -*- import inspect from mako.template import Template as MakoTemplate from mako.lookup import TemplateLookup import os #MYLOOKUP = TemplateLookup(directories=["common/","../../common/",".","../.."]) MYLOOKUP = TemplateLookup(directories=[os.path.dirname(__file__),os.getcwd()]) BTN = {} for i in os.listdir("common/static"): if i.lower().endswith(".gif"): BTN[i[:-4]]=i def Template(head,nom=None): global MYLOOKUP assert type(head)==dict #glob = inspect.currentframe(1).f_globals loc = inspect.currentframe(1).f_locals if "self" in loc: del(loc["self"]) if nom: filename=nom else: filename = "templates/"+inspect.currentframe(1).f_code.co_name+".html" # meta par defaut head["home_page"]="/index" head["digit_key"]="false" loc["isFreeBox"] = Template.isFreeBox loc["rlinks"]=[] loc["headers"]="" for key,value in head.items(): # mapping gestion feedbox -> telecommande if key=="BACK": key="star" if key=="NEXT": key="next" if key=="PREV": key="prev" if key=="PLAY": key="pause" # sur HD pas de touche play : uniquement play/pause if key=="PAUSE": key="pause" meta,rl=gen(key,value) loc["headers"]+=meta+"\n" loc["rlinks"].append(rl) mytemplate = MakoTemplate(filename=filename,output_encoding='iso-8859-15', encoding_errors='replace',lookup=MYLOOKUP) return mytemplate.render(**loc) Template.isFreeBox=False def mkButton(key,value): global BTN if key in BTN: key = """<img src="%s">""" % BTN[key] return """<span onclick="document.location='%s';">%s</span>""" % (value,key) def gen(key,value): key=key.lower() if type(value) in (str,unicode): assert '"' not in value button="" if key in "help options red green yellow blue info guide stop play pause rec up down left right star sharp".split(" "): # balise LINK ret= """<link rel="%s" href="%s"> """ % (key,value) if value and value!="none": button=mkButton(key,value) elif key in "back prev next rev fwd pip".split(" "): # balise LINK (freebox HD ONLY !!!!) if value and value!="none": button=mkButton(key,value) ret= """<link rel="%s" href="%s"> """ % (key,value) elif key in "volume_key program_key wait_new_picture digit_key".split(" "): # balise FLAGS ret= """<flags %s="%s">""" % (key,value and "true" or "false") else: # balise META p="" # traitement vrai navigateur if key in "home_page love_page love2_page mail_page mail2_page nochannel_page channel_page star_page sharp_page settings_page".split(" "): # genere un vrai lien if value and value!="none": button=mkButton(key,value) elif key=="refresh": # genere un vrai refresh http pour un vrai navigateur p="""<META HTTP-EQUIV="Refresh" CONTENT="%s">""" % value ret= """<meta name="%s" content="%s">%s""" % (key,value,p) return ret,button
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- import os,sys from xmldoc import XmlDoc,XmlNode from xpickle import node2data,data2node class Conf(object): def __init__(self,fb,nodeConf): assert type(nodeConf == XmlNode) self.__nodeConf = nodeConf self.__fb = fb def __iter__(self): """ iter les "names" des item de la conf """ for i in self.__nodeConf.getAll(u"key"): yield i.getAttr(u"name") def __len__(self): """ retourne le nb d'items de la conf """ return len(self.__nodeConf.getAll(u"key")) def __getNode(self,n): """ retourne la XmlNode de l'item 'n' ou None """ for i in self.__nodeConf.getAll(u"key"): attr = i.getAttr(u"name") if attr==n: return i return None def __getitem__(self,n): """ retourne la valeur de l'item 'n' dans la conf ou None """ node = self.__getNode(n) if node: # recupere la 1er vrai node (y en a qu'une!) ou non for i in node.childNodes: if i.node.nodeType == i.node.ELEMENT_NODE: return node2data( i.node ) return None def __delitem__(self,n): """ supprime l'item 'n' dans la conf """ node = self.__getNode(n) if node: node.unlink() self.__fb.save() # save conf return True return None def __setitem__(self,n,v): """ màj le contenu de l'item 'n' dans la conf s'il n'existait pas, on cree l'item en question """ node = self.__getNode(n) if node: node.unlink() node=self.__nodeConf.ownerDocument.createNode(u"key",{u"name":n}) newnode = self.__nodeConf.appendChild(node) nd=data2node(v) if nd: newnode.appendChild(XmlNode(nd)) self.__fb.save() # save conf class FeedBox(object): def __init__(self,fileConf=None): if not fileConf: # try to determinate the fileConf try: #windows NT,2k,XP,etc. fallback home = os.environ['APPDATA'] except: try: #all user-based OSes thd = os.path.expanduser("~") if thd == "~": raise home= thd except: try: #*nix fallback home = os.environ['HOME'] except: #What os are people using? home = os.path.dirname(os.path.abspath(__file__)) fileConf = os.path.join(home,".feedBox.xml") print "FeedBox Conf File : ",fileConf self.__file = fileConf if os.path.isfile(fileConf): self.__dom=XmlDoc(file=fileConf) else: # create the conf file self.__dom=XmlDoc(xml="<feedBox><conf/><plugins/></feedBox>") def checkConf(self): """ verifie que vlc est bien positionné et qu'il existe sinon demande à l'utilisateur de le situer renvoi true : si vlc est ok renvoi false : si ça va pas """ c=self.getConf() vlc = c["vlc"] if not( vlc and os.path.isfile(vlc) ): import easygui easygui.msgbox(""" C'est la première fois que vous lancer feedBox. Veuillez selectionner votre VLC ... ""","feedbox") vlc = easygui.fileopenbox() if vlc: c["vlc"] = vlc.decode( sys.getfilesystemencoding() ) return True else: return False else: return True def getConf(self,plug=None): """ recupere l'objet de configuration (Conf()) pour le plugin nommé "plug", sinon la conf globale de feedbox """ if plug: nplug = self.__dom.getAll("plugins")[0] nodes = nplug.getAll(plug) if nodes: return Conf(self,nodes[0]) else: n=nplug.ownerDocument.createNode(plug) node = nplug.appendChild(n) return Conf(self,node) else: return Conf( self, self.__dom.getAll("conf")[0] ) def save(self): """ enregistre la configuration """ self.__dom.saveAs(self.__file) if __name__ == "__main__": #~ try: #~ os.unlink("test.xml") #~ except: #~ pass f = FeedBox("test.xml") c = f.getConf() c["region"]=56 c["region"]=( {u"kkçàak":122,23:(1,2,3)}) print c["koko"] f.save()
Python
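A hedged sketch of the FeedBox XML configuration store above (Python 2); `test.xml` is a throwaway file and the stored values are arbitrary.

fb = FeedBox("test.xml")                       # throwaway conf file
conf = fb.getConf()                            # global conf section
conf[u"region"] = 56                           # simple value
conf[u"servers"] = ({u"host": u"front", u"port": 8080},)   # nested data round-trips
print conf[u"region"], len(conf)

plug = fb.getConf("myplugin")                  # per-plugin conf section
plug[u"enabled"] = 1
fb.save()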
#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################################ ## Simplification Wrapper around minidom ## ensure "unicode" manipulation ## ## manatlan ################################################################################ ## XmlDoc wrap DocumentObject ## XmlNode wrap NodeObject ################################################################################ from xml.dom.minidom import parse, parseString import urllib def u(s): """ return any s as "unicode string" """ if type(s)==unicode: return s elif type(s)== str: try: v=s.decode("utf_8") except: try: v=s.decode("mbcs") except: try: v=s.decode("cp1252") except: raise "u() unicodification error : "+str([s]) return v else: return unicode(s) ################################################################################ class XmlDoc(object): ################################################################################ def __init__(self,file="",xml="",url="",dom=None): if file: self.__dom = parse(file) elif xml: self.__dom = parseString(xml) elif url: f = urllib.urlopen(url) data = f.read() f.close() self.__dom = parseString(data) elif dom: self.__dom = dom else: raise "bad XmlDoc call()" def __getDE(self): """ recupere la node documentElement (root) du doc """ return XmlNode(self.__dom.documentElement) documentElement=property(__getDE) def getAll(self,n): """ retourne une liste des xmlNode du nom de "n" de la dom """ return [XmlNode(i) for i in self.__dom.getElementsByTagName(u(n))] def createNode(self,n,attrs={}): """ cree une xmlNode du nom de "n" avec ses attributs(dict) "attr" si renseigné """ node=self.__dom.createElement(u(n)) if attrs: for i in attrs: nom = u(i) val = u(attrs[i]) node.setAttributeNode(self.__dom.createAttribute(nom)) node.setAttribute(nom,val) return XmlNode(node) def dump(self): """ renvoi le contenu xml de la dom """ self.crunch() xml = self.__dom.toprettyxml() xml=xml.replace( u"""<?xml version="1.0" ?>""", u"""<?xml version="1.0" encoding="utf-8" ?>""") return xml # en UNICODE !!! def saveAs(self,name): """ enregistre la dom dans le fichier "name" """ fid = open(name,"w") if fid: #~ self.__dom.writexml(fid,encoding="utf-8",addindent="\t",newl="\r") # BUGGED !!! fid.write( self.dump().encode("utf_8") ) fid.close() return True return False def crunch(self,node=None): """ supprime les nodes text qui sont vides """ if not node: node = self.__dom.documentElement aeff=[] for i in node.childNodes: if i.nodeType == i.TEXT_NODE: content = i.data.strip(" \r\n\t") if not content: aeff.append(i) else: i.data = content elif i.nodeType == i.ELEMENT_NODE: self.crunch(i) for i in aeff: i.parentNode.removeChild(i) ################################################################################ class XmlNode(object): # renvoi de l'utf8 ;-( ... 
comme libxml2 ################################################################################ def __getOD(self): """ recupere le XmlDoc maitre """ return XmlDoc(dom = self.__node.ownerDocument) ownerDocument=property(__getOD) def __getCN(self): """ recupere la liste de ses childnodes """ return [XmlNode(i) for i in self.__node.childNodes] childNodes=property(__getCN) def __getN(self): """ recupere la vrai NODE ;-( """ return self.__node node=property(__getN) def __init__(self,node): self.__node = node def getAttr(self,nom): """ retourne le contenu de l'attribut "nom" de la node """ # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * return self.__node.getAttribute(u(nom)) #~ return self.__node.getAttribute(u(nom)).encode("utf_8") # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * def setAttr(self,nom,val): """ retourne le contenu de l'attribut "nom" de la node """ return self.__node.setAttribute(u(nom),u(val)) def removeChilds(self): """ supprimes les nodes filles""" for node in self.__node.childNodes: self.__node.removeChild(node) def setText(self,val): """ màj le contenu texte de la node """ self.removeChilds() t=self.__node.ownerDocument.createTextNode(u(val)) # val en unicode !!! self.__node.appendChild(t) def getText(self): """ recupere le contenu texte de la node """ rc = u"" for node in self.__node.childNodes: if node.nodeType == node.TEXT_NODE: rc = rc + node.data # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * return rc.strip() #~ return rc.strip().encode("utf_8") # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * #* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * def appendChild(self,n): """ ajoute la xmlNode "n" à la node """ return XmlNode(self.__node.appendChild(n.__node)) def unlink(self): """ supprime la node """ self.__node.parentNode.removeChild(self.__node) del(self) def getAll(self,n): """ retourne une liste des xmlNode du nom de "n" à partir de la node """ return [XmlNode(i) for i in self.__node.getElementsByTagName(u(n))] if __name__ == "__main__": x=XmlDoc(xml="<root><a/><a/></root>") n= x.getAll("a") n[0].setText(u"fdés") x.saveAs("jo.xml") x=XmlDoc(file="jo.xml") n= x.getAll("a") q=x.createNode("kàkà",{"jimà":12,u"kà":12.23}) q.setText(u"àùùù") n[0].appendChild(q) n[1].setText(u"fdés") n[1].setAttr("joé","jimà") x.saveAs("jo.xml") print x.dump().encode("utf_8")
Python
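A hedged sketch of the XmlDoc/XmlNode wrapper above (Python 2); everything passed in is unicode, and `demo.xml` is a throwaway output path.

doc = XmlDoc(xml="<root><item/></root>")
item = doc.getAll("item")[0]
item.setAttr(u"id", u"42")
item.setText(u"héllo")                         # unicode in, unicode out
sub = doc.createNode("sub", {"k": "v"})        # attributes passed as a dict
item.appendChild(sub)
print item.getText().encode("utf_8")           # -> héllo
doc.saveAs("demo.xml")                         # written as utf-8 on disk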
#coding=utf-8 # dump python dict to ini format file # Author: limodou (limodou@gmail.com) # Copyleft GPL # $Revision$ import sys import locale import os.path section_delimeter = '/' class DictNode(object): def __init__(self, values, encoding=None, root=None, section=[]): self._items = values self._encoding = encoding self._root = root self._section = section def __getitem__(self, name): if self._items.has_key(name): value = self._items[name] if isinstance(value, dict): return DictNode(value, self._encoding, self._root, self._section + [name]) else: return value else: self._items[name] = {} return DictNode(self._items[name], self._encoding, self._root, self._section + [name]) def __setitem__(self, name, value): if section_delimeter in name: sec = name.split(section_delimeter) obj = self._items for i in sec[:-1]: if obj.has_key(i): if isinstance(obj[i], dict): obj = obj[i] else: obj[i] = {} #may lost some data obj = obj[i] else: obj[i] = {} obj = obj[i] obj[sec[-1]] = value else: self._items[name] = value def __delitem__(self, name): if self._items.has_key(name): del self._items[name] def __repr__(self): return repr(self.items) def __getattr__(self, name): return self.__getitem__(name) def __setattr__(self, name, value): if name.startswith('_'): if name == '_comment': self._root._comments[section_delimeter.join(self._section)] = value else: self.__dict__[name] = value else: self.__setitem__(name, value) def comment(self, name, comment): if name: self._root._comments[section_delimeter.join(self._section + [name])] = comment else: self._root._comments[section_delimeter.join(self._section)] = comment def __delattr__(self, name): if self._items.has_key(name): del self._items[name] def __str__(self): return repr(self._items) def __len__(self): return len(self._items) def has_key(self, name): return self._items.has_key(name) def items(self): return self._items.items() def setdefault(self, name, value): return self._items.setdefault(name, value) def get(self, name, default=None): return self._items.get(name, None) def keys(self): return self._items.keys() def values(self): return self._items.values() class DictIni(DictNode): def __init__(self, inifile=None, values=None, encoding=None, commentdelimeter='#'): self._items = {} self._inifile = inifile self._root = self self._section = [] self._commentdelimeter = commentdelimeter if values is not None: self._items = values self._comments = {} self._encoding = getdefaultencoding(encoding) if self._inifile and os.path.exists(self._inifile): self.read(self._inifile, self._encoding) def setfilename(self, filename): self._inifile = filename def getfilename(self): return self._inifile def save(self, inifile=None, encoding=None): if inifile is None: inifile = self._inifile if isinstance(inifile, (str, unicode)): f = file(inifile, 'w') elif isinstance(inifile, file): f = inifile else: f = inifile if not f: f = sys.stdout if encoding is None: encoding = self._encoding f.write(self._savedict([], self._items, encoding)) if isinstance(inifile, (str, unicode)): f.close() def _savedict(self, section, values, encoding): if values: buf = [] default = [] for key, value in values.items(): if isinstance(value, dict): sec = section[:] sec.append(key) buf.append(self._savedict(sec, value, encoding)) else: c = self._comments.get(section_delimeter.join(section + [key]), '') if c: lines = c.splitlines() default.append('\n'.join(['%s %s' % (self._commentdelimeter, x) for x in lines])) default.append("%s = %s" % (key, uni_prt(value, encoding))) if default: buf.insert(0, 
'\n'.join(default)) buf.insert(0, '[%s]' % section_delimeter.join(section)) c = self._comments.get(section_delimeter.join(section), '') if c: lines = c.splitlines() buf.insert(0, '\n'.join(['%s %s' % (self._commentdelimeter, x) for x in lines])) return '\n'.join(buf + ['']) else: return '' def read(self, inifile=None, encoding=None): if inifile is None: inifile = self._inifile if isinstance(inifile, (str, unicode)): try: f = file(inifile, 'r') except: return #may raise Exception is better elif isinstance(inifile, file): f = inifile else: f = inifile if not f: f = sys.stdin if encoding is None: encoding = self._encoding comments = [] for line in f.readlines(): line = line.strip() if not line: continue if line.startswith(self._commentdelimeter): comments.append(line[1:].lstrip()) continue if line.startswith('['): #section section = line[1:-1] #if comment then set it if comments: self.comment(section, '\n'.join(comments)) comments = [] continue key, value = line.split('=', 1) key = key.strip() value = process_value(value.strip(), encoding) if section: self.__setitem__(section + section_delimeter + key, value) #if comment then set it if comments: self.__getitem__(section).comment(key, '\n'.join(comments)) comments = [] else: self.__setitem__(key, value) #if comment then set it if comments: self.comment(key, '\n'.join(comments)) comments = [] if isinstance(inifile, (str, unicode)): f.close() def process_value(value, encoding=None): length = len(value) t = value i = 0 r = [] buf = [] listflag = False while i < length: if t[i] == '"': #string quote buf.append(t[i]) i += 1 while t[i] != '"' or (t[i] == '"' and t[i-1] == '\\'): buf.append(t[i]) i += 1 buf.append(t[i]) i += 1 elif t[i] == ',': r.append(''.join(buf)) buf = [] i += 1 listflag = True elif t[i] == 'u': buf.append(t[i]) i += 1 else: buf.append(t[i]) i += 1 while i < length and t[i] != ',': buf.append(t[i]) i += 1 if buf: r.append(''.join(buf)) result = [] for i in r: if i.isdigit(): result.append(int(i)) elif i and i.startswith('u"'): result.append(unicode(unescstr(i[1:]), encoding)) else: result.append(unescstr(i)) if listflag: return result elif result: return result[0] else: return '' def unescstr(value): if value.startswith('"') and value.endswith('"'): value = value[1:-1] escapechars = [("\\", "\\\\"), ("'", r"\'"), ('\"', r'\"'), ('\b', r'\b'), ('\t', r"\t"), ('\r', r"\r"), ('\n', r"\n")] for item in escapechars: k, v = item value = value.replace(v, k) return value def getdefaultencoding(encoding): if not encoding: encoding = locale.getdefaultlocale()[1] if not encoding: encoding = sys.getfilesystemencoding() if not encoding: encoding = 'utf-8' return encoding def uni_prt(a, encoding=None): escapechars = [("\\", "\\\\"), ("'", r"\'"), ('\"', r'\"'), ('\b', r'\b'), ('\t', r"\t"), ('\r', r"\r"), ('\n', r"\n")] s = [] if isinstance(a, (list, tuple)): for i, k in enumerate(a): s.append(uni_prt(k, encoding)) s.append(',') elif isinstance(a, str): t = a for i in escapechars: t = t.replace(i[0], i[1]) if ' ' in t or ',' in t or t.isdigit(): s.append('"%s"' % t) else: s.append("%s" % t) elif isinstance(a, unicode): t = a for i in escapechars: t = t.replace(i[0], i[1]) s.append('u"%s"' % t.encode(encoding)) else: s.append(str(a)) return ''.join(s) if __name__ == '__main__': # d = DictIni() # d._comment = 'Test\nTest2' # d.a = 'b' # d['b'] = 3 # d.c.d = (1,2,'b asf aaa') # d['s']['t'] = u'中国' # d['s'].a = 1 # d['m.m'] = 'testing' # # d.t.m.p = '3' # d.setfilename('test.ini') # d.t.m.comment('p', 'PTesting') # print d.getfilename() # print 
'---------------------' # # d.save() # # a = process_value('1,abc,"aa cc",," ,\\"sdf",u"aaa"', 'ascii') # print a # print uni_prt(a, 'utf-8') # # t = DictIni(inifile='test.ini') # print t # # t.setfilename('test1.ini') # t.save() t = DictIni(inifile='confbot.ini') print t t.setfilename('test2.ini') t.save()
Python
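A hedged sketch of DictIni, the dict-backed ini reader/writer above (Python 2); `demo.ini` is a throwaway filename and the keys are invented.

d = DictIni('demo.ini')               # throwaway file, created on save()
d.server.host = 'localhost'           # sections auto-create on access
d.server.port = 8080                  # ints round-trip as ints
d['server/user'] = u'bob'             # '/' is the section delimiter
d.server.comment('port', 'tcp port to bind')
d.save()

d2 = DictIni('demo.ini')              # reload and read back
print d2.server.host, d2.server.port, d2.server.user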
# runtime.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides runtime services for templates, including Context, Namespace, and various helper functions.""" from mako import exceptions, util import inspect, sys class Context(object): """provides runtime namespace, output buffer, and various callstacks for templates.""" def __init__(self, buffer, **data): self._buffer_stack = [buffer] self._data = data self._kwargs = data.copy() self._with_template = None self.namespaces = {} # "capture" function which proxies to the generic "capture" function data['capture'] = lambda x, *args, **kwargs: capture(self, x, *args, **kwargs) # "caller" stack used by def calls with content self.caller_stack = data['caller'] = CallerStack() lookup = property(lambda self:self._with_template.lookup) kwargs = property(lambda self:self._kwargs.copy()) def push_caller(self, caller): self.caller_stack.append(caller) def pop_caller(self): del self.caller_stack[-1] def keys(self): return self._data.keys() def __getitem__(self, key): return self._data[key] def push_buffer(self): """push a capturing buffer onto this Context.""" self._buffer_stack.append(util.FastEncodingBuffer()) def pop_buffer(self): """pop the most recent capturing buffer from this Context.""" return self._buffer_stack.pop() def get(self, key, default=None): return self._data.get(key, default) def write(self, string): """write a string to this Context's underlying output buffer.""" self._buffer_stack[-1].write(string) def _copy(self): c = Context.__new__(Context) c._buffer_stack = self._buffer_stack c._data = self._data.copy() c._kwargs = self._kwargs c._with_template = self._with_template c.namespaces = self.namespaces c.caller_stack = self.caller_stack return c def locals_(self, d): """create a new Context with a copy of this Context's current state, updated with the given dictionary.""" if len(d) == 0: return self c = self._copy() c._data.update(d) return c def _clean_inheritance_tokens(self): """create a new copy of this Context with tokens related to inheritance state removed.""" c = self._copy() x = c._data x.pop('self', None) x.pop('parent', None) x.pop('next', None) return c class CallerStack(list): def __init__(self): self.nextcaller = None def __nonzero__(self): return self._get_caller() and True or False def _get_caller(self): return self.nextcaller or self[-1] def __getattr__(self, key): return getattr(self._get_caller(), key) def push_frame(self): self.append(self.nextcaller or None) self.nextcaller = None def pop_frame(self): self.pop() class Undefined(object): """represents an undefined value in a template.""" def __str__(self): raise NameError("Undefined") def __nonzero__(self): return False UNDEFINED = Undefined() class Namespace(object): """provides access to collections of rendering methods, which can be local, from other templates, or from imported modules""" def __init__(self, name, context, module=None, template=None, templateuri=None, callables=None, inherits=None, populate_self=True, calling_uri=None): self.name = name if module is not None: mod = __import__(module) for token in module.split('.')[1:]: mod = getattr(mod, token) self._module = mod else: self._module = None if templateuri is not None: self.template = _lookup_template(context, templateuri, calling_uri) self._templateuri = self.template.module._template_uri else: self.template = template if self.template is not None: 
self._templateuri = self.template.module._template_uri self.context = context self.inherits = inherits if callables is not None: self.callables = dict([(c.func_name, c) for c in callables]) else: self.callables = None if populate_self and self.template is not None: (lclcallable, self.context) = _populate_self_namespace(context, self.template, self_ns=self) module = property(lambda s:s._module or s.template.module) filename = property(lambda s:s._module and s._module.__file__ or s.template.filename) uri = property(lambda s:s.template.uri) def get_namespace(self, uri): """return a namespace corresponding to the given template uri. if a relative uri, it is adjusted to that of the template of this namespace""" key = (self, uri) if self.context.namespaces.has_key(key): return self.context.namespaces[key] else: ns = Namespace(uri, self.context, templateuri=uri, calling_uri=self._templateuri) self.context.namespaces[key] = ns return ns def get_template(self, uri): return _lookup_template(self.context, uri, self._templateuri) def get_cached(self, key, **kwargs): if self.template: if self.template.cache_dir: kwargs.setdefault('data_dir', self.template.cache_dir) if self.template.cache_type: kwargs.setdefault('type', self.template.cache_type) if self.template.cache_url: kwargs.setdefault('url', self.template.cache_url) return self.template.module._template_cache.get(key, **kwargs) def include_file(self, uri, **kwargs): """include a file at the given uri""" _include_file(self.context, uri, self._templateuri, **kwargs) def _populate(self, d, l): for ident in l: if ident == '*': for (k, v) in self._get_star(): d[k] = v else: d[ident] = getattr(self, ident) def _get_star(self): if self.callables is not None: for k in self.callables: yield (k, self.callables[key]) if self.template is not None: def get(key): callable_ = self.template.get_def(key).callable_ return lambda *args, **kwargs:callable_(self.context, *args, **kwargs) for k in self.template.module._exports: yield (k, get(k)) if self._module is not None: def get(key): callable_ = getattr(self._module, key) return lambda *args, **kwargs:callable_(self.context, *args, **kwargs) for k in dir(self._module): if k[0] != '_': yield (k, get(k)) def __getattr__(self, key): if self.callables is not None: try: return self.callables[key] except KeyError: pass if self.template is not None: try: callable_ = self.template.get_def(key).callable_ return lambda *args, **kwargs:callable_(self.context, *args, **kwargs) except AttributeError: pass if self._module is not None: try: callable_ = getattr(self._module, key) return lambda *args, **kwargs:callable_(self.context, *args, **kwargs) except AttributeError: pass if self.inherits is not None: return getattr(self.inherits, key) raise exceptions.RuntimeException("Namespace '%s' has no member '%s'" % (self.name, key)) def supports_caller(func): """apply a caller_stack compatibility decorator to a plain Python function.""" def wrap_stackframe(context, *args, **kwargs): context.caller_stack.push_frame() try: return func(context, *args, **kwargs) finally: context.caller_stack.pop_frame() return wrap_stackframe def capture(context, callable_, *args, **kwargs): """execute the given template def, capturing the output into a buffer.""" if not callable(callable_): raise exceptions.RuntimeException("capture() function expects a callable as its argument (i.e. 
capture(func, *args, **kwargs))") context.push_buffer() try: callable_(*args, **kwargs) finally: buf = context.pop_buffer() return buf.getvalue() def _include_file(context, uri, calling_uri, **kwargs): """locate the template from the given uri and include it in the current output.""" template = _lookup_template(context, uri, calling_uri) (callable_, ctx) = _populate_self_namespace(context._clean_inheritance_tokens(), template) callable_(ctx, **_kwargs_for_callable(callable_, context._data, **kwargs)) def _inherit_from(context, uri, calling_uri): """called by the _inherit method in template modules to set up the inheritance chain at the start of a template's execution.""" if uri is None: return None template = _lookup_template(context, uri, calling_uri) self_ns = context['self'] ih = self_ns while ih.inherits is not None: ih = ih.inherits lclcontext = context.locals_({'next':ih}) ih.inherits = Namespace("self:%s" % template.uri, lclcontext, template = template, populate_self=False) context._data['parent'] = lclcontext._data['local'] = ih.inherits callable_ = getattr(template.module, '_mako_inherit', None) if callable_ is not None: ret = callable_(template, lclcontext) if ret: return ret gen_ns = getattr(template.module, '_mako_generate_namespaces', None) if gen_ns is not None: gen_ns(context) return (template.callable_, lclcontext) def _lookup_template(context, uri, relativeto): lookup = context._with_template.lookup if lookup is None: raise exceptions.TemplateLookupException("Template '%s' has no TemplateLookup associated" % context._with_template.uri) uri = lookup.adjust_uri(uri, relativeto) try: return lookup.get_template(uri) except exceptions.TopLevelLookupException, e: raise exceptions.TemplateLookupException(str(e)) def _populate_self_namespace(context, template, self_ns=None): if self_ns is None: self_ns = Namespace('self:%s' % template.uri, context, template=template, populate_self=False) context._data['self'] = context._data['local'] = self_ns if hasattr(template.module, '_mako_inherit'): ret = template.module._mako_inherit(template, context) if ret: return ret return (template.callable_, context) def _render(template, callable_, args, data, as_unicode=False): """create a Context and return the string output of the given template and template callable.""" if as_unicode: buf = util.FastEncodingBuffer() elif template.output_encoding: buf = util.FastEncodingBuffer(template.output_encoding, template.encoding_errors) else: buf = util.StringIO() context = Context(buf, **data) context._with_template = template _render_context(template, callable_, context, *args, **_kwargs_for_callable(callable_, data)) return context.pop_buffer().getvalue() def _kwargs_for_callable(callable_, data, **kwargs): argspec = inspect.getargspec(callable_) namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None] for arg in namedargs: if arg != 'context' and arg in data and arg not in kwargs: kwargs[arg] = data[arg] return kwargs def _render_context(tmpl, callable_, context, *args, **kwargs): import mako.template as template # create polymorphic 'self' namespace for this template with possibly updated context if not isinstance(tmpl, template.DefTemplate): # if main render method, call from the base of the inheritance stack (inherit, lclcontext) = _populate_self_namespace(context, tmpl) _exec_template(inherit, lclcontext, args=args, kwargs=kwargs) else: # otherwise, call the actual rendering method specified (inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent) _exec_template(callable_, 
context, args=args, kwargs=kwargs)

def _exec_template(callable_, context, args=None, kwargs=None):
    """execute a rendering callable given the callable, a Context, and optional
    explicit arguments.

    the contextual Template will be located if it exists, and the error handling
    options specified on that Template will be interpreted here."""
    template = context._with_template
    if template is not None and (template.format_exceptions or template.error_handler):
        error = None
        try:
            callable_(context, *args, **kwargs)
        except Exception, e:
            error = e
        except:
            e = sys.exc_info()[0]
            error = e
        if error:
            if template.error_handler:
                result = template.error_handler(context, error)
                if not result:
                    raise error
            else:
                context._buffer_stack = [util.StringIO()]
                error_template = exceptions.html_error_template()
                context._with_template = error_template
                error_template.render_context(context, error=error)
    else:
        callable_(context, *args, **kwargs)
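# --- illustrative sketch, not part of runtime.py ---
# demonstrates Context buffering using only the classes defined above;
# the variable names (buf, ctx, captured) are hypothetical.
if __name__ == '__main__':
    from StringIO import StringIO
    buf = StringIO()
    ctx = Context(buf, username='ed')
    ctx.write("Hello, %s. " % ctx.get('username', UNDEFINED))
    # push_buffer()/pop_buffer() divert output into a capture buffer,
    # which is how capture() collects the output of a def
    ctx.push_buffer()
    ctx.write("captured text")
    captured = ctx.pop_buffer().getvalue()
    ctx.write("got %r from the capture buffer." % captured)
    print buf.getvalue()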
Python
# pygen.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """utilities for generating and formatting literal Python code.""" import re, string from StringIO import StringIO class PythonPrinter(object): def __init__(self, stream): # indentation counter self.indent = 0 # a stack storing information about why we incremented # the indentation counter, to help us determine if we # should decrement it self.indent_detail = [] # the string of whitespace multiplied by the indent # counter to produce a line self.indentstring = " " # the stream we are writing to self.stream = stream # a list of lines that represents a buffered "block" of code, # which can be later printed relative to an indent level self.line_buffer = [] self.in_indent_lines = False self._reset_multi_line_flags() def write(self, text): self.stream.write(text) def write_indented_block(self, block): """print a line or lines of python which already contain indentation. The indentation of the total block of lines will be adjusted to that of the current indent level.""" self.in_indent_lines = False for l in re.split(r'\r?\n', block): self.line_buffer.append(l) def writelines(self, *lines): """print a series of lines of python.""" for line in lines: self.writeline(line) def writeline(self, line): """print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line.""" if not self.in_indent_lines: self._flush_adjusted_lines() self.in_indent_lines = True decreased_indent = False if (line is None or re.match(r"^\s*#",line) or re.match(r"^\s*$", line) ): hastext = False else: hastext = True is_comment = line and len(line) and line[0] == '#' # see if this line should decrease the indentation level if (not decreased_indent and not is_comment and (not hastext or self._is_unindentor(line)) ): if self.indent > 0: self.indent -=1 # if the indent_detail stack is empty, the user # probably put extra closures - the resulting # module wont compile. if len(self.indent_detail) == 0: raise "Too many whitespace closures" self.indent_detail.pop() if line is None: return # write the line self.stream.write(self._indent_line(line) + "\n") # see if this line should increase the indentation level. # note that a line can both decrase (before printing) and # then increase (after printing) the indentation level. if re.search(r":[ \t]*(?:#.*)?$", line): # increment indentation count, and also # keep track of what the keyword was that indented us, # if it is a python compound statement keyword # where we might have to look for an "unindent" keyword match = re.match(r"^\s*(if|try|elif|while|for)", line) if match: # its a "compound" keyword, so we will check for "unindentors" indentor = match.group(1) self.indent +=1 self.indent_detail.append(indentor) else: indentor = None # its not a "compound" keyword. 
but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line) if m2: self.indent += 1 self.indent_detail.append(indentor) def close(self): """close this printer, flushing any remaining lines.""" self._flush_adjusted_lines() def _is_unindentor(self, line): """return true if the given line is an 'unindentor', relative to the last 'indent' event received.""" # no indentation detail has been pushed on; return False if len(self.indent_detail) == 0: return False indentor = self.indent_detail[-1] # the last indent keyword we grabbed is not a # compound statement keyword; return False if indentor is None: return False # if the current line doesnt have one of the "unindentor" keywords, # return False match = re.match(r"^\s*(else|elif|except|finally)", line) if not match: return False # whitespace matches up, we have a compound indentor, # and this line has an unindentor, this # is probably good enough return True # should we decide that its not good enough, heres # more stuff to check. #keyword = match.group(1) # match the original indent keyword #for crit in [ # (r'if|elif', r'else|elif'), # (r'try', r'except|finally|else'), # (r'while|for', r'else'), #]: # if re.match(crit[0], indentor) and re.match(crit[1], keyword): return True #return False def _indent_line(self, line, stripspace = ''): """indent the given line according to the current indent level. stripspace is a string of space that will be truncated from the start of the line before indenting.""" return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line) def _reset_multi_line_flags(self): """reset the flags which would indicate we are in a backslashed or triple-quoted section.""" (self.backslashed, self.triplequoted) = (False, False) def _in_multi_line(self, line): """return true if the given line is part of a multi-line block, via backslash or triple-quote.""" # we are only looking for explicitly joined lines here, # not implicit ones (i.e. brackets, braces etc.). 
this is just
        # to guard against the possibility of modifying the space inside
        # of a literal multiline string with unfortunately placed whitespace
        current_state = (self.backslashed or self.triplequoted)
        if re.search(r"\\$", line):
            self.backslashed = True
        else:
            self.backslashed = False
        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
        if triples == 1 or triples % 2 != 0:
            self.triplequoted = not self.triplequoted
        return current_state

    def _flush_adjusted_lines(self):
        stripspace = None
        self._reset_multi_line_flags()
        for entry in self.line_buffer:
            if self._in_multi_line(entry):
                self.stream.write(entry + "\n")
            else:
                entry = string.expandtabs(entry)
                if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
                    stripspace = re.match(r"^([ \t]*)", entry).group(1)
                self.stream.write(self._indent_line(entry, stripspace) + "\n")
        self.line_buffer = []
        self._reset_multi_line_flags()

def adjust_whitespace(text):
    """remove the left-whitespace margin of a block of Python code."""
    state = [False, False]
    (backslashed, triplequoted) = (0, 1)

    def in_multi_line(line):
        current_state = (state[backslashed] or state[triplequoted])
        if re.search(r"\\$", line):
            state[backslashed] = True
        else:
            state[backslashed] = False
        line = re.split(r'#', line)[0]
        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
        if triples == 1 or triples % 2 != 0:
            state[triplequoted] = not state[triplequoted]
        return current_state

    def _indent_line(line, stripspace=''):
        return re.sub(r"^%s" % stripspace, '', line)

    stream = StringIO()
    stripspace = None
    for line in re.split(r'\r?\n', text):
        if in_multi_line(line):
            stream.write(line + "\n")
        else:
            line = string.expandtabs(line)
            if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
                stripspace = re.match(r"^([ \t]*)", line).group(1)
            stream.write(_indent_line(line, stripspace) + "\n")
    return stream.getvalue()
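# --- illustrative sketch, not part of pygen.py ---
# shows PythonPrinter's automatic indent tracking (':'-terminated lines
# indent, None pops a level) and adjust_whitespace() stripping a margin.
if __name__ == '__main__':
    buf = StringIO()
    printer = PythonPrinter(buf)
    printer.writelines(
        "def greet(name):",
        "if name:",
        "return 'hello, %s' % name",
        None,
        "return 'hello, world'",
        None,
    )
    printer.close()
    print buf.getvalue()
    print adjust_whitespace("    x = 1\n    y = 2\n")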
Python
# lookup.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import os, stat, posixpath, re from mako import exceptions, util from mako.template import Template try: import threading except: import dummy_threading as threading class TemplateCollection(object): def has_template(self, uri): try: self.get_template(uri) return True except exceptions.TemplateLookupException, e: return False def get_template(self, uri, relativeto=None): raise NotImplementedError() def filename_to_uri(self, uri, filename): """convert the given filename to a uri relative to this TemplateCollection.""" return uri def adjust_uri(self, uri, filename): """adjust the given uri based on the calling filename. when this method is called from the runtime, the 'filename' parameter is taken directly to the 'filename' attribute of the calling template. Therefore a custom TemplateCollection subclass can place any string identifier desired in the "filename" parameter of the Template objects it constructs and have them come back here.""" return uri class TemplateLookup(TemplateCollection): def __init__(self, directories=None, module_directory=None, filesystem_checks=True, collection_size=-1, format_exceptions=False, error_handler=None, output_encoding=None, encoding_errors='strict', cache_type=None, cache_dir=None, cache_url=None, modulename_callable=None, default_filters=['unicode'], buffer_filters=[], imports=None, input_encoding=None, preprocessor=None): if isinstance(directories, basestring): directories = [directories] self.directories = [posixpath.normpath(d) for d in directories or []] self.module_directory = module_directory self.modulename_callable = modulename_callable self.filesystem_checks = filesystem_checks self.collection_size = collection_size self.template_args = {'format_exceptions':format_exceptions, 'error_handler':error_handler, 'output_encoding':output_encoding, 'encoding_errors':encoding_errors, 'input_encoding':input_encoding, 'module_directory':module_directory, 'cache_type':cache_type, 'cache_dir':cache_dir or module_directory, 'cache_url':cache_url, 'default_filters':default_filters, 'buffer_filters':buffer_filters, 'imports':imports, 'preprocessor':preprocessor} if collection_size == -1: self.__collection = {} self._uri_cache = {} else: self.__collection = util.LRUCache(collection_size) self._uri_cache = util.LRUCache(collection_size) self._mutex = threading.Lock() def get_template(self, uri): try: if self.filesystem_checks: return self.__check(uri, self.__collection[uri]) else: return self.__collection[uri] except KeyError: u = re.sub(r'^\/+', '', uri) for dir in self.directories: srcfile = posixpath.normpath(posixpath.join(dir, u)) if os.access(srcfile, os.F_OK): return self.__load(srcfile, uri) else: raise exceptions.TopLevelLookupException("Cant locate template for uri '%s'" % uri) def adjust_uri(self, uri, relativeto): """adjust the given uri based on the calling filename.""" if uri[0] != '/': if relativeto is not None: return posixpath.join(posixpath.dirname(relativeto), uri) else: return '/' + uri else: return uri def filename_to_uri(self, filename): try: return self._uri_cache[filename] except KeyError: value = self.__relativeize(filename) self._uri_cache[filename] = value return value def __relativeize(self, filename): """return the portion of a filename that is 'relative' to the directories in this lookup.""" filename = posixpath.normpath(filename) for 
dir in self.directories:
            if filename[0:len(dir)] == dir:
                return filename[len(dir):]
        else:
            return None

    def __load(self, filename, uri):
        self._mutex.acquire()
        try:
            try:
                # try returning from collection one more time in case
                # concurrent thread already loaded
                return self.__collection[uri]
            except KeyError:
                pass
            try:
                self.__collection[uri] = Template(uri=uri,
                    filename=posixpath.normpath(filename),
                    lookup=self,
                    module_filename=(self.modulename_callable is not None and self.modulename_callable(filename, uri) or None),
                    **self.template_args)
                return self.__collection[uri]
            except:
                self.__collection.pop(uri, None)
                raise
        finally:
            self._mutex.release()

    def __check(self, uri, template):
        if template.filename is None:
            return template
        if not os.access(template.filename, os.F_OK):
            self.__collection.pop(uri, None)
            raise exceptions.TemplateLookupException("Cant locate template for uri '%s'" % uri)
        elif template.module._modified_time < os.stat(template.filename)[stat.ST_MTIME]:
            self.__collection.pop(uri, None)
            return self.__load(template.filename, uri)
        else:
            return template

    def put_string(self, uri, text):
        self.__collection[uri] = Template(text, lookup=self, uri=uri, **self.template_args)

    def put_template(self, uri, template):
        self.__collection[uri] = template
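# --- illustrative sketch, not part of lookup.py ---
# minimal TemplateLookup usage; the directory and uris are hypothetical.
# put_string() registers an in-memory template under a uri which
# get_template() then resolves, here exercising <%inherit> as well.
if __name__ == '__main__':
    lookup = TemplateLookup(directories=['/tmp/mako_templates'], filesystem_checks=False)
    lookup.put_string('/base.html', "header / ${self.body()} / footer")
    lookup.put_string('/hello.html', "<%inherit file='/base.html'/>hello, ${name}")
    print lookup.get_template('/hello.html').render(name='ed')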
Python
# lexer.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides the Lexer class for parsing template strings into parse trees.""" import re, codecs from mako import parsetree, exceptions from mako.pygen import adjust_whitespace _regexp_cache = {} class Lexer(object): def __init__(self, text, filename=None, input_encoding=None, preprocessor=None): self.text = text self.filename = filename self.template = parsetree.TemplateNode(self.filename) self.matched_lineno = 1 self.matched_charpos = 0 self.lineno = 1 self.match_position = 0 self.tag = [] self.control_line = [] self.encoding = input_encoding if preprocessor is None: self.preprocessor = [] elif not hasattr(preprocessor, '__iter__'): self.preprocessor = [preprocessor] else: self.preprocessor = preprocessor exception_kwargs = property(lambda self:{'source':self.text, 'lineno':self.matched_lineno, 'pos':self.matched_charpos, 'filename':self.filename}) def match(self, regexp, flags=None): """match the given regular expression string and flags to the current text position. if a match occurs, update the current text and line position.""" mp = self.match_position try: reg = _regexp_cache[(regexp, flags)] except KeyError: if flags: reg = re.compile(regexp, flags) else: reg = re.compile(regexp) _regexp_cache[(regexp, flags)] = reg match = reg.match(self.text, self.match_position) if match: (start, end) = match.span() if end == start: self.match_position = end + 1 else: self.match_position = end self.matched_lineno = self.lineno lines = re.findall(r"\n", self.text[mp:self.match_position]) cp = mp - 1 while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'): cp -=1 self.matched_charpos = mp - cp self.lineno += len(lines) #print "MATCHED:", match.group(0), "LINE START:", self.matched_lineno, "LINE END:", self.lineno #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE") return match def parse_until_text(self, *text): startpos = self.match_position while True: match = self.match(r'#.*\n') if match: continue match = self.match(r'(\"\"\"|\'\'\'|\"|\')') if match: m = self.match(r'.*?%s' % match.group(1), re.S) if not m: raise exceptions.SyntaxException("Unmatched '%s'" % match.group(1), **self.exception_kwargs) else: match = self.match(r'(%s)' % r'|'.join(text)) if match: return (self.text[startpos:self.match_position-len(match.group(1))], match.group(1)) else: match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S) if not match: raise exceptions.SyntaxException("Expected: %s" % ','.join(text), **self.exception_kwargs) def append_node(self, nodecls, *args, **kwargs): kwargs.setdefault('source', self.text) kwargs.setdefault('lineno', self.matched_lineno) kwargs.setdefault('pos', self.matched_charpos) kwargs['filename'] = self.filename node = nodecls(*args, **kwargs) if len(self.tag): self.tag[-1].nodes.append(node) else: self.template.nodes.append(node) if isinstance(node, parsetree.Tag): if len(self.tag): node.parent = self.tag[-1] self.tag.append(node) elif isinstance(node, parsetree.ControlLine): if node.isend: self.control_line.pop() elif node.is_primary: self.control_line.append(node) elif len(self.control_line) and not self.control_line[-1].is_ternary(node.keyword): raise exceptions.SyntaxException("Keyword '%s' not a legal ternary for keyword '%s'" % (node.keyword, self.control_line[-1].keyword), **self.exception_kwargs) def escape_code(self, text): if 
self.encoding: return text.encode('ascii', 'backslashreplace') else: return text def parse(self): for preproc in self.preprocessor: self.text = preproc(self.text) if not isinstance(self.text, unicode) and self.text.startswith(codecs.BOM_UTF8): self.text = self.text[len(codecs.BOM_UTF8):] parsed_encoding = 'utf-8' me = self.match_encoding() if me is not None and me != 'utf-8': raise exceptions.CompileException("Found utf-8 BOM in file, with conflicting magic encoding comment of '%s'" % me, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename) else: parsed_encoding = self.match_encoding() if parsed_encoding: self.encoding = parsed_encoding if not isinstance(self.text, unicode): if self.encoding: try: self.text = self.text.decode(self.encoding) except UnicodeDecodeError, e: raise exceptions.CompileException("Unicode decode operation of encoding '%s' failed" % self.encoding, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename) else: try: self.text = self.text.decode() except UnicodeDecodeError, e: raise exceptions.CompileException("Could not read template using encoding of 'ascii'. Did you forget a magic encoding comment?", self.text.decode('utf-8', 'ignore'), 0, 0, self.filename) self.textlength = len(self.text) while (True): if self.match_position > self.textlength: break if self.match_end(): break if self.match_expression(): continue if self.match_control_line(): continue if self.match_comment(): continue if self.match_tag_start(): continue if self.match_tag_end(): continue if self.match_python_block(): continue if self.match_text(): continue if self.match_position > self.textlength: break raise exceptions.CompileException("assertion failed") if len(self.tag): raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs) if len(self.control_line): raise exceptions.SyntaxException("Unterminated control keyword: '%s'" % self.control_line[-1].keyword, self.text, self.control_line[-1].lineno, self.control_line[-1].pos, self.filename) return self.template def match_encoding(self): match = self.match(r'#.*coding[:=]\s*([-\w.]+).*\r?\n') if match: return match.group(1) else: return None def match_tag_start(self): match = self.match(r''' \<% # opening tag (\w+) # keyword ((?:\s+\w+|=|".*?"|'.*?')*) # attrname, = sign, string expression \s* # more whitespace (/)?> # closing ''', re.I | re.S | re.X) if match: (keyword, attr, isend) = (match.group(1).lower(), match.group(2), match.group(3)) self.keyword = keyword attributes = {} if attr: for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr): (key, val1, val2) = att attributes[key] = self.escape_code(val1 or val2) self.append_node(parsetree.Tag, keyword, attributes) if isend: self.tag.pop() else: if keyword == 'text': match = self.match(r'(.*?)(?=\</%text>)', re.S) if not match: raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs) self.append_node(parsetree.Text, match.group(1)) return self.match_tag_end() return True else: return False def match_tag_end(self): match = self.match(r'\</%[\t ]*(.+?)[\t ]*>') if match: if not len(self.tag): raise exceptions.SyntaxException("Closing tag without opening tag: </%%%s>" % match.group(1), **self.exception_kwargs) elif self.tag[-1].keyword != match.group(1): raise exceptions.SyntaxException("Closing tag </%%%s> does not match tag: <%%%s>" % (match.group(1), self.tag[-1].keyword), **self.exception_kwargs) self.tag.pop() return True else: return False def match_end(self): match = 
self.match(r'\Z', re.S) if match: string = match.group() if string: return string else: return True else: return False def match_text(self): match = self.match(r""" (.*?) # anything, followed by: ( (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based comment preceded by a consumed \n and whitespace | (?=\${) # an expression | (?=\#\*) # multiline comment | (?=</?[%&]) # a substitution or block or call start or end # - don't consume | (\\\r?\n) # an escaped newline - throw away | \Z # end of string )""", re.X | re.S) if match: text = match.group(1) self.append_node(parsetree.Text, text) return True else: return False def match_python_block(self): match = self.match(r"<%(!)?") if match: (line, pos) = (self.matched_lineno, self.matched_charpos) (text, end) = self.parse_until_text(r'%>') text = adjust_whitespace(text) self.append_node(parsetree.Code, self.escape_code(text), match.group(1)=='!', lineno=line, pos=pos) return True else: return False def match_expression(self): match = self.match(r"\${") if match: (line, pos) = (self.matched_lineno, self.matched_charpos) (text, end) = self.parse_until_text(r'\|', r'}') if end == '|': (escapes, end) = self.parse_until_text(r'}') else: escapes = "" self.append_node(parsetree.Expression, self.escape_code(text), escapes.strip(), lineno=line, pos=pos) return True else: return False def match_control_line(self): match = self.match(r"(?<=^)[\t ]*(%|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M) if match: operator = match.group(1) text = match.group(2) if operator == '%': m2 = re.match(r'(end)?(\w+)\s*(.*)', text) if not m2: raise exceptions.SyntaxException("Invalid control line: '%s'" % text, **self.exception_kwargs) (isend, keyword) = m2.group(1, 2) isend = (isend is not None) if isend: if not len(self.control_line): raise exceptions.SyntaxException("No starting keyword '%s' for '%s'" % (keyword, text), **self.exception_kwargs) elif self.control_line[-1].keyword != keyword: raise exceptions.SyntaxException("Keyword '%s' doesn't match keyword '%s'" % (text, self.control_line[-1].keyword), **self.exception_kwargs) self.append_node(parsetree.ControlLine, keyword, isend, self.escape_code(text)) else: self.append_node(parsetree.Comment, text) return True else: return False def match_comment(self): """matches the multiline version of a comment""" match = self.match(r"<%doc>(.*?)</%doc>", re.S) if match: self.append_node(parsetree.Comment, match.group(1)) return True else: return False
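# --- illustrative sketch, not part of lexer.py ---
# parses a small template string into a parse tree; the node types
# printed (ControlLine, Text, Expression) are defined in mako.parsetree.
if __name__ == '__main__':
    source = "% for x in y:\n    ${x | h}\n% endfor\n"
    for node in Lexer(source).parse().nodes:
        print repr(node)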
Python
# parsetree.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines the parse tree components for Mako templates.""" from mako import exceptions, ast, util, filters import re class Node(object): """base class for a Node in the parse tree.""" def __init__(self, source, lineno, pos, filename): self.source = source self.lineno = lineno self.pos = pos self.filename = filename exception_kwargs = property(lambda self:{'source':self.source, 'lineno':self.lineno, 'pos':self.pos, 'filename':self.filename}) def get_children(self): return [] def accept_visitor(self, visitor): def traverse(node): for n in node.get_children(): n.accept_visitor(visitor) method = getattr(visitor, "visit" + self.__class__.__name__, traverse) method(self) class TemplateNode(Node): """a 'container' node that stores the overall collection of nodes.""" def __init__(self, filename): super(TemplateNode, self).__init__('', 0, 0, filename) self.nodes = [] self.page_attributes = {} def get_children(self): return self.nodes def __repr__(self): return "TemplateNode(%s, %s)" % (repr(self.page_attributes), repr(self.nodes)) class ControlLine(Node): """defines a control line, a line-oriented python line or end tag. % if foo: (markup) % endif """ def __init__(self, keyword, isend, text, **kwargs): super(ControlLine, self).__init__(**kwargs) self.text = text self.keyword = keyword self.isend = isend self.is_primary = keyword in ['for','if', 'while', 'try'] if self.isend: self._declared_identifiers = [] self._undeclared_identifiers = [] else: code = ast.PythonFragment(text, **self.exception_kwargs) (self._declared_identifiers, self._undeclared_identifiers) = (code.declared_identifiers, code.undeclared_identifiers) def declared_identifiers(self): return self._declared_identifiers def undeclared_identifiers(self): return self._undeclared_identifiers def is_ternary(self, keyword): """return true if the given keyword is a ternary keyword for this ControlLine""" return keyword in { 'if':util.Set(['else', 'elif']), 'try':util.Set(['except', 'finally']), 'for':util.Set(['else']) }.get(self.keyword, []) def __repr__(self): return "ControlLine(%s, %s, %s, %s)" % (repr(self.keyword), repr(self.text), repr(self.isend), repr((self.lineno, self.pos))) class Text(Node): """defines plain text in the template.""" def __init__(self, content, **kwargs): super(Text, self).__init__(**kwargs) self.content = content def __repr__(self): return "Text(%s, %s)" % (repr(self.content), repr((self.lineno, self.pos))) class Code(Node): """defines a Python code block, either inline or module level. inline: <% x = 12 %> module level: <%! import logger %> """ def __init__(self, text, ismodule, **kwargs): super(Code, self).__init__(**kwargs) self.text = text self.ismodule = ismodule self.code = ast.PythonCode(text, **self.exception_kwargs) def declared_identifiers(self): return self.code.declared_identifiers def undeclared_identifiers(self): return self.code.undeclared_identifiers def __repr__(self): return "Code(%s, %s, %s)" % (repr(self.text), repr(self.ismodule), repr((self.lineno, self.pos))) class Comment(Node): """defines a comment line. # this is a comment """ def __init__(self, text, **kwargs): super(Comment, self).__init__(**kwargs) self.text = text def __repr__(self): return "Comment(%s, %s)" % (repr(self.text), repr((self.lineno, self.pos))) class Expression(Node): """defines an inline expression. 
${x+y} """ def __init__(self, text, escapes, **kwargs): super(Expression, self).__init__(**kwargs) self.text = text self.escapes = escapes self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs) self.code = ast.PythonCode(text, **self.exception_kwargs) def declared_identifiers(self): return [] def undeclared_identifiers(self): # TODO: make the "filter" shortcut list configurable at parse/gen time return self.code.undeclared_identifiers.union(self.escapes_code.undeclared_identifiers.difference(util.Set(filters.DEFAULT_ESCAPES.keys()))) def __repr__(self): return "Expression(%s, %s, %s)" % (repr(self.text), repr(self.escapes_code.args), repr((self.lineno, self.pos))) class _TagMeta(type): """metaclass to allow Tag to produce a subclass according to its keyword""" _classmap = {} def __init__(cls, clsname, bases, dict): if cls.__keyword__ is not None: cls._classmap[cls.__keyword__] = cls super(_TagMeta, cls).__init__(clsname, bases, dict) def __call__(cls, keyword, attributes, **kwargs): try: cls = _TagMeta._classmap[keyword] except KeyError: raise exceptions.CompileException("No such tag: '%s'" % keyword, source=kwargs['source'], lineno=kwargs['lineno'], pos=kwargs['pos'], filename=kwargs['filename']) return type.__call__(cls, keyword, attributes, **kwargs) class Tag(Node): """abstract base class for tags. <%sometag/> <%someothertag> stuff </%someothertag> """ __metaclass__ = _TagMeta __keyword__ = None def __init__(self, keyword, attributes, expressions, nonexpressions, required, **kwargs): """construct a new Tag instance. this constructor not called directly, and is only called by subclasses. keyword - the tag keyword attributes - raw dictionary of attribute key/value pairs expressions - a util.Set of identifiers that are legal attributes, which can also contain embedded expressions nonexpressions - a util.Set of identifiers that are legal attributes, which cannot contain embedded expressions **kwargs - other arguments passed to the Node superclass (lineno, pos)""" super(Tag, self).__init__(**kwargs) self.keyword = keyword self.attributes = attributes self._parse_attributes(expressions, nonexpressions) missing = [r for r in required if r not in self.parsed_attributes] if len(missing): raise exceptions.CompileException("Missing attribute(s): %s" % ",".join([repr(m) for m in missing]), **self.exception_kwargs) self.parent = None self.nodes = [] def is_root(self): return self.parent is None def get_children(self): return self.nodes def _parse_attributes(self, expressions, nonexpressions): undeclared_identifiers = util.Set() self.parsed_attributes = {} for key in self.attributes: if key in expressions: expr = [] for x in re.split(r'(\${.+?})', self.attributes[key]): m = re.match(r'^\${(.+?)}$', x) if m: code = ast.PythonCode(m.group(1), **self.exception_kwargs) undeclared_identifiers = undeclared_identifiers.union(code.undeclared_identifiers) expr.append(m.group(1)) else: if x: expr.append(repr(x)) self.parsed_attributes[key] = " + ".join(expr) elif key in nonexpressions: if re.search(r'${.+?}', self.attributes[key]): raise exceptions.CompileException("Attibute '%s' in tag '%s' does not allow embedded expressions" %(key, self.keyword), **self.exception_kwargs) self.parsed_attributes[key] = repr(self.attributes[key]) else: raise exceptions.CompileException("Invalid attribute for tag '%s': '%s'" %(self.keyword, key), **self.exception_kwargs) self.expression_undeclared_identifiers = undeclared_identifiers def declared_identifiers(self): return [] def undeclared_identifiers(self): 
return self.expression_undeclared_identifiers def __repr__(self): return "%s(%s, %s, %s, %s)" % (self.__class__.__name__, repr(self.keyword), repr(self.attributes), repr((self.lineno, self.pos)), repr([repr(x) for x in self.nodes])) class IncludeTag(Tag): __keyword__ = 'include' def __init__(self, keyword, attributes, **kwargs): super(IncludeTag, self).__init__(keyword, attributes, ('file', 'import', 'args'), (), ('file',), **kwargs) self.page_args = ast.PythonCode("__DUMMY(%s)" % attributes.get('args', ''), **self.exception_kwargs) def declared_identifiers(self): return [] def undeclared_identifiers(self): identifiers = self.page_args.undeclared_identifiers.difference(util.Set(["__DUMMY"])) return identifiers.union(super(IncludeTag, self).undeclared_identifiers()) class NamespaceTag(Tag): __keyword__ = 'namespace' def __init__(self, keyword, attributes, **kwargs): super(NamespaceTag, self).__init__(keyword, attributes, (), ('name','inheritable','file','import','module'), (), **kwargs) self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self)))) if not 'name' in attributes and not 'import' in attributes: raise exceptions.CompileException("'name' and/or 'import' attributes are required for <%namespace>", **self.exception_kwargs) def declared_identifiers(self): return [] class TextTag(Tag): __keyword__ = 'text' def __init__(self, keyword, attributes, **kwargs): super(TextTag, self).__init__(keyword, attributes, (), ('filter'), (), **kwargs) self.filter_args = ast.ArgumentList(attributes.get('filter', ''), **self.exception_kwargs) class DefTag(Tag): __keyword__ = 'def' def __init__(self, keyword, attributes, **kwargs): super(DefTag, self).__init__(keyword, attributes, ('buffered', 'cached', 'cache_key', 'cache_timeout', 'cache_type', 'cache_dir', 'cache_url'), ('name','filter'), ('name',), **kwargs) name = attributes['name'] if re.match(r'^[\w_]+$',name): raise exceptions.CompileException("Missing parenthesis in %def", **self.exception_kwargs) self.function_decl = ast.FunctionDecl("def " + name + ":pass", **self.exception_kwargs) self.name = self.function_decl.funcname self.filter_args = ast.ArgumentList(attributes.get('filter', ''), **self.exception_kwargs) def declared_identifiers(self): return self.function_decl.argnames def undeclared_identifiers(self): res = [] for c in self.function_decl.defaults: res += list(ast.PythonCode(c, **self.exception_kwargs).undeclared_identifiers) return res + list(self.filter_args.undeclared_identifiers.difference(util.Set(filters.DEFAULT_ESCAPES.keys()))) class CallTag(Tag): __keyword__ = 'call' def __init__(self, keyword, attributes, **kwargs): super(CallTag, self).__init__(keyword, attributes, ('args'), ('expr',), ('expr',), **kwargs) self.code = ast.PythonCode(attributes['expr'], **self.exception_kwargs) self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs) def declared_identifiers(self): return self.code.declared_identifiers.union(self.body_decl.argnames) def undeclared_identifiers(self): return self.code.undeclared_identifiers class InheritTag(Tag): __keyword__ = 'inherit' def __init__(self, keyword, attributes, **kwargs): super(InheritTag, self).__init__(keyword, attributes, ('file',), (), ('file',), **kwargs) class PageTag(Tag): __keyword__ = 'page' def __init__(self, keyword, attributes, **kwargs): super(PageTag, self).__init__(keyword, attributes, ('cached', 'cache_key', 'cache_timeout', 'cache_type', 'cache_dir', 'cache_url', 'args', 'expression_filter'), (), (), **kwargs) self.body_decl = 
ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs)
        self.filter_args = ast.ArgumentList(attributes.get('expression_filter', ''), **self.exception_kwargs)

    def declared_identifiers(self):
        return self.body_decl.argnames
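# --- illustrative sketch, not part of parsetree.py ---
# demonstrates accept_visitor() dispatch on a hand-built tree; 'meta'
# supplies the (source, lineno, pos, filename) metadata every Node needs.
if __name__ == '__main__':
    meta = dict(source='', lineno=1, pos=0, filename=None)
    root = TemplateNode(None)
    root.nodes.append(Text("some text", **meta))
    root.nodes.append(Expression("x+y", '', **meta))
    class Printer(object):
        def visitText(self, node):
            print "text:", node.content
        def visitExpression(self, node):
            print "expr:", node.text
    # Printer has no visitTemplateNode handler, so the default traversal
    # recurses into the children, which do have handlers
    root.accept_visitor(Printer())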
Python
"""adds autohandler functionality to Mako templates. requires that the TemplateLookup class is used with templates. usage: <%! from mako.ext.autohandler import autohandler %> <%inherit file="${autohandler(template, context)}"/> or with custom autohandler filename: <%! from mako.ext.autohandler import autohandler %> <%inherit file="${autohandler(template, context, name='somefilename')}"/> """ import posixpath, os, re def autohandler(template, context, name='autohandler'): lookup = context.lookup _template_uri = template.module._template_uri if not lookup.filesystem_checks: try: return lookup._uri_cache[(autohandler, _template_uri, name)] except KeyError: pass tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name] while len(tokens): path = '/' + '/'.join(tokens) if path != _template_uri and _file_exists(lookup, path): if not lookup.filesystem_checks: return lookup._uri_cache.setdefault((autohandler, _template_uri, name), path) else: return path if len(tokens) == 1: break tokens[-2:] = [name] if not lookup.filesystem_checks: return lookup._uri_cache.setdefault((autohandler, _template_uri, name), None) else: return None def _file_exists(lookup, path): psub = re.sub(r'^/', '',path) for d in lookup.directories: if os.access(d + '/' + psub, os.F_OK): return True else: return False
Python
"""preprocessing functions, used with the 'preprocessor' argument on Template, TemplateLookup""" import re def convert_comments(text): """preprocess old style comments. example: from mako.ext.preprocessors import convert_comments t = Template(..., preprocessor=preprocess_comments)""" return re.sub(r'(?<=\n)\s*#[^#]', "##", text) # TODO def create_tag(callable): """given a callable, extract the *args and **kwargs, and produce a preprocessor that will parse for <%<funcname> <args>> and convert to an appropriate <%call> statement. this allows any custom tag to be created which looks like a pure Mako-style tag.""" raise NotImplementedError("Future functionality....")
Python
import re try: set except NameError: from sets import Set as set from pygments.lexers.web import \ HtmlLexer, XmlLexer, JavascriptLexer, CssLexer from pygments.lexers.agile import PythonLexer from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ include, using, this from pygments.token import Error, Punctuation, \ Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal from pygments.util import html_doctype_matches, looks_like_xml class MakoLexer(RegexLexer): name = 'Mako' aliases = ['mako'] filenames = ['*.mao'] tokens = { 'root': [ (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)', bygroups(Text, Comment.Preproc, Keyword, Other)), (r'(\s*)(\%)([^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), (r'(\s*)(##[^\n]*)(\n|\Z)', bygroups(Text, Comment.Preproc, Other)), (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc), (r'(<%)(def|call|namespace|text)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), (r'(</%)(def|call|namespace|text)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), (r'<%(?=(include|inherit|namespace|page))', Comment.Preproc, 'ondeftags'), (r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(\$\{)(.*?)(\})', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'''(?sx) (.+?) # anything, followed by: (?: (?<=\n)(?=%|\#\#) | # an eval or comment line (?=\#\*) | # multiline comment (?=</?%) | # a python block # call start or end (?=\$\{) | # a substitution (?<=\n)(?=\s*%) | # - don't consume (\\\n) | # an escaped newline \Z # end of string ) ''', bygroups(Other, Operator)), (r'\s+', Text), ], 'ondeftags': [ (r'<%', Comment.Preproc), (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin), include('tag'), ], 'tag': [ (r'((?:\w+)\s*=)\s*(".*?")', bygroups(Name.Attribute, String)), (r'/?\s*>', Comment.Preproc, '#pop'), (r'\s+', Text), ], 'attr': [ ('".*?"', String, '#pop'), ("'.*?'", String, '#pop'), (r'[^\s>]+', String, '#pop'), ], } class MakoHtmlLexer(DelegatingLexer): name = 'HTML+Mako' aliases = ['html+mako'] def __init__(self, **options): super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, **options) class MakoXmlLexer(DelegatingLexer): name = 'XML+Mako' aliases = ['xml+mako'] def __init__(self, **options): super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, **options) class MakoJavascriptLexer(DelegatingLexer): name = 'JavaScript+Mako' aliases = ['js+mako', 'javascript+mako'] def __init__(self, **options): super(MakoJavascriptLexer, self).__init__(JavascriptLexer, MakoLexer, **options) class MakoCssLexer(DelegatingLexer): name = 'CSS+Mako' aliases = ['css+mako'] def __init__(self, **options): super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, **options)
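# --- illustrative sketch, not part of this module ---
# highlights a Mako snippet using the MakoLexer defined above;
# highlight() and TerminalFormatter are standard pygments entry points.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = "% for x in y:\n    ${x}\n% endfor\n"
    print highlight(code, MakoLexer(), TerminalFormatter())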
Python
import re

from mako.lookup import TemplateLookup
from mako.template import Template

class TGPlugin(object):
    """TurboGears compatible Template Plugin."""

    extension = 'mak'

    def __init__(self, extra_vars_func=None, options=None):
        self.extra_vars_func = extra_vars_func
        if not options:
            options = {}

        # Pull the options out and initialize the lookup
        tmpl_options = {}
        for k, v in options.iteritems():
            if k.startswith('mako.'):
                tmpl_options[k[5:]] = v
            elif k in ['directories', 'filesystem_checks', 'module_directory']:
                tmpl_options[k] = v
        self.tmpl_options = tmpl_options
        self.lookup = TemplateLookup(**tmpl_options)

    def load_template(self, templatename, template_string=None):
        """Loads a template from a file or a string"""
        if template_string is not None:
            return Template(template_string, **self.tmpl_options)
        # Translate TG dot notation to normal / template path
        if '/' not in templatename:
            templatename = '/' + templatename.replace('.', '/') + '.' + self.extension

        # Lookup template
        return self.lookup.get_template(templatename)

    def render(self, info, format="html", fragment=False, template=None):
        if isinstance(template, basestring):
            template = self.load_template(template)

        # Load extra vars func if provided
        if self.extra_vars_func:
            info.update(self.extra_vars_func())

        return template.render(**info)
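# --- illustrative sketch, not part of this module ---
# 'mako.'-prefixed options are stripped and forwarded to TemplateLookup.
# the directory and dotted template name are hypothetical; render() only
# succeeds if '/myapp/templates/index.mak' exists under the directory.
if __name__ == '__main__':
    plugin = TGPlugin(options={'mako.directories': ['/tmp/templates']})
    print plugin.render({'name': 'ed'}, template='myapp.templates.index')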
Python
# codegen.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides functionality for rendering a parsetree constructing into module source code.""" import time import re from mako.pygen import PythonPrinter from mako import util, ast, parsetree, filters MAGIC_NUMBER = 2 def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, source_encoding=None): """generate module source code given a parsetree node, uri, and optional source filename""" buf = util.FastEncodingBuffer() printer = PythonPrinter(buf) _GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, source_encoding), node) return buf.getvalue() class _CompileContext(object): def __init__(self, uri, filename, default_filters, buffer_filters, imports, source_encoding): self.uri = uri self.filename = filename self.default_filters = default_filters self.buffer_filters = buffer_filters self.imports = imports self.source_encoding = source_encoding class _GenerateRenderMethod(object): """a template visitor object which generates the full module source for a template.""" def __init__(self, printer, compiler, node): self.printer = printer self.last_source_line = -1 self.compiler = compiler self.node = node self.identifier_stack = [None] self.in_def = isinstance(node, parsetree.DefTag) if self.in_def: name = "render_" + node.name args = node.function_decl.get_argument_expressions() filtered = len(node.filter_args.args) > 0 buffered = eval(node.attributes.get('buffered', 'False')) cached = eval(node.attributes.get('cached', 'False')) defs = None pagetag = None else: defs = self.write_toplevel() pagetag = self.compiler.pagetag name = "render_body" if pagetag is not None: args = pagetag.body_decl.get_argument_expressions() if not pagetag.body_decl.kwargs: args += ['**pageargs'] cached = eval(pagetag.attributes.get('cached', 'False')) else: args = ['**pageargs'] cached = False buffered = filtered = False if args is None: args = ['context'] else: args = [a for a in ['context'] + args] self.write_render_callable(pagetag or node, name, args, buffered, filtered, cached) if defs is not None: for node in defs: _GenerateRenderMethod(printer, compiler, node) identifiers = property(lambda self:self.identifier_stack[-1]) def write_toplevel(self): """traverse a template structure for module-level directives and generate the start of module-level code.""" inherit = [] namespaces = {} module_code = [] encoding =[None] self.compiler.pagetag = None class FindTopLevel(object): def visitInheritTag(s, node): inherit.append(node) def visitNamespaceTag(s, node): namespaces[node.name] = node def visitPageTag(s, node): self.compiler.pagetag = node def visitCode(s, node): if node.ismodule: module_code.append(node) f = FindTopLevel() for n in self.node.nodes: n.accept_visitor(f) self.compiler.namespaces = namespaces module_ident = util.Set() for n in module_code: module_ident = module_ident.union(n.declared_identifiers()) module_identifiers = _Identifiers() module_identifiers.declared = module_ident # module-level names, python code self.printer.writeline("from mako import runtime, filters, cache") self.printer.writeline("UNDEFINED = runtime.UNDEFINED") self.printer.writeline("_magic_number = %s" % repr(MAGIC_NUMBER)) self.printer.writeline("_modified_time = %s" % repr(time.time())) self.printer.writeline("_template_filename=%s" % 
repr(self.compiler.filename)) self.printer.writeline("_template_uri=%s" % repr(self.compiler.uri)) self.printer.writeline("_template_cache=cache.Cache(__name__, _modified_time)") self.printer.writeline("_source_encoding=%s" % repr(self.compiler.source_encoding)) if self.compiler.imports: buf = '' for imp in self.compiler.imports: buf += imp + "\n" self.printer.writeline(imp) impcode = ast.PythonCode(buf, source='', lineno=0, pos=0, filename='template defined imports') else: impcode = None main_identifiers = module_identifiers.branch(self.node) module_identifiers.topleveldefs = module_identifiers.topleveldefs.union(main_identifiers.topleveldefs) [module_identifiers.declared.add(x) for x in ["UNDEFINED"]] if impcode: [module_identifiers.declared.add(x) for x in impcode.declared_identifiers] self.compiler.identifiers = module_identifiers self.printer.writeline("_exports = %s" % repr([n.name for n in main_identifiers.topleveldefs.values()])) self.printer.write("\n\n") if len(module_code): self.write_module_code(module_code) if len(inherit): self.write_namespaces(namespaces) self.write_inherit(inherit[-1]) elif len(namespaces): self.write_namespaces(namespaces) return main_identifiers.topleveldefs.values() def write_render_callable(self, node, name, args, buffered, filtered, cached): """write a top-level render callable. this could be the main render() method or that of a top-level def.""" self.printer.writelines( "def %s(%s):" % (name, ','.join(args)), "context.caller_stack.push_frame()", "try:" ) if buffered or filtered or cached: self.printer.writeline("context.push_buffer()") self.identifier_stack.append(self.compiler.identifiers.branch(self.node)) if not self.in_def and '**pageargs' in args: self.identifier_stack[-1].argument_declared.add('pageargs') if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared)>0): self.printer.writeline("__locals = dict(%s)" % ','.join(["%s=%s" % (x, x) for x in self.identifiers.argument_declared])) self.write_variable_declares(self.identifiers, toplevel=True) for n in self.node.nodes: n.accept_visitor(self) self.write_def_finish(self.node, buffered, filtered, cached) self.printer.writeline(None) self.printer.write("\n\n") if cached: self.write_cache_decorator(node, name, buffered, self.identifiers) def write_module_code(self, module_code): """write module-level template code, i.e. that which is enclosed in <%! 
%> tags in the template.""" for n in module_code: self.write_source_comment(n) self.printer.write_indented_block(n.text) def write_inherit(self, node): """write the module-level inheritance-determination callable.""" self.printer.writelines( "def _mako_inherit(template, context):", "_mako_generate_namespaces(context)", "return runtime._inherit_from(context, %s, _template_uri)" % (node.parsed_attributes['file']), None ) def write_namespaces(self, namespaces): """write the module-level namespace-generating callable.""" self.printer.writelines( "def _mako_get_namespace(context, name):", "try:", "return context.namespaces[(__name__, name)]", "except KeyError:", "_mako_generate_namespaces(context)", "return context.namespaces[(__name__, name)]", None,None ) self.printer.writeline("def _mako_generate_namespaces(context):") for node in namespaces.values(): if node.attributes.has_key('import'): self.compiler.has_ns_imports = True self.write_source_comment(node) if len(node.nodes): self.printer.writeline("def make_namespace():") export = [] identifiers = self.compiler.identifiers.branch(node) class NSDefVisitor(object): def visitDefTag(s, node): self.write_inline_def(node, identifiers, nested=False) export.append(node.name) vis = NSDefVisitor() for n in node.nodes: n.accept_visitor(vis) self.printer.writeline("return [%s]" % (','.join(export))) self.printer.writeline(None) callable_name = "make_namespace()" else: callable_name = "None" self.printer.writeline("ns = runtime.Namespace(%s, context._clean_inheritance_tokens(), templateuri=%s, callables=%s, calling_uri=_template_uri, module=%s)" % (repr(node.name), node.parsed_attributes.get('file', 'None'), callable_name, node.parsed_attributes.get('module', 'None'))) if eval(node.attributes.get('inheritable', "False")): self.printer.writeline("context['self'].%s = ns" % (node.name)) self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name)) self.printer.write("\n") if not len(namespaces): self.printer.writeline("pass") self.printer.writeline(None) def write_variable_declares(self, identifiers, toplevel=False, limit=None): """write variable declarations at the top of a function. the variable declarations are in the form of callable definitions for defs and/or name lookup within the function's context argument. the names declared are based on the names that are referenced in the function body, which don't otherwise have any explicit assignment operation. names that are assigned within the body are assumed to be locally-scoped variables and are not separately declared. for def callable definitions, if the def is a top-level callable then a 'stub' callable is generated which wraps the current Context into a closure. if the def is not top-level, it is fully rendered as a local closure.""" # collection of all defs available to us in this scope comp_idents = dict([(c.name, c) for c in identifiers.defs]) to_write = util.Set() # write "context.get()" for all variables we are going to need that arent in the namespace yet to_write = to_write.union(identifiers.undeclared) # write closure functions for closures that we define right here to_write = to_write.union(util.Set([c.name for c in identifiers.closuredefs.values()])) # remove identifiers that are declared in the argument signature of the callable to_write = to_write.difference(identifiers.argument_declared) # remove identifiers that we are going to assign to. in this way we mimic Python's behavior, # i.e. 
assignment to a variable within a block means that variable is now a "locally declared" var, # which cannot be referenced beforehand. to_write = to_write.difference(identifiers.locally_declared) # if a limiting set was sent, constraint to those items in that list # (this is used for the caching decorator) if limit is not None: to_write = to_write.intersection(limit) if toplevel and getattr(self.compiler, 'has_ns_imports', False): self.printer.writeline("_import_ns = {}") self.compiler.has_imports = True for ident, ns in self.compiler.namespaces.iteritems(): if ns.attributes.has_key('import'): self.printer.writeline("_mako_get_namespace(context, %s)._populate(_import_ns, %s)" % (repr(ident), repr(re.split(r'\s*,\s*', ns.attributes['import'])))) for ident in to_write: if ident in comp_idents: comp = comp_idents[ident] if comp.is_root(): self.write_def_decl(comp, identifiers) else: self.write_inline_def(comp, identifiers, nested=True) elif ident in self.compiler.namespaces: self.printer.writeline("%s = _mako_get_namespace(context, %s)" % (ident, repr(ident))) else: if getattr(self.compiler, 'has_ns_imports', False): self.printer.writeline("%s = _import_ns.get(%s, context.get(%s, UNDEFINED))" % (ident, repr(ident), repr(ident))) else: self.printer.writeline("%s = context.get(%s, UNDEFINED)" % (ident, repr(ident))) def write_source_comment(self, node): """write a source comment containing the line number of the corresponding template line.""" if self.last_source_line != node.lineno: self.printer.writeline("# SOURCE LINE %d" % node.lineno) self.last_source_line = node.lineno def write_def_decl(self, node, identifiers): """write a locally-available callable referencing a top-level def""" funcname = node.function_decl.funcname namedecls = node.function_decl.get_argument_expressions() nameargs = node.function_decl.get_argument_expressions(include_defaults=False) if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0): nameargs.insert(0, 'context.locals_(__locals)') else: nameargs.insert(0, 'context') self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls))) self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs))) self.printer.writeline(None) def write_inline_def(self, node, identifiers, nested): """write a locally-available def callable inside an enclosing def.""" namedecls = node.function_decl.get_argument_expressions() self.printer.writeline("def %s(%s):" % (node.name, ",".join(namedecls))) filtered = len(node.filter_args.args) > 0 buffered = eval(node.attributes.get('buffered', 'False')) cached = eval(node.attributes.get('cached', 'False')) self.printer.writelines( "context.caller_stack.push_frame()", "try:" ) if buffered or filtered or cached: self.printer.writelines( "context.push_buffer()", ) identifiers = identifiers.branch(node, nested=nested) self.write_variable_declares(identifiers) self.identifier_stack.append(identifiers) for n in node.nodes: n.accept_visitor(self) self.identifier_stack.pop() self.write_def_finish(node, buffered, filtered, cached) self.printer.writeline(None) if cached: self.write_cache_decorator(node, node.name, False, identifiers, inline=True) def write_def_finish(self, node, buffered, filtered, cached, callstack=True): """write the end section of a rendering function, either outermost or inline. this takes into account if the rendering function was filtered, buffered, etc. 
and closes the corresponding try: block if any, and writes code to retrieve captured content, apply filters, send proper return value.""" if not buffered and not cached and not filtered: self.printer.writeline("return ''") if callstack: self.printer.writelines( "finally:", "context.caller_stack.pop_frame()", None ) if buffered or filtered or cached: self.printer.writelines( "finally:", "_buf = context.pop_buffer()" ) if callstack: self.printer.writeline("context.caller_stack.pop_frame()") s = "_buf.getvalue()" if filtered: s = self.create_filter_callable(node.filter_args.args, s, False) self.printer.writeline(None) if buffered and not cached: s = self.create_filter_callable(self.compiler.buffer_filters, s, False) if buffered or cached: self.printer.writeline("return %s" % s) else: self.printer.writelines( "context.write(%s)" % s, "return ''" ) def write_cache_decorator(self, node_or_pagetag, name, buffered, identifiers, inline=False): """write a post-function decorator to replace a rendering callable with a cached version of itself.""" self.printer.writeline("__%s = %s" % (name, name)) cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name)) cacheargs = {} for arg in (('cache_type', 'type'), ('cache_dir', 'data_dir'), ('cache_timeout', 'expiretime'), ('cache_url', 'url')): val = node_or_pagetag.parsed_attributes.get(arg[0], None) if val is not None: if arg[1] == 'expiretime': cacheargs[arg[1]] = int(eval(val)) else: cacheargs[arg[1]] = val else: if self.compiler.pagetag is not None: val = self.compiler.pagetag.parsed_attributes.get(arg[0], None) if val is not None: if arg[1] == 'expiretime': cacheargs[arg[1]] = int(eval(val)) else: cacheargs[arg[1]] = val if inline: ctx_arg = "" else: ctx_arg = "context, " self.printer.writeline("def %s(%s*args, **kwargs):" % (name, ctx_arg)) self.write_variable_declares(identifiers, limit=node_or_pagetag.undeclared_identifiers()) if buffered: s = "context.get('local').get_cached(%s, %screatefunc=lambda:__%s(%s*args, **kwargs))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ctx_arg) # apply buffer_filters s = self.create_filter_callable(self.compiler.buffer_filters, s, False) self.printer.writelines("return " + s,None) else: self.printer.writelines( "context.write(context.get('local').get_cached(%s, %screatefunc=lambda:__%s(%s*args, **kwargs)))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ctx_arg), "return ''", None ) def create_filter_callable(self, args, target, is_expression): """write a filter-applying expression based on the filters present in the given filter names, adjusting for the global 'default' filter aliases as needed.""" def locate_encode(name): if re.match(r'decode\..+', name): return "filters."
+ name elif name == 'unicode': return 'unicode' else: return filters.DEFAULT_ESCAPES.get(name, name) if 'n' not in args: if is_expression: if self.compiler.pagetag: args = self.compiler.pagetag.filter_args.args + args if self.compiler.default_filters: args = self.compiler.default_filters + args for e in args: # if filter given as a function, get just the identifier portion if e == 'n': continue m = re.match(r'(.+?)(\(.*\))', e) if m: (ident, fargs) = m.group(1,2) f = locate_encode(ident) e = f + fargs else: x = e e = locate_encode(e) assert e is not None target = "%s(%s)" % (e, target) return target def visitExpression(self, node): self.write_source_comment(node) if len(node.escapes) or (self.compiler.pagetag is not None and len(self.compiler.pagetag.filter_args.args)) or len(self.compiler.default_filters): s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True) self.printer.writeline("context.write(%s)" % s) else: self.printer.writeline("context.write(%s)" % node.text) def visitControlLine(self, node): if node.isend: self.printer.writeline(None) else: self.write_source_comment(node) self.printer.writeline(node.text) def visitText(self, node): self.write_source_comment(node) self.printer.writeline("context.write(%s)" % repr(node.content)) def visitTextTag(self, node): filtered = len(node.filter_args.args) > 0 if filtered: self.printer.writelines( "context.push_buffer()", "try:", ) for n in node.nodes: n.accept_visitor(self) if filtered: self.printer.writelines( "finally:", "_buf = context.pop_buffer()", "context.write(%s)" % self.create_filter_callable(node.filter_args.args, "_buf.getvalue()", False), None ) def visitCode(self, node): if not node.ismodule: self.write_source_comment(node) self.printer.write_indented_block(node.text) if not self.in_def and len(self.identifiers.locally_assigned) > 0: # if we are the "template" def, fudge locally declared/modified variables into the "__locals" dictionary, # which is used for def calls within the same template, to simulate "enclosing scope" self.printer.writeline('__locals.update(dict([(k, locals()[k]) for k in [%s] if k in locals()]))' % ','.join([repr(x) for x in node.declared_identifiers()])) def visitIncludeTag(self, node): self.write_source_comment(node) args = node.attributes.get('args') if args: self.printer.writeline("runtime._include_file(context, %s, _template_uri, %s)" % (node.parsed_attributes['file'], args)) else: self.printer.writeline("runtime._include_file(context, %s, _template_uri)" % (node.parsed_attributes['file'])) def visitNamespaceTag(self, node): pass def visitDefTag(self, node): pass def visitCallTag(self, node): self.printer.writeline("def ccall(caller):") export = ['body'] callable_identifiers = self.identifiers.branch(node, nested=True) body_identifiers = callable_identifiers.branch(node, nested=False) # we want the 'caller' passed to ccall to be used for the body() function, # but for other non-body() <%def>s within <%call> we want the current caller off the call stack (if any) body_identifiers.add_declared('caller') self.identifier_stack.append(body_identifiers) class DefVisitor(object): def visitDefTag(s, node): self.write_inline_def(node, callable_identifiers, nested=False) export.append(node.name) # remove defs that are within the <%call> from the "closuredefs" defined # in the body, so they dont render twice if node.name in body_identifiers.closuredefs: del body_identifiers.closuredefs[node.name] vis = DefVisitor() for n in node.nodes: n.accept_visitor(vis) self.identifier_stack.pop() 
bodyargs = node.body_decl.get_argument_expressions() self.printer.writeline("def body(%s):" % ','.join(bodyargs)) # TODO: figure out best way to specify buffering/nonbuffering (at call time would be better) buffered = False if buffered: self.printer.writelines( "context.push_buffer()", "try:" ) self.write_variable_declares(body_identifiers) self.identifier_stack.append(body_identifiers) for n in node.nodes: n.accept_visitor(self) self.identifier_stack.pop() self.write_def_finish(node, buffered, False, False, callstack=False) self.printer.writelines( None, "return [%s]" % (','.join(export)), None ) self.printer.writelines( # get local reference to current caller, if any "caller = context.caller_stack._get_caller()", # push on caller for nested call "context.caller_stack.nextcaller = runtime.Namespace('caller', context, callables=ccall(caller))", "try:") self.write_source_comment(node) self.printer.writelines( "context.write(unicode(%s))" % node.attributes['expr'], "finally:", "context.caller_stack.nextcaller = None", None ) class _Identifiers(object): """tracks the status of identifier names as template code is rendered.""" def __init__(self, node=None, parent=None, nested=False): if parent is not None: # things that have already been declared in an enclosing namespace (i.e. names we can just use) self.declared = util.Set(parent.declared).union([c.name for c in parent.closuredefs.values()]).union(parent.locally_declared).union(parent.argument_declared) # if these identifiers correspond to a "nested" scope, it means whatever the # parent identifiers had as undeclared will have been declared by that parent, # and therefore we have them in our scope. if nested: self.declared = self.declared.union(parent.undeclared) # top level defs that are available self.topleveldefs = util.SetLikeDict(**parent.topleveldefs) else: self.declared = util.Set() self.topleveldefs = util.SetLikeDict() # things within this level that are referenced before they are declared (e.g. assigned to) self.undeclared = util.Set() # things that are declared locally. some of these things could be in the "undeclared" # list as well if they are referenced before declared self.locally_declared = util.Set() # assignments made in explicit python blocks. these will be propagated to # the context of local def calls.
self.locally_assigned = util.Set() # things that are declared in the argument signature of the def callable self.argument_declared = util.Set() # closure defs that are defined in this level self.closuredefs = util.SetLikeDict() self.node = node if node is not None: node.accept_visitor(self) def branch(self, node, **kwargs): """create a new Identifiers for a new Node, with this Identifiers as the parent.""" return _Identifiers(node, self, **kwargs) defs = property(lambda self:util.Set(self.topleveldefs.union(self.closuredefs).values())) def __repr__(self): return "Identifiers(declared=%s, locally_declared=%s, undeclared=%s, topleveldefs=%s, closuredefs=%s, argument_declared=%s)" % (repr(list(self.declared)), repr(list(self.locally_declared)), repr(list(self.undeclared)), repr([c.name for c in self.topleveldefs.values()]), repr([c.name for c in self.closuredefs.values()]), repr(self.argument_declared)) def check_declared(self, node): """update the state of this Identifiers with the undeclared and declared identifiers of the given node.""" for ident in node.undeclared_identifiers(): if ident != 'context' and ident not in self.declared.union(self.locally_declared): self.undeclared.add(ident) for ident in node.declared_identifiers(): self.locally_declared.add(ident) def add_declared(self, ident): self.declared.add(ident) if ident in self.undeclared: self.undeclared.remove(ident) def visitExpression(self, node): self.check_declared(node) def visitControlLine(self, node): self.check_declared(node) def visitCode(self, node): if not node.ismodule: self.check_declared(node) self.locally_assigned = self.locally_assigned.union(node.declared_identifiers()) def visitDefTag(self, node): if node.is_root(): self.topleveldefs[node.name] = node elif node is not self.node: self.closuredefs[node.name] = node for ident in node.undeclared_identifiers(): if ident != 'context' and ident not in self.declared.union(self.locally_declared): self.undeclared.add(ident) # visit defs only one level deep if node is self.node: for ident in node.declared_identifiers(): self.argument_declared.add(ident) for n in node.nodes: n.accept_visitor(self) def visitIncludeTag(self, node): self.check_declared(node) def visitPageTag(self, node): for ident in node.declared_identifiers(): self.argument_declared.add(ident) self.check_declared(node) def visitCallTag(self, node): if node is self.node: for ident in node.undeclared_identifiers(): if ident != 'context' and ident not in self.declared.union(self.locally_declared): self.undeclared.add(ident) for ident in node.declared_identifiers(): self.argument_declared.add(ident) for n in node.nodes: n.accept_visitor(self) else: for ident in node.undeclared_identifiers(): if ident != 'context' and ident not in self.declared.union(self.locally_declared): self.undeclared.add(ident)
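# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original module): roughly
# the kind of Python module the generator above emits for a one-line template
# such as "hello ${name}!". Reconstructed by hand from the writeline()/
# writelines() calls in this file; the exact output of any given Mako release
# may differ, so treat this as a reading aid only.
#
#   def render_body(context, **pageargs):
#       context.caller_stack.push_frame()
#       try:
#           # names referenced but never assigned in the template body are
#           # pulled from the context (see write_variable_declares())
#           name = context.get('name', UNDEFINED)
#           # SOURCE LINE 1        (emitted by write_source_comment())
#           context.write('hello ')
#           context.write(unicode(name))  # default 'unicode' filter applied
#           context.write('!')
#           return ''
#       finally:
#           context.caller_stack.pop_frame()
# ---------------------------------------------------------------------------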
Python
# util.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php try: Set = set except: import sets Set = sets.Set try: from cStringIO import StringIO except: from StringIO import StringIO import weakref, os, time try: import threading import thread except ImportError: import dummy_threading as threading import dummy_thread as thread def verify_directory(dir): """create and/or verify a filesystem directory.""" tries = 0 while not os.access(dir, os.F_OK): try: tries += 1 os.makedirs(dir, 0750) except: if tries > 5: raise class SetLikeDict(dict): """a dictionary that has some setlike methods on it""" def union(self, other): """produce a 'union' of this dict and another (at the key level). values in the second dict take precedence over that of the first""" x = SetLikeDict(**self) x.update(other) return x class FastEncodingBuffer(object): """a very rudimentary buffer that is faster than StringIO, but doesnt crash on unicode data like cStringIO.""" def __init__(self, encoding=None, errors='strict'): self.data = [] self.encoding = encoding self.errors = errors def write(self, text): self.data.append(text) def getvalue(self): if self.encoding: return u''.join(self.data).encode(self.encoding, self.errors) else: return u''.join(self.data) class LRUCache(dict): """A dictionary-like object that stores a limited number of items, discarding lesser used items periodically. this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based paradigm so that synchronization is not really needed. the size management is inexact. """ class _Item(object): def __init__(self, key, value): self.key = key self.value = value self.timestamp = time.time() def __repr__(self): return repr(self.value) def __init__(self, capacity, threshold=.5): self.capacity = capacity self.threshold = threshold def __getitem__(self, key): item = dict.__getitem__(self, key) item.timestamp = time.time() return item.value def values(self): return [i.value for i in dict.values(self)] def setdefault(self, key, value): if key in self: return self[key] else: self[key] = value return value def __setitem__(self, key, value): item = dict.get(self, key) if item is None: item = self._Item(key, value) dict.__setitem__(self, key, item) else: item.value = value self._manage_size() def _manage_size(self): while len(self) > self.capacity + self.capacity * self.threshold: bytime = dict.values(self) bytime.sort(lambda a, b: cmp(b.timestamp, a.timestamp)) for item in bytime[self.capacity:]: try: del self[item.key] except KeyError: # if we couldnt find a key, most likely some other thread broke in # on us. loop around and try again break
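# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module),
# exercising the helpers defined above; the guard keeps it inert on import.
if __name__ == '__main__':
    cache = LRUCache(capacity=2, threshold=.5)
    for i in range(5):
        cache[i] = str(i)
    # size management is inexact: the cache is trimmed back only once it
    # exceeds capacity + capacity * threshold items (3 here), dropping the
    # least recently touched entries
    assert len(cache) <= 3
    buf = FastEncodingBuffer(encoding='utf-8')
    buf.write(u'caf\xe9')
    assert buf.getvalue() == 'caf\xc3\xa9'
    print 'util.py demo ok'
# ---------------------------------------------------------------------------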
Python
# ast.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes""" from compiler import ast, visitor from compiler import parse as compiler_parse from mako import util, exceptions from StringIO import StringIO import re def parse(code, mode, **exception_kwargs): try: return compiler_parse(code, mode) except SyntaxError, e: raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs) class PythonCode(object): """represents information about a string containing Python code""" def __init__(self, code, **exception_kwargs): self.code = code # represents all identifiers which are assigned to at some point in the code self.declared_identifiers = util.Set() # represents all identifiers which are referenced before their assignment, if any self.undeclared_identifiers = util.Set() # note that an identifier can be in both the undeclared and declared lists. # using AST to parse instead of using code.co_varnames, code.co_names has several advantages: # - we can locate an identifier as "undeclared" even if its declared later in the same block of code # - AST is less likely to break with version changes (for example, the behavior of co_names changed a little bit # in python version 2.5) if isinstance(code, basestring): expr = parse(code.lstrip(), "exec", **exception_kwargs) else: expr = code class FindIdentifiers(object): def __init__(self): self.in_function = False self.local_ident_stack = {} def _add_declared(s, name): if not s.in_function: self.declared_identifiers.add(name) def visitClass(self, node, *args): self._add_declared(node.name) def visitAssName(self, node, *args): self._add_declared(node.name) def visitAssign(self, node, *args): # flip around the visiting of Assign so the expression gets evaluated first, # in the case of a clause like "x=x+5" (x is undeclared) self.visit(node.expr, *args) for n in node.nodes: self.visit(n, *args) def visitFunction(self,node, *args): self._add_declared(node.name) # push function state onto stack. dont log any # more identifiers as "declared" until outside of the function, # but keep logging identifiers as "undeclared". 
# track argument names in each function header so they arent counted as "undeclared" saved = {} inf = self.in_function self.in_function = True for arg in node.argnames: if arg in self.local_ident_stack: saved[arg] = True else: self.local_ident_stack[arg] = True for n in node.getChildNodes(): self.visit(n, *args) self.in_function = inf for arg in node.argnames: if arg not in saved: del self.local_ident_stack[arg] def visitFor(self, node, *args): # flip around visit self.visit(node.list, *args) self.visit(node.assign, *args) self.visit(node.body, *args) def visitName(s, node, *args): if node.name not in __builtins__ and node.name not in self.declared_identifiers and node.name not in s.local_ident_stack: self.undeclared_identifiers.add(node.name) def visitImport(self, node, *args): for (mod, alias) in node.names: if alias is not None: self._add_declared(alias) else: self._add_declared(mod.split('.')[0]) def visitFrom(self, node, *args): for (mod, alias) in node.names: if alias is not None: self._add_declared(alias) else: if mod == '*': raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **exception_kwargs) self._add_declared(mod) f = FindIdentifiers() visitor.walk(expr, f) #, walker=walker()) class ArgumentList(object): """parses a fragment of code as a comma-separated list of expressions""" def __init__(self, code, **exception_kwargs): self.codeargs = [] self.args = [] self.declared_identifiers = util.Set() self.undeclared_identifiers = util.Set() class FindTuple(object): def visitTuple(s, node, *args): for n in node.nodes: p = PythonCode(n, **exception_kwargs) self.codeargs.append(p) self.args.append(ExpressionGenerator(n).value()) self.declared_identifiers = self.declared_identifiers.union(p.declared_identifiers) self.undeclared_identifiers = self.undeclared_identifiers.union(p.undeclared_identifiers) if isinstance(code, basestring): if re.match(r"\S", code) and not re.match(r",\s*$", code): # if theres text and no trailing comma, insure its parsed # as a tuple by adding a trailing comma code += "," expr = parse(code, "exec", **exception_kwargs) else: expr = code f = FindTuple() visitor.walk(expr, f) class PythonFragment(PythonCode): """extends PythonCode to provide identifier lookups in partial control statements e.g. for x in 5: elif y==9: except (MyException, e): etc. 
""" def __init__(self, code, **exception_kwargs): m = re.match(r'^(\w+)(?:\s+(.*?))?:$', code.strip(), re.S) if not m: raise exceptions.CompileException("Fragment '%s' is not a partial control statement" % code, **exception_kwargs) (keyword, expr) = m.group(1,2) if keyword in ['for','if', 'while']: code = code + "pass" elif keyword == 'try': code = code + "pass\nexcept:pass" elif keyword == 'elif' or keyword == 'else': code = "if False:pass\n" + code + "pass" elif keyword == 'except': code = "try:pass\n" + code + "pass" else: raise exceptions.CompileException("Unsupported control keyword: '%s'" % keyword, **exception_kwargs) super(PythonFragment, self).__init__(code, **exception_kwargs) class walker(visitor.ASTVisitor): def dispatch(self, node, *args): print "Node:", str(node) #print "dir:", dir(node) return visitor.ASTVisitor.dispatch(self, node, *args) class FunctionDecl(object): """function declaration""" def __init__(self, code, allow_kwargs=True, **exception_kwargs): self.code = code expr = parse(code, "exec", **exception_kwargs) class ParseFunc(object): def visitFunction(s, node, *args): self.funcname = node.name self.argnames = node.argnames self.defaults = node.defaults self.varargs = node.varargs self.kwargs = node.kwargs f = ParseFunc() visitor.walk(expr, f) if not hasattr(self, 'funcname'): raise exceptions.CompileException("Code '%s' is not a function declaration" % code, **exception_kwargs) if not allow_kwargs and self.kwargs: raise exceptions.CompileException("'**%s' keyword argument not allowed here" % self.argnames[-1], **exception_kwargs) def get_argument_expressions(self, include_defaults=True): """return the argument declarations of this FunctionDecl as a printable list.""" namedecls = [] defaults = [d for d in self.defaults] kwargs = self.kwargs varargs = self.varargs argnames = [f for f in self.argnames] argnames.reverse() for arg in argnames: default = None if kwargs: arg = "**" + arg kwargs = False elif varargs: arg = "*" + arg varargs = False else: default = len(defaults) and defaults.pop() or None if include_defaults and default: namedecls.insert(0, "%s=%s" % (arg, ExpressionGenerator(default).value())) else: namedecls.insert(0, arg) return namedecls class FunctionArgs(FunctionDecl): """the argument portion of a function declaration""" def __init__(self, code, **kwargs): super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs) class ExpressionGenerator(object): """given an AST node, generates an equivalent literal Python expression.""" def __init__(self, astnode): self.buf = StringIO() visitor.walk(astnode, self) #, walker=walker()) def value(self): return self.buf.getvalue() def operator(self, op, node, *args): self.buf.write("(") self.visit(node.left, *args) self.buf.write(" %s " % op) self.visit(node.right, *args) self.buf.write(")") def booleanop(self, op, node, *args): self.visit(node.nodes[0]) for n in node.nodes[1:]: self.buf.write(" " + op + " ") self.visit(n, *args) def visitConst(self, node, *args): self.buf.write(repr(node.value)) def visitAssName(self, node, *args): # TODO: figure out OP_ASSIGN, other OP_s self.buf.write(node.name) def visitName(self, node, *args): self.buf.write(node.name) def visitMul(self, node, *args): self.operator("*", node, *args) def visitAnd(self, node, *args): self.booleanop("and", node, *args) def visitOr(self, node, *args): self.booleanop("or", node, *args) def visitBitand(self, node, *args): self.booleanop("&", node, *args) def visitBitor(self, node, *args): self.booleanop("|", node, *args) def 
visitBitxor(self, node, *args): self.booleanop("^", node, *args) def visitAdd(self, node, *args): self.operator("+", node, *args) def visitGetattr(self, node, *args): self.visit(node.expr, *args) self.buf.write(".%s" % node.attrname) def visitSub(self, node, *args): self.operator("-", node, *args) def visitNot(self, node, *args): self.buf.write("not ") self.visit(node.expr) def visitDiv(self, node, *args): self.operator("/", node, *args) def visitFloorDiv(self, node, *args): self.operator("//", node, *args) def visitSubscript(self, node, *args): self.visit(node.expr) self.buf.write("[") [self.visit(x) for x in node.subs] self.buf.write("]") def visitUnarySub(self, node, *args): self.buf.write("-") self.visit(node.expr) def visitUnaryAdd(self, node, *args): self.buf.write("-") self.visit(node.expr) def visitSlice(self, node, *args): self.visit(node.expr) self.buf.write("[") if node.lower is not None: self.visit(node.lower) self.buf.write(":") if node.upper is not None: self.visit(node.upper) self.buf.write("]") def visitDict(self, node): self.buf.write("{") c = node.getChildren() for i in range(0, len(c), 2): self.visit(c[i]) self.buf.write(": ") self.visit(c[i+1]) if i<len(c) -2: self.buf.write(", ") self.buf.write("}") def visitTuple(self, node): self.buf.write("(") c = node.getChildren() for i in range(0, len(c)): self.visit(c[i]) if i<len(c) - 1: self.buf.write(", ") self.buf.write(")") def visitList(self, node): self.buf.write("[") c = node.getChildren() for i in range(0, len(c)): self.visit(c[i]) if i<len(c) - 1: self.buf.write(", ") self.buf.write("]") def visitListComp(self, node): self.buf.write("[") self.visit(node.expr) self.buf.write(" ") for n in node.quals: self.visit(n) self.buf.write("]") def visitListCompFor(self, node): self.buf.write(" for ") self.visit(node.assign) self.buf.write(" in ") self.visit(node.list) for n in node.ifs: self.visit(n) def visitListCompIf(self, node): self.buf.write(" if ") self.visit(node.test) def visitCompare(self, node): self.visit(node.expr) for tup in node.ops: self.buf.write(tup[0]) self.visit(tup[1]) def visitCallFunc(self, node, *args): self.visit(node.node) self.buf.write("(") if len(node.args): self.visit(node.args[0]) for a in node.args[1:]: self.buf.write(", ") self.visit(a) self.buf.write(")")
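# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module),
# assuming the mako package is importable: what the analyzers above report
# for small fragments. In "x = y + 5", 'x' is assigned so it is declared,
# while 'y' is referenced before any assignment so it is undeclared (the
# Assign visitor deliberately walks the expression side first).
if __name__ == '__main__':
    from mako.ast import PythonCode, PythonFragment
    pc = PythonCode("x = y + 5")
    assert 'x' in pc.declared_identifiers
    assert 'y' in pc.undeclared_identifiers
    frag = PythonFragment("elif y == 9:")
    assert 'y' in frag.undeclared_identifiers
    print 'ast.py demo ok'
# ---------------------------------------------------------------------------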
Python
# exceptions.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """exception classes""" import traceback, sys, re class MakoException(Exception): pass class RuntimeException(MakoException): pass def _format_filepos(lineno, pos, filename): if filename is None: return " at line: %d char: %d" % (lineno, pos) else: return " in file '%s' at line: %d char: %d" % (filename, lineno, pos) class CompileException(MakoException): def __init__(self, message, source, lineno, pos, filename): MakoException.__init__(self, message + _format_filepos(lineno, pos, filename)) self.lineno = lineno self.pos = pos self.filename = filename self.source = source class SyntaxException(MakoException): def __init__(self, message, source, lineno, pos, filename): MakoException.__init__(self, message + _format_filepos(lineno, pos, filename)) self.lineno = lineno self.pos = pos self.filename = filename self.source = source class TemplateLookupException(MakoException): pass class TopLevelLookupException(TemplateLookupException): pass class RichTraceback(object): """pulls the current exception from the sys traceback and extracts Mako-specific template information. Usage: RichTraceback() Properties: error - the exception instance. source - source code of the file where the error occurred. if the error occurred within a compiled template, this is the template source. lineno - line number where the error occurred. if the error occurred within a compiled template, the line number is adjusted to that of the template source records - a list of 8-tuples containing the original python traceback elements, plus the filename, line number, source line, and full template source for the traceline mapped back to its originating source template, if any for that traceline (else the fields are None). reverse_records - the list of records in reverse traceback - a list of 4-tuples, in the same format as a regular python traceback, with template-corresponding traceback records replacing the originals reverse_traceback - the traceback list in reverse """ def __init__(self): (self.source, self.lineno) = ("", 0) (t, self.error, self.records) = self._init() if self.error is None: self.error = t if isinstance(self.error, CompileException) or isinstance(self.error, SyntaxException): import mako.template self.source = self.error.source self.lineno = self.error.lineno self._has_source = True self.reverse_records = [r for r in self.records] self.reverse_records.reverse() def _get_reformatted_records(self, records): for rec in records: if rec[6] is not None: yield (rec[4], rec[5], rec[2], rec[6]) else: yield tuple(rec[0:4]) traceback = property(lambda self:self._get_reformatted_records(self.records), doc=""" return a list of 4-tuple traceback records (i.e.
normal python format) with template-corresponding lines remapped to the originating template """) reverse_traceback = property(lambda self:self._get_reformatted_records(self.reverse_records), doc=""" return the same data as traceback, except in reverse order """) def _init(self): """format a traceback from sys.exc_info() into 7-item tuples, containing the regular four traceback tuple items, plus the original template filename, the line number adjusted relative to the template source, and code line from that line number of the template.""" import mako.template mods = {} (type, value, trcback) = sys.exc_info() rawrecords = traceback.extract_tb(trcback) new_trcback = [] for filename, lineno, function, line in rawrecords: try: (line_map, template_lines) = mods[filename] except KeyError: try: info = mako.template._get_module_info(filename) module_source = info.code template_source = info.source template_filename = info.template_filename or filename except KeyError: new_trcback.append((filename, lineno, function, line, None, None, None, None)) continue template_ln = module_ln = 1 line_map = {} for line in module_source.split("\n"): match = re.match(r'\s*# SOURCE LINE (\d+)', line) if match: template_ln = int(match.group(1)) else: template_ln += 1 module_ln += 1 line_map[module_ln] = template_ln template_lines = [line for line in template_source.split("\n")] mods[filename] = (line_map, template_lines) template_ln = line_map[lineno] if template_ln <= len(template_lines): template_line = template_lines[template_ln - 1] else: template_line = None new_trcback.append((filename, lineno, function, line, template_filename, template_ln, template_line, template_source)) if not self.source: for l in range(len(new_trcback)-1, 0, -1): if new_trcback[l][5]: self.source = new_trcback[l][7] self.lineno = new_trcback[l][5] break else: self.source = file(new_trcback[-1][0]).read() self.lineno = new_trcback[-1][1] return (type, value, new_trcback) def text_error_template(lookup=None): """provides a template that renders a stack trace in a similar format to the Python interpreter, substituting source template filenames, line numbers and code for that of the originating source template, as applicable.""" import mako.template return mako.template.Template(r""" <%! from mako.exceptions import RichTraceback %>\ <% tback = RichTraceback() %>\ Traceback (most recent call last): % for (filename, lineno, function, line) in tback.traceback: File "${filename}", line ${lineno}, in ${function or '?'} ${line | unicode.strip} % endfor ${str(tback.error.__class__.__name__)}: ${str(tback.error)} """) def html_error_template(): """provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. the template's default encoding_errors value is 'htmlentityreplace'. the template has two options: with the full option disabled, only a section of an HTML document is returned. with the css option disabled, the default stylesheet won't be included.""" import mako.template return mako.template.Template(r""" <%! 
from mako.exceptions import RichTraceback %> <%page args="full=True, css=True"/> % if full: <html> <head> <title>Mako Runtime Error</title> % endif % if css: <style> body { font-family:verdana; margin:10px 30px 10px 30px;} .stacktrace { margin:5px 5px 5px 5px; } .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; } .nonhighlight { padding:0px; background-color:#DFDFDF; } .sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; } .sampleline { padding:0px 10px 0px 10px; } .sourceline { margin:5px 5px 10px 5px; font-family:monospace;} .location { font-size:80%; } </style> % endif % if full: </head> <body> % endif <h2>Error !</h2> <% tback = RichTraceback() src = tback.source line = tback.lineno if src: lines = src.split('\n') else: lines = None %> <h3>${str(tback.error.__class__.__name__)}: ${str(tback.error)}</h3> % if lines: <div class="sample"> <div class="nonhighlight"> % for index in range(max(0, line-4),min(len(lines), line+5)): % if index + 1 == line: <div class="highlight">${index + 1} ${lines[index] | h}</div> % else: <div class="sampleline">${index + 1} ${lines[index] | h}</div> % endif % endfor </div> </div> % endif <div class="stacktrace"> % for (filename, lineno, function, line) in tback.reverse_traceback: <div class="location">${filename}, line ${lineno}:</div> <div class="sourceline">${line | h}</div> % endfor </div> % if full: </body> </html> % endif """, output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
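# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module),
# assuming the mako package is importable: render a template that fails at
# runtime, then format the failure with text_error_template(), which walks
# RichTraceback to remap generated-module lines back to template lines.
if __name__ == '__main__':
    from mako.template import Template
    try:
        Template("${undefined_callable()}").render()
    except:
        print text_error_template().render()
# ---------------------------------------------------------------------------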
Python
# __init__.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php
Python
# template.py # Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides the Template class, a facade for parsing, generating and executing template strings, as well as template runtime operations.""" from mako.lexer import Lexer from mako import codegen from mako import runtime, util, exceptions import imp, time, weakref, tempfile, shutil, os, stat, sys, re class Template(object): """a compiled template""" def __init__(self, text=None, filename=None, uri=None, format_exceptions=False, error_handler=None, lookup=None, output_encoding=None, encoding_errors='strict', module_directory=None, cache_type=None, cache_dir=None, cache_url=None, module_filename=None, input_encoding=None, default_filters=['unicode'], buffer_filters=[], imports=None, preprocessor=None): """construct a new Template instance using either literal template text, or a previously loaded template module text - textual template source, or None if a module is to be provided uri - the uri of this template, or some identifying string. defaults to the full filename given, or "memory:(hex id of this Template)" if no filename filename - filename of the source template, if any format_exceptions - catch exceptions and format them into an error display template """ if uri: self.module_id = re.sub(r'\W', "_", uri) self.uri = uri elif filename: self.module_id = re.sub(r'\W', "_", filename) self.uri = filename else: self.module_id = "memory:" + hex(id(self)) self.uri = self.module_id self.default_filters = default_filters self.buffer_filters = buffer_filters self.input_encoding = input_encoding self.imports = imports self.preprocessor = preprocessor # if plain text, compile code in memory only if text is not None: (code, module) = _compile_text(self, text, filename) self._code = code self._source = text ModuleInfo(module, None, self, filename, code, text) elif filename is not None: # if template filename and a module directory, load # a filesystem-based module file, generating if needed if module_filename is not None: path = module_filename elif module_directory is not None: u = self.uri if u[0] == '/': u = u[1:] path = os.path.abspath(os.path.join(module_directory.replace('/', os.path.sep), u + ".py")) else: path = None if path is not None: util.verify_directory(os.path.dirname(path)) filemtime = os.stat(filename)[stat.ST_MTIME] if not os.access(path, os.F_OK) or os.stat(path)[stat.ST_MTIME] < filemtime: _compile_module_file(self, file(filename).read(), filename, path) module = imp.load_source(self.module_id, path, file(path)) del sys.modules[self.module_id] if module._magic_number != codegen.MAGIC_NUMBER: _compile_module_file(self, file(filename).read(), filename, path) module = imp.load_source(self.module_id, path, file(path)) del sys.modules[self.module_id] ModuleInfo(module, path, self, filename, None, None) else: # template filename and no module directory, compile code # in memory (code, module) = _compile_text(self, file(filename).read(), filename) self._source = None self._code = code ModuleInfo(module, None, self, filename, code, None) else: raise exceptions.RuntimeException("Template requires text or filename") self.module = module self.filename = filename self.callable_ = self.module.render_body self.format_exceptions = format_exceptions self.error_handler = error_handler self.lookup = lookup self.output_encoding = output_encoding self.encoding_errors = encoding_errors self.cache_type 
= cache_type self.cache_dir = cache_dir self.cache_url = cache_url source = property(lambda self:_get_module_info_from_callable(self.callable_).source, doc="""return the template source code for this Template.""") code = property(lambda self:_get_module_info_from_callable(self.callable_).code, doc="""return the module source code for this Template""") def render(self, *args, **data): """render the output of this template as a string. if the template specifies an output encoding, the string will be encoded accordingly, else the output is raw (raw output uses cStringIO and can't handle multibyte characters). a Context object is created corresponding to the given data. Arguments that are explictly declared by this template's internal rendering method are also pulled from the given *args, **data members.""" return runtime._render(self, self.callable_, args, data) def render_unicode(self, *args, **data): """render the output of this template as a unicode object.""" return runtime._render(self, self.callable_, args, data, as_unicode=True) def render_context(self, context, *args, **kwargs): """render this Template with the given context. the data is written to the context's buffer.""" if getattr(context, '_with_template', None) is None: context._with_template = self runtime._render_context(self, self.callable_, context, *args, **kwargs) def get_def(self, name): """return a def of this template as an individual Template of its own.""" return DefTemplate(self, getattr(self.module, "render_%s" % name)) class DefTemplate(Template): """a Template which represents a callable def in a parent template.""" def __init__(self, parent, callable_): self.parent = parent self.callable_ = callable_ self.default_filters = parent.default_filters self.buffer_filters = parent.buffer_filters self.input_encoding = parent.input_encoding self.imports = parent.imports self.output_encoding = parent.output_encoding self.encoding_errors = parent.encoding_errors self.format_exceptions = parent.format_exceptions self.error_handler = parent.error_handler self.lookup = parent.lookup self.module = parent.module self.filename = parent.filename self.cache_type = parent.cache_type self.cache_dir = parent.cache_dir self.cache_url = parent.cache_url def get_def(self, name): return self.parent.get_def(name) class ModuleInfo(object): """stores information about a module currently loaded into memory, provides reverse lookups of template source, module source code based on a module's identifier.""" _modules = weakref.WeakValueDictionary() def __init__(self, module, module_filename, template, template_filename, module_source, template_source): self.module = module self.module_filename = module_filename self.template_filename = template_filename self.module_source = module_source self.template_source = template_source self._modules[module.__name__] = template._mmarker = self if module_filename: self._modules[module_filename] = self def _get_code(self): if self.module_source is not None: return self.module_source else: return file(self.module_filename).read() code = property(_get_code) def _get_source(self): if self.template_source is not None: if self.module._source_encoding and not isinstance(self.template_source, unicode): return self.template_source.decode(self.module._source_encoding) else: return self.template_source else: if self.module._source_encoding: return file(self.template_filename).read().decode(self.module._source_encoding) else: return file(self.template_filename).read() source = property(_get_source) def 
_compile_text(template, text, filename): identifier = template.module_id lexer = Lexer(text, filename, input_encoding=template.input_encoding, preprocessor=template.preprocessor) node = lexer.parse() source = codegen.compile(node, template.uri, filename, default_filters=template.default_filters, buffer_filters=template.buffer_filters, imports=template.imports, source_encoding=lexer.encoding) #print source cid = identifier module = imp.new_module(cid) code = compile(source, cid, 'exec') exec code in module.__dict__, module.__dict__ return (source, module) def _compile_module_file(template, text, filename, outputpath): identifier = template.module_id (dest, name) = tempfile.mkstemp() lexer = Lexer(text, filename, input_encoding=template.input_encoding, preprocessor=template.preprocessor) node = lexer.parse() source = codegen.compile(node, template.uri, filename, default_filters=template.default_filters, buffer_filters=template.buffer_filters, imports=template.imports, source_encoding=lexer.encoding) os.write(dest, source) os.close(dest) shutil.move(name, outputpath) def _get_module_info_from_callable(callable_): return _get_module_info(callable_.func_globals['__name__']) def _get_module_info(filename): return ModuleInfo._modules[filename]
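# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module):
# the main entry points of the Template facade defined above.
if __name__ == '__main__':
    t = Template("hello, ${name}!")
    print t.render(name='mako')                     # -> hello, mako!
    assert t.render_unicode(name='mako') == u'hello, mako!'
    # the generated Python module source is kept for inspection
    print t.code
# ---------------------------------------------------------------------------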
Python
# filters.py # Copyright (C) 2006, 2007 Geoffrey T. Dairiki <dairiki@dairiki.org> and Michael Bayer <mike_mp@zzzcomputing.com> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re, cgi, urllib, htmlentitydefs, codecs from StringIO import StringIO xml_escapes = { '&' : '&amp;', '>' : '&gt;', '<' : '&lt;', '"' : '&#34;', # also &quot; in html-only "'" : '&#39;' # also &apos; in html-only } # XXX: &quot; is valid in HTML and XML # &apos; is not valid HTML, but is valid XML def html_escape(string): return cgi.escape(string, True) def xml_escape(string): return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string) def url_escape(string): # convert into a list of octets string = string.encode("utf8") return urllib.quote_plus(string) def url_unescape(string): text = urllib.unquote_plus(string) if not is_ascii_str(text): text = text.decode("utf8") return text def trim(string): return string.strip() class Decode(object): def __getattr__(self, key): def decode(x): if isinstance(x, unicode): return x if not isinstance(x, str): return str(x) return unicode(x, encoding=key) return decode decode = Decode() _ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z') def is_ascii_str(text): return isinstance(text, str) and _ASCII_re.match(text) ################################################################ class XMLEntityEscaper(object): def __init__(self, codepoint2name, name2codepoint): self.codepoint2entity = dict([(c, u'&%s;' % n) for c,n in codepoint2name.iteritems()]) self.name2codepoint = name2codepoint def escape_entities(self, text): """Replace characters with their character entity references. Only characters corresponding to a named entity are replaced. """ return unicode(text).translate(self.codepoint2entity) def __escape(self, m): codepoint = ord(m.group()) try: return self.codepoint2entity[codepoint] except (KeyError, IndexError): return '&#x%X;' % codepoint __escapable = re.compile(r'["&<>]|[^\x00-\x7f]') def escape(self, text): """Replace characters with their character references. Replace characters by their named entity references. Non-ASCII characters, if they do not have a named entity reference, are replaced by numerical character references. The return value is guaranteed to be ASCII. """ return self.__escapable.sub(self.__escape, unicode(text) ).encode('ascii') # XXX: This regexp will not match all valid XML entity names__. # (It punts on details involving involving CombiningChars and Extenders.) # # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef __characterrefs = re.compile(r'''& (?: \#(\d+) | \#x([\da-f]+) | ( (?!\d) [:\w] [-.:\w]+ ) ) ;''', re.X | re.UNICODE) def __unescape(self, m): dval, hval, name = m.groups() if dval: codepoint = int(dval) elif hval: codepoint = int(hval, 16) else: codepoint = self.name2codepoint.get(name, 0xfffd) # U+FFFD = "REPLACEMENT CHARACTER" if codepoint < 128: return chr(codepoint) return unichr(codepoint) def unescape(self, text): """Unescape character references. All character references (both entity references and numerical character references) are unescaped. """ return self.__characterrefs.sub(self.__unescape, text) _html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name, htmlentitydefs.name2codepoint) html_entities_escape = _html_entities_escaper.escape_entities html_entities_unescape = _html_entities_escaper.unescape def htmlentityreplace_errors(ex): """An encoding error handler. 
This python `codecs`_ error handler replaces unencodable characters with HTML entities, or, if no HTML entity exists for the character, XML character references. >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') 'The cost was &euro;12.' """ if isinstance(ex, UnicodeEncodeError): # Handle encoding errors bad_text = ex.object[ex.start:ex.end] text = _html_entities_escaper.escape(bad_text) return (unicode(text), ex.end) raise ex codecs.register_error('htmlentityreplace', htmlentityreplace_errors) # TODO: options to make this dynamic per-compilation will be added in a later release DEFAULT_ESCAPES = { 'x':'filters.xml_escape', 'h':'filters.html_escape', 'u':'filters.url_escape', 'trim':'filters.trim', 'entity':'filters.html_entities_escape', 'unicode':'unicode', 'decode':'decode', 'n':'n' }
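# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module):
# the escaping helpers above, plus the 'htmlentityreplace' error handler this
# module registers with the codecs machinery on import.
if __name__ == '__main__':
    assert html_escape('<a href="#">&</a>') == '&lt;a href=&quot;#&quot;&gt;&amp;&lt;/a&gt;'
    assert url_escape(u'a b') == 'a+b'
    assert trim('  x  ') == 'x'
    # characters the target charset cannot encode become HTML entities
    assert u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') == 'The cost was &euro;12.'
    print 'filters.py demo ok'
# ---------------------------------------------------------------------------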
Python
from mako import exceptions try: import beaker.container as container import beaker.exceptions try: import beaker.ext.memcached as memcached clsmap = { 'memory':container.MemoryContainer, 'dbm':container.DBMContainer, 'file':container.FileContainer, 'memcached':memcached.MemcachedContainer } except beaker.exceptions.BeakerException: clsmap = { 'memory':container.MemoryContainer, 'dbm':container.DBMContainer, 'file':container.FileContainer, } except ImportError: container = None clsmap = {} class Cache(object): def __init__(self, id, starttime, **kwargs): self.id = id self.starttime = starttime if container is not None: self.context = container.ContainerContext() self._containers = {} self.kwargs = kwargs def put(self, key, value, type='memory', **kwargs): self._get_container(key, type, **kwargs).set_value(value) def get(self, key, type='memory', **kwargs): return self._get_container(key, type, **kwargs).get_value() def _get_container(self, key, type, **kwargs): try: return self._containers[key] except KeyError: if container is None: raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.") kw = self.kwargs.copy() kw.update(kwargs) return self._containers.setdefault(key, clsmap[type](key, self.context, self.id, starttime=self.starttime, **kw))
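# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module),
# assuming the Beaker package is installed (without it, clsmap is empty and
# _get_container raises): a put/get round trip through the 'memory' container.
if __name__ == '__main__':
    import time
    c = Cache('demo_namespace', time.time())
    c.put('greeting', 'hello', type='memory')
    assert c.get('greeting', type='memory') == 'hello'
    print 'cache.py demo ok'
# ---------------------------------------------------------------------------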
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- import os,sys from xmldoc import XmlDoc,XmlNode from xpickle import node2data,data2node class Conf(object): def __init__(self,fb,nodeConf): assert type(nodeConf)==XmlNode self.__nodeConf = nodeConf self.__fb = fb def __iter__(self): """ iterate over the "names" of the conf items """ for i in self.__nodeConf.getAll(u"key"): yield i.getAttr(u"name") def __len__(self): """ return the number of items in the conf """ return len(self.__nodeConf.getAll(u"key")) def __getNode(self,n): """ return the XmlNode of item 'n', or None """ for i in self.__nodeConf.getAll(u"key"): attr = i.getAttr(u"name") if attr==n: return i return None def __getitem__(self,n): """ return the value of item 'n' in the conf, or None """ node = self.__getNode(n) if node: # grab the first real node (there is only one!) if any for i in node.childNodes: if i.node.nodeType == i.node.ELEMENT_NODE: return node2data( i.node ) return None def __delitem__(self,n): """ delete item 'n' from the conf """ node = self.__getNode(n) if node: node.unlink() self.__fb.save() # save conf return True return None def __setitem__(self,n,v): """ update the content of item 'n' in the conf; if it did not exist, create it """ node = self.__getNode(n) if node: node.unlink() node=self.__nodeConf.ownerDocument.createNode(u"key",{u"name":n}) newnode = self.__nodeConf.appendChild(node) nd=data2node(v) if nd: newnode.appendChild(XmlNode(nd)) self.__fb.save() # save conf class FeedBox(object): def __init__(self,fileConf=None): if not fileConf: # try to determine the fileConf try: #windows NT,2k,XP,etc. fallback home = os.environ['APPDATA'] except: try: #all user-based OSes thd = os.path.expanduser("~") if thd == "~": raise home= thd except: try: #*nix fallback home = os.environ['HOME'] except: #What os are people using? home = os.path.dirname(os.path.abspath(__file__)) fileConf = os.path.join(home,".feedBox.xml") print "FeedBox Conf File : ",fileConf self.__file = fileConf if os.path.isfile(fileConf): self.__dom=XmlDoc(file=fileConf) else: # create the conf file self.__dom=XmlDoc(xml="<feedBox><conf/><plugins/></feedBox>") def checkConf(self): """ check that vlc is correctly set and actually exists, otherwise ask the user to locate it returns True if vlc is ok returns False if it is not """ c=self.getConf() vlc = c["vlc"] if not( vlc and os.path.isfile(vlc) ): import easygui easygui.msgbox(""" C'est la première fois que vous lancer feedBox. Veuillez selectionner votre VLC ... ""","feedbox") vlc = easygui.fileopenbox() if vlc: c["vlc"] = vlc.decode( sys.getfilesystemencoding() ) return True else: return False else: return True def getConf(self,plug=None): """ get the configuration object (Conf()) for the plugin named "plug", or the global feedbox conf otherwise """ if plug: nplug = self.__dom.getAll("plugins")[0] nodes = nplug.getAll(plug) if nodes: return Conf(self,nodes[0]) else: n=nplug.ownerDocument.createNode(plug) node = nplug.appendChild(n) return Conf(self,node) else: return Conf( self, self.__dom.getAll("conf")[0] ) def save(self): """ save the configuration """ self.__dom.saveAs(self.__file) if __name__ == "__main__": #~ try: #~ os.unlink("test.xml") #~ except: #~ pass f = FeedBox("test.xml") c = f.getConf() c["region"]=56 c["region"]=( {u"kkçàak":122,23:(1,2,3)}) print c["koko"] f.save()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- from base64 import urlsafe_b64encode,urlsafe_b64decode def u64enc(f): assert type(f)==unicode a=f.encode("utf_8") b=urlsafe_b64encode( a) return unicode(b,"utf_8") def u64dec(f): assert type(f)==unicode a=f.encode("utf_8") b=urlsafe_b64decode( a) return unicode(b,"utf_8") class OList: """ l=[("martin","jean"),("dupond","yves"),("klein","michel")] or l=[["martin","jean"],["dupond","yves"],["klein","michel"]] ll = OList("nom","prenom")(l) print ll[2].prenom,"==",ll[2][1],"==",l[2][1] """ def __init__(self,*a): self.__n=list(a) def __call__(self,liste): class NList(list): def __init__(self,n,l): assert len(l)==len(n), "No match "+str(n)+" and "+str(l) list.__init__(self,l) for i in n: setattr(self,i,l[ n.index(i) ]) self.__noms = n def __repr__(self): m=" ".join(["""%s='%s'""" % (i,str(getattr(self,i))) for i in self.__noms]) return "<objet %s>" % m return [ NList(self.__n, i) for i in liste ] import math def mkPager(ll,nbperpage,page=None): """ Build a *pager* from the list "ll" and the number of items to display per page "nbperpage". returns a tuple containing: - a keys tuple (first,prev,next,last): the page numbers used for navigation; first and last are always set, next and prev are contextual - a windowed list of pages (which always contains the current one) - a sub-list of ll for the current page """ # sanity checks and computation of maxPage assert nbperpage>1 assert type(ll)==list maxPage=int(math.ceil(float(len(ll))/nbperpage)) if page!=None: assert 1<=page<=maxPage, "nb max de page = %d,page=%d"%(maxPage,page) else: page=1 # build the "pager navigator" row lp=range(1,maxPage+1) if len(lp)>1: idx=lp.index(page) off=5 mi = max(idx-off,0) ma = min(mi+(1+off*2),len(lp)) if idx-(1+off*2)>0: mi = min(idx-off,ma-(1+off*2)) lp=lp[mi:ma] else: lp=[] # slice out the elements displayed on this page debut = (page-1)*nbperpage fin = debut+nbperpage # define the FIRST/PREV/NEXT/LAST keys first= maxPage>1 and 1 or None prev= page>1 and page-1 or None next= page<maxPage and page+1 or None last = maxPage>1 and maxPage or None return (first,prev,next,last),lp,ll[debut:fin] def pageNavigator(ll,nb,url,page=1): assert "%d" in url (first,prev,next,last),lp,l=mkPager(ll,nb,page) keyPrev,keyNext="","" lnav=[] if prev: lnav.append( (url%first, "|<") ) lnav.append( (url%prev, "<<") ) keyPrev = url%prev for i in lp: if i==page: lnav.append((None,"<font color='#FFFF00'>%d</font>"%i)) else: lnav.append((url%i,str(i))) if next: lnav.append( (url%next, ">>")) lnav.append( (url%last, ">|")) keyNext = url%next h="" for u,t in lnav: if u: h+= "<a href='%s'>%s</a>" % (u,t) else: h+=t h+=" " return keyPrev,keyNext,h,l if __name__=="__main__": ll=range(5686) kp,kn,h,l = pageNavigator(ll, 20, "browse?page=%d", 2) # 20 items per page print h print l
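# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original module): the
# shape of mkPager's return value for a 100-item list, 10 per page, page 3.
if __name__ == "__main__":
    (first, prev, next, last), pages, items = mkPager(range(100), 10, 3)
    assert (first, prev, next, last) == (1, 2, 4, 10)
    assert items == range(20, 30)   # the slice of ll shown on page 3
    assert 3 in pages               # the current page is always listed
# ---------------------------------------------------------------------------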
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- import inspect from mako.template import Template as MakoTemplate from mako.lookup import TemplateLookup import os #MYLOOKUP = TemplateLookup(directories=["common/","../../common/",".","../.."]) MYLOOKUP = TemplateLookup(directories=[os.path.dirname(__file__),os.getcwd()]) BTN = {} for i in os.listdir("common/static"): if i.lower().endswith(".gif"): BTN[i[:-4]]=i def Template(head,nom=None): global MYLOOKUP assert type(head)==dict #glob = inspect.currentframe(1).f_globals loc = inspect.currentframe(1).f_locals if "self" in loc: del(loc["self"]) if nom: filename=nom else: filename = "templates/"+inspect.currentframe(1).f_code.co_name+".html" # default meta head["home_page"]="/index" head["digit_key"]="false" loc["isFreeBox"] = Template.isFreeBox loc["rlinks"]=[] loc["headers"]="" for key,value in head.items(): # map feedBox actions to remote-control keys if key=="BACK": key="star" if key=="NEXT": key="next" if key=="PREV": key="prev" if key=="PLAY": key="pause" # no play key on the HD box: only play/pause if key=="PAUSE": key="pause" meta,rl=gen(key,value) loc["headers"]+=meta+"\n" loc["rlinks"].append(rl) mytemplate = MakoTemplate(filename=filename,output_encoding='iso-8859-15', encoding_errors='replace',lookup=MYLOOKUP) return mytemplate.render(**loc) Template.isFreeBox=False def mkButton(key,value): global BTN if key in BTN: key = """<img src="%s">""" % BTN[key] return """<span onclick="document.location='%s';">%s</span>""" % (value,key) def gen(key,value): key=key.lower() if type(value) in (str,unicode): assert '"' not in value button="" if key in "help options red green yellow blue info guide stop play pause rec up down left right star sharp".split(" "): # LINK tag ret= """<link rel="%s" href="%s"> """ % (key,value) if value and value!="none": button=mkButton(key,value) elif key in "back prev next rev fwd pip".split(" "): # LINK tag (freebox HD ONLY !!!!) if value and value!="none": button=mkButton(key,value) ret= """<link rel="%s" href="%s"> """ % (key,value) elif key in "volume_key program_key wait_new_picture digit_key".split(" "): # FLAGS tag ret= """<flags %s="%s">""" % (key,value and "true" or "false") else: # META tag p="" # handling for a real browser if key in "home_page love_page love2_page mail_page mail2_page nochannel_page channel_page star_page sharp_page settings_page".split(" "): # generate a real link if value and value!="none": button=mkButton(key,value) elif key=="refresh": # generate a real http refresh for a real browser p="""<META HTTP-EQUIV="Refresh" CONTENT="%s">""" % value ret= """<meta name="%s" content="%s">%s""" % (key,value,p) return ret,button
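# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original module): roughly
# what gen() emits for each of the three tag families it knows about. Note
# that importing this module already assumes mako is installed and that a
# common/static directory exists under the working directory.
if __name__ == "__main__":
    meta, btn = gen("red", "/chains")   # remote-control key -> LINK tag
    print meta                          # <link rel="red" href="/chains">
    meta, btn = gen("digit_key", True)  # boolean flag -> FLAGS tag
    print meta                          # <flags digit_key="true">
    meta, btn = gen("refresh", "10")    # META tag plus a real HTTP refresh
    print meta
# ---------------------------------------------------------------------------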
Python
#!/usr/bin/env python
"""Universal feed parser

Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds

Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation

Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""

__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
                    "John Beimler <http://john.beimler.org/>",
                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
                    "Aaron Swartz <http://aaronsw.com/>",
                    "Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0

# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__

# HTTP "Accept" header to send to servers when downloading feeds.  If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"

# List of preferred XML parsers, by SAX driver name.  These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1.  Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0

# List of Python interfaces for HTML Tidy, in order of preference.  Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]

# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
    from cStringIO import StringIO as _StringIO
except:
    from StringIO import StringIO as _StringIO

# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------

# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
    import gzip
except:
    gzip = None
try:
    import zlib
except:
    zlib = None

# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2.  On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    from xml.sax.saxutils import escape as _xmlescape
    _XML_AVAILABLE = 1
except:
    _XML_AVAILABLE = 0
    def _xmlescape(data):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        return data

# base64 support for Atom feeds that contain embedded binary data
try:
    import base64, binascii
except:
    base64 = binascii = None

# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
    import cjkcodecs.aliases
except:
    pass
try:
    import iconv_codec
except:
    pass

# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
    import chardet
    if _debug:
        import chardet.constants
        chardet.constants._debug = 1
except:
    chardet = None

# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass

sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')

SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }

try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc

class FeedParserDict(UserDict):
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)

    def __setitem__(self, key, value):
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)

    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default

    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]

    def has_key(self, key):
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False

    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key

    def __setattr__(self, key, value):
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)

    def __contains__(self, key):
        return self.has_key(key)
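# --- Illustrative aside (not part of the original source): the keymap above
# lets old-style and new-style key names be used interchangeably, e.g.:
#
#     d = FeedParserDict()
#     d['updated'] = u'2006-01-01'
#     d['modified']       # -> u'2006-01-01'  ('modified' aliases 'updated')
#     d.updated           # -> u'2006-01-01'  (attribute access falls back to keys)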
def zopeCompatibilityHack():
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc

_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)

_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    uri = _urifixer.sub(r'\1\3', uri)
    return urlparse.urljoin(base, uri)

class _FeedParserMixin:
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',

                  'http://webns.net/mvcb/':                               'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/':        'ag',
                  'http://purl.org/rss/1.0/modules/annotate/':           'annotate',
                  'http://media.tangent.org/rss/1.0/':                   'audio',
                  'http://backend.userland.com/blogChannelModule':       'blogChannel',
                  'http://web.resource.org/cc/':                         'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company':             'co',
                  'http://purl.org/rss/1.0/modules/content/':            'content',
                  'http://my.theinfo.org/changed/1.0/rss/':              'cp',
                  'http://purl.org/dc/elements/1.1/':                    'dc',
                  'http://purl.org/dc/terms/':                           'dcterms',
                  'http://purl.org/rss/1.0/modules/email/':              'email',
                  'http://purl.org/rss/1.0/modules/event/':              'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0':          'feedburner',
                  'http://freshmeat.net/rss/fm/':                        'fm',
                  'http://xmlns.com/foaf/0.1/':                          'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#':            'geo',
                  'http://postneo.com/icbm/':                            'icbm',
                  'http://purl.org/rss/1.0/modules/image/':              'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd':          'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd':             'itunes',
                  'http://purl.org/rss/1.0/modules/link/':               'l',
                  'http://search.yahoo.com/mrss':                        'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/':      'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#':         'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#':               'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/':          'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/':          'reqv',
                  'http://purl.org/rss/1.0/modules/search/':             'search',
                  'http://purl.org/rss/1.0/modules/slash/':              'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/':           'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/':      'ss',
                  'http://hacks.benhammersley.com/rss/streaming/':       'str',
                  'http://purl.org/rss/1.0/modules/subscription/':       'sub',
                  'http://purl.org/rss/1.0/modules/syndication/':        'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/':           'taxo',
                  'http://purl.org/rss/1.0/modules/threading/':          'thr',
                  'http://purl.org/rss/1.0/modules/textinput/':          'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
                  'http://wellformedweb.org/commentAPI/':                'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/':               'wiki',
                  'http://www.w3.org/1999/xhtml':                        'xhtml',
                  'http://www.w3.org/XML/1998/namespace':                'xml',
                  'http://schemas.pocketsoap.com/rss/myDescModule/':     'szf'
                  }
    _matchnamespaces = {}

    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    html_types = ['text/html', 'application/xhtml+xml']

    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
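    # --- Orientation note (added; not in the original source): this mixin never
    # parses bytes itself; a concrete subclass feeds it events.  _StrictFeedParser
    # (defined below) subclasses xml.sax.handler.ContentHandler and forwards SAX
    # events into unknown_starttag()/unknown_endtag()/handle_data(), while
    # _LooseFeedParser does the same on top of sgmllib.SGMLParser for
    # ill-formed feeds.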
    def unknown_starttag(self, tag, attrs):
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
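            # Worked example (added; not from the original source): inside an
            # Atom <content type="xhtml"> block, a start event for 'xhtml:p'
            # with no attributes falls through to the two lines below, which
            # strip the prefix and append the literal text '<p>' to the
            # current element's buffer via handle_data(..., escape=0).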
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)

        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)

    def unknown_endtag(self, tag):
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            try:
                name2cp(ref)
            except KeyError:
                text = '&%s;' % ref
            else:
                text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        # called for each comment, e.g.
<!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if _debug: sys.stderr.write('entering parse_declaration\n') if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) return k+1 def mapContentType(self, contentType): contentType = contentType.lower() if contentType == 'text': contentType = 'text/plain' elif contentType == 'html': contentType = 'text/html' elif contentType == 'xhtml': contentType = 'application/xhtml+xml' return contentType def trackNamespace(self, prefix, uri): loweruri = uri.lower() if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: self.version = 'rss090' if loweruri == 'http://purl.org/rss/1.0/' and not self.version: self.version = 'rss10' if loweruri == 'http://www.w3.org/2005/atom' and not self.version: self.version = 'atom10' if loweruri.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace uri = 'http://backend.userland.com/rss' loweruri = uri if self._matchnamespaces.has_key(loweruri): self.namespacemap[prefix] = self._matchnamespaces[loweruri] self.namespacesInUse[self._matchnamespaces[loweruri]] = uri else: self.namespacesInUse[prefix or ''] = uri def resolveURI(self, uri): return _urljoin(self.baseuri or '', uri) def decodeEntities(self, element, data): return data def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element, stripWhitespace=1): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() output = ''.join(pieces) if stripWhitespace: output = output.strip() if not expectingText: return output # decode base64 content if base64 and self.contentparams.get('base64', 0): try: output = base64.decodestring(output) except binascii.Error: pass except binascii.Incomplete: pass # resolve relative URIs if (element in self.can_be_relative_uri) and output: output = self.resolveURI(output) # decode entities within embedded markup if not self.contentparams.get('base64', 0): output = self.decodeEntities(element, output) # remove temporary cruft from contentparams try: del self.contentparams['mode'] except KeyError: pass try: del self.contentparams['base64'] except KeyError: pass # resolve relative URIs within embedded markup if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding) # sanitize embedded markup if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding) if self.encoding and type(output) != type(u''): try: output = unicode(output, self.encoding) except: pass # categories/tags/keywords/whatever are handled in _end_category if element == 'category': return output # store output in appropriate place(s) if self.inentry and not self.insource: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif 
element == 'link': self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage): context = self._getContext() if element == 'description': element = 'subtitle' context[element] = output if element == 'link': context['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output context[element + '_detail'] = contentparams return output def pushContent(self, tag, attrsD, defaultContentType, expectingText): self.incontent += 1 self.contentparams = FeedParserDict({ 'type': self.mapContentType(attrsD.get('type', defaultContentType)), 'language': self.lang, 'base': self.baseuri}) self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) self.push(tag, expectingText) def popContent(self, tag): value = self.pop(tag) self.incontent -= 1 self.contentparams.clear() return value def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos <> -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _isBase64(self, attrsD, contentparams): if attrsD.get('mode', '') == 'base64': return 1 if self.contentparams['type'].startswith('text/'): return 0 if self.contentparams['type'].endswith('+xml'): return 0 if self.contentparams['type'].endswith('/xml'): return 0 return 1 def _itsAnHrefDamnIt(self, attrsD): href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) if href: try: del attrsD['url'] except KeyError: pass try: del attrsD['uri'] except KeyError: pass attrsD['href'] = href return attrsD def _save(self, key, value): context = self._getContext() context.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': 'rss091u', '0.92': 'rss092', '0.93': 'rss093', '0.94': 'rss094'} if not self.version: attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = 'rss20' else: self.version = 'rss' def _start_dlhottitles(self, attrsD): self.version = 'hotrss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) _start_feedinfo = _start_channel def _cdf_common(self, attrsD): if attrsD.has_key('lastmod'): self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if attrsD.has_key('href'): self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = 'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): self.inimage = 1 self.push('image', 0) context = self._getContext() context.setdefault('image', FeedParserDict()) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): self.intextinput = 1 self.push('textinput', 0) context = 
self._getContext() context.setdefault('textinput', FeedParserDict()) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) _start_managingeditor = _start_author _start_dc_author = _start_author _start_dc_creator = _start_author _start_itunes_author = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author _end_itunes_author = _end_author def _start_itunes_owner(self, attrsD): self.inpublisher = 1 self.push('publisher', 0) def _end_itunes_owner(self): self.pop('publisher') self.inpublisher = 0 self._sync_author_detail('publisher') def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_dc_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('name', 0) def _end_dc_contributor(self): self._end_name() self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) _start_itunes_name = _start_name def _end_name(self): value = self.pop('name') if self.inpublisher: self._save_author('name', value, 'publisher') elif self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['textinput']['name'] = value _end_itunes_name = _end_name def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['height'] = value def _start_url(self, attrsD): self.push('href', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('href') if self.inauthor: self._save_author('href', value) elif self.incontributor: self._save_contributor('href', value) elif self.inimage: context = self._getContext() context['image']['href'] = value elif self.intextinput: context = self._getContext() context['textinput']['link'] = value _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) _start_itunes_email = _start_email def _end_email(self): value = self.pop('email') if self.inpublisher: self._save_author('email', value, 'publisher') elif self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) _end_itunes_email = _end_email def _getContext(self): if self.insource: context = self.sourcedata elif self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value, prefix='author'): context = self._getContext() context.setdefault(prefix + '_detail', FeedParserDict()) context[prefix + '_detail'][key] = value self._sync_author_detail() def _save_contributor(self, key, value): context = self._getContext() 
context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%s_detail' % key) if detail: name = detail.get('name') email = detail.get('email') if name and email: context[key] = '%s (%s)' % (name, email) elif name: context[key] = name elif email: context[key] = email else: author = context.get(key) if not author: return emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) if not emailmatch: return email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, '') author = author.replace('()', '') author = author.strip() if author and (author[0] == '('): author = author[1:] if author and (author[-1] == ')'): author = author[:-1] author = author.strip() context.setdefault('%s_detail' % key, FeedParserDict()) context['%s_detail' % key]['name'] = author context['%s_detail' % key]['email'] = email def _start_subtitle(self, attrsD): self.pushContent('subtitle', attrsD, 'text/plain', 1) _start_tagline = _start_subtitle _start_itunes_subtitle = _start_subtitle def _end_subtitle(self): self.popContent('subtitle') _end_tagline = _end_subtitle _end_itunes_subtitle = _end_subtitle def _start_rights(self, attrsD): self.pushContent('rights', attrsD, 'text/plain', 1) _start_dc_rights = _start_rights _start_copyright = _start_rights def _end_rights(self): self.popContent('rights') _end_dc_rights = _end_rights _end_copyright = _end_rights def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item _start_product = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def _end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_published(self, attrsD): self.push('published', 1) _start_dcterms_issued = _start_published _start_issued = _start_published def _end_published(self): value = self.pop('published') self._save('published_parsed', _parse_date(value)) _end_dcterms_issued = _end_published _end_issued = _end_published def _start_updated(self, attrsD): self.push('updated', 1) _start_modified = _start_updated _start_dcterms_modified = _start_updated _start_pubdate = _start_updated _start_dc_date = _start_updated def _end_updated(self): value = self.pop('updated') parsed_value = _parse_date(value) self._save('updated_parsed', parsed_value) _end_modified = _end_updated _end_dcterms_modified = _end_updated _end_pubdate = _end_updated _end_dc_date = _end_updated def _start_created(self, attrsD): self.push('created', 1) _start_dcterms_created = _start_created def _end_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value)) _end_dcterms_created = _end_created def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): 
self._save('expired_parsed', _parse_date(self.pop('expired'))) def _start_cc_license(self, attrsD): self.push('license', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('license') def _start_creativecommons_license(self, attrsD): self.push('license', 1) def _end_creativecommons_license(self): self.pop('license') def _addTag(self, term, scheme, label): context = self._getContext() tags = context.setdefault('tags', []) if (not term) and (not scheme) and (not label): return value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) if value not in tags: tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) def _start_category(self, attrsD): if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) term = attrsD.get('term') scheme = attrsD.get('scheme', attrsD.get('domain')) label = attrsD.get('label') self._addTag(term, scheme, label) self.push('category', 1) _start_dc_subject = _start_category _start_keywords = _start_category def _end_itunes_keywords(self): for term in self.pop('itunes_keywords').split(): self._addTag(term, 'http://www.itunes.com/', None) def _start_itunes_category(self, attrsD): self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) self.push('category', 1) def _end_category(self): value = self.pop('category') if not value: return context = self._getContext() tags = context['tags'] if value and len(tags) and not tags[-1]['term']: tags[-1]['term'] = value else: self._addTag(value, None, None) _end_dc_subject = _end_category _end_keywords = _end_category _end_itunes_category = _end_category def _start_cloud(self, attrsD): self._getContext()['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', 'alternate') attrsD.setdefault('type', 'text/html') attrsD = self._itsAnHrefDamnIt(attrsD) if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry or self.insource context = self._getContext() context.setdefault('links', []) context['links'].append(FeedParserDict(attrsD)) if attrsD['rel'] == 'enclosure': self._start_enclosure(attrsD) if attrsD.has_key('href'): expectingText = 0 if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): context['link'] = attrsD['href'] else: self.push('link', expectingText) _start_producturl = _start_link def _end_link(self): value = self.pop('link') context = self._getContext() if self.intextinput: context['textinput']['link'] = value if self.inimage: context['image']['link'] = value _end_producturl = _end_link def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) def _end_guid(self): value = self.pop('id') self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) if self.guidislink: # guid acts as link, but only if 'ispermalink' is not present or is 'true', # and only if the item doesn't already have a link element self._save('link', value) def _start_title(self, attrsD): self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) _start_dc_title = _start_title _start_media_title = _start_title def _end_title(self): value = self.popContent('title') context = self._getContext() if self.intextinput: context['textinput']['title'] = value elif self.inimage: context['image']['title'] = value _end_dc_title = _end_title _end_media_title = _end_title def 
_start_description(self, attrsD): context = self._getContext() if context.has_key('summary'): self._summaryKey = 'content' self._start_content(attrsD) else: self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) def _start_abstract(self, attrsD): self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) def _end_description(self): if self._summaryKey == 'content': self._end_content() else: value = self.popContent('description') context = self._getContext() if self.intextinput: context['textinput']['description'] = value elif self.inimage: context['image']['description'] = value self._summaryKey = None _end_abstract = _end_description def _start_info(self, attrsD): self.pushContent('info', attrsD, 'text/plain', 1) _start_feedburner_browserfriendly = _start_info def _end_info(self): self.popContent('info') _end_feedburner_browserfriendly = _end_info def _start_generator(self, attrsD): if attrsD: attrsD = self._itsAnHrefDamnIt(attrsD) if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) self._getContext()['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') context = self._getContext() if context.has_key('generator_detail'): context['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self._getContext()['generator_detail'] = FeedParserDict({'href': value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): context = self._getContext() if context.has_key('summary'): self._summaryKey = 'content' self._start_content(attrsD) else: self._summaryKey = 'summary' self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) _start_itunes_summary = _start_summary def _end_summary(self): if self._summaryKey == 'content': self._end_content() else: self.popContent(self._summaryKey or 'summary') self._summaryKey = None _end_itunes_summary = _end_summary def _start_enclosure(self, attrsD): attrsD = self._itsAnHrefDamnIt(attrsD) self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) href = attrsD.get('href') if href: context = self._getContext() if not context.get('id'): context['id'] = href def _start_source(self, attrsD): self.insource = 1 def _end_source(self): self.insource = 0 self._getContext()['source'] = copy.deepcopy(self.sourcedata) self.sourcedata.clear() def _start_content(self, attrsD): self.pushContent('content', attrsD, 'text/plain', 1) src = attrsD.get('src') if src: self.contentparams['src'] = src self.push('content', 1) def _start_prodlink(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) def _start_body(self, attrsD): self.pushContent('content', attrsD, 'application/xhtml+xml', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) _start_fullitem = _start_content_encoded def _end_content(self): copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) value = self.popContent('content') if copyToDescription: self._save('description', value) _end_body = _end_content _end_xhtml_body = 
_end_content _end_content_encoded = _end_content _end_fullitem = _end_content _end_prodlink = _end_content def _start_itunes_image(self, attrsD): self.push('itunes_image', 0) self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) _start_itunes_link = _start_itunes_image def _end_itunes_block(self): value = self.pop('itunes_block', 0) self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 def _end_itunes_explicit(self): value = self.pop('itunes_explicit', 0) self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): if _debug: sys.stderr.write('trying StrictFeedParser\n') xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None def startPrefixMapping(self, prefix, uri): self.trackNamespace(prefix, uri) def startElementNS(self, name, qname, attrs): namespace, localname = name lowernamespace = str(namespace or '').lower() if lowernamespace.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace namespace = 'http://backend.userland.com/rss' lowernamespace = namespace if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = None prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix if prefix: localname = prefix + ':' + localname localname = str(localname).lower() if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. 
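        # Worked example (added; not from the original source): a start event
        # ('http://purl.org/dc/elements/1.1/', 'creator') matches the 'dc'
        # entry in _matchnamespaces, so localname becomes 'dc:creator' and
        # unknown_starttag() ultimately dispatches to _start_dc_creator(),
        # regardless of which prefix the feed itself declared.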
attrsD = {} for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): lowernamespace = (namespace or '').lower() prefix = self._matchnamespaces.get(lowernamespace, '') if prefix: attrlocalname = prefix + ':' + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) self.unknown_starttag(localname, attrsD.items()) def characters(self, text): self.handle_data(text) def endElementNS(self, name, qname): namespace, localname = name lowernamespace = str(namespace or '').lower() if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = '' prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if prefix: localname = prefix + ':' + localname localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', 'img', 'input', 'isindex', 'link', 'meta', 'param'] def __init__(self, encoding): self.encoding = encoding if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def _shorttag_replace(self, match): tag = match.group(1) if tag in self.elements_no_end_tag: return '<' + tag + ' />' else: return '<' + tag + '></' + tag + '>' def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') if self.encoding and type(data) == type(u''): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) def normalize_attrs(self, attrs): # utility method to be called by descendants attrs = [(k.lower(), v) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')] if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag) uattrs = [] # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds for key, value in attrs: if type(value) != type(u''): value = unicode(value, self.encoding) uattrs.append((unicode(key, self.encoding), value)) strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding) if tag in self.elements_no_end_tag: self.pieces.append('<%(tag)s%(strattrs)s />' % locals()) else: self.pieces.append('<%(tag)s%(strattrs)s>' % locals()) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be 'pre' # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%(tag)s>" % locals()) def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' # Reconstruct the original character reference. self.pieces.append('&#%(ref)s;' % locals()) def handle_entityref(self, ref): # called for each entity reference, e.g. 
for '&copy;', ref will be 'copy' # Reconstruct the original entity reference. self.pieces.append('&%(ref)s;' % locals()) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. self.pieces.append('<!--%(text)s-->' % locals()) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. self.pieces.append('<?%(text)s>' % locals()) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append('<!%(text)s>' % locals()) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def output(self): '''Return processed HTML as a single string''' return ''.join([str(p) for p in self.pieces]) class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): data = data.replace('&lt;', '<') data = data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") return data class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src')] def __init__(self, baseuri, encoding): _BaseHTMLProcessor.__init__(self, encoding) self.baseuri = baseuri def resolveURI(self, uri): return _urljoin(self.baseuri, uri) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding): if _debug: sys.stderr.write('entering 
_resolveRelativeURIs\n') p = _RelativeURIResolver(baseURI, encoding) p.feed(htmlSource) return p.output() class _HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width'] unacceptable_elements_with_end_tag = ['script', 'applet'] def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 def unknown_starttag(self, tag, attrs): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 return attrs = self.normalize_attrs(attrs) attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def _sanitizeHTML(htmlSource, encoding): p = _HTMLSanitizer(encoding) p.feed(htmlSource) data = p.output() if TIDY_MARKUP: # loop through list of preferred Tidy interfaces looking for one that's installed, # then set up a common _tidy function to wrap the interface-specific API. 
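        # Interface note (added; check against your installed bindings):
        # uTidy's tidy.parseString(data, **kwargs) returns a printable document
        # object, while mxTidy's mx.Tidy.tidy(data, **kwargs) returns a
        # (nerrors, nwarnings, data, errordata) tuple, hence the two small
        # adapters below that give both libraries a common _tidy(data, **kwargs)
        # shape.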
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data

class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            return self.http_error_default(req, fp, code, msg, headers)

def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module.  This MUST
    be in GMT (Greenwich Mean Time).  The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a urllib2 opener.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))

_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)

# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM', '---DD',
                 'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
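# --- Illustrative only (not part of the original source): additional formats
# can be supported by registering a handler; it receives the raw date string
# and returns a 9-tuple in GMT (e.g. a time.struct_time), or None to let the
# next registered handler try.  A hypothetical epoch-seconds handler:
#
#     def _parse_date_epoch(dateString):
#         try:
#             return time.gmtime(int(dateString))
#         except ValueError:
#             return None
#     registerDateHandler(_parse_date_epoch)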
_korean_year = u'\ub144' # b3e2 in euc-kr _korean_month = u'\uc6d4' # bff9 in euc-kr _korean_day = u'\uc77c' # c0cf in euc-kr _korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr _korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr _korean_onblog_date_re = \ re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ (_korean_year, _korean_month, _korean_day)) _korean_nate_date_re = \ re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ (_korean_am, _korean_pm)) def _parse_date_onblog(dateString): '''Parse a string according to the OnBlog 8-bit date format''' m = _korean_onblog_date_re.match(dateString) if not m: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_onblog) def _parse_date_nate(dateString): '''Parse a string according to the Nate 8-bit date format''' m = _korean_nate_date_re.match(dateString) if not m: return hour = int(m.group(5)) ampm = m.group(4) if (ampm == _korean_pm): hour += 12 hour = str(hour) if len(hour) == 1: hour = '0' + hour w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_nate) _mssql_date_re = \ re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') def _parse_date_mssql(dateString): '''Parse a string according to the MS SQL date format''' m = _mssql_date_re.match(dateString) if not m: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_mssql) # Unicode strings for Greek date strings _greek_months = \ { \ u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \ { \ u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 
in iso-8859-7 u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \ re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return try: wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] except: return rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) return _parse_date_rfc822(rfc822date) registerDateHandler(_parse_date_greek) # Unicode strings for Hungarian date strings _hungarian_months = \ { \ u'janu\u00e1r': u'01', # e1 in iso-8859-2 u'febru\u00e1ri': u'02', # e1 in iso-8859-2 u'm\u00e1rcius': u'03', # e1 in iso-8859-2 u'\u00e1prilis': u'04', # e1 in iso-8859-2 u'm\u00e1ujus': u'05', # e1 in iso-8859-2 u'j\u00fanius': u'06', # fa in iso-8859-2 u'j\u00falius': u'07', # fa in iso-8859-2 u'augusztus': u'08', u'szeptember': u'09', u'okt\u00f3ber': u'10', # f3 in iso-8859-2 u'november': u'11', u'december': u'12', } _hungarian_date_format_re = \ re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') def _parse_date_hungarian(dateString): '''Parse a string according to a Hungarian 8-bit date format.''' m = _hungarian_date_format_re.match(dateString) if not m: return try: month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour except: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_hungarian) # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by # Drake and licensed under the Python license. 
Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group('year')) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group('julian') if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group('month') day = 1 if month is None: month = 1 else: month = int(month) day = m.group('day') if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group('hours') if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group('minutes')) seconds = m.group('seconds') if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): '''Return the Time Zone Designator as an offset in seconds from UTC.''' if not m: return 0 tzd = m.group('tzd') if not tzd: return 0 if tzd == 'Z': return 0 hours = int(m.group('tzdhours')) minutes = m.group('tzdminutes') if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == '+': return -offset return offset __date_re = ('(?P<year>\d\d\d\d)' '(?:(?P<dsep>-|)' '(?:(?P<julian>\d\d\d)' '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?') __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)' __tzd_rx = re.compile(__tzd_re) __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' + __tzd_re) __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) def _parse_date_rfc822(dateString): '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' data = dateString.split() if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: del data[0] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') dateString = " ".join(data) if len(data) < 5: dateString += ' 00:00:00 GMT' tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) # rfc822.py defines several time zones, but we define some extra ones. # 'ET' is equivalent to 'EST', etc. 
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} rfc822._timezones.update(_additional_timezones) registerDateHandler(_parse_date_rfc822) def _parse_date(dateString): '''Parses a variety of date formats into a 9-tuple in GMT''' for handler in _date_handlers: try: date9tuple = handler(dateString) if not date9tuple: continue if len(date9tuple) != 9: if _debug: sys.stderr.write('date handler function must return 9-tuple\n') raise ValueError map(int, date9tuple) return date9tuple except Exception, e: if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) pass return None def _getCharacterEncoding(http_headers, xml_data): '''Get the character encoding of the XML document http_headers is a dictionary xml_data is a raw string (not Unicode) This is so much trickier than it sounds, it's not even funny. According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type is application/xml, application/*+xml, application/xml-external-parsed-entity, or application/xml-dtd, the encoding given in the charset parameter of the HTTP Content-Type takes precedence over the encoding given in the XML prefix within the document, and defaults to 'utf-8' if neither are specified. But, if the HTTP Content-Type is text/xml, text/*+xml, or text/xml-external-parsed-entity, the encoding given in the XML prefix within the document is ALWAYS IGNORED and only the encoding given in the charset parameter of the HTTP Content-Type header should be respected, and it defaults to 'us-ascii' if not specified. Furthermore, discussion on the atom-syntax mailing list with the author of RFC 3023 leads me to the conclusion that any document served with a Content-Type of text/* and no charset parameter must be treated as us-ascii. (We now do this.) And also that it must always be flagged as non-well-formed. (We now do this too.) If Content-Type is unspecified (input was local file or non-HTTP source) or unrecognized (server just got it totally wrong), then go by the encoding given in the XML prefix of the document and default to 'iso-8859-1' as per the HTTP specification (RFC 2616). Then, assuming we didn't find a character encoding in the HTTP headers (and the HTTP Content-type allowed us to look in the body), we need to sniff the first few bytes of the XML data and try to determine whether the encoding is ASCII-compatible. Section F of the XML specification shows the way here: http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info If the sniffed encoding is not ASCII-compatible, we need to make it ASCII compatible so that we can sniff further into the XML declaration to find the encoding attribute, which will tell us the true encoding. Of course, none of this guarantees that we will be able to parse the feed in the declared character encoding (assuming it was declared correctly, which many are not). CJKCodecs and iconv_codec help a lot; you should definitely install them if you can. 
http://cjkpython.i18n.org/ ''' def _parseHTTPContentType(content_type): '''takes HTTP Content-Type header and returns (content type, charset) If no charset is specified, returns (content type, '') If no content type is specified, returns ('', '') Both return parameters are guaranteed to be lowercase strings ''' content_type = content_type or '' content_type, params = cgi.parse_header(content_type) return content_type, params.get('charset', '').replace("'", '') sniffed_xml_encoding = '' xml_encoding = '' true_encoding = '' http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) # Must sniff for non-ASCII-compatible character encodings before # searching for XML declaration. This heuristic is defined in # section F of the XML specification: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = _ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: # ASCII-compatible pass xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding acceptable_content_type = 0 application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') text_content_types = ('text/xml', 'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): acceptable_content_type = 1 true_encoding = http_encoding or xml_encoding or 'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): acceptable_content_type = 1 true_encoding = http_encoding or 'us-ascii' elif http_content_type.startswith('text/'): true_encoding = http_encoding or 'us-ascii' 
elif http_headers and (not http_headers.has_key('content-type')): true_encoding = xml_encoding or 'iso-8859-1' else: true_encoding = xml_encoding or 'utf-8' return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type def _toUTF8(data, encoding): '''Changes an XML data stream on the fly to specify a new encoding data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already encoding is a string recognized by encodings.aliases ''' if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16be': sys.stderr.write('trying utf-16be instead\n') encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16le': sys.stderr.write('trying utf-16le instead\n') encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-8': sys.stderr.write('trying utf-8 instead\n') encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32be': sys.stderr.write('trying utf-32be instead\n') encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32le': sys.stderr.write('trying utf-32le instead\n') encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) declmatch = re.compile('^<\?xml[^>]*?>') newdecl = '''<?xml version='1.0' encoding='utf-8'?>''' if declmatch.search(newdata): newdata = declmatch.sub(newdecl, newdata) else: newdata = newdecl + u'\n' + newdata return newdata.encode('utf-8') def _stripDoctype(data): '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) rss_version may be 'rss091n' or None stripped_data is the same XML document, minus the DOCTYPE ''' entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE) data = entity_pattern.sub('', data) doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE) doctype_results = doctype_pattern.findall(data) doctype = doctype_results and doctype_results[0] or '' if doctype.lower().count('netscape'): version = 'rss091n' else: version = None data = doctype_pattern.sub('', data) return version, data def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): '''Parse a feed from a URL, file, stream, or string''' result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if type(handlers) == types.InstanceType: handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' f = None # if feed is gzip-compressed, decompress it if f and data and hasattr(f, 'headers'): if gzip and f.headers.get('content-encoding', '') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. 
Ideally, we should re-request the # feed without the 'Accept-encoding: gzip' header, # but we don't. result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and f.headers.get('content-encoding', '') == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if hasattr(f, 'info'): info = f.info() result['etag'] = info.getheader('ETag') last_modified = info.getheader('Last-Modified') if last_modified: result['modified'] = _parse_date(last_modified) if hasattr(f, 'url'): result['href'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status if hasattr(f, 'headers'): result['headers'] = f.headers.dict if hasattr(f, 'close'): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get('headers', {}) result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ _getCharacterEncoding(http_headers, data) if http_headers and (not acceptable_content_type): if http_headers.has_key('content-type'): bozo_message = '%s is not an XML media type' % http_headers['content-type'] else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) result['version'], data = _stripDoctype(data) baseuri = http_headers.get('content-location', result.get('href')) baselang = http_headers.get('content-language', None) # if server sent 304, we're done if result.get('status', 0) == 304: result['version'] = '' result['debug_message'] = 'The feed has not changed since you last checked, ' + \ 'so the server sent no data. This is a feature, not a bug!' 
return result # if there was a problem downloading, we're done if not data: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): if not proposed_encoding: continue if proposed_encoding in tried_encodings: continue tried_encodings.append(proposed_encoding) try: data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 break except: pass # if no luck and we have auto-detection library, try that if (not known_encoding) and chardet: try: proposed_encoding = chardet.detect(data)['encoding'] if proposed_encoding and (proposed_encoding not in tried_encodings): tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried utf-8 yet, try that if (not known_encoding) and ('utf-8' not in tried_encodings): try: proposed_encoding = 'utf-8' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried windows-1252 yet, try that if (not known_encoding) and ('windows-1252' not in tried_encodings): try: proposed_encoding = 'windows-1252' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck, give up if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingUnknown( \ 'document encoding unknown, I tried ' + \ '%s, %s, utf-8, and windows-1252 but nothing worked' % \ (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingOverride( \ 'documented declared as %s, but parsed as %s' % \ (result['encoding'], proposed_encoding)) result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '') feedparser.feed(data) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version result['namespaces'] = feedparser.namespacesInUse return result if __name__ == '__main__': if not sys.argv[1:]: print __doc__ sys.exit(0) else: urls = sys.argv[1:] zopeCompatibilityHack() from pprint import pprint for url in urls: print url print result = parse(url) pprint(result) print 
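# Illustrative sketch (an addition, not part of the original module): typical
# use of the public parse() entry point defined above. The URL is a
# placeholder; the keys shown (bozo, feed, entries) are the ones parse()
# populates in its result dictionary.
#
#     import feedparser
#     d = feedparser.parse('http://example.org/index.xml')
#     if d.get('bozo'):
#         print 'feed is ill-formed:', repr(d['bozo_exception'])
#     print d['feed'].get('title', '(no title)')
#     for entry in d['entries']:
#         print '-', entry.get('title')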
#REVISION HISTORY #1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements, # added Simon Fell's test suite #1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections #2.0 - 10/19/2002 # JD - use inchannel to watch out for image and textinput elements which can # also contain title, link, and description elements # JD - check for isPermaLink='false' attribute on guid elements # JD - replaced openAnything with open_resource supporting ETag and # If-Modified-Since request headers # JD - parse now accepts etag, modified, agent, and referrer optional # arguments # JD - modified parse to return a dictionary instead of a tuple so that any # etag or modified information can be returned and cached by the caller #2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything # because of etag/modified, return the old etag/modified to the caller to # indicate why nothing is being returned #2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its # useless. Fixes the problem JD was addressing by adding it. #2.1 - 11/14/2002 - MAP - added gzip support #2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent. # start_admingeneratoragent is an example of how to handle elements with # only attributes, no content. #2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify); # also, make sure we send the User-Agent even if urllib2 isn't available. # Match any variation of backend.userland.com/rss namespace. #2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is. #2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's # snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed # project name #2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); # removed unnecessary urllib code -- urllib2 should always be available anyway; # return actual url, status, and full HTTP headers (as result['url'], # result['status'], and result['headers']) if parsing a remote feed over HTTP -- # this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>; # added the latest namespace-of-the-week for RSS 2.0 #2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom # User-Agent (otherwise urllib2 sends two, which confuses some servers) #2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for # inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds #2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or # textInput, and also to return the character encoding (if specified) #2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking # nested divs within content (JohnD); fixed missing sys import (JohanS); # fixed regular expression to capture XML character encoding (Andrei); # added support for Atom 0.3-style links; fixed bug with textInput tracking; # added support for cloud (MartijnP); added support for multiple # category/dc:subject (MartijnP); normalize content model: 'description' gets # description (which can come from description, summary, or full content if no # description), 'content' gets dict of base/language/type/value (which can come # from content:encoded, xhtml:body, content, or fullitem); # fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang # tracking; fixed bug tracking unknown tags; fixed bug tracking content when # <content> element is not in default namespace (like Pocketsoap feed); # 
resolve relative URLs in link, guid, docs, url, comments, wfw:comment, # wfw:commentRSS; resolve relative URLs within embedded HTML markup in # description, xhtml:body, content, content:encoded, title, subtitle, # summary, info, tagline, and copyright; added support for pingback and # trackback namespaces #2.7 - 1/5/2004 - MAP - really added support for trackback and pingback # namespaces, as opposed to 2.6 when I said I did but didn't really; # sanitize HTML markup within some elements; added mxTidy support (if # installed) to tidy HTML markup within some elements; fixed indentation # bug in _parse_date (FazalM); use socket.setdefaulttimeout if available # (FazalM); universal date parsing and normalization (FazalM): 'created', modified', # 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', # 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' # and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa #2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory # leak not closing url opener (JohnD); added dc:publisher support (MarekK); # added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) #2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in # encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); # fixed relative URI processing for guid (skadz); added ICBM support; added # base64 support #2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many # blogspot.com sites); added _debug variable #2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing #3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); # added several new supported namespaces; fixed bug tracking naked markup in # description; added support for enclosure; added support for source; re-added # support for cloud which got dropped somehow; added support for expirationDate #3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking # xml:base URI, one for documents that don't define one explicitly and one for # documents that define an outer and an inner xml:base that goes out of scope # before the end of the document #3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level #3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version'] # will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; # added support for creativeCommons:license and cc:license; added support for # full Atom content model in title, tagline, info, copyright, summary; fixed bug # with gzip encoding (not always telling server we support it when we do) #3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail # (dictionary of 'name', 'url', 'email'); map author to author_detail if author # contains name + email address #3.0b8 - 1/28/2004 - MAP - added support for contributor #3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added # support for summary #3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from # xml.util.iso8601 #3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain # dangerous markup; fiddled with decodeEntities (not right); liberalized # date parsing even further #3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); # added support to Atom 0.2 subtitle; added support for Atom content model # in copyright; better sanitizing of dangerous HTML elements with end 
tags # (script, frameset) #3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, # etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />) #3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under # Python 2.1 #3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; # fixed bug capturing author and contributor URL; fixed bug resolving relative # links in author and contributor URL; fixed bug resolvin relative links in # generator URL; added support for recognizing RSS 1.0; passed Simon Fell's # namespace tests, and included them permanently in the test suite with his # permission; fixed namespace handling under Python 2.1 #3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) #3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 #3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); # use libxml2 (if available) #3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author # name was in parentheses; removed ultra-problematic mxTidy support; patch to # workaround crash in PyXML/expat when encountering invalid entities # (MarkMoraes); support for textinput/textInput #3.0b20 - 4/7/2004 - MAP - added CDF support #3.0b21 - 4/14/2004 - MAP - added Hot RSS support #3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in # results dict; changed results dict to allow getting values with results.key # as well as results[key]; work around embedded illformed HTML with half # a DOCTYPE; work around malformed Content-Type header; if character encoding # is wrong, try several common ones before falling back to regexes (if this # works, bozo_exception is set to CharacterEncodingOverride); fixed character # encoding issues in BaseHTMLProcessor by tracking encoding and converting # from Unicode to raw strings before feeding data to sgmllib.SGMLParser; # convert each value in results to Unicode (if possible), even if using # regex-based parsing #3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain # high-bit characters in attributes in embedded HTML in description (thanks # Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in # FeedParserDict; tweaked FeedParserDict.has_key to return True if asking # about a mapped key #3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and # results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could # cause the same encoding to be tried twice (even if it failed the first time); # fixed DOCTYPE stripping when DOCTYPE contained entity declarations; # better textinput and image tracking in illformed RSS 1.0 feeds #3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed # my blink tag tests #3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that # failed to parse utf-16 encoded feeds; made source into a FeedParserDict; # duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; # added support for image; refactored parse() fallback logic to try other # encodings if SAX parsing fails (previously it would only try other encodings # if re-encoding failed); remove unichr madness in normalize_attrs now that # we're properly tracking encoding in and out of BaseHTMLProcessor; set # feed.language from root-level xml:lang; set entry.id from rdf:about; # send Accept header #3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between # iso-8859-1 and windows-1252 anyway, and 
most incorrectly marked feeds are # windows-1252); fixed regression that could cause the same encoding to be # tried twice (even if it failed the first time) #3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; # recover from malformed content-type header parameter with no equals sign # ('text/xml; charset:iso-8859-1') #3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities # to Unicode equivalents in illformed feeds (aaronsw); added and # passed tests for converting character entities to Unicode equivalents # in illformed feeds (aaronsw); test for valid parsers when setting # XML_AVAILABLE; make version and encoding available when server returns # a 304; add handlers parameter to pass arbitrary urllib2 handlers (like # digest auth or proxy support); add code to parse username/password # out of url and send as basic authentication; expose downloading-related # exceptions in bozo_exception (aaronsw); added __contains__ method to # FeedParserDict (aaronsw); added publisher_detail (aaronsw) #3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always # convert feed to UTF-8 before passing to XML parser; completely revamped # logic for determining character encoding and attempting XML parsing # (much faster); increased default timeout to 20 seconds; test for presence # of Location header on redirects; added tests for many alternate character # encodings; support various EBCDIC encodings; support UTF-16BE and # UTF16-LE with or without a BOM; support UTF-8 with a BOM; support # UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no # XML parsers are available; added support for 'Content-encoding: deflate'; # send blank 'Accept-encoding: ' header if neither gzip nor zlib modules # are available #3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure # problem tracking xml:base and xml:lang if element declares it, child # doesn't, first grandchild redeclares it, and second grandchild doesn't; # refactored date parsing; defined public registerDateHandler so callers # can add support for additional date formats at runtime; added support # for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added # zopeCompatibilityHack() which turns FeedParserDict into a regular # dictionary, required for Zope compatibility, and also makes command- # line debugging easier because pprint module formats real dictionaries # better than dictionary-like objects; added NonXMLContentType exception, # which is stored in bozo_exception when a feed is served with a non-XML # media type such as 'text/plain'; respect Content-Language as default # language if not xml:lang is present; cloud dict is now FeedParserDict; # generator dict is now FeedParserDict; better tracking of xml:lang, # including support for xml:lang='' to unset the current language; # recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default # namespace; don't overwrite final status on redirects (scenarios: # redirecting to a URL that returns 304, redirecting to a URL that # redirects to another URL with a different type of redirect); add # support for HTTP 303 redirects #4.0 - MAP - support for relative URIs in xml:base attribute; fixed # encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; # support for Atom 1.0; support for iTunes extensions; new 'tags' for # categories/keywords/etc. 
#  as array of dict
#  {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
#  terminology; parse RFC 822-style dates with no time; lots of other
#  bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
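# Illustrative sketch (an addition, not part of the original module): polite
# re-polling with the etag and modified arguments accepted by parse().
# Feeding back the values from a previous result lets the server answer with
# 304 Not Modified, which parse() reports via result['status'] and the
# debug_message seen earlier. The URL is a placeholder.
#
#     import feedparser
#     url = 'http://example.org/index.xml'
#     d = feedparser.parse(url)
#     etag, modified = d.get('etag'), d.get('modified')
#     # ... some time later, re-poll without re-downloading unchanged data ...
#     d2 = feedparser.parse(url, etag=etag, modified=modified)
#     if d2.get('status') == 304:
#         pass  # nothing changed; keep using the entries cached from d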
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2010, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.2.0" __copyright__ = "Copyright (c) 2004-2010 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" def _match_css_class(str): """Build a RE to match the given CSS class.""" return re.compile(r"(^|.*\s)%s($|\s)" % str) # First, the classes that represent markup elements. class PageElement(object): """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.index(self) if hasattr(replaceWith, "parent")\ and replaceWith.parent is self.parent: # We're replacing this element with one of its siblings. index = replaceWith.parent.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def replaceWithChildren(self): myParent = self.parent myIndex = self.parent.index(self) self.extract() reversedChildren = list(self.contents) reversedChildren.reverse() for child in reversedChildren: myParent.insert(myIndex, child) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: del self.parent.contents[self.parent.index(self)] except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if isinstance(newChild, basestring) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: # We're 'inserting' an element that's already one # of this object's children. 
if newChild.parent is self: index = self.index(newChild) if index > position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings 
of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name # (Possibly) special case some findAll*(...) searches elif text is None and not limit and not attrs and not kwargs: # findAll*(True) if name is True: return [element for element in generator() if isinstance(element, Tag)] # findAll*('tag-name') elif isinstance(name, basestring): return [element for element in generator() if isinstance(element, Tag) and element.name == name] else: strainer = SoupStrainer(name, attrs, text, **kwargs) # Build a SoupStrainer else: strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i is not None: i = i.next yield i def nextSiblingGenerator(self): i = self while i is not None: i = i.nextSibling yield i def previousGenerator(self): i = self while i is not None: i = i.previous yield i def previousSiblingGenerator(self): i = self while i is not None: i = i.previousSibling yield i def parentGenerator(self): i = self while i is not None: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs is None: attrs = [] elif isinstance(attrs, dict): attrs = attrs.items() self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. 
convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def getString(self): if (len(self.contents) == 1 and isinstance(self.contents[0], NavigableString)): return self.contents[0] def setString(self, string): """Replace the contents of the tag with a string""" self.clear() self.append(string) string = property(getString, setString) def getText(self, separator=u""): if not len(self.contents): return u"" stopNode = self._lastRecursiveChild().next strings = [] current = self.contents[0] while current is not stopNode: if isinstance(current, NavigableString): strings.append(current.strip()) current = current.next return separator.join(strings) text = property(getText) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def clear(self): """Extract all children.""" for child in self.contents[:]: child.extract() def index(self, element): for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. 
Should this be fixed?""" if other is self: return True if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isinstance(val, basestring): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # The numeric reference &#39; is safe in # both HTML and XML; the &apos; entity is # not predefined in HTML. val = val.replace("'", "&#39;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() if len(self.contents) == 0: return current = self.contents[0] while current is not None: next = current.next if isinstance(current, Tag): del current.contents[:] current.parent = None current.previous = None current.previousSibling = None current.next = None current.nextSibling = None current = next def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): # Just use the iterator from the contents return iter(self.contents) def recursiveChildGenerator(self): if not len(self.contents): raise StopIteration stopNode = self._lastRecursiveChild().next current = self.contents[0] while current is not stopNode: yield current current = current.next # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isinstance(attrs, basestring): kwargs['class'] = _match_css_class(attrs) attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, "__iter__") \ and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, basestring): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst is True: result = markup is not None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isinstance(markup, basestring): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif hasattr(matchAgainst, '__iter__'): # list-like result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isinstance(markup, basestring): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif hasattr(portion, '__iter__'): # is a list #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. 
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not hasattr(self.markupMassage, "__iter__"): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. 
If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.startswith('start_') or methodName.startswith('end_') \ or methodName.startswith('do_'): return SGMLParser.__getattr__(self, methodName) elif not methodName.startswith('__'): return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers is not None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurrence. popTo = name break if (nestingResetTriggers is not None and p.name in nestingResetTriggers) \ or (nestingResetTriggers is None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs]) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here.
We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (i.e. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurrence of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurrence of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class.
If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ('br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col')) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center') #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del') #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre') #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. 
newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-so-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big') I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',) NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names!
It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if isinstance(sub, tuple): if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = 
(0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
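# The function below is a minimal usage sketch for the parsing and search
# API defined above (BeautifulSoup, find, findAll, SoupStrainer). The
# sample markup and every name local to the function are illustrative
# assumptions, not part of the library itself.
def _demo_search_api():
    markup = '<html><body><a href="http://example.com/" class="x">one</a>' \
             '<a href="http://example.com/two">two</a></body></html>'
    soup = BeautifulSoup(markup)
    links = soup.findAll('a')               # every <a> Tag, in document order
    first = soup.find('a', {'class': 'x'})  # first <a> whose class is "x"
    # A SoupStrainer passed as parseOnlyThese restricts the parse itself to
    # matching elements, which saves memory on large documents.
    link_soup = BeautifulSoup(markup, parseOnlyThese=SoupStrainer('a'))
    return links, first, link_soup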
Python
import operator
import math

class vec2d(object):
    """2d vector class, supports vector and scalar operators,
       and also provides a bunch of high level functions
       """
    __slots__ = ['x', 'y']

    def __init__(self, x_or_pair, y=None):
        if y is None:
            self.x = x_or_pair[0]
            self.y = x_or_pair[1]
        else:
            self.x = x_or_pair
            self.y = y

    def __len__(self):
        return 2

    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise IndexError("Invalid subscript "+str(key)+" to vec2d")

    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise IndexError("Invalid subscript "+str(key)+" to vec2d")

    # String representation (for debugging)
    def __repr__(self):
        return 'vec2d(%s, %s)' % (self.x, self.y)

    # Comparison
    def __eq__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x == other[0] and self.y == other[1]
        else:
            return False

    def __ne__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x != other[0] or self.y != other[1]
        else:
            return True

    def __nonzero__(self):
        # Python 2 requires __nonzero__ to return a bool or an int,
        # so the truth value must be wrapped in bool()
        return bool(self.x or self.y)

    # Generic operator handlers
    def _o2(self, other, f):
        "Any two-operator operation where the left operand is a vec2d"
        if isinstance(other, vec2d):
            return vec2d(f(self.x, other.x), f(self.y, other.y))
        elif hasattr(other, "__getitem__"):
            return vec2d(f(self.x, other[0]), f(self.y, other[1]))
        else:
            return vec2d(f(self.x, other), f(self.y, other))

    def _r_o2(self, other, f):
        "Any two-operator operation where the right operand is a vec2d"
        if hasattr(other, "__getitem__"):
            return vec2d(f(other[0], self.x), f(other[1], self.y))
        else:
            return vec2d(f(other, self.x), f(other, self.y))

    def _io(self, other, f):
        "inplace operator"
        if hasattr(other, "__getitem__"):
            self.x = f(self.x, other[0])
            self.y = f(self.y, other[1])
        else:
            self.x = f(self.x, other)
            self.y = f(self.y, other)
        return self

    # Addition
    def __add__(self, other):
        if isinstance(other, vec2d):
            return vec2d(self.x + other.x, self.y + other.y)
        elif hasattr(other, "__getitem__"):
            return vec2d(self.x + other[0], self.y + other[1])
        else:
            return vec2d(self.x + other, self.y + other)
    __radd__ = __add__

    def __iadd__(self, other):
        if isinstance(other, vec2d):
            self.x += other.x
            self.y += other.y
        elif hasattr(other, "__getitem__"):
            self.x += other[0]
            self.y += other[1]
        else:
            self.x += other
            self.y += other
        return self

    # Subtraction
    def __sub__(self, other):
        if isinstance(other, vec2d):
            return vec2d(self.x - other.x, self.y - other.y)
        elif hasattr(other, "__getitem__"):
            return vec2d(self.x - other[0], self.y - other[1])
        else:
            return vec2d(self.x - other, self.y - other)

    def __rsub__(self, other):
        if isinstance(other, vec2d):
            return vec2d(other.x - self.x, other.y - self.y)
        if hasattr(other, "__getitem__"):
            return vec2d(other[0] - self.x, other[1] - self.y)
        else:
            return vec2d(other - self.x, other - self.y)

    def __isub__(self, other):
        if isinstance(other, vec2d):
            self.x -= other.x
            self.y -= other.y
        elif hasattr(other, "__getitem__"):
            self.x -= other[0]
            self.y -= other[1]
        else:
            self.x -= other
            self.y -= other
        return self

    # Multiplication
    def __mul__(self, other):
        if isinstance(other, vec2d):
            # componentwise product: each component is multiplied by the
            # matching component of the other vector
            return vec2d(self.x*other.x, self.y*other.y)
        if hasattr(other, "__getitem__"):
            return vec2d(self.x*other[0], self.y*other[1])
        else:
            return vec2d(self.x*other, self.y*other)
    __rmul__ = __mul__

    def __imul__(self, other):
        if isinstance(other, vec2d):
            self.x *= other.x
            self.y *= other.y
        elif hasattr(other, "__getitem__"):
            self.x *= other[0]
            self.y *= other[1]
        else:
            self.x *= other
            self.y *= other
        return self

    # Division
    def __div__(self, other): return self._o2(other, operator.div)
    def __rdiv__(self, other): return self._r_o2(other, operator.div)
    def __idiv__(self, other): return self._io(other, operator.div)

    def __floordiv__(self, other): return self._o2(other, operator.floordiv)
    def __rfloordiv__(self, other): return self._r_o2(other, operator.floordiv)
    def __ifloordiv__(self, other): return self._io(other, operator.floordiv)

    def __truediv__(self, other): return self._o2(other, operator.truediv)
    def __rtruediv__(self, other): return self._r_o2(other, operator.truediv)
    def __itruediv__(self, other): return self._io(other, operator.truediv)

    # Modulo
    def __mod__(self, other): return self._o2(other, operator.mod)
    def __rmod__(self, other): return self._r_o2(other, operator.mod)

    # divmod is a builtin; the operator module has no divmod function
    def __divmod__(self, other): return self._o2(other, divmod)
    def __rdivmod__(self, other): return self._r_o2(other, divmod)

    # Exponentiation
    def __pow__(self, other): return self._o2(other, operator.pow)
    def __rpow__(self, other): return self._r_o2(other, operator.pow)

    # Bitwise operators
    def __lshift__(self, other): return self._o2(other, operator.lshift)
    def __rlshift__(self, other): return self._r_o2(other, operator.lshift)
    def __rshift__(self, other): return self._o2(other, operator.rshift)
    def __rrshift__(self, other): return self._r_o2(other, operator.rshift)
    def __and__(self, other): return self._o2(other, operator.and_)
    __rand__ = __and__
    def __or__(self, other): return self._o2(other, operator.or_)
    __ror__ = __or__
    def __xor__(self, other): return self._o2(other, operator.xor)
    __rxor__ = __xor__

    # Unary operations
    def __neg__(self):
        return vec2d(operator.neg(self.x), operator.neg(self.y))

    def __pos__(self):
        return vec2d(operator.pos(self.x), operator.pos(self.y))

    def __abs__(self):
        return vec2d(abs(self.x), abs(self.y))

    def __invert__(self):
        return vec2d(-self.x, -self.y)

    # vectory functions
    def get_length_sqrd(self):
        return self.x**2 + self.y**2

    def get_length(self):
        return math.sqrt(self.x**2 + self.y**2)

    def __setlength(self, value):
        length = self.get_length()
        if length > 0:
            self.x *= value/length
            self.y *= value/length
        else:
            self.x = 0
            self.y = 0
    length = property(get_length, __setlength, None,
                      "gets or sets the magnitude of the vector")

    def rotate(self, angle_degrees):
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        self.x = x
        self.y = y

    def rotated(self, angle_degrees):
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        return vec2d(x, y)

    def get_angle(self):
        if self.get_length_sqrd() == 0:
            return 0
        return math.degrees(math.atan2(self.y, self.x))

    def __setangle(self, angle_degrees):
        self.x = self.length
        self.y = 0
        self.rotate(angle_degrees)
    angle = property(get_angle, __setangle, None,
                     "gets or sets the angle of a vector")

    def get_angle_between(self, other):
        cross = self.x*other[1] - self.y*other[0]
        dot = self.x*other[0] + self.y*other[1]
        return math.degrees(math.atan2(cross, dot))

    def normalized(self):
        length = self.length
        if length != 0:
            return self/length
        return vec2d(self)

    def normalize_return_length(self):
        length = self.length
        if length != 0:
            self.x /= length
            self.y /= length
        return length

    def perpendicular(self):
        return vec2d(-self.y, self.x)

    def perpendicular_normal(self):
        length = self.length
        if length != 0:
            return vec2d(-self.y/length, self.x/length)
        return vec2d(self)

    def dot(self, other):
        return float(self.x*other[0] + self.y*other[1])

    def get_distance(self, other):
        return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)

    def get_dist_sqrd(self, other):
        return (self.x - other[0])**2 + (self.y - other[1])**2

    def projection(self, other):
        other_length_sqrd = other[0]*other[0] + other[1]*other[1]
        projected_length_times_other_length = self.dot(other)
        return other*(projected_length_times_other_length/other_length_sqrd)

    def cross(self, other):
        return self.x*other[1] - self.y*other[0]

    def interpolate_to(self, other, range):
        return vec2d(self.x + (other[0] - self.x)*range,
                     self.y + (other[1] - self.y)*range)

    def convert_to_basis(self, x_vector, y_vector):
        return vec2d(self.dot(x_vector)/x_vector.get_length_sqrd(),
                     self.dot(y_vector)/y_vector.get_length_sqrd())

    def __getstate__(self):
        return [self.x, self.y]

    def __setstate__(self, dict):
        self.x, self.y = dict

    def tup(self):
        return (self.x, self.y)

    def inttup(self):
        return (int(self.x), int(self.y))
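# The snippet below is a minimal usage sketch for the vec2d class above.
# It is illustrative only: the function name and the sample values are
# assumptions, not part of the original module.
def _demo_vec2d():
    a = vec2d(3, 4)
    b = vec2d((1, 2))               # construction from a pair also works
    c = a + b                       # componentwise addition -> vec2d(4, 6)
    assert a.length == 5.0          # magnitude via the length property
    d = a.rotated(90)               # rotated copy; rotate() mutates in place
    assert a.dot(a.perpendicular()) == 0.0   # perpendicular => zero dot product
    return c, d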
Python
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" __license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. 
Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') return data # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. # Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1 except: chardet = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['subtitle', 'summary'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'categories': return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 
'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) if UserDict.has_key(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.has_key(self, key) except AttributeError: return False def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = {'': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 
def zopeCompatibilityHack():
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc

_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)

_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    uri = _urifixer.sub(r'\1\3', uri)
    return urlparse.urljoin(base, uri)

class _FeedParserMixin:
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',

                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/XML/1998/namespace': 'xml',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
                  }
    _matchnamespaces = {}

    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    html_types = ['text/html', 'application/xhtml+xml']

    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)

        # match namespaces
        if tag.find(':') != -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
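    # Dispatch sketch (illustrative): a start tag such as <dc:creator> has its
    # prefix normalized through namespacemap, so the parser looks up a method
    # named '_start_dc_creator' (defined below as an alias of _start_author).
    # A hypothetical tag with no special handler, say <foo:bar>, simply falls
    # through to push('foo_bar', 1) and is collected as generic element data.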
    def unknown_endtag(self, tag):
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') != -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass

    def handle_decl(self, text):
        pass

    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            return k+1

    def mapContentType(self, contentType):
        contentType = contentType.lower()
        if contentType == 'text':
            contentType = 'text/plain'
        elif contentType == 'html':
            contentType = 'text/html'
        elif contentType == 'xhtml':
            contentType = 'application/xhtml+xml'
        return contentType

    def trackNamespace(self, prefix, uri):
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') != -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri or '', uri)

    def decodeEntities(self, element, data):
        return data

    def push(self, element, expectingText):
        self.elementstack.append([element, expectingText, []])

    def pop(self, element, stripWhitespace=1):
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return

        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass

        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)

        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)

        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass

        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
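    # Element stack lifecycle (an illustrative sketch of push/pop above):
    #
    #   self.push('title', 1)        # elementstack: [['title', 1, []]]
    #   self.handle_data('Example')  # elementstack: [['title', 1, ['Example']]]
    #   self.pop('title')            # -> 'Example', after whichever base64,
    #                                #    relative-URI, and sanitization steps
    #                                #    apply to a 'title' element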
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)

    def popContent(self, tag):
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value

    def _mapToStandardPrefix(self, name):
        colonpos = name.find(':')
        if colonpos != -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name

    def _getAttribute(self, attrsD, name):
        return attrsD.get(self._mapToStandardPrefix(name))

    def _isBase64(self, attrsD, contentparams):
        if attrsD.get('mode', '') == 'base64':
            return 1
        if self.contentparams['type'].startswith('text/'):
            return 0
        if self.contentparams['type'].endswith('+xml'):
            return 0
        if self.contentparams['type'].endswith('/xml'):
            return 0
        return 1

    def _itsAnHrefDamnIt(self, attrsD):
        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
        if href:
            try:
                del attrsD['url']
            except KeyError:
                pass
            try:
                del attrsD['uri']
            except KeyError:
                pass
            attrsD['href'] = href
        return attrsD

    def _save(self, key, value):
        context = self._getContext()
        context.setdefault(key, value)

    def _start_rss(self, attrsD):
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'

    def _start_dlhottitles(self, attrsD):
        self.version = 'hotrss'

    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel

    def _cdf_common(self, attrsD):
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()

    def _start_feed(self, attrsD):
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'

    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel

    def _start_image(self, attrsD):
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())

    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput

    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput

    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author

    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author

    def _start_itunes_owner(self, attrsD):
        self.inpublisher = 1
        self.push('publisher', 0)

    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')

    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)

    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0

    def _start_dc_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)

    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0

    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name

    def _end_name(self):
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name

    def _start_width(self, attrsD):
        self.push('width', 0)

    def _end_width(self):
        value = self.pop('width')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['width'] = value

    def _start_height(self, attrsD):
        self.push('height', 0)

    def _end_height(self):
        value = self.pop('height')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['height'] = value

    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url

    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url

    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email

    def _end_email(self):
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email

    def _getContext(self):
        if self.insource:
            context = self.sourcedata
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context

    def _save_author(self, key, value, prefix='author'):
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()

    def _save_contributor(self, key, value):
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
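    # Illustrative round trip (a sketch): given context['author'] set to
    # 'John Doe (john@example.org)', _sync_author_detail() splits it into
    # author_detail = {'name': 'John Doe', 'email': 'john@example.org'};
    # conversely, given only author_detail with both fields populated, it
    # rebuilds context['author'] as 'John Doe (john@example.org)'.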
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle

    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle

    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights

    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights

    def _start_item(self, attrsD):
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item

    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item

    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language

    def _end_dc_language(self):
        self.lang = self.pop('language')
    _end_language = _end_dc_language

    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher

    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher

    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published

    def _end_published(self):
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published

    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated

    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated

    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created

    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created

    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)

    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))

    def _start_cc_license(self, attrsD):
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')

    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)

    def _end_creativecommons_license(self):
        self.pop('license')

    def _addTag(self, term, scheme, label):
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))

    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category

    def _end_itunes_keywords(self):
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)

    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)

    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
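    # Illustrative mapping (a sketch): an Atom element such as
    # <category term="cats" scheme="http://example.org/" label="Cats"/>
    # becomes {'term': 'cats', 'scheme': 'http://example.org/', 'label': 'Cats'}
    # in context['tags']; an RSS <category domain="...">text</category>
    # reaches the same list through _start_category/_end_category, with the
    # element text filling in the missing 'term'.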
    def _start_cloud(self, attrsD):
        self._getContext()['cloud'] = FeedParserDict(attrsD)

    def _start_link(self, attrsD):
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link

    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link

    def _start_guid(self, attrsD):
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)

    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)

    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title

    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title

    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)

    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)

    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description

    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info

    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info

    def _start_generator(self, attrsD):
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)

    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value

    def _start_admin_generatoragent(self, attrsD):
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})

    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')

    def _start_summary(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary

    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary

    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                context['id'] = href

    def _start_source(self, attrsD):
        self.insource = 1

    def _end_source(self):
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()

    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)

    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)

    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body

    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded

    def _end_content(self):
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content

    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image

    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0

    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None

        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)

        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))

            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())

        def characters(self, text):
            self.handle_data(text)

        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)

        def error(self, exc):
            self.bozo = 1
            self.exc = exc

        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
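# Round-trip sketch (illustrative, not executed at import time):
# _BaseHTMLProcessor rebuilds markup from parse events, so subclasses can
# rewrite selected pieces while leaving the rest intact:
#
#   p = _BaseHTMLProcessor('utf-8')
#   p.feed('<img src="a.png">text')
#   p.output()   # -> '<img src="a.png" />text'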
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

class _RelativeURIResolver(_BaseHTMLProcessor):
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    p = _RelativeURIResolver(baseURI, encoding)
    p.feed(htmlSource)
    return p.output()
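# Usage sketch (illustrative): resolving relative links against a base URI:
#
#   _resolveRelativeURIs('<a href="/about">x</a>', 'http://example.org/feed', 'utf-8')
#   # -> '<a href="http://example.org/about">x</a>'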
class _HTMLSanitizer(_BaseHTMLProcessor):
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        pass

    def handle_decl(self, text):
        pass

    def handle_data(self, text):
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

def _sanitizeHTML(htmlSource, encoding):
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
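# Behaviour sketch (illustrative, with TIDY_MARKUP left at its default of 0):
# disallowed attributes are dropped, and blacklisted elements disappear
# together with their contents:
#
#   _sanitizeHTML('<p onclick="evil()">hi<script>alert(1)</script></p>', 'utf-8')
#   # -> '<p>hi</p>'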
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            return self.http_error_default(req, fp, code, msg, headers)

def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
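# Usage sketch (illustrative): every input style yields a readable stream:
#
#   f = _open_resource('http://example.org/feed.xml', None, None, USER_AGENT, None, [])
#   f = _open_resource('/path/to/local/feed.xml', None, None, USER_AGENT, None, [])
#   f = _open_resource('<rss version="2.0">...</rss>', None, None, USER_AGENT, None, [])
#   data = f.read()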
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)
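# Registration sketch (illustrative; '_parse_date_epoch' is a hypothetical
# name, not part of this module): a handler takes the raw date string and
# returns a 9-tuple in GMT, or None to let the next handler try.  Handlers
# registered later are consulted first, since they are inserted at the front:
#
#   def _parse_date_epoch(dateString):
#       '''Hypothetical handler for integer epoch timestamps'''
#       if not dateString.isdigit(): return
#       return time.gmtime(int(dateString))
#   registerDateHandler(_parse_date_epoch)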
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group('year')) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group('julian') if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group('month') day = 1 if month is None: month = 1 else: month = int(month) day = m.group('day') if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group('hours') if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group('minutes')) seconds = m.group('seconds') if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): '''Return the Time Zone Designator as an offset in seconds from UTC.''' if not m: return 0 tzd = m.group('tzd') if not tzd: return 0 if tzd == 'Z': return 0 hours = int(m.group('tzdhours')) minutes = m.group('tzdminutes') if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == '+': return -offset return offset __date_re = ('(?P<year>\d\d\d\d)' '(?:(?P<dsep>-|)' '(?:(?P<julian>\d\d\d)' '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?') __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)' __tzd_rx = re.compile(__tzd_re) __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' + __tzd_re) __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) def _parse_date_rfc822(dateString): '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' data = dateString.split() if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: del data[0] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') dateString = " ".join(data) if len(data) < 5: dateString += ' 00:00:00 GMT' tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) # rfc822.py defines several time zones, but we define some extra ones. # 'ET' is equivalent to 'EST', etc. 
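# (the offsets below use rfc822's hhmm convention, so -500 means five hours behind UTC)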
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} rfc822._timezones.update(_additional_timezones) registerDateHandler(_parse_date_rfc822) def _parse_date(dateString): '''Parses a variety of date formats into a 9-tuple in GMT''' for handler in _date_handlers: try: date9tuple = handler(dateString) if not date9tuple: continue if len(date9tuple) != 9: if _debug: sys.stderr.write('date handler function must return 9-tuple\n') raise ValueError map(int, date9tuple) return date9tuple except Exception, e: if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) pass return None def _getCharacterEncoding(http_headers, xml_data): '''Get the character encoding of the XML document http_headers is a dictionary xml_data is a raw string (not Unicode) This is so much trickier than it sounds, it's not even funny. According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type is application/xml, application/*+xml, application/xml-external-parsed-entity, or application/xml-dtd, the encoding given in the charset parameter of the HTTP Content-Type takes precedence over the encoding given in the XML prefix within the document, and defaults to 'utf-8' if neither are specified. But, if the HTTP Content-Type is text/xml, text/*+xml, or text/xml-external-parsed-entity, the encoding given in the XML prefix within the document is ALWAYS IGNORED and only the encoding given in the charset parameter of the HTTP Content-Type header should be respected, and it defaults to 'us-ascii' if not specified. Furthermore, discussion on the atom-syntax mailing list with the author of RFC 3023 leads me to the conclusion that any document served with a Content-Type of text/* and no charset parameter must be treated as us-ascii. (We now do this.) And also that it must always be flagged as non-well-formed. (We now do this too.) If Content-Type is unspecified (input was local file or non-HTTP source) or unrecognized (server just got it totally wrong), then go by the encoding given in the XML prefix of the document and default to 'iso-8859-1' as per the HTTP specification (RFC 2616). Then, assuming we didn't find a character encoding in the HTTP headers (and the HTTP Content-type allowed us to look in the body), we need to sniff the first few bytes of the XML data and try to determine whether the encoding is ASCII-compatible. Section F of the XML specification shows the way here: http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info If the sniffed encoding is not ASCII-compatible, we need to make it ASCII compatible so that we can sniff further into the XML declaration to find the encoding attribute, which will tell us the true encoding. Of course, none of this guarantees that we will be able to parse the feed in the declared character encoding (assuming it was declared correctly, which many are not). CJKCodecs and iconv_codec help a lot; you should definitely install them if you can. 
http://cjkpython.i18n.org/ ''' def _parseHTTPContentType(content_type): '''takes HTTP Content-Type header and returns (content type, charset) If no charset is specified, returns (content type, '') If no content type is specified, returns ('', '') Both return parameters are guaranteed to be lowercase strings ''' content_type = content_type or '' content_type, params = cgi.parse_header(content_type) return content_type, params.get('charset', '').replace("'", '') sniffed_xml_encoding = '' xml_encoding = '' true_encoding = '' http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) # Must sniff for non-ASCII-compatible character encodings before # searching for XML declaration. This heuristic is defined in # section F of the XML specification: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = _ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: # ASCII-compatible pass xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding acceptable_content_type = 0 application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') text_content_types = ('text/xml', 'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): acceptable_content_type = 1 true_encoding = http_encoding or xml_encoding or 'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): acceptable_content_type = 1 true_encoding = http_encoding or 'us-ascii' elif http_content_type.startswith('text/'): true_encoding = http_encoding or 'us-ascii' 
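# served over HTTP but with no Content-Type header at all: per the docstring above, trust the XML declaration and fall back to iso-8859-1 (RFC 2616); anything else (local files, unrecognized types) falls through to the XML declaration with a utf-8 default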
elif http_headers and (not http_headers.has_key('content-type')): true_encoding = xml_encoding or 'iso-8859-1' else: true_encoding = xml_encoding or 'utf-8' return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type def _toUTF8(data, encoding): '''Changes an XML data stream on the fly to specify a new encoding data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already encoding is a string recognized by encodings.aliases ''' if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16be': sys.stderr.write('trying utf-16be instead\n') encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16le': sys.stderr.write('trying utf-16le instead\n') encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-8': sys.stderr.write('trying utf-8 instead\n') encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32be': sys.stderr.write('trying utf-32be instead\n') encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32le': sys.stderr.write('trying utf-32le instead\n') encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) declmatch = re.compile('^<\?xml[^>]*?>') newdecl = '''<?xml version='1.0' encoding='utf-8'?>''' if declmatch.search(newdata): newdata = declmatch.sub(newdecl, newdata) else: newdata = newdecl + u'\n' + newdata return newdata.encode('utf-8') def _stripDoctype(data): '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) rss_version may be 'rss091n' or None stripped_data is the same XML document, minus the DOCTYPE ''' entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE) data = entity_pattern.sub('', data) doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE) doctype_results = doctype_pattern.findall(data) doctype = doctype_results and doctype_results[0] or '' if doctype.lower().count('netscape'): version = 'rss091n' else: version = None data = doctype_pattern.sub('', data) return version, data def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): '''Parse a feed from a URL, file, stream, or string''' result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if type(handlers) == types.InstanceType: handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' f = None # if feed is gzip-compressed, decompress it if f and data and hasattr(f, 'headers'): if gzip and f.headers.get('content-encoding', '') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. 
Ideally, we should re-request the # feed without the 'Accept-encoding: gzip' header, # but we don't. result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and f.headers.get('content-encoding', '') == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if hasattr(f, 'info'): info = f.info() result['etag'] = info.getheader('ETag') last_modified = info.getheader('Last-Modified') if last_modified: result['modified'] = _parse_date(last_modified) if hasattr(f, 'url'): result['href'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status if hasattr(f, 'headers'): result['headers'] = f.headers.dict if hasattr(f, 'close'): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get('headers', {}) result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ _getCharacterEncoding(http_headers, data) if http_headers and (not acceptable_content_type): if http_headers.has_key('content-type'): bozo_message = '%s is not an XML media type' % http_headers['content-type'] else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) result['version'], data = _stripDoctype(data) baseuri = http_headers.get('content-location', result.get('href')) baselang = http_headers.get('content-language', None) # if server sent 304, we're done if result.get('status', 0) == 304: result['version'] = '' result['debug_message'] = 'The feed has not changed since you last checked, ' + \ 'so the server sent no data. This is a feature, not a bug!' 
return result # if there was a problem downloading, we're done if not data: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): if not proposed_encoding: continue if proposed_encoding in tried_encodings: continue tried_encodings.append(proposed_encoding) try: data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 break except: pass # if no luck and we have auto-detection library, try that if (not known_encoding) and chardet: try: proposed_encoding = chardet.detect(data)['encoding'] if proposed_encoding and (proposed_encoding not in tried_encodings): tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried utf-8 yet, try that if (not known_encoding) and ('utf-8' not in tried_encodings): try: proposed_encoding = 'utf-8' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried windows-1252 yet, try that if (not known_encoding) and ('windows-1252' not in tried_encodings): try: proposed_encoding = 'windows-1252' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck, give up if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingUnknown( \ 'document encoding unknown, I tried ' + \ '%s, %s, utf-8, and windows-1252 but nothing worked' % \ (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingOverride( \ 'document declared as %s, but parsed as %s' % \ (result['encoding'], proposed_encoding)) result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '') feedparser.feed(data) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version result['namespaces'] = feedparser.namespacesInUse return result if __name__ == '__main__': if not sys.argv[1:]: print __doc__ sys.exit(0) else: urls = sys.argv[1:] zopeCompatibilityHack() from pprint import pprint for url in urls: print url print result = parse(url) pprint(result) print
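# ---------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module. parse() is the
# public entry point; feeding the etag/modified values from one result back
# into the next call enables HTTP conditional GET, and result['bozo'] flags
# feeds that were not well-formed. The URL below is a placeholder.
def _example_poll(url='http://example.org/feed.xml', etag=None, modified=None):
    result = parse(url, etag=etag, modified=modified)
    if result.get('status') == 304:
        # server says nothing changed since the last fetch
        return etag, modified
    if result.get('bozo'):
        sys.stderr.write('feed not well-formed: %r\n' % result.get('bozo_exception'))
    for entry in result.entries:
        print entry.get('title', '(untitled)')
    return result.get('etag'), result.get('modified')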
#REVISION HISTORY #1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements, # added Simon Fell's test suite #1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections #2.0 - 10/19/2002 # JD - use inchannel to watch out for image and textinput elements which can # also contain title, link, and description elements # JD - check for isPermaLink='false' attribute on guid elements # JD - replaced openAnything with open_resource supporting ETag and # If-Modified-Since request headers # JD - parse now accepts etag, modified, agent, and referrer optional # arguments # JD - modified parse to return a dictionary instead of a tuple so that any # etag or modified information can be returned and cached by the caller #2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything # because of etag/modified, return the old etag/modified to the caller to # indicate why nothing is being returned #2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its # useless. Fixes the problem JD was addressing by adding it. #2.1 - 11/14/2002 - MAP - added gzip support #2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent. # start_admingeneratoragent is an example of how to handle elements with # only attributes, no content. #2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify); # also, make sure we send the User-Agent even if urllib2 isn't available. # Match any variation of backend.userland.com/rss namespace. #2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is. #2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's # snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed # project name #2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); # removed unnecessary urllib code -- urllib2 should always be available anyway; # return actual url, status, and full HTTP headers (as result['url'], # result['status'], and result['headers']) if parsing a remote feed over HTTP -- # this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>; # added the latest namespace-of-the-week for RSS 2.0 #2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom # User-Agent (otherwise urllib2 sends two, which confuses some servers) #2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for # inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds #2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or # textInput, and also to return the character encoding (if specified) #2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking # nested divs within content (JohnD); fixed missing sys import (JohanS); # fixed regular expression to capture XML character encoding (Andrei); # added support for Atom 0.3-style links; fixed bug with textInput tracking; # added support for cloud (MartijnP); added support for multiple # category/dc:subject (MartijnP); normalize content model: 'description' gets # description (which can come from description, summary, or full content if no # description), 'content' gets dict of base/language/type/value (which can come # from content:encoded, xhtml:body, content, or fullitem); # fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang # tracking; fixed bug tracking unknown tags; fixed bug tracking content when # <content> element is not in default namespace (like Pocketsoap feed); # 
resolve relative URLs in link, guid, docs, url, comments, wfw:comment, # wfw:commentRSS; resolve relative URLs within embedded HTML markup in # description, xhtml:body, content, content:encoded, title, subtitle, # summary, info, tagline, and copyright; added support for pingback and # trackback namespaces #2.7 - 1/5/2004 - MAP - really added support for trackback and pingback # namespaces, as opposed to 2.6 when I said I did but didn't really; # sanitize HTML markup within some elements; added mxTidy support (if # installed) to tidy HTML markup within some elements; fixed indentation # bug in _parse_date (FazalM); use socket.setdefaulttimeout if available # (FazalM); universal date parsing and normalization (FazalM): 'created', modified', # 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', # 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' # and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa #2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory # leak not closing url opener (JohnD); added dc:publisher support (MarekK); # added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) #2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in # encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); # fixed relative URI processing for guid (skadz); added ICBM support; added # base64 support #2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many # blogspot.com sites); added _debug variable #2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing #3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); # added several new supported namespaces; fixed bug tracking naked markup in # description; added support for enclosure; added support for source; re-added # support for cloud which got dropped somehow; added support for expirationDate #3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking # xml:base URI, one for documents that don't define one explicitly and one for # documents that define an outer and an inner xml:base that goes out of scope # before the end of the document #3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level #3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version'] # will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; # added support for creativeCommons:license and cc:license; added support for # full Atom content model in title, tagline, info, copyright, summary; fixed bug # with gzip encoding (not always telling server we support it when we do) #3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail # (dictionary of 'name', 'url', 'email'); map author to author_detail if author # contains name + email address #3.0b8 - 1/28/2004 - MAP - added support for contributor #3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added # support for summary #3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from # xml.util.iso8601 #3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain # dangerous markup; fiddled with decodeEntities (not right); liberalized # date parsing even further #3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); # added support to Atom 0.2 subtitle; added support for Atom content model # in copyright; better sanitizing of dangerous HTML elements with end 
tags # (script, frameset) #3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, # etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />) #3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under # Python 2.1 #3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; # fixed bug capturing author and contributor URL; fixed bug resolving relative # links in author and contributor URL; fixed bug resolvin relative links in # generator URL; added support for recognizing RSS 1.0; passed Simon Fell's # namespace tests, and included them permanently in the test suite with his # permission; fixed namespace handling under Python 2.1 #3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) #3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 #3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); # use libxml2 (if available) #3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author # name was in parentheses; removed ultra-problematic mxTidy support; patch to # workaround crash in PyXML/expat when encountering invalid entities # (MarkMoraes); support for textinput/textInput #3.0b20 - 4/7/2004 - MAP - added CDF support #3.0b21 - 4/14/2004 - MAP - added Hot RSS support #3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in # results dict; changed results dict to allow getting values with results.key # as well as results[key]; work around embedded illformed HTML with half # a DOCTYPE; work around malformed Content-Type header; if character encoding # is wrong, try several common ones before falling back to regexes (if this # works, bozo_exception is set to CharacterEncodingOverride); fixed character # encoding issues in BaseHTMLProcessor by tracking encoding and converting # from Unicode to raw strings before feeding data to sgmllib.SGMLParser; # convert each value in results to Unicode (if possible), even if using # regex-based parsing #3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain # high-bit characters in attributes in embedded HTML in description (thanks # Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in # FeedParserDict; tweaked FeedParserDict.has_key to return True if asking # about a mapped key #3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and # results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could # cause the same encoding to be tried twice (even if it failed the first time); # fixed DOCTYPE stripping when DOCTYPE contained entity declarations; # better textinput and image tracking in illformed RSS 1.0 feeds #3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed # my blink tag tests #3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that # failed to parse utf-16 encoded feeds; made source into a FeedParserDict; # duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; # added support for image; refactored parse() fallback logic to try other # encodings if SAX parsing fails (previously it would only try other encodings # if re-encoding failed); remove unichr madness in normalize_attrs now that # we're properly tracking encoding in and out of BaseHTMLProcessor; set # feed.language from root-level xml:lang; set entry.id from rdf:about; # send Accept header #3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between # iso-8859-1 and windows-1252 anyway, and 
most incorrectly marked feeds are # windows-1252); fixed regression that could cause the same encoding to be # tried twice (even if it failed the first time) #3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; # recover from malformed content-type header parameter with no equals sign # ('text/xml; charset:iso-8859-1') #3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities # to Unicode equivalents in illformed feeds (aaronsw); added and # passed tests for converting character entities to Unicode equivalents # in illformed feeds (aaronsw); test for valid parsers when setting # XML_AVAILABLE; make version and encoding available when server returns # a 304; add handlers parameter to pass arbitrary urllib2 handlers (like # digest auth or proxy support); add code to parse username/password # out of url and send as basic authentication; expose downloading-related # exceptions in bozo_exception (aaronsw); added __contains__ method to # FeedParserDict (aaronsw); added publisher_detail (aaronsw) #3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always # convert feed to UTF-8 before passing to XML parser; completely revamped # logic for determining character encoding and attempting XML parsing # (much faster); increased default timeout to 20 seconds; test for presence # of Location header on redirects; added tests for many alternate character # encodings; support various EBCDIC encodings; support UTF-16BE and # UTF16-LE with or without a BOM; support UTF-8 with a BOM; support # UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no # XML parsers are available; added support for 'Content-encoding: deflate'; # send blank 'Accept-encoding: ' header if neither gzip nor zlib modules # are available #3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure # problem tracking xml:base and xml:lang if element declares it, child # doesn't, first grandchild redeclares it, and second grandchild doesn't; # refactored date parsing; defined public registerDateHandler so callers # can add support for additional date formats at runtime; added support # for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added # zopeCompatibilityHack() which turns FeedParserDict into a regular # dictionary, required for Zope compatibility, and also makes command- # line debugging easier because pprint module formats real dictionaries # better than dictionary-like objects; added NonXMLContentType exception, # which is stored in bozo_exception when a feed is served with a non-XML # media type such as 'text/plain'; respect Content-Language as default # language if not xml:lang is present; cloud dict is now FeedParserDict; # generator dict is now FeedParserDict; better tracking of xml:lang, # including support for xml:lang='' to unset the current language; # recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default # namespace; don't overwrite final status on redirects (scenarios: # redirecting to a URL that returns 304, redirecting to a URL that # redirects to another URL with a different type of redirect); add # support for HTTP 303 redirects #4.0 - MAP - support for relative URIs in xml:base attribute; fixed # encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; # support for Atom 1.0; support for iTunes extensions; new 'tags' for # categories/keywords/etc. 
as array of dict # {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 # terminology; parse RFC 822-style dates with no time; lots of other # bug fixes #4.1 - MAP - removed socket timeout; added support for chardet library
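# ---------------------------------------------------------------------------
# Editorial sketch of the registerDateHandler() extension point mentioned in
# the 3.3 entry above. A handler receives the raw date string and returns a
# 9-tuple in GMT, or None to let the remaining handlers try. The compact
# 'YYYYMMDDHHMMSS' format below is a hypothetical example, not a format the
# library ships with.
_compact_date_re = re.compile(r'(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})$')
def _parse_date_compact(dateString):
    '''Parse a compact YYYYMMDDHHMMSS timestamp, assumed to be UTC'''
    m = _compact_date_re.match(dateString)
    if not m:
        return
    return _parse_date_w3dtf('%s-%s-%sT%s:%s:%sZ' % m.groups())
registerDateHandler(_parse_date_compact)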
# -*- coding:utf-8 -*- """ Created on 21.12.2010 @author: Leon """ import feedparser as fp import re from BeautifulSoup import * import urllib import nltk import threading from threading import Thread def transformUmlaute(h): p = '' for c in h: c = str(c) if c=='\xc3': pass elif c=='\xbc' or c=='ü': p+='ue' elif c=='\xa4' or c=='ä': p+='ae' elif c=='\xb6' or c=='ö': p+='oe' elif c=='\x9c' or c=='Ü': p+='Ue' elif c=='\x84' or c=='Ä': p+='Ae' elif c=='\x96' or c=='Ö': p+='Oe' elif c=='\x9f': p+='ss' else: p+=c return p # german stopwords, Umlaute transformed. # stopwords = list(transformUmlaute(w) for w in nltk.corpus.stopwords.words('german')) stopwords = nltk.corpus.stopwords.words('english') feedlist =[#'http://rss1.smashingmagazine.com/feed/', #'http://rss1.smashingmagazine.com/feed/?f=smashing-network' 'http://feeds.reuters.com/reuters/topNews', 'http://feeds.reuters.com/reuters/businessNews', 'http://feeds.reuters.com/reuters/worldNews', 'http://feeds2.feedburner.com/time/world', 'http://feeds2.feedburner.com/time/business', 'http://feeds2.feedburner.com/time/politics', 'http://rss.cnn.com/rss/edition.rss', 'http://rss.cnn.com/rss/edition_world.rss', 'http://www.nytimes.com/services/xml/rss/nyt/World.xml', 'http://www.nytimes.com/services/xml/rss/nyt/Economy.xml', #'http://api.leakfeed.com/v1/cables/latest.rss', #'http://www.whitehouse.gov/feed/press' ] def stripTags(html, invalid_tags): soup = BeautifulSoup(html) for tag in soup.findAll(True): if tag.name in invalid_tags: s = "" for c in tag.contents: if type(c) != NavigableString: c = stripTags(unicode(c), invalid_tags) s += unicode(c) tag.replaceWith(s) return soup #replaces <tags> with space ' ' def stripHTML(h): p='' s=0 for c in h: if c=='<': s=1 elif c=='>': s=0 p+=' ' elif s==0: p+=c return p # only allow 1 thread to use the posTagger at a time posLock = threading._allocate_lock() # Only returns text parts with at least minWords words in a row without # too much whitespace between the words def getPosWordsFromLongParas(text, minWords = 5, maxGap = 5): nouns = [] desc = [] # 1. replace all whitespace with space character text = re.sub("\s", " ", text) # 2. split the text where at least maxGap whitespace occurs. text = text.split((maxGap)*' ') for para in text: # 3. If para is not empty string: # Split the paragraphs into words # Additionally, pre-test for enough words if para != '' and len(para.split(' ')) >= minWords: #print " para: %s" %para tok = nltk.word_tokenize(para) #print " tokenized: %s" %tok # attention: at the moment only 1 thread for pos tagging allowed at once. with posLock: posTagged = nltk.pos_tag(tok) #print " posTagged: %s" %posTagged # no empty strings for posWord in posTagged: # only add words. if posWord[1] in ("NN", "NNP", "NNS", "NNPS", "FW") : nouns.append(posWord) elif posWord[1] in ("RB", "RBR", "RBS", "JJ", "JJR", "JJS"): desc.append(posWord) # 4. return the nouns and descriptive words collected from all long-enough paragraphs return (nouns, desc) def cleanHTML(html): "Remove <script> and <style> tags." soup = BeautifulSoup(html) for script in soup.findAll("script"): script.extract() for style in soup.findAll("style"): style.extract() comments = soup.findAll(text=lambda text:isinstance(text, Comment)) [comment.extract() for comment in comments] return soup import time def getWordsFromUrl(url): nouns = [] t0 = time.time() # Get a file-like object for the page at this URL.
res = [] try: f = urllib.urlopen(url) # Read from the object, storing the page's contents in 's'. s = f.read() f.close() t1 = time.time() # print " processing nouns from: %s" %url # strip formatting tags s = stripTags(s, ['b', 'i', 'u', 'h', 'p']) tStripTags = time.time() #print " tStripTags = %s" % (tStripTags-t1) s = cleanHTML(str(s)) s = str(s) tCleanHTML = time.time() #print " tCleanHTML = %s" % (tCleanHTML-tStripTags) # replace tags with white space s = stripHTML(s) tStripHTML = time.time() s = str(s) #print " tStripHTML = %s" % (tStripHTML-tCleanHTML) posTaggedWords = getPosWordsFromLongParas(s, minWords=50, maxGap=3) nouns = posTaggedWords[0] desc = posTaggedWords[1] print " Nouns: %s" % nouns print " Descriptions: %s" % desc tPosTag = time.time() print " tPosTag = %s" % (tPosTag - tStripHTML) for wordPos in nouns: word = wordPos[0] pos = wordPos[1] # only add Nouns. # strip all non-characters and make lower-case and transform Umlaute word = transformUmlaute(word) word = re.sub("\W", "", word).lower() # dont add empty nouns or stopwords if word != '' and word not in stopwords: res.append(word) t2 = time.time() print " url: %s | download: %s | processing: %s | total: %s" % (url, (t1-t0), (t2-t1), (t2-t0)) except Exception, e: print " something went wrong getting words from url: %s" % e return res def getWordDict(words): wDict = {} for w in words: try: wDict[w] += 1 except KeyError: wDict[w] = 1 return wDict # Print title + description #output = '' #for url in feedlist: # output += "URL: %s\n" % url # f = fp.parse(url) # # output += "Title of feed: %s\n" % f['feed']['title'] # # for item in f.entries: # title = stripHTML(item.title) # text = stripHTML(item.description) # link = item.link # # output += " %s: %s\n %s\n" % (title, link, text) # #print output ################################### import operator import matplotlib.pyplot as plt from graphThing import * entryList = [] entryLock = threading._allocate_lock() curETlock = threading._allocate_lock() curET = [] class EntryThread(Thread): def __init__(self, nodeTarget): super(EntryThread, self).__init__() self._stop = threading.Event() self.nodeTarget = nodeTarget def stop(self): self._stop.set() def getNextEntry(self): entry = None with entryLock: if len(entryList)>0: entry = entryList.pop() return entry def run(self): entry = self.getNextEntry() while not self._stop.is_set(): if entry != None: words = [] words = getWordsFromUrl(entry.link) wDict = getWordDict(words) wList = sorted(wDict.iteritems(), key=operator.itemgetter(1), reverse=True) # create tagcloud top10 = [] # get top 10 for i in range(0,min(10,len(wList))): top10.append(wList[i]) try: if len(top10) > 0: self.nodeTarget.updateNodes(top10) except: print " could not pass top10" entry = self.getNextEntry() class CrawlThread(Thread): def __init__(self, nodeTarget, feedLock): super(CrawlThread, self).__init__() self._stop = threading.Event() self.nodeTarget = nodeTarget self.feedLock = feedLock self.name = "" def stop(self): self._stop.set() def getNextFeed(self): nextFeed = None with self.feedLock: if len(feedlist)>0: nextFeed = feedlist.pop() return nextFeed def run(self): nextFeed = self.getNextFeed() while nextFeed != None: if self._stop.is_set(): break print "Starting to Crawl Feed: %s" % nextFeed feed = fp.parse(nextFeed) for entry in feed.entries: if self._stop.is_set(): break with entryLock: entryList.append(entry) # start EntryThreads to process the queued entries (capped at 4 in total below).
with curETlock: if len(curET) < 4: print "starting entry thread number %s" % len(curET) t = EntryThread(self.nodeTarget) curET.append(t) t.start() nextFeed = self.getNextFeed() class FeedAggregator(Thread): def __init__(self, nodeTarget): super(FeedAggregator, self).__init__() self._stop = threading.Event() self.nodeTarget = nodeTarget self.feedLock = threading._allocate_lock() self.currentThreads = [] def stop(self): for current in self.currentThreads: current.stop() self._stop.set() def run(self): # one CrawlThread per feed, capped at 3 n = len(feedlist) for i in range (0,min(n,3)): print "starting feed thread number %s" % (i+1) newCrawler = CrawlThread(self.nodeTarget, self.feedLock) self.currentThreads.append(newCrawler) newCrawler.start() for current in self.currentThreads: current.join() s = graphMain() fg = FeedAggregator(s) s.nodeGetter = fg s.run()
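# ---------------------------------------------------------------------------
# Editorial sketch, not called anywhere: the tag-cloud step in EntryThread
# boils down to getWordDict() plus a sort by count. The same logic in
# isolation, with toy data:
def _demo_word_counts():
    words = ['debt', 'crisis', 'euro', 'debt', 'euro', 'debt']
    wDict = getWordDict(words)
    wList = sorted(wDict.iteritems(), key=operator.itemgetter(1), reverse=True)
    print wList[:10]   # -> [('debt', 3), ('euro', 2), ('crisis', 1)]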
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" __license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. 
Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') return data # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. # Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1 except: chardet = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['subtitle', 'summary'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'categories': return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 
'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) if UserDict.has_key(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.has_key(self, key) except AttributeError: return False def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = {'': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 
'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://postneo.com/icbm/': 'icbm', 'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media', 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', 'http://purl.org/rss/1.0/modules/reference/': 'ref', 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', 'http://purl.org/rss/1.0/modules/search/': 'search', 'http://purl.org/rss/1.0/modules/slash/': 'slash', 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', 'http://hacks.benhammersley.com/rss/streaming/': 'str', 'http://purl.org/rss/1.0/modules/subscription/': 'sub', 'http://purl.org/rss/1.0/modules/syndication/': 'sy', 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', 'http://purl.org/rss/1.0/modules/threading/': 'thr', 'http://purl.org/rss/1.0/modules/textinput/': 'ti', 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', 'http://wellformedweb.org/commentAPI/': 'wfw', 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', 'http://www.w3.org/1999/xhtml': 'xhtml', 'http://www.w3.org/XML/1998/namespace': 'xml', 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf' } _matchnamespaces = {} can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo'] can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] html_types = ['text/html', 'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): if _debug: sys.stderr.write('initializing FeedParser\n') if not self._matchnamespaces: for k, v in self.namespaces.items(): self._matchnamespaces[k.lower()] = v self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = '' # feed type/version, see SUPPORTED_VERSIONS self.namespacesInUse = {} # dictionary of namespaces defined by the feed # the following are used internally to track state; # this is really out of control and should be refactored self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 self.inauthor = 0 self.incontributor = 0 self.inpublisher = 0 self.insource = 0 self.sourcedata = FeedParserDict() self.contentparams = FeedParserDict() self._summaryKey = None self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or '' self.lang = baselang or None if baselang: self.feeddata['language'] = baselang def 
unknown_starttag(self, tag, attrs): if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) # normalize attrs attrs = [(k.lower(), v) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri self.baseuri = _urljoin(self.baseuri, baseuri) lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang self.lang = lang self.basestack.append(self.baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': # Note: probably shouldn't simply recreate localname here, but # our namespace handling isn't actually 100% correct in cases where # the feed redefines the default namespace (which is actually # the usual case for inline content, thanks Sam), so here we # cheat and just reconstruct the element based on localname # because that compensates for the bugs in our namespace handling. # This will horribly munge inline content with non-empty qnames, # but nobody actually does that, so I'm not fixing it. 
tag = tag.split(':')[-1] return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: return self.push(prefix + suffix, 1) def unknown_endtag(self, tag): if _debug: sys.stderr.write('end %s\n' % tag) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': tag = tag.split(':')[-1] self.handle_data('</%s>' % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = '&#%s;' % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = unichr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' if not self.elementstack: return if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref else: # entity resolution graciously donated by Aaron Swartz def name2cp(k): import htmlentitydefs if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 return htmlentitydefs.name2codepoint[k] k = htmlentitydefs.entitydefs[k] if k.startswith('&#') and k.endswith(';'): return int(k[2:-1]) # not in latin-1 return ord(k) try: name2cp(ref) except KeyError: text = '&%s;' % ref else: text = unichr(name2cp(ref)).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('type') == 'application/xhtml+xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. 
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            try:
                name2cp(ref)
            except KeyError:
                text = '&%s;' % ref
            else:
                text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass

    def handle_decl(self, text):
        pass

    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            return k+1

    def mapContentType(self, contentType):
        contentType = contentType.lower()
        if contentType == 'text':
            contentType = 'text/plain'
        elif contentType == 'html':
            contentType = 'text/html'
        elif contentType == 'xhtml':
            contentType = 'application/xhtml+xml'
        return contentType

    def trackNamespace(self, prefix, uri):
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') != -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri or '', uri)

    def decodeEntities(self, element, data):
        return data

    def push(self, element, expectingText):
        self.elementstack.append([element, expectingText, []])

    def pop(self, element, stripWhitespace=1):
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return

        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass

        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)

        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)

        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass

        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
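    # Illustrative sketch (not part of the original source): push()/pop()
    # bracket every element.  Parsing <title>Example</title> inside an entry
    # does roughly:
    #     self.push('title', 1)        # opens [element, expectingText, pieces]
    #     self.handle_data('Example')  # appends to pieces
    #     self.pop('title')            # joins pieces, post-processes, stores,
    #                                  # so self.entries[-1]['title'] == 'Example'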
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)

    def popContent(self, tag):
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value

    def _mapToStandardPrefix(self, name):
        colonpos = name.find(':')
        if colonpos != -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name

    def _getAttribute(self, attrsD, name):
        return attrsD.get(self._mapToStandardPrefix(name))

    def _isBase64(self, attrsD, contentparams):
        if attrsD.get('mode', '') == 'base64':
            return 1
        if self.contentparams['type'].startswith('text/'):
            return 0
        if self.contentparams['type'].endswith('+xml'):
            return 0
        if self.contentparams['type'].endswith('/xml'):
            return 0
        return 1

    def _itsAnHrefDamnIt(self, attrsD):
        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
        if href:
            try:
                del attrsD['url']
            except KeyError:
                pass
            try:
                del attrsD['uri']
            except KeyError:
                pass
            attrsD['href'] = href
        return attrsD

    def _save(self, key, value):
        context = self._getContext()
        context.setdefault(key, value)

    def _start_rss(self, attrsD):
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'

    def _start_dlhottitles(self, attrsD):
        self.version = 'hotrss'

    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel

    def _cdf_common(self, attrsD):
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()

    def _start_feed(self, attrsD):
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'

    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel

    def _start_image(self, attrsD):
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())

    def _end_image(self):
        self.pop('image')
        self.inimage = 0

    def _start_textinput(self, attrsD):
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput

    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput

    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author

    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author

    def _start_itunes_owner(self, attrsD):
        self.inpublisher = 1
        self.push('publisher', 0)

    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')

    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)

    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0

    def _start_dc_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)

    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0

    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name

    def _end_name(self):
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name

    def _start_width(self, attrsD):
        self.push('width', 0)

    def _end_width(self):
        value = self.pop('width')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['width'] = value

    def _start_height(self, attrsD):
        self.push('height', 0)

    def _end_height(self):
        value = self.pop('height')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['height'] = value

    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url

    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url

    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email

    def _end_email(self):
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email

    def _getContext(self):
        if self.insource:
            context = self.sourcedata
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context

    def _save_author(self, key, value, prefix='author'):
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()

    def _save_contributor(self, key, value):
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
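    # Illustrative sketch (not part of the original source): _getContext()
    # decides where parsed values land.  Inside <source> it returns
    # self.sourcedata, inside an <item>/<entry> it returns the current entry
    # dict (self.entries[-1]), and otherwise the feed-level dict
    # (self.feeddata), so handlers like _save_author work unchanged at any
    # nesting level.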
    def _sync_author_detail(self, key='author'):
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
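    # Illustrative sketch (not part of the original source): given
    # <author>John Doe (jdoe@example.com)</author>, the email regular
    # expression above splits the value apart, so the context ends up with
    # roughly:
    #     context['author_detail'] = {'name': 'John Doe',
    #                                 'email': 'jdoe@example.com'}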
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle

    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle

    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights

    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights

    def _start_item(self, attrsD):
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item

    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item

    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language

    def _end_dc_language(self):
        self.lang = self.pop('language')
    _end_language = _end_dc_language

    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher

    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher

    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published

    def _end_published(self):
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published

    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated

    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated

    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created

    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created

    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)

    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))

    def _start_cc_license(self, attrsD):
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')

    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)

    def _end_creativecommons_license(self):
        self.pop('license')

    def _addTag(self, term, scheme, label):
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))

    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category

    def _end_itunes_keywords(self):
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)

    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)

    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
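    # Illustrative sketch (not part of the original source): parsing
    # <category scheme="http://example.com/cats" term="python"/> leaves
    # roughly this in the current context:
    #     context['tags'] = [{'term': 'python',
    #                         'scheme': 'http://example.com/cats',
    #                         'label': None}]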
    def _start_cloud(self, attrsD):
        self._getContext()['cloud'] = FeedParserDict(attrsD)

    def _start_link(self, attrsD):
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link

    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link

    def _start_guid(self, attrsD):
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)

    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)

    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title

    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title

    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)

    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)

    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description

    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info

    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info

    def _start_generator(self, attrsD):
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)

    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value

    def _start_admin_generatoragent(self, attrsD):
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})

    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')

    def _start_summary(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary

    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary

    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                context['id'] = href

    def _start_source(self, attrsD):
        self.insource = 1

    def _end_source(self):
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()

    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)

    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)

    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body

    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded

    def _end_content(self):
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
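    # Illustrative sketch (not part of the original source): an Atom entry
    # with <content type="xhtml">...</content> ends up with a list of
    # content dicts, roughly:
    #     entry['content'] = [{'type': 'application/xhtml+xml',
    #                          'language': None,
    #                          'base': 'http://example.com/',
    #                          'value': '<div>...</div>'}]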
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image

    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0

    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0

if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None

        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)

        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))

            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())

        def characters(self, text):
            self.handle_data(text)

        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)

        def error(self, exc):
            self.bozo = 1
            self.exc = exc

        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n: return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
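# Illustrative sketch (not part of the original source): _BaseHTMLProcessor
# round-trips markup through sgmllib, normalizing empty elements, roughly:
#     p = _BaseHTMLProcessor('utf-8')
#     p.feed("<p class='x'>hi<br></p>")
#     p.output()  # -> '<p class="x">hi<br /></p>'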
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

class _RelativeURIResolver(_BaseHTMLProcessor):
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    p = _RelativeURIResolver(baseURI, encoding)
    p.feed(htmlSource)
    return p.output()
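# Illustrative sketch (not part of the original source):
#     _resolveRelativeURIs('<a href="/about">x</a>', 'http://example.com/feed', 'utf-8')
# rewrites the anchor against the base URI, yielding roughly
#     '<a href="http://example.com/about">x</a>'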
class _HTMLSanitizer(_BaseHTMLProcessor):
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        pass

    def handle_decl(self, text):
        pass

    def handle_data(self, text):
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

def _sanitizeHTML(htmlSource, encoding):
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
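# Illustrative sketch (not part of the original source): the sanitizer drops
# non-whitelisted elements and attributes, and suppresses text inside
# script/applet, e.g. (assuming TIDY_MARKUP is false):
#     _sanitizeHTML('<p onclick="evil()">hi<script>bad()</script></p>', 'utf-8')
#     # -> '<p>hi</p>'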
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a urllib2 opener.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = urllib2.build_opener(*([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
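# Illustrative sketch (not part of the original source): _open_resource
# accepts a URL, a local filename, a file-like object, or raw string data:
#     f = _open_resource('http://example.com/feed.xml', None, None,
#                        USER_AGENT, None, [])
#     data = f.read()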
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)

# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    # tuple() guards against the TypeError mktime raises for a plain list.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
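# Illustrative sketch (not part of the original source): this handler accepts
# compact and extended forms, e.g.
#     _parse_date_iso8601('20040105')          # -> 9-tuple for 2004-01-05
#     _parse_date_iso8601('2003-12-31T10:14:55Z')
# Both return a time.struct_time-style 9-tuple, or None on failure.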
# 8-bit date handling routines written by ytrewq1.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.  Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset

    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
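# Illustrative sketch (not part of the original source):
#     _parse_date_w3dtf('2003-12-31T10:14:55Z')            # W3C date-time
#     _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')  # RFC 822 date
# Each returns a 9-tuple in GMT suitable for time.struct_time, or None.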
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None

def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need to
    sniff the first few bytes of the XML data and try to determine whether
    the encoding is ASCII-compatible.  Section F of the XML specification
    shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''

    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
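# Illustrative sketch (not part of the original source): per the RFC 3023
# logic above, 'Content-Type: text/xml' with no charset yields 'us-ascii'
# regardless of the XML declaration:
#     _getCharacterEncoding({'content-type': 'text/xml'},
#                           '<?xml version="1.0" encoding="utf-8"?>...')
#     # -> ('us-ascii', '', 'utf-8', '', 1)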
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type

def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')

def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    '''
    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    data = entity_pattern.sub('', data)
    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    doctype_results = doctype_pattern.findall(data)
    doctype = doctype_results and doctype_results[0] or ''
    if doctype.lower().count('netscape'):
        version = 'rss091n'
    else:
        version = None
    data = doctype_pattern.sub('', data)
    return version, data

def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None

    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.
                # Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''

    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)

    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
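        # (Sketch of the conditional GET round trip this branch supports,
        # with a made-up URL:
        #     r1 = parse('http://example.com/feed.xml')
        #     r2 = parse('http://example.com/feed.xml',
        #                etag=r1.get('etag'), modified=r1.get('modified'))
        # If the feed is unchanged, the server answers 304 and r2 carries
        # this debug_message instead of fresh data.)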
        return result

    # if there was a problem downloading, we're done
    if not data:
        return result

    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result

if __name__ == '__main__':
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
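# A minimal usage sketch of the public entry point above (the URL is
# hypothetical; everything else is the parse() function defined in this
# file):
#
#     result = parse('http://example.com/feed.xml')
#     if not result.get('bozo'):
#         print result['feed'].get('title')
#         for entry in result['entries']:
#             print entry.get('title'), entry.get('link')
#
# result['encoding'] reports the character encoding that was actually
# used, and result.get('bozo_exception') explains why a malformed feed
# was flagged.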
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
#  added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
#  JD - use inchannel to watch out for image and textinput elements which can
#  also contain title, link, and description elements
#  JD - check for isPermaLink='false' attribute on guid elements
#  JD - replaced openAnything with open_resource supporting ETag and
#  If-Modified-Since request headers
#  JD - parse now accepts etag, modified, agent, and referrer optional
#  arguments
#  JD - modified parse to return a dictionary instead of a tuple so that any
#  etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
#  because of etag/modified, return the old etag/modified to the caller to
#  indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
#  useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
#  start_admingeneratoragent is an example of how to handle elements with
#  only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
#  also, make sure we send the User-Agent even if urllib2 isn't available.
#  Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
#  snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
#  project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
#  removed unnecessary urllib code -- urllib2 should always be available anyway;
#  return actual url, status, and full HTTP headers (as result['url'],
#  result['status'], and result['headers']) if parsing a remote feed over HTTP --
#  this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
#  added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
#  User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
#  inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
#  textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
#  nested divs within content (JohnD); fixed missing sys import (JohanS);
#  fixed regular expression to capture XML character encoding (Andrei);
#  added support for Atom 0.3-style links; fixed bug with textInput tracking;
#  added support for cloud (MartijnP); added support for multiple
#  category/dc:subject (MartijnP); normalize content model: 'description' gets
#  description (which can come from description, summary, or full content if no
#  description), 'content' gets dict of base/language/type/value (which can come
#  from content:encoded, xhtml:body, content, or fullitem);
#  fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
#  tracking; fixed bug tracking unknown tags; fixed bug tracking content when
#  <content> element is not in default namespace (like Pocketsoap feed);
#  resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
#  wfw:commentRSS; resolve relative URLs within embedded HTML markup in
#  description, xhtml:body, content, content:encoded, title, subtitle,
#  summary, info, tagline, and copyright; added support for pingback and
#  trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
#  namespaces, as opposed to 2.6 when I said I did but didn't really;
#  sanitize HTML markup within some elements; added mxTidy support (if
#  installed) to tidy HTML markup within some elements; fixed indentation
#  bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
#  (FazalM); universal date parsing and normalization (FazalM): 'created',
#  'modified', 'issued' are parsed into 9-tuple date format and stored in
#  'created_parsed', 'modified_parsed', and 'issued_parsed'; 'date' is
#  duplicated in 'modified' and vice-versa; 'date_parsed' is duplicated in
#  'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory
#  leak not closing url opener (JohnD); added dc:publisher support (MarekK);
#  added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
#  encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
#  fixed relative URI processing for guid (skadz); added ICBM support; added
#  base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
#  blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
#  added several new supported namespaces; fixed bug tracking naked markup in
#  description; added support for enclosure; added support for source; re-added
#  support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
#  xml:base URI, one for documents that don't define one explicitly and one for
#  documents that define an outer and an inner xml:base that goes out of scope
#  before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
#  will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
#  added support for creativeCommons:license and cc:license; added support for
#  full Atom content model in title, tagline, info, copyright, summary; fixed bug
#  with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
#  (dictionary of 'name', 'url', 'email'); map author to author_detail if author
#  contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
#  support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
#  xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
#  dangerous markup; fiddled with decodeEntities (not right); liberalized
#  date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
#  added support for Atom 0.2 subtitle; added support for Atom content model
#  in copyright; better sanitizing of dangerous HTML elements with end tags
#  (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
#  etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
#  Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
#  fixed bug capturing author and contributor URL; fixed bug resolving relative
#  links in author and contributor URL; fixed bug resolving relative links in
#  generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
#  namespace tests, and included them permanently in the test suite with his
#  permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
#  use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
#  name was in parentheses; removed ultra-problematic mxTidy support; patch to
#  workaround crash in PyXML/expat when encountering invalid entities
#  (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
#  results dict; changed results dict to allow getting values with results.key
#  as well as results[key]; work around embedded illformed HTML with half
#  a DOCTYPE; work around malformed Content-Type header; if character encoding
#  is wrong, try several common ones before falling back to regexes (if this
#  works, bozo_exception is set to CharacterEncodingOverride); fixed character
#  encoding issues in BaseHTMLProcessor by tracking encoding and converting
#  from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
#  convert each value in results to Unicode (if possible), even if using
#  regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
#  high-bit characters in attributes in embedded HTML in description (thanks
#  Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
#  FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
#  about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
#  results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
#  cause the same encoding to be tried twice (even if it failed the first time);
#  fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
#  better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
#  my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
#  failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
#  duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
#  added support for image; refactored parse() fallback logic to try other
#  encodings if SAX parsing fails (previously it would only try other encodings
#  if re-encoding failed); remove unichr madness in normalize_attrs now that
#  we're properly tracking encoding in and out of BaseHTMLProcessor; set
#  feed.language from root-level xml:lang; set entry.id from rdf:about;
#  send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
#  iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
#  windows-1252); fixed regression that could cause the same encoding to be
#  tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
#  recover from malformed content-type header parameter with no equals sign
#  ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
#  to Unicode equivalents in illformed feeds (aaronsw); added and
#  passed tests for converting character entities to Unicode equivalents
#  in illformed feeds (aaronsw); test for valid parsers when setting
#  XML_AVAILABLE; make version and encoding available when server returns
#  a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
#  digest auth or proxy support); add code to parse username/password
#  out of url and send as basic authentication; expose downloading-related
#  exceptions in bozo_exception (aaronsw); added __contains__ method to
#  FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
#  convert feed to UTF-8 before passing to XML parser; completely revamped
#  logic for determining character encoding and attempting XML parsing
#  (much faster); increased default timeout to 20 seconds; test for presence
#  of Location header on redirects; added tests for many alternate character
#  encodings; support various EBCDIC encodings; support UTF-16BE and
#  UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
#  UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
#  XML parsers are available; added support for 'Content-encoding: deflate';
#  send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
#  are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
#  problem tracking xml:base and xml:lang if element declares it, child
#  doesn't, first grandchild redeclares it, and second grandchild doesn't;
#  refactored date parsing; defined public registerDateHandler so callers
#  can add support for additional date formats at runtime; added support
#  for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
#  zopeCompatibilityHack() which turns FeedParserDict into a regular
#  dictionary, required for Zope compatibility, and also makes command-
#  line debugging easier because pprint module formats real dictionaries
#  better than dictionary-like objects; added NonXMLContentType exception,
#  which is stored in bozo_exception when a feed is served with a non-XML
#  media type such as 'text/plain'; respect Content-Language as default
#  language if no xml:lang is present; cloud dict is now FeedParserDict;
#  generator dict is now FeedParserDict; better tracking of xml:lang,
#  including support for xml:lang='' to unset the current language;
#  recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
#  namespace; don't overwrite final status on redirects (scenarios:
#  redirecting to a URL that returns 304, redirecting to a URL that
#  redirects to another URL with a different type of redirect); add
#  support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
#  encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
#  support for Atom 1.0; support for iTunes extensions; new 'tags' for
#  categories/keywords/etc. as array of dict
#  {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
#  terminology; parse RFC 822-style dates with no time; lots of other
#  bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2010, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.2.0" __copyright__ = "Copyright (c) 2004-2010 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" def _match_css_class(str): """Build a RE to match the given CSS class.""" return re.compile(r"(^|.*\s)%s($|\s)" % str) # First, the classes that represent markup elements. class PageElement(object): """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.index(self) if hasattr(replaceWith, "parent")\ and replaceWith.parent is self.parent: # We're replacing this element with one of its siblings. index = replaceWith.parent.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def replaceWithChildren(self): myParent = self.parent myIndex = self.parent.index(self) self.extract() reversedChildren = list(self.contents) reversedChildren.reverse() for child in reversedChildren: myParent.insert(myIndex, child) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: del self.parent.contents[self.parent.index(self)] except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if isinstance(newChild, basestring) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: # We're 'inserting' an element that's already one # of this object's children. 
if newChild.parent is self: index = self.index(newChild) if index > position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings 
of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name # (Possibly) special case some findAll*(...) searches elif text is None and not limit and not attrs and not kwargs: # findAll*(True) if name is True: return [element for element in generator() if isinstance(element, Tag)] # findAll*('tag-name') elif isinstance(name, basestring): return [element for element in generator() if isinstance(element, Tag) and element.name == name] else: strainer = SoupStrainer(name, attrs, text, **kwargs) # Build a SoupStrainer else: strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i is not None: i = i.next yield i def nextSiblingGenerator(self): i = self while i is not None: i = i.nextSibling yield i def previousGenerator(self): i = self while i is not None: i = i.previous yield i def previousSiblingGenerator(self): i = self while i is not None: i = i.previousSibling yield i def parentGenerator(self): i = self while i is not None: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs is None: attrs = [] elif isinstance(attrs, dict): attrs = attrs.items() self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. 
convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def getString(self): if (len(self.contents) == 1 and isinstance(self.contents[0], NavigableString)): return self.contents[0] def setString(self, string): """Replace the contents of the tag with a string""" self.clear() self.append(string) string = property(getString, setString) def getText(self, separator=u""): if not len(self.contents): return u"" stopNode = self._lastRecursiveChild().next strings = [] current = self.contents[0] while current is not stopNode: if isinstance(current, NavigableString): strings.append(current.strip()) current = current.next return separator.join(strings) text = property(getText) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def clear(self): """Extract all children.""" for child in self.contents[:]: child.extract() def index(self, element): for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. 
Should this be fixed?""" if other is self: return True if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isinstance(val, basestring): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. 
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() if len(self.contents) == 0: return current = self.contents[0] while current is not None: next = current.next if isinstance(current, Tag): del current.contents[:] current.parent = None current.previous = None current.previousSibling = None current.next = None current.nextSibling = None current = next def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): # Just use the iterator from the contents return iter(self.contents) def recursiveChildGenerator(self): if not len(self.contents): raise StopIteration stopNode = self._lastRecursiveChild().next current = self.contents[0] while current is not stopNode: yield current current = current.next # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isinstance(attrs, basestring): kwargs['class'] = _match_css_class(attrs) attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, "__iter__") \ and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, basestring): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst is True: result = markup is not None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isinstance(markup, basestring): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif hasattr(matchAgainst, '__iter__'): # list-like result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isinstance(markup, basestring): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif hasattr(portion, '__iter__'): # is a list #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. 
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not hasattr(self.markupMassage, "__iter__"): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. 
If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.startswith('start_') or methodName.startswith('end_') \ or methodName.startswith('do_'): return SGMLParser.__getattr__(self, methodName) elif not methodName.startswith('__'): return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurrence. popTo = name break if (nestingResetTriggers is not None and p.name in nestingResetTriggers) \ or (nestingResetTriggers is None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs]) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here.
We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (i.e. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurrence of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurrence of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class.
If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ('br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col')) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center') #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del') #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre') #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. 
newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-so-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big') I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',) NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names!
It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if isinstance(sub, tuple): if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = 
(0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
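# ----------------------------------------------------------------------
# Editor's usage sketch -- an addition, not part of the original library.
# A minimal demonstration of the classes defined above; every name used
# here (BeautifulSoup, ICantBelieveItsBeautifulSoup, UnicodeDammit,
# prettify) is defined in this file.

def _usage_sketch():
    # '<b>Foo<b>Bar</b></b>' is valid-but-odd HTML. The default
    # BeautifulSoup parser assumes the author forgot to close the first
    # <b>; ICantBelieveItsBeautifulSoup lists 'b' in NESTABLE_TAGS and
    # keeps the nesting exactly as written.
    markup = '<b>Foo<b>Bar</b></b>'
    print BeautifulSoup(markup).prettify()
    print ICantBelieveItsBeautifulSoup(markup).prettify()

    # UnicodeDammit: '\xe9' by itself is not valid UTF-8, so after the
    # utf-8 attempt fails, _convertFrom falls through to windows-1252
    # (assuming the optional chardet library is not installed).
    dammit = UnicodeDammit('Sacr\xe9 bleu!')
    print dammit.unicode
    print dammit.originalEncoding   # 'windows-1252' when chardet is absent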
Python
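# Editor's usage sketch -- an addition, not part of the original sources.
# A minimal sketch of the Universal Feed Parser defined below, assuming
# the module is saved as feedparser.py. Its public parse() entry point is
# defined later in the module, past the end of this excerpt; the
# 'channel'/'feed' and 'items'/'entries' aliasing shown here comes from
# FeedParserDict.keymap below.

def _feedparser_usage_sketch():
    import feedparser
    d = feedparser.parse('http://feedparser.org/docs/examples/atom10.xml')
    print d.version               # e.g. 'atom10'; see SUPPORTED_VERSIONS
    print d.feed.title            # attribute-style access ...
    print d['channel']['title']   # ... and the RSS-style alias, same value
    for entry in d.entries:       # d['items'] would work here too
        print entry.link, entry.get('updated', '(no date)')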
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" __license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. 
Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') return data # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. # Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1 except: chardet = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['subtitle', 'summary'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'categories': return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 
'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) if UserDict.has_key(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.has_key(self, key) except AttributeError: return False def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = {'': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 
'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://postneo.com/icbm/': 'icbm', 'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media', 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', 'http://purl.org/rss/1.0/modules/reference/': 'ref', 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', 'http://purl.org/rss/1.0/modules/search/': 'search', 'http://purl.org/rss/1.0/modules/slash/': 'slash', 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', 'http://hacks.benhammersley.com/rss/streaming/': 'str', 'http://purl.org/rss/1.0/modules/subscription/': 'sub', 'http://purl.org/rss/1.0/modules/syndication/': 'sy', 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', 'http://purl.org/rss/1.0/modules/threading/': 'thr', 'http://purl.org/rss/1.0/modules/textinput/': 'ti', 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', 'http://wellformedweb.org/commentAPI/': 'wfw', 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', 'http://www.w3.org/1999/xhtml': 'xhtml', 'http://www.w3.org/XML/1998/namespace': 'xml', 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf' } _matchnamespaces = {} can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo'] can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] html_types = ['text/html', 'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): if _debug: sys.stderr.write('initializing FeedParser\n') if not self._matchnamespaces: for k, v in self.namespaces.items(): self._matchnamespaces[k.lower()] = v self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = '' # feed type/version, see SUPPORTED_VERSIONS self.namespacesInUse = {} # dictionary of namespaces defined by the feed # the following are used internally to track state; # this is really out of control and should be refactored self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 self.inauthor = 0 self.incontributor = 0 self.inpublisher = 0 self.insource = 0 self.sourcedata = FeedParserDict() self.contentparams = FeedParserDict() self._summaryKey = None self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or '' self.lang = baselang or None if baselang: self.feeddata['language'] = baselang def 
unknown_starttag(self, tag, attrs): if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) # normalize attrs attrs = [(k.lower(), v) for k, v in attrs] attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri self.baseuri = _urljoin(self.baseuri, baseuri) lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang self.lang = lang self.basestack.append(self.baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': # Note: probably shouldn't simply recreate localname here, but # our namespace handling isn't actually 100% correct in cases where # the feed redefines the default namespace (which is actually # the usual case for inline content, thanks Sam), so here we # cheat and just reconstruct the element based on localname # because that compensates for the bugs in our namespace handling. # This will horribly munge inline content with non-empty qnames, # but nobody actually does that, so I'm not fixing it. 
tag = tag.split(':')[-1] return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: return self.push(prefix + suffix, 1) def unknown_endtag(self, tag): if _debug: sys.stderr.write('end %s\n' % tag) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': tag = tag.split(':')[-1] self.handle_data('</%s>' % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = '&#%s;' % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = unichr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' if not self.elementstack: return if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref else: # entity resolution graciously donated by Aaron Swartz def name2cp(k): import htmlentitydefs if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 return htmlentitydefs.name2codepoint[k] k = htmlentitydefs.entitydefs[k] if k.startswith('&#') and k.endswith(';'): return int(k[2:-1]) # not in latin-1 return ord(k) try: name2cp(ref) except KeyError: text = '&%s;' % ref else: text = unichr(name2cp(ref)).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('type') == 'application/xhtml+xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. 
<!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if _debug: sys.stderr.write('entering parse_declaration\n') if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) return k+1 def mapContentType(self, contentType): contentType = contentType.lower() if contentType == 'text': contentType = 'text/plain' elif contentType == 'html': contentType = 'text/html' elif contentType == 'xhtml': contentType = 'application/xhtml+xml' return contentType def trackNamespace(self, prefix, uri): loweruri = uri.lower() if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: self.version = 'rss090' if loweruri == 'http://purl.org/rss/1.0/' and not self.version: self.version = 'rss10' if loweruri == 'http://www.w3.org/2005/atom' and not self.version: self.version = 'atom10' if loweruri.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace uri = 'http://backend.userland.com/rss' loweruri = uri if self._matchnamespaces.has_key(loweruri): self.namespacemap[prefix] = self._matchnamespaces[loweruri] self.namespacesInUse[self._matchnamespaces[loweruri]] = uri else: self.namespacesInUse[prefix or ''] = uri def resolveURI(self, uri): return _urljoin(self.baseuri or '', uri) def decodeEntities(self, element, data): return data def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element, stripWhitespace=1): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() output = ''.join(pieces) if stripWhitespace: output = output.strip() if not expectingText: return output # decode base64 content if base64 and self.contentparams.get('base64', 0): try: output = base64.decodestring(output) except binascii.Error: pass except binascii.Incomplete: pass # resolve relative URIs if (element in self.can_be_relative_uri) and output: output = self.resolveURI(output) # decode entities within embedded markup if not self.contentparams.get('base64', 0): output = self.decodeEntities(element, output) # remove temporary cruft from contentparams try: del self.contentparams['mode'] except KeyError: pass try: del self.contentparams['base64'] except KeyError: pass # resolve relative URIs within embedded markup if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding) # sanitize embedded markup if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding) if self.encoding and type(output) != type(u''): try: output = unicode(output, self.encoding) except: pass # categories/tags/keywords/whatever are handled in _end_category if element == 'category': return output # store output in appropriate place(s) if self.inentry and not self.insource: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif 
element == 'link': self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage): context = self._getContext() if element == 'description': element = 'subtitle' context[element] = output if element == 'link': context['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output context[element + '_detail'] = contentparams return output def pushContent(self, tag, attrsD, defaultContentType, expectingText): self.incontent += 1 self.contentparams = FeedParserDict({ 'type': self.mapContentType(attrsD.get('type', defaultContentType)), 'language': self.lang, 'base': self.baseuri}) self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) self.push(tag, expectingText) def popContent(self, tag): value = self.pop(tag) self.incontent -= 1 self.contentparams.clear() return value def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos <> -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _isBase64(self, attrsD, contentparams): if attrsD.get('mode', '') == 'base64': return 1 if self.contentparams['type'].startswith('text/'): return 0 if self.contentparams['type'].endswith('+xml'): return 0 if self.contentparams['type'].endswith('/xml'): return 0 return 1 def _itsAnHrefDamnIt(self, attrsD): href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) if href: try: del attrsD['url'] except KeyError: pass try: del attrsD['uri'] except KeyError: pass attrsD['href'] = href return attrsD def _save(self, key, value): context = self._getContext() context.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': 'rss091u', '0.92': 'rss092', '0.93': 'rss093', '0.94': 'rss094'} if not self.version: attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = 'rss20' else: self.version = 'rss' def _start_dlhottitles(self, attrsD): self.version = 'hotrss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) _start_feedinfo = _start_channel def _cdf_common(self, attrsD): if attrsD.has_key('lastmod'): self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if attrsD.has_key('href'): self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = 'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): self.inimage = 1 self.push('image', 0) context = self._getContext() context.setdefault('image', FeedParserDict()) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): self.intextinput = 1 self.push('textinput', 0) context = 
self._getContext() context.setdefault('textinput', FeedParserDict()) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) _start_managingeditor = _start_author _start_dc_author = _start_author _start_dc_creator = _start_author _start_itunes_author = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author _end_itunes_author = _end_author def _start_itunes_owner(self, attrsD): self.inpublisher = 1 self.push('publisher', 0) def _end_itunes_owner(self): self.pop('publisher') self.inpublisher = 0 self._sync_author_detail('publisher') def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_dc_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('name', 0) def _end_dc_contributor(self): self._end_name() self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) _start_itunes_name = _start_name def _end_name(self): value = self.pop('name') if self.inpublisher: self._save_author('name', value, 'publisher') elif self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['textinput']['name'] = value _end_itunes_name = _end_name def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except: value = 0 if self.inimage: context = self._getContext() context['image']['height'] = value def _start_url(self, attrsD): self.push('href', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('href') if self.inauthor: self._save_author('href', value) elif self.incontributor: self._save_contributor('href', value) elif self.inimage: context = self._getContext() context['image']['href'] = value elif self.intextinput: context = self._getContext() context['textinput']['link'] = value _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) _start_itunes_email = _start_email def _end_email(self): value = self.pop('email') if self.inpublisher: self._save_author('email', value, 'publisher') elif self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) _end_itunes_email = _end_email def _getContext(self): if self.insource: context = self.sourcedata elif self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value, prefix='author'): context = self._getContext() context.setdefault(prefix + '_detail', FeedParserDict()) context[prefix + '_detail'][key] = value self._sync_author_detail() def _save_contributor(self, key, value): context = self._getContext() 
context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%s_detail' % key) if detail: name = detail.get('name') email = detail.get('email') if name and email: context[key] = '%s (%s)' % (name, email) elif name: context[key] = name elif email: context[key] = email else: author = context.get(key) if not author: return emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) if not emailmatch: return email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, '') author = author.replace('()', '') author = author.strip() if author and (author[0] == '('): author = author[1:] if author and (author[-1] == ')'): author = author[:-1] author = author.strip() context.setdefault('%s_detail' % key, FeedParserDict()) context['%s_detail' % key]['name'] = author context['%s_detail' % key]['email'] = email def _start_subtitle(self, attrsD): self.pushContent('subtitle', attrsD, 'text/plain', 1) _start_tagline = _start_subtitle _start_itunes_subtitle = _start_subtitle def _end_subtitle(self): self.popContent('subtitle') _end_tagline = _end_subtitle _end_itunes_subtitle = _end_subtitle def _start_rights(self, attrsD): self.pushContent('rights', attrsD, 'text/plain', 1) _start_dc_rights = _start_rights _start_copyright = _start_rights def _end_rights(self): self.popContent('rights') _end_dc_rights = _end_rights _end_copyright = _end_rights def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item _start_product = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def _end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_published(self, attrsD): self.push('published', 1) _start_dcterms_issued = _start_published _start_issued = _start_published def _end_published(self): value = self.pop('published') self._save('published_parsed', _parse_date(value)) _end_dcterms_issued = _end_published _end_issued = _end_published def _start_updated(self, attrsD): self.push('updated', 1) _start_modified = _start_updated _start_dcterms_modified = _start_updated _start_pubdate = _start_updated _start_dc_date = _start_updated def _end_updated(self): value = self.pop('updated') parsed_value = _parse_date(value) self._save('updated_parsed', parsed_value) _end_modified = _end_updated _end_dcterms_modified = _end_updated _end_pubdate = _end_updated _end_dc_date = _end_updated def _start_created(self, attrsD): self.push('created', 1) _start_dcterms_created = _start_created def _end_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value)) _end_dcterms_created = _end_created def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): 
        self._save('expired_parsed', _parse_date(self.pop('expired')))

    def _start_cc_license(self, attrsD):
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')

    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)

    def _end_creativecommons_license(self):
        self.pop('license')

    def _addTag(self, term, scheme, label):
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))

    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category

    def _end_itunes_keywords(self):
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)

    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)

    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category

    def _start_cloud(self, attrsD):
        self._getContext()['cloud'] = FeedParserDict(attrsD)

    def _start_link(self, attrsD):
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link

    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link

    def _start_guid(self, attrsD):
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)

    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)

    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title

    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title

    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)

    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)

    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description

    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info

    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info

    def _start_generator(self, attrsD):
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)

    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value

    def _start_admin_generatoragent(self, attrsD):
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})

    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')

    def _start_summary(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary

    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary

    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                context['id'] = href

    def _start_source(self, attrsD):
        self.insource = 1

    def _end_source(self):
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()

    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)

    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)

    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body

    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded

    def _end_content(self):
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content

    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image

    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0

    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0

if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None

        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)

        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))

            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
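            # For example (illustrative note, not in the original source): a feed
            # declaring xmlns:dcterms="http://purl.org/dc/terms/" has its
            # <dcterms:modified> element reported here as localname
            # 'dcterms:modified' even when the SAX parser supplies no qname,
            # because _matchnamespaces maps the namespace URI to the canonical
            # 'dcterms' prefix used by the _start_/_end_ handler names above.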
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())

        def characters(self, text):
            self.handle_data(text)

        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)

        def error(self, exc):
            self.bozo = 1
            self.exc = exc

        def fatalError(self, exc):
            self.error(exc)
            raise exc

class _BaseHTMLProcessor(sgmllib.SGMLParser):
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])

class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

class _RelativeURIResolver(_BaseHTMLProcessor):
    relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'),
      ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'),
      ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'),
      ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'),
      ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'),
      ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'),
      ('link', 'href'), ('object', 'classid'), ('object', 'codebase'),
      ('object', 'data'), ('object', 'usemap'), ('q', 'cite'),
      ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    p = _RelativeURIResolver(baseURI, encoding)
    p.feed(htmlSource)
    return p.output()

class _HTMLSanitizer(_BaseHTMLProcessor):
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        pass

    def handle_decl(self, text):
        pass

    def handle_data(self, text):
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

def _sanitizeHTML(htmlSource, encoding):
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
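        # Informative note (not in the original source): PREFERRED_TIDY_INTERFACES
        # is defined near the top of the module; in stock feedparser it is
        # ["uTidy", "mxTidy"], so uTidy wins when both are installed.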
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data

class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            return self.http_error_default(req, fp, code, msg, headers)

def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module.  This MUST
    be in GMT (Greenwich Mean Time).  The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a urllib2 opener.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))

_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)
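# A hedged sketch (not in the original source) of the handler contract: a
# handler takes the raw date string and returns a 9-tuple in GMT, or returns
# None (or raises) to let the next handler try.  Handlers registered later are
# tried first, since each is inserted at the front of _date_handlers.
#
#     def _parse_date_epoch(dateString):
#         '''Parse a seconds-since-epoch timestamp, e.g. '1072915200'.'''
#         if not dateString.isdigit(): return
#         return time.gmtime(int(dateString))
#     registerDateHandler(_parse_date_epoch)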
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
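# Illustrative inputs the templates above accept (comment added for clarity,
# not part of the original source): '2003-12-31T10:14:55Z' (full W3C form),
# '2003-12-31T10:14:55-08:00' (numeric zone), '20031231' (compact date),
# '2003-335' (ordinal day 335 of 2003), and '03-12-31' (two-digit year,
# assumed to be in the current century).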
# 8-bit date handling routines written by ytrewq1.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',        # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',        # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',        # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',        # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',        # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',        # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',        # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',        # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun',  # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',        # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul',  # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',        # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',        # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',        # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',        # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',        # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',        # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',        # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',        # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.  Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset

    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString):
        return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0:
        return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
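# For example (illustrative, not from the original source):
# _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT') returns the 9-tuple
# (2004, 1, 1, 19, 48, 21, 3, 1, 0); a date-only string such as
# 'Thu, 01 Jan 2004' is padded with ' 00:00:00 GMT' before being handed
# to rfc822.parsedate_tz().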
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)

def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None

def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need to
    sniff the first few bytes of the XML data and try to determine whether
    the encoding is ASCII-compatible.  Section F of the XML specification
    shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''

    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
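    # Informative summary of the precedence rules applied below (comment added
    # for clarity, not part of the original source):
    #   application/xml and application/*+xml: HTTP charset, else XML declaration, else utf-8
    #   text/xml and text/*+xml:              HTTP charset, else us-ascii (XML declaration ignored)
    #   any other text/*:                     HTTP charset, else us-ascii
    #   no or unrecognized Content-Type:      XML declaration, else iso-8859-1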
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type

def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')

def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    '''
    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    data = entity_pattern.sub('', data)
    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    doctype_results = doctype_pattern.findall(data)
    doctype = doctype_results and doctype_results[0] or ''
    if doctype.lower().count('netscape'):
        version = 'rss091n'
    else:
        version = None
    data = doctype_pattern.sub('', data)
    return version, data

def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None

    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''

    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)

    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result

    # if there was a problem downloading, we're done
    if not data:
        return result

    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result

if __name__ == '__main__':
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
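# A minimal usage sketch (added for illustration, not part of the original
# module; the URL is a placeholder):
#
#     import feedparser
#     d = feedparser.parse('http://example.org/atom.xml')
#     print d['feed'].get('title'), len(d['entries'])
#     # conditional re-fetch using the validators saved from the first response
#     d2 = feedparser.parse('http://example.org/atom.xml',
#                           etag=d.get('etag'), modified=d.get('modified'))
#     if d2.get('status') == 304:
#         pass # unchanged: d2['entries'] is empty and d2['version'] == ''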
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
#  added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
#  JD - use inchannel to watch out for image and textinput elements which can
#  also contain title, link, and description elements
#  JD - check for isPermaLink='false' attribute on guid elements
#  JD - replaced openAnything with open_resource supporting ETag and
#  If-Modified-Since request headers
#  JD - parse now accepts etag, modified, agent, and referrer optional
#  arguments
#  JD - modified parse to return a dictionary instead of a tuple so that any
#  etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
#  because of etag/modified, return the old etag/modified to the caller to
#  indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
#  useless.  Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
#  start_admingeneratoragent is an example of how to handle elements with
#  only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
#  also, make sure we send the User-Agent even if urllib2 isn't available.
#  Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
#  snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
#  project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
#  removed unnecessary urllib code -- urllib2 should always be available anyway;
#  return actual url, status, and full HTTP headers (as result['url'],
#  result['status'], and result['headers']) if parsing a remote feed over HTTP --
#  this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
#  added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
#  User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
#  inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
#  textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
#  nested divs within content (JohnD); fixed missing sys import (JohanS);
#  fixed regular expression to capture XML character encoding (Andrei);
#  added support for Atom 0.3-style links; fixed bug with textInput tracking;
#  added support for cloud (MartijnP); added support for multiple
#  category/dc:subject (MartijnP); normalize content model: 'description' gets
#  description (which can come from description, summary, or full content if no
#  description), 'content' gets dict of base/language/type/value (which can come
#  from content:encoded, xhtml:body, content, or fullitem);
#  fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
#  tracking; fixed bug tracking unknown tags; fixed bug tracking content when
#  <content> element is not in default namespace (like Pocketsoap feed);
#  resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
#  wfw:commentRSS; resolve relative URLs within embedded HTML markup in
#  description, xhtml:body, content, content:encoded, title, subtitle,
#  summary, info, tagline, and copyright; added support for pingback and
#  trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
#  namespaces, as opposed to 2.6 when I said I did but didn't really;
#  sanitize HTML markup within some elements; added mxTidy support (if
#  installed) to tidy HTML markup within some elements; fixed indentation
#  bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
#  (FazalM); universal date parsing and normalization (FazalM): 'created',
#  'modified', 'issued' are parsed into 9-tuple date format and stored in
#  'created_parsed', 'modified_parsed', and 'issued_parsed'; 'date' is
#  duplicated in 'modified' and vice-versa; 'date_parsed' is duplicated in
#  'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;.  fixed memory
#  leak not closing url opener (JohnD); added dc:publisher support (MarekK);
#  added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
#  encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
#  fixed relative URI processing for guid (skadz); added ICBM support; added
#  base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
#  blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
#  added several new supported namespaces; fixed bug tracking naked markup in
#  description; added support for enclosure; added support for source; re-added
#  support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
#  xml:base URI, one for documents that don't define one explicitly and one for
#  documents that define an outer and an inner xml:base that goes out of scope
#  before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
#  will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
#  added support for creativeCommons:license and cc:license; added support for
#  full Atom content model in title, tagline, info, copyright, summary; fixed bug
#  with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
#  (dictionary of 'name', 'url', 'email'); map author to author_detail if author
#  contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
#  support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
#  xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
#  dangerous markup; fiddled with decodeEntities (not right); liberalized
#  date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
#  added support to Atom 0.2 subtitle; added support for Atom content model
#  in copyright; better sanitizing of dangerous HTML elements with end tags
#  (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
#  etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
#  Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
#  fixed bug capturing author and contributor URL; fixed bug resolving relative
#  links in author and contributor URL; fixed bug resolving relative links in
#  generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
#  namespace tests, and included them permanently in the test suite with his
#  permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
#  use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
#  name was in parentheses; removed ultra-problematic mxTidy support; patch to
#  workaround crash in PyXML/expat when encountering invalid entities
#  (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
#  results dict; changed results dict to allow getting values with results.key
#  as well as results[key]; work around embedded illformed HTML with half
#  a DOCTYPE; work around malformed Content-Type header; if character encoding
#  is wrong, try several common ones before falling back to regexes (if this
#  works, bozo_exception is set to CharacterEncodingOverride); fixed character
#  encoding issues in BaseHTMLProcessor by tracking encoding and converting
#  from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
#  convert each value in results to Unicode (if possible), even if using
#  regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
#  high-bit characters in attributes in embedded HTML in description (thanks
#  Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
#  FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
#  about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
#  results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
#  cause the same encoding to be tried twice (even if it failed the first time);
#  fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
#  better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
#  my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
#  failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
#  duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
#  added support for image; refactored parse() fallback logic to try other
#  encodings if SAX parsing fails (previously it would only try other encodings
#  if re-encoding failed); remove unichr madness in normalize_attrs now that
#  we're properly tracking encoding in and out of BaseHTMLProcessor; set
#  feed.language from root-level xml:lang; set entry.id from rdf:about;
#  send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
#  iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
#  windows-1252); fixed regression that could cause the same encoding to be
#  tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
#  recover from malformed content-type header parameter with no equals sign
#  ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
#  to Unicode equivalents in illformed feeds (aaronsw); added and
#  passed tests for converting character entities to Unicode equivalents
#  in illformed feeds (aaronsw); test for valid parsers when setting
#  XML_AVAILABLE; make version and encoding available when server returns
#  a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
#  digest auth or proxy support); add code to parse username/password
#  out of url and send as basic authentication; expose downloading-related
#  exceptions in bozo_exception (aaronsw); added __contains__ method to
#  FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
#  convert feed to UTF-8 before passing to XML parser; completely revamped
#  logic for determining character encoding and attempting XML parsing
#  (much faster); increased default timeout to 20 seconds; test for presence
#  of Location header on redirects; added tests for many alternate character
#  encodings; support various EBCDIC encodings; support UTF-16BE and
#  UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
#  UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
#  XML parsers are available; added support for 'Content-encoding: deflate';
#  send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
#  are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
#  problem tracking xml:base and xml:lang if element declares it, child
#  doesn't, first grandchild redeclares it, and second grandchild doesn't;
#  refactored date parsing; defined public registerDateHandler so callers
#  can add support for additional date formats at runtime; added support
#  for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
#  zopeCompatibilityHack() which turns FeedParserDict into a regular
#  dictionary, required for Zope compatibility, and also makes command-
#  line debugging easier because pprint module formats real dictionaries
#  better than dictionary-like objects; added NonXMLContentType exception,
#  which is stored in bozo_exception when a feed is served with a non-XML
#  media type such as 'text/plain'; respect Content-Language as default
#  language if no xml:lang is present; cloud dict is now FeedParserDict;
#  generator dict is now FeedParserDict; better tracking of xml:lang,
#  including support for xml:lang='' to unset the current language;
#  recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
#  namespace; don't overwrite final status on redirects (scenarios:
#  redirecting to a URL that returns 304, redirecting to a URL that
#  redirects to another URL with a different type of redirect); add
#  support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
#  encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
#  support for Atom 1.0; support for iTunes extensions; new 'tags' for
#  categories/keywords/etc. as array of dict
#  {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
#  terminology; parse RFC 822-style dates with no time; lots of other
#  bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
Python
# -*- coding:utf-8 -*-
"""
Created on 21.12.2010

@author: Leon
"""
import feedparser as fp
import re
import urllib
import nltk
import threading
import pickle
from threading import Thread
import numpy
from BeautifulSoup import *

###### Globals #########

# dictionary to save URL -> paragraph lists
sourceDict = {}
# lock on the sourceDict
sourceDictLock = threading.Lock()
try:
    f = open('sourceDict.dump', 'r')
    sourceDict = pickle.load(f)
    f.close()
except Exception:
    pass

# Dictionary holding a frequency distribution of PARAGRAPHS.
# This is necessary to identify paragraphs that occur often, like
# disclaimers, but don't carry any information content.
paraDict = {}
# lock on the paraDict
paraDictLock = threading.Lock()
try:
    f = open('paraDict.dump', 'r')
    paraDict = pickle.load(f)
    f.close()
except Exception:
    pass

def transformUmlaute(h):
    """Replaces German umlauts in a UTF-8 byte string with their ASCII
    transliterations (ue, ae, oe, ss).  Works on the raw UTF-8 bytes:
    the lead byte '\\xc3' is skipped and the following continuation byte
    selects the replacement."""
    p = ''
    for c in h:
        c = str(c)
        if c == '\xc3':
            pass
        elif c == '\xbc':
            p += 'ue'
        elif c == '\xa4':
            p += 'ae'
        elif c == '\xb6':
            p += 'oe'
        elif c == '\x9c':
            p += 'Ue'
        elif c == '\x84':
            p += 'Ae'
        elif c == '\x96':
            p += 'Oe'
        elif c == '\x9f':
            p += 'ss'
        else:
            p += c
    return p

# German stopwords, umlauts transformed.
stopwordsGer = list(transformUmlaute(w) for w in nltk.corpus.stopwords.words('german'))
# English stopwords.
stopwordsEn = nltk.corpus.stopwords.words('english')

feedlist = ['http://rss1.smashingmagazine.com/feed/',
            'http://rss1.smashingmagazine.com/feed/?f=smashing-network',
            'http://feeds.reuters.com/reuters/topNews',
            'http://feeds.reuters.com/reuters/businessNews',
            'http://feeds.reuters.com/reuters/worldNews',
            'http://feeds2.feedburner.com/time/world',
            'http://feeds2.feedburner.com/time/business',
            'http://feeds2.feedburner.com/time/politics',
            'http://rss.cnn.com/rss/edition.rss',
            'http://rss.cnn.com/rss/edition_world.rss',
            'http://www.nytimes.com/services/xml/rss/nyt/World.xml',
            'http://www.nytimes.com/services/xml/rss/nyt/Economy.xml',
            #'http://api.leakfeed.com/v1/cables/latest.rss',
            #'http://www.whitehouse.gov/feed/press'
            ]

def stripTags(html, invalid_tags, keepContent=True):
    """Removes all given HTML tags.
    keepContent decides whether the text nodes within the removed HTML
    nodes should remain or not."""
    soup = BeautifulSoup(html)
    if keepContent:
        # keep the content of the removed HTML nodes
        for tag in soup.findAll(True):
            if tag.name in invalid_tags:
                s = ""
                for c in tag.contents:
                    if type(c) != NavigableString:
                        c = stripTags(unicode(c), invalid_tags)
                    s += unicode(c)
                tag.replaceWith(s)
    else:
        # dismiss everything inside the removed HTML nodes
        for tag in soup.findAll(True):
            if tag.name in invalid_tags:
                tag.extract()
    return str(soup)

def stripComments(html):
    "Strips all HTML comments out of a document."
    soup = BeautifulSoup(html)
    comments = soup.findAll(text=lambda text: isinstance(text, Comment))
    [comment.extract() for comment in comments]
    return str(soup)

def stripHTML(h):
    "Removes all HTML tags and replaces each of them with a space ' '."
    p = ''
    s = 0
    for c in h:
        if c == '<':
            s = 1
        elif c == '>':
            s = 0
            p += ' '
        elif s == 0:
            p += c
    return p

def getWordDict(words):
    "Transforms a word list into a dictionary mapping each word to its frequency."
    wDict = {}
    for w in words:
        try:
            wDict[w] += 1
        except KeyError:
            wDict[w] = 1
    return wDict
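# --- Usage sketch (editor's addition; a hedged sketch, not part of the original) ---
# Shows the intended order of the stripping helpers above: comments first,
# then whole unwanted elements, then every remaining tag flattened to a
# space.  _strip_demo is a hypothetical name and is not called by the
# pipeline below.
def _strip_demo():
    html = '<p>Hello <b>world</b><!-- legal --><script>track()</script></p>'
    html = stripComments(html)                             # drop the comment
    html = stripTags(html, ['script'], keepContent=False)  # drop track() too
    return stripHTML(html)                                 # tags -> spaces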
def getParagraphs(text, wordGap=2, minWords=10, wordLength=2):
    """Input: a text with large whitespace gaps.
    Result: paragraphs out of the given text which are
    1. not split up too much by whitespace and
    2. have enough words of a minimum length in them.
    The result is a list of paragraphs (strings)."""
    # replace all whitespace with a uniform space ' '
    text = re.sub(r"\s", " ", text)
    # split at whitespace chunks
    wsChunk = (wordGap + 1) * ' '
    text = text.split(wsChunk)
    result = []
    for para in text:
        # split the paragraph at its spaces
        para = para.split(' ')
        paraText = ''
        wordCount = 0
        for word in para:
            # add all non-empty words, but only increase the counter
            # if they're long enough
            if len(word) > 0:
                paraText += word + ' '
                if len(word) >= wordLength:
                    wordCount += 1
        if wordCount >= minWords:
            result.append(paraText)
    return result

def getUrlsFromFeedlist(feedList, numThreads=4):
    "Returns (title, URL) pairs for all entries of the feeds in the given feed list."
    urlList = []
    urlListLock = threading.Lock()
    feedListLock = threading.Lock()

    class FeedThread(Thread):
        def __init__(self):
            super(FeedThread, self).__init__()

        def getNextFeed(self):
            nextFeed = None
            with feedListLock:
                if len(feedList) > 0:
                    nextFeed = feedList.pop()
            return nextFeed

        def run(self):
            nextFeed = self.getNextFeed()
            while nextFeed is not None:
                print "Parsing feed: %s" % nextFeed
                feed = fp.parse(nextFeed)
                for entry in feed.entries:
                    with urlListLock:
                        urlList.append((entry.title, entry.link))
                nextFeed = self.getNextFeed()

    currentThreads = []
    # start some threads to get the URLs out of the RSS feeds
    for i in range(0, numThreads):
        newThread = FeedThread()
        currentThreads.append(newThread)
        newThread.start()
    # join the threads
    for t in currentThreads:
        t.join()
    return urlList
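# --- Usage note (editor's addition; hedged) ---
# The worker threads pop() feeds off the very list passed in, so
# getUrlsFromFeedlist() consumes its argument.  Hand it a copy when the
# original list is still needed.  _feed_demo is a hypothetical name.
def _feed_demo():
    pending = list(feedlist)    # copy, so the global feedlist survives
    pairs = getUrlsFromFeedlist(pending, numThreads=2)
    for title, link in pairs[:5]:
        print title, '->', link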
def updateDictionaries(numThreads=5):
    "Updates the sourceDict and paraDict with the content of new entries of the feedlist."
    # pass a copy, because getUrlsFromFeedlist() consumes the list it is given
    urlList = getUrlsFromFeedlist(list(feedlist))
    # get the URLs we haven't seen yet
    newUrls = set(urlList).difference(set(sourceDict.keys()))
    newUrlLock = threading.Lock()

    class ParseThread(Thread):
        def __init__(self):
            super(ParseThread, self).__init__()

        def getNextUrl(self):
            newUrl = None
            with newUrlLock:
                if len(newUrls) > 0:
                    newUrl = newUrls.pop()
            return newUrl

        def run(self):
            newUrl = self.getNextUrl()
            print " new url: %s" % str(newUrl)
            while newUrl is not None:
                print " Get content and parse it from URL: %s" % str(newUrl)
                content = ''
                try:
                    f = urllib.urlopen(newUrl[1])
                    # read from the object, storing the page's contents
                    content = f.read()
                    f.close()
                except IOError:
                    print " opening url %s failed." % newUrl[1]
                # strip comments, <style> and <script>
                content = stripComments(content)
                content = stripTags(content, ['script', 'style'], keepContent=False)
                # remove formatting tags, but keep their content
                content = stripTags(content, ['b', 'i', 'u', 'h', 'h1', 'h2', 'h3', 'p'],
                                    keepContent=True)
                # replace the remaining HTML tags with a space ' '
                content = stripHTML(content)
                # get the relevant paragraphs out of the content
                text = getParagraphs(content)
                # add the result to the sourceDict
                with sourceDictLock:
                    sourceDict[newUrl] = text
                # update the paraDict
                for para in text:
                    with paraDictLock:
                        try:
                            paraDict[para] += 1
                        except KeyError:
                            paraDict[para] = 1
                newUrl = self.getNextUrl()

    # start some threads to parse the HTML
    currentThreads = []
    for i in range(0, numThreads):
        t = ParseThread()
        currentThreads.append(t)
        t.start()
    # join the threads
    for t in currentThreads:
        t.join()

def getUniqueParas(maxOccurance=1):
    """Returns a dictionary 'URL' -> 'list of paragraphs' which only contains
    paragraphs that occur at most maxOccurance times overall
    (no disclaimers, etc.)."""
    uniqueParas = {}
    for url in sourceDict.keys():
        uniqueParas[url] = []
        for para in sourceDict[url]:
            if paraDict[para] <= maxOccurance:
                uniqueParas[url].append(para)
    return uniqueParas

updateDictionaries()

print "## Saving Source Dict ##"
try:
    f = open('sourceDict.dump', 'w')
    pickle.dump(sourceDict, f)
    f.close()
except Exception:
    print "saving went wrong"
print "## Saving Para Dict ##"
try:
    f = open('paraDict.dump', 'w')
    pickle.dump(paraDict, f)
    f.close()
except Exception:
    print "saving went wrong"

uniqueParas = getUniqueParas()

#print "Unique Paras:"
#for url in uniqueParas.keys():
#    print " URL: %s" % url
#    for para in uniqueParas[url]:
#        print "     %s" % para
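# --- Worked example (editor's addition; hedged, illustrative values only) ---
# Why the paragraph counts matter: boilerplate such as a disclaimer appears
# under many articles and thus gets a high paraDict count, while genuine
# article text usually occurs once.  With the default maxOccurance=1:
#
#   paraDict = {'All rights reserved.': 40, 'Quake shakes the capital.': 1}
#   # -> getUniqueParas() drops the disclaimer and keeps the news sentence.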
# Deviation from the given spec: in allwords we store tuples of the form
# (count, [titleList]), where titleList holds the titles of the articles
# in which the word occurs.
allwords = {}
articlewords = []
articletitles = []

def getarticlewords():
    for key in uniqueParas.keys():
        url = key[1]
        title = key[0]
        paras = uniqueParas[key]
        wordDict = {}
        for para in paras:
            words = para.split(' ')
            for word in words:
                # remove all non-alphanumeric characters
                word = re.sub(r"\W+", "", word)
                word = word.lower()
                # note: keeping stopwords would make the matrices too large
                if len(word) > 0 and word not in stopwordsEn:
                    try:
                        count = allwords[word][0]
                        titleList = allwords[word][1]
                        titleList.append(title)
                        allwords[word] = (count + 1, titleList)
                    except KeyError:
                        allwords[word] = (1, [title])
                    try:
                        wordDict[word] += 1
                    except KeyError:
                        wordDict[word] = 1
        articlewords.append(wordDict)
        articletitles.append(title)

getarticlewords()

### From here on, as given in the assignment ###

def makematrix(allw, articlew):
    wordvec = []
    wordInArt = []
    numTitles = len(articlew)
    for word in allw.keys():
        (count, titleList) = allw[word]
        ratio = float(len(titleList)) / float(numTitles)
        # We scale count by the number of articles read and adjusted the
        # ratio, because otherwise the matrices get too large.
        if count > len(articletitles) / 6 and ratio < 0.6:
            wordvec.append(word)
    # iterate over a copy, because all-zero rows are removed from the
    # original lists as we go
    for wordDict in list(articlew):
        line = []
        zeroLine = True
        for word in wordvec:
            wordCount = 0
            try:
                wordCount = wordDict[word]
                zeroLine = False
            except KeyError:
                wordCount = 0
            line.append(wordCount)
        if not zeroLine:
            wordInArt.append(line)
        else:
            # drop the article and its title at the same index
            index = articlewords.index(wordDict)
            del articlewords[index]
            del articletitles[index]
    wordInArt = numpy.array(wordInArt)
    return wordvec, wordInArt

(wordvec, wordInArt) = makematrix(allwords, articlewords)

head = ''
for word in wordvec:
    head += str(word) + ','
head += "\n"
body = ''
for line in wordInArt:
    for el in line:
        body += str(el) + ','
    body += "\n"
try:
    f = open('matrix.txt', 'w')
    f.write(head + body)
    f.close()
except Exception:
    print "writing to matrix.txt failed"

def cost(A, B):
    "Squared error (Frobenius distance) between two equally shaped matrices."
    total = 0
    (n, m) = A.shape
    aA = numpy.array(A)
    aB = numpy.array(B)
    for i in range(0, n):
        for j in range(0, m):
            total += pow(aA[i][j] - aB[i][j], 2)
    return total

def nnmf(A, m, it):
    print "starting nnmf"
    (r, c) = A.shape
    H = []
    for i in range(0, m):
        row = []
        for j in range(0, c):
            row.append(numpy.random.random())
        H.append(row)
    H = numpy.matrix(H)
    print " built initial H"
    W = []
    for i in range(0, r):
        row = []
        for j in range(0, m):
            row.append(10.0 * numpy.random.random())
        W.append(row)
    W = numpy.matrix(W)
    print " built initial W"
    # scale the stopping threshold by the number of articles and words
    kS = len(articletitles) * len(wordvec) / 2
    print " threshold = %s" % kS
    k = 2 * kS
    i = 0
    print " starting loop ..."
    while (k > kS) and (i < it):
        B = W * H
        k = cost(A, B)
        wT = W.transpose()
        hT = H.transpose()
        aH = numpy.array(H)
        hZ = wT * A
        aHZ = numpy.array(hZ)
        hN = wT * W * H
        aHN = numpy.array(hN)
        H = numpy.matrix(aH * (aHZ / aHN))
        aW = numpy.array(W)
        wZ = A * hT
        aWZ = numpy.array(wZ)
        wN = W * H * hT
        aWN = numpy.array(wN)
        W = numpy.matrix(aW * aWZ / aWN)
        i += 1
    print "Finished after %s iterations with a cost of k = %s." % (i, round(k))
    return H, W
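# --- Note on the update rule (editor's addition; hedged) ---
# nnmf() above implements the Lee-Seung multiplicative updates for
# non-negative matrix factorization, minimizing cost(A, W*H):
#
#   H <- H * (W^T A) / (W^T W H)        (elementwise product and quotient)
#   W <- W * (A H^T) / (W H H^T)
#
# Both factors stay non-negative because they start positive and are only
# scaled by non-negative ratios.  A hypothetical smoke test (note that the
# stopping threshold kS is derived from the module globals
# articletitles/wordvec, so results on arbitrary input are only indicative):
#
#   A = numpy.matrix(numpy.random.random((8, 5)))
#   H0, W0 = nnmf(A, 2, 50)
#   print cost(A, W0 * H0)    # should shrink as the iteration count grows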
(H, W) = nnmf(wordInArt, 6, 30)

def showfeatures(w, h, titles, wordvec):
    (mH, nH) = h.shape
    aH = numpy.array(h)

    # function to display a cluster: the words with the highest weights
    def getClusterWords(cIndex, indent=0):
        merkmal = aH[cIndex]
        wmList = []
        for j in range(0, nH):
            wmList.append((wordvec[j], merkmal[j]))
        wmList = sorted(wmList, key=lambda t: t[1], reverse=True)
        rS = ""
        rS += "\n" + indent * "\t" + "cluster %s:\n" % (cIndex)
        for z in range(0, min(6, len(wmList))):
            rS += indent * "\t" + "  " + str(wmList[z]) + "\n"
        return rS

    # display the clusters
    for i in range(0, mH):
        print getClusterWords(i)

    (mW, nW) = w.shape
    print "mW: %s | nW: %s" % w.shape
    aW = numpy.array(w)

    # random index function
    def rnd(x):
        return numpy.random.randint(0, x)

    # For clarity: show only the most important clusters of 10 random articles.
    randList = [rnd(mW), rnd(mW), rnd(mW), rnd(mW), rnd(mW),
                rnd(mW), rnd(mW), rnd(mW), rnd(mW), rnd(mW)]
    for i in randList:
        clusterWeights = aW[i]
        title = titles[i]
        aList = []
        for j in range(0, nW):
            aList.append((j, clusterWeights[j]))
        aList = sorted(aList, key=lambda t: t[1], reverse=True)
        print "\n\"%s\":" % title
        for z in range(0, min(3, len(aList))):
            clusterIndex = aList[z][0]
            print "  %s. cluster (%s): %s" % (z, aList[z][1],
                                              str(getClusterWords(clusterIndex, indent=2)))

showfeatures(W, H, articletitles, wordvec)
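# --- Optional: vectorized cost (editor's addition; hedged) ---
# The pure-Python double loop in cost() is slow for larger matrices; numpy
# computes the same squared Frobenius distance in one expression.  An
# equivalent alternative, not wired into the code above:
def cost_fast(A, B):
    D = numpy.asarray(A) - numpy.asarray(B)
    return (D * D).sum()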
Python