code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
"""$Id: channel.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
from validators import *
#
# channel element.
#
class channel(validatorBase):
  """Validator for an RSS <channel> element.

  validate() runs after all children have been seen: it checks for
  required/recommended children and flags duplicated singletons.  Each
  do_* method returns the validator(s) used for one child element.
  """
  def validate(self):
    # required RSS channel children
    if not "description" in self.children:
      self.log(MissingDescription({"parent":self.name,"element":"description"}))
    if not "link" in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
    if not "title" in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    # recommended Dublin Core children
    if not "dc_date" in self.children:
      self.log(MissingDCDate({"parent":self.name, "element":"dc:date"}))
    if not "dc_rights" in self.children:
      self.log(MissingDCRights({"parent":self.name, "element":"dc:rights"}))
    if not "dc_language" in self.children:
      self.log(MissingDCLanguage({"parent":self.name, "element":"dc:language"}))
    # children that may appear at most once
    if self.children.count("image") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"image"}))
    if self.children.count("textInput") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
    if self.children.count("skipHours") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
    if self.children.count("skipDays") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
  def do_image(self):
    from image import image
    return image()
  def do_item(self):
    from item import item
    return item()
  def do_items(self): # this actually should be from the rss1.0 ns
    return eater()
  def do_textInput(self):
    from textInput import textInput
    return textInput()
  def do_textinput(self):
    # lowercase spelling: valid in RSS 1.0 (as an RDF resource) but a
    # likely misspelling of textInput in RSS 2.0
    if not self.attrs.has_key((rdfNS,"about")):
      # optimize for RSS 2.0.  If it is not valid RDF, assume that it is
      # a simple misspelling (in other words, the error message will be
      # less than helpful on RSS 1.0 feeds).
      self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
    return eater()
  def do_category(self):
    return text()
  def do_cloud(self):
    return cloud()
  do_rating = validatorBase.leaf # TODO test cases?!?
  def do_ttl(self):
    return ttl(), noduplicates()
  def do_docs(self):
    return rfc2396(), noduplicates()
  def do_link(self):
    return rfc2396(), noduplicates()
  def do_title(self):
    return nonhtml(), noduplicates()
  def do_description(self):
    return nonhtml(), noduplicates()
  # For each core/extension pair below (generator vs admin:generatorAgent,
  # pubDate vs dc:date, ...) the presence of both is flagged as redundant,
  # and some core elements additionally log a "use the extension" hint.
  def do_generator(self):
    if "admin_generatorAgent" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    self.log(UseAdminGeneratorAgent({"core":"generator", "ext":"admin:generatorAgent"}))
    return text(), noduplicates()
  def do_pubDate(self):
    if "dc_date" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    self.log(UseDCDate({"core":"pubDate", "ext":"dc:date"}))
    return rfc822(), noduplicates()
  def do_managingEditor(self):
    if "dc_creator" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    self.log(UseDCCreator({"core":"managingEditor", "ext":"dc:creator"}))
    return email_lax(), noduplicates()
  def do_webMaster(self):
    if "dc_publisher" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    self.log(UseDCPublisher({"core":"webMaster", "ext":"dc:publisher"}))
    return email_lax(), noduplicates()
  def do_dc_creator(self):
    if "managingEditor" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return text() # duplicates allowed
  def do_dc_language(self):
    if "language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_language(self):
    if "dc_language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    self.log(UseDCLanguage({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_dcterms_modified(self):
    if "lastBuildDate" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return iso8601(), noduplicates()
  def do_dc_publisher(self):
    if "webMaster" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return text() # duplicates allowed
  def do_copyright(self):
    if "dc_rights" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    self.log(UseDCRights({"core":"copyright", "ext":"dc:rights"}))
    return text(), noduplicates()
  def do_dc_rights(self):
    if "copyright" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return text(), noduplicates()
  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return iso8601(), noduplicates()
  def do_admin_generatorAgent(self):
    if "generator" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return admin_generatorAgent(), noduplicates()
  def do_admin_errorReportsTo(self):
    return admin_errorReportsTo(), noduplicates()
  def do_lastBuildDate(self):
    if "dcterms_modified" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    self.log(UseDCTermsModified({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return rfc822(), noduplicates()
  def do_skipHours(self):
    from skipHours import skipHours
    return skipHours()
  def do_skipDays(self):
    from skipDays import skipDays
    return skipDays()
  def do_blogChannel_blogRoll(self):
    return rfc2396(), noduplicates()
  def do_blogChannel_mySubscriptions(self):
    return rfc2396(), noduplicates()
  def do_blogChannel_blink(self):
    return rfc2396(), noduplicates()
  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()
  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396()
  def do_blink(self):
    return blink(), noduplicates()
  def do_sy_updatePeriod(self):
    return sy_updatePeriod(), noduplicates()
  def do_sy_updateFrequency(self):
    return sy_updateFrequency(), noduplicates()
  def do_sy_updateBase(self):
    return iso8601(), noduplicates()
class blink(validatorBase):
  # <blink> is never a valid feed element; its mere presence is logged.
  def validate(self):
    self.log(NoBlink({}))
class cloud(validatorBase):
  """Validator for the RSS <cloud> element.

  All five attributes -- domain, port, path, registerProcedure and
  protocol -- are required; port must additionally parse as a positive
  integer.  One event (Missing/Invalid/Valid) is logged per attribute,
  in that order.
  """
  def prevalidate(self):
    self._requireAttr('domain')
    # port: must be present *and* a positive integer, so it cannot use
    # the simple presence check shared by the other four attributes
    try:
      if int(self.attrs.getValue((None, 'port'))) <= 0:
        self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except KeyError:
      # getValue raises KeyError when the attribute is absent
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except ValueError:
      # present, but not parseable as an integer
      self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    self._requireAttr('path')
    self._requireAttr('registerProcedure')
    self._requireAttr('protocol')
    ## TODO - is there a list of accepted protocols for this thing?
    return validatorBase.prevalidate(self)
  def _requireAttr(self, attr):
    # Log MissingAttribute if attr is absent, ValidCloud otherwise.
    if (None, attr) not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":attr}))
class ttl(positiveInteger): pass  # <ttl> content is validated as a positive integer
class admin_generatorAgent(rdfResourceURI): pass  # admin:generatorAgent: rdf:resource URI
class admin_errorReportsTo(rdfResourceURI): pass  # admin:errorReportsTo: rdf:resource URI
class sy_updateFrequency(positiveInteger): pass  # sy:updateFrequency: positive integer
class sy_updatePeriod(text):
  """Validates sy:updatePeriod content against its fixed vocabulary."""
  # the only values the syndication module defines for this element
  PERIODS = ('hourly', 'daily', 'weekly', 'monthly', 'yearly')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value in self.PERIODS:
      self.log(ValidUpdatePeriod(params))
    else:
      self.log(InvalidUpdatePeriod(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.30 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.29 2003/08/04 00:03:14 rubys
Implement more strict email check for pie
Revision 1.28 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.27 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.26 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.25 2003/07/29 16:44:56 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.24 2002/12/20 13:26:00 rubys
CreativeCommons support
Revision 1.23 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.22 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.21 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.20 2002/10/22 14:11:36 f8dy
initial attempts to handle RSS 1.0 vs. 2.0 images and textinputs; test
cases still fail
Revision 1.19 2002/10/22 13:16:03 f8dy
passed lowercase textinput test
Revision 1.18 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.17 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.16 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.15 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: generator.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# Atom generator element
#
class generator(rfc2396):
  """Atom <generator>: when a url attribute is present, its value is
  validated as an RFC 2396 URI reference."""
  def validate(self):
    # NOTE(review): original indentation was lost in extraction; this
    # reading validates only when the url attribute is present -- confirm
    # against the project's generator test cases.
    if self.attrs.has_key((None, "url")):
      self.value = self.attrs.getValue((None, "url"))
      rfc2396.validate(self, extraParams={"attr": "url"})
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.3 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/03 22:39:40 rubys
Add generator element
Revision 1.2 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: skipHours.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
#
# skipHours element
#
class skipHours(validatorBase):
  """Validator for the RSS 0.9x <skipHours> element."""
  def prevalidate(self):
    # steer authors toward the syndication module instead
    self.log(UseSyndicationModule({"core":self.name, "ext":"syndication module"}))
  def validate(self):
    seen = self.children
    if seen.count("hour") == 0:
      self.log(MissingElement({"parent":self.name, "element":"hour"}))
    if len(seen) > 24:
      self.log(NotEnoughHoursInTheDay({}))
  def do_hour(self):
    return hour()
class hour(validatorBase):
  """Validates that element text is an integer hour (0-24 inclusive,
  per this project's historical choice -- zero is explicitly allowed)."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      valid = 0 <= int(self.value) <= 24
    except ValueError:
      # not an integer at all
      valid = False
    if valid:
      self.log(ValidHour(params))
    else:
      self.log(InvalidHour(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.4 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.3 2002/11/11 19:12:17 rubys
Allow zero for hours
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: skipDays.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
#
# skipDays element
#
class skipDays(validatorBase):
  """Validator for the RSS 0.9x <skipDays> element."""
  def prevalidate(self):
    # steer authors toward the syndication module instead
    self.log(UseSyndicationModule({"core":self.name, "ext":"syndication module"}))
  def validate(self):
    seen = self.children
    if seen.count("day") == 0:
      self.log(MissingElement({"parent":self.name, "element":"day"}))
    if len(seen) > 7:
      self.log(EightDaysAWeek({}))
  def do_day(self):
    return day()
class day(validatorBase):
  """Validates that element text is a capitalized English weekday name."""
  WEEKDAYS = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
              'Saturday', 'Sunday')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value in self.WEEKDAYS:
      self.log(ValidDay(params))
    else:
      self.log(InvalidDay(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.5 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.4 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.3 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: text_plain.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
class Formatter(BaseFormatter):
  """Plain-text output formatter: one line per logged event."""
  def format(self, event):
    # "<line>, column <col>: <message> (<count> occurrences)"
    location = self.getLineAndColumn(event)
    message = self.getMessage(event)
    count = self.getCount(event)
    return '%s %s%s' % (location, message, count)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.7 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: text_html.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
import cgi
class Formatter(BaseFormatter):
  """HTML output formatter: renders each event as an <li> containing the
  message, a link to per-message help, and a marked-up source fragment."""
  FRAGMENTLEN = 80   # maximum characters of source context to display
  DOCSURL = 'docs/'  # base URL of the per-message help pages
  def __init__(self, events, rawdata):
    BaseFormatter.__init__(self, events)
    self.rawdata = rawdata  # raw feed text, used to extract code fragments
  def getRootClass(self, aClass):
    # Walk up the first-base chain of the event class hierarchy until the
    # direct subclass of LoggedEvent is reached.
    base = aClass.__bases__[0]
    if base.__name__.split('.')[-1] == 'LoggedEvent':
      return aClass
    else:
      return self.getRootClass(base)
  def getHelpURL(self, event):
    # Help URL layout: DOCSURL + <rootclass, lowercased> + '/' + <messageclass>
    rootClass = self.getRootClass(event.__class__).__name__
    rootClass = rootClass.split('.')[-1]
    rootClass = rootClass.lower()
#    messageClass = self.getMessageClass(event).__name__.split('.')[-1]
    messageClass = event.__class__.__name__.split('.')[-1]
    return self.DOCSURL + rootClass + '/' + messageClass
  def format(self, event):
    if event.params.has_key('line'):
      line = event.params['line']
      if line >= len(self.rawdata.split('\n')):
        # For some odd reason, UnicodeErrors tend to trigger a bug
        # in the SAX parser that misrepresents the current line number.
        # We try to capture the last known good line number/column as
        # we go along, and now it's time to fall back to that.
        # Note: this mutates event.params in place.
        line = event.params['line'] = event.params['backupline']
        column = event.params['column'] = event.params['backupcolumn']
      column = event.params['column']
      codeFragment = self.rawdata.split('\n')[line-1]
      markerColumn = column
      if column > self.FRAGMENTLEN:
        # show the tail around the error column; FRAGMENTLEN/2 relies on
        # Python 2 integer division -- would become a float under Python 3
        codeFragment = '... ' + codeFragment[column-(self.FRAGMENTLEN/2):]
        markerColumn = 5 + (self.FRAGMENTLEN/2)
      if len(codeFragment) > self.FRAGMENTLEN:
        codeFragment = codeFragment[:(self.FRAGMENTLEN-4)] + ' ...'
    else:
      # NOTE(review): on this path `line` and `markerColumn` are never
      # assigned, yet the return statement below uses both -- presumably
      # every event reaching this formatter carries a 'line' param; verify.
      codeFragment = ''
    return """<li>
<p><a href="#l%s">%s</a>, %s: <span class="message">%s</span>%s [<a title="more information about this error" href="%s">help</a>]</p>
<blockquote><p><code>%s<br />%s<span class="marker">%s</span></code></p></blockquote>
</li>
""" % (line, self.getLine(event),
       self.getColumn(event),
       self.getMessage(event),
       self.getCount(event),
       self.getHelpURL(event),
       cgi.escape(codeFragment),
       ' ' * (markerColumn - 1),
       '^')
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.14 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.13 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.12 2003/09/01 21:28:03 f8dy
changes related to new server
Revision 1.11 2003/09/01 21:20:44 f8dy
changes related to new server
Revision 1.10 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.9 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.8 2002/10/30 06:07:18 f8dy
version 1.0.5
Revision 1.7 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: application_test.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for testing that all output messages are defined properly"""
from base import BaseFormatter
import feedvalidator
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class Formatter(BaseFormatter):
  """Formatter used by the automated message tester: resolves each event
  to its translated message text, or None when no message is defined."""
  def getMessage(self, event):
    # Breadth-first search of the event's class hierarchy for the first
    # class that has a translated message.
    queue = [event.__class__]
    while queue:
      candidate = queue.pop(0)
      if lang.messages.has_key(candidate):
        return lang.messages[candidate] % event.params
      queue.extend(candidate.__bases__)
    return None
  def format(self, event):
    """returns the formatted representation of a single event"""
    return self.getMessage(event)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.3 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/05 22:09:24 f8dy
added automated message tester
"""
| Python |
"""$Id: text_xml.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for xml output"""
from base import BaseFormatter
from feedvalidator.logging import *
import feedvalidator
def xmlEncode(value):
  """Escape the five XML special characters in value.

  '&' is replaced first so the entities introduced by the later
  replacements are not themselves re-escaped.
  """
  for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                      ('"', '&quot;'), ("'", '&apos;')):
    value = value.replace(raw, entity)
  return value
class Formatter(BaseFormatter):
  """XML output formatter: renders each event as a <message> element."""
  def format(self, event):
    # NOTE(review): this adds type/text/level keys to event.params in
    # place; anything else holding a reference to params sees them too.
    params = event.params
    params['type'] = event.__class__.__name__
    params['text'] = self.getMessage(event)
    # determine the level of severity
    # (Info/Warning/Error come from feedvalidator.logging; this Warning
    # shadows the Python builtin)
    level = 'unknown'
    if isinstance(event,Info): level = 'info'
    if isinstance(event,Warning): level = 'warning'
    if isinstance(event,Error): level = 'error'
    params['level'] = level
    # organize fixed elements into a known order: each insert(0) pushes
    # to the front, so the output order becomes level, type, line,
    # column, text, msgcount, then the remaining keys alphabetically
    order = params.keys()
    order.sort()
    for key in ['msgcount', 'text', 'column', 'line', 'type', 'level']:
      if key in order:
        order.remove(key)
        order.insert(0,key)
    # output the elements
    result = "<message>\n"
    for key in order:
      value = xmlEncode(str(params[key]))
      result = result + (" <%s>%s</%s>\n" % (key, value, key))
    result = result + "</message>\n"
    return result
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.5 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.4 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: base.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Base class for output classes"""
from UserList import UserList
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class BaseFormatter(UserList):
  """Common behaviour for all output formatters.

  Indexing a formatter yields the formatted representation of the
  corresponding logged event; subclasses override format(event).
  """
  def __getitem__(self, i):
    return self.format(self.data[i])
  def getLine(self, event):
    if event.params.has_key('line'):
      return lang.line % event.params
    return ''
  def getColumn(self, event):
    if event.params.has_key('column'):
      return lang.column % event.params
    return ''
  def getLineAndColumn(self, event):
    line = self.getLine(event)
    if not line:
      return ''
    return '%s, %s:' % (line, self.getColumn(event))
  def getCount(self, event):
    # only report a count when the event occurred more than once
    if not event.params.has_key('msgcount'):
      return ''
    if int(event.params['msgcount']) > 1:
      return lang.occurances % event.params
    return ''
  def getMessageClass(self, event):
    # Breadth-first search of the event's class hierarchy for the first
    # class that has a translated message.
    queue = [event.__class__]
    while queue:
      candidate = queue.pop(0)
      if lang.messages.has_key(candidate):
        return candidate
      queue.extend(candidate.__bases__)
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def getMessage(self, event):
    # Same traversal as getMessageClass, but returns the interpolated
    # message text instead of the class.
    queue = [event.__class__]
    while queue:
      candidate = queue.pop(0)
      if lang.messages.has_key(candidate):
        return lang.messages[candidate] % event.params
      queue.extend(candidate.__bases__)
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def format(self, event):
    """returns the formatted representation of a single event"""
    return repr(event)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.10 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.7 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
__all__ = ['base', 'text_plain', 'text_html']
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.4 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: entry.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# pie/echo entry element.
#
class entry(validatorBase):
  """Validator for a pie/echo (Atom 0.3) <entry> element."""
  def prevalidate(self):
    # collected by do_link(); inspected by validate()
    self.links=[]
  def validate(self):
    # required children; author may instead be supplied on the parent feed
    if not 'title' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    if not 'author' in self.children and not 'author' in self.parent.children:
      self.log(MissingElement({"parent":self.name, "element":"author"}))
    if not 'modified' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"modified"}))
    if not 'issued' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"issued"}))
    if not 'id' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"id"}))
    # must have an alternate link
    if [link for link in self.links if link.rel == u'alternate']:
      self.log(ValidAtomLinkRel({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    else:
      self.log(MissingAlternateLink({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    # each link's rel/type pair must be unique
    types={}
    for link in self.links:
      if not link.type in types: types[link.type]=[]
      if link.rel in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link"}))
      else:
        types[link.type] += [link.rel]
  def do_id(self):
    # id: a URI, at most once per entry, unique across the whole feed
    return rfc2396(), noduplicates(), unique('id',self.parent)
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_title(self):
    from content import content
    return content(), noduplicates()
  def do_summary(self):
    from content import content
    return content(), noduplicates()
  def do_author(self):
    from author import author
    return author(), noduplicates()
  def do_contributor(self):
    from author import author
    return author()
  def do_content(self):
    from content import content
    return content()
  def do_created(self):
    # iso8601_z -- presumably ISO 8601 restricted to UTC ('Z'); confirm
    # against validators.py
    return iso8601_z(), noduplicates()
  def do_issued(self):
    return iso8601(), noduplicates()
  def do_modified(self):
    return iso8601_z(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.13 2003/12/12 14:35:08 f8dy
fixed link rel=alternate logic to pass new "link not missing" tests
Revision 1.12 2003/12/12 06:10:58 rubys
link rel/type checking
Revision 1.11 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.10 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2003/08/05 14:28:26 rubys
Allow author to be omitted from entries when present on the feed
Revision 1.7 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.6 2003/07/20 17:48:50 rubys
Validate that titles are present
Revision 1.5 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.4 2003/07/20 16:35:57 rubys
Ensure that issued and modified are present exactly once
Revision 1.3 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.2 2003/07/07 02:44:13 rubys
Further progress towards pie
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: feed.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# Atom root element
#
class feed(validatorBase):
  """Validator for the Atom (0.3 era) root <feed> element.

  Collects child <link> validators in self.links so that validate() can
  enforce the alternate-link and unique rel/type rules.
  """
  def prevalidate(self):
    self.setFeedType(TYPE_ATOM)
    self.links = []  # filled in by do_link()
  def validate(self):
    # version attribute is required and must not be an obsolete value
    try:
      version = self.attrs.getValue((None,'version'))
      if not version:
        self.log(MissingAttribute({"element":self.name, "attr":"version"}))
      elif version in ['0.1', '0.2', '0.2.1']:
        self.log(ObsoleteVersion({"element":self.name, "version":version}))
    except KeyError:
      # getValue raises KeyError when the attribute is absent.  This was
      # a bare "except:", which would also have silently converted any
      # unrelated error (including ones raised by the log calls above)
      # into a MissingAttribute report.
      self.log(MissingAttribute({"element":self.name, "attr":"version"}))
    if 'title' not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    # must have an alternate link
    if [link for link in self.links if link.rel == u'alternate']:
      self.log(ValidAtomLinkRel({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    else:
      self.log(MissingAlternateLink({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    # each link's rel/type pair must be unique
    types={}
    for link in self.links:
      if not link.type in types: types[link.type]=[]
      if link.rel in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link"}))
      else:
        types[link.type] += [link.rel]
  def do_entry(self):
    from entry import entry
    return entry()
  def do_title(self):
    from content import content
    return content(), noduplicates()
  def do_tagline(self):
    from content import content
    return content(), noduplicates()
  def do_info(self):
    from content import content
    return content(), noduplicates()
  def do_id(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_modified(self):
    return iso8601_z(), noduplicates()
  def do_author(self):
    from author import author
    return author(), noduplicates()
  def do_contributor(self):
    from author import author
    return author(), noduplicates()
  def do_copyright(self):
    from content import content
    return content(), noduplicates()
  def do_generator(self):
    from generator import generator
    return generator(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.15 2003/12/12 14:35:08 f8dy
fixed link rel=alternate logic to pass new "link not missing" tests
Revision 1.14 2003/12/12 11:30:39 rubys
Validate feed links
Revision 1.13 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.12 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.11 2003/12/11 20:13:58 f8dy
feed title, copyright, and tagline may be blank
Revision 1.10 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2003/12/11 04:50:53 f8dy
added test cases for invalid letters in urn NSS, fixed RE to match
Revision 1.7 2003/08/05 15:03:19 rubys
Handle complex (nested) content. Remove copy/paste error in handing
of copyright.
Revision 1.6 2003/08/05 14:03:23 rubys
Tagline is optional
Revision 1.5 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.4 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.3 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.2 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: validators.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
import re
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# Valid mime type
#
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+$')
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
  """Silently consumes an element and all of its descendants.

  Used to stop a single error from cascading into many follow-on errors.
  """
  def startElementNS(self, name, qname, attrs):
    # Every child gets its own eater, so arbitrarily deep content is eaten.
    child = eater()
    child.parent = self
    child.dispatcher = self.dispatcher
    self.push(child)
#
# This class simply eats html events. Identifies unsafe events
#
class htmlEater(validatorBase):
  """Consumes embedded html content while flagging tags considered unsafe."""
  def __init__(self, parent, element):
    # Remember which feed element's content is being scanned so events
    # can be reported against it, not against the html tag itself.
    self.parent = parent
    self.element = element
    validatorBase.__init__(self)
  def startElementNS(self, name, qname, attrs):
    nested = htmlEater(self.parent, self.element)
    nested.parent = self
    nested.dispatcher = self.dispatcher
    self.push(nested)
    # Map of unsafe tag names to the event class each one triggers.
    unsafe = {'script': ContainsScript,
              'meta':   ContainsMeta,
              'embed':  ContainsEmbed,
              'object': ContainsObject}
    if name in unsafe:
      self.log(unsafe[name]({"parent":self.parent.name, "element":self.element, "tag":name}))
  def endElementNS(self, name, qname):
    # Closing html tags need no processing.
    pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
_rdfStuffToIgnore = (('rdf', 'Description'),
('foaf', 'Person'),
('foaf', 'name'),
('rdfs', 'seeAlso'))
class text(validatorBase):
  """Element expected to contain only text.

  Child elements are reported as errors, except for a short list of
  RDF/FOAF constructs (_rdfStuffToIgnore) that are tolerated silently.
  Any child, reported or not, is then swallowed by an eater.
  """
  def startElementNS(self, name, qname, attrs):
    from base import namespaces
    ns = namespaces.get(qname, '')
    if (ns, name) not in _rdfStuffToIgnore:
      if ':' in name:
        # A colon with no resolvable namespace: the prefix was never declared.
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":name}))
      else:
        self.log(UndefinedElement({"parent":self.name, "element":name}))
    handler = eater()
    handler.parent = self
    handler.dispatcher = self.dispatcher
    self.push(handler)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
  """Allows no children and flags the element if a same-named sibling exists."""
  def startElementNS(self, name, qname, attrs):
    pass
  def prevalidate(self):
    if self.name not in self.parent.children:
      return
    self.log(DuplicateElement({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addresses - lax
#
class email_lax(text):
  """Loosely validated e-mail address (substring match is sufficient)."""
  email_re = re.compile('''([a-zA-Z0-9\_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)''')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.email_re.search(self.value):
      self.log(ValidContact(params))
    else:
      self.log(InvalidContact(params))
#
# valid e-mail addresses
#
class email(text):
  """Strictly validated e-mail address (whole value must match)."""
  email_re = re.compile('''([a-zA-Z0-9_\-\+\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$''')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.email_re.match(self.value):
      self.log(ValidContact(params))
    else:
      self.log(InvalidContact(params))
#
# iso639 language code
#
class iso639(text):
  """Content must be an ISO 639 language code, optionally with a subtag.

  Only the primary tag (the part before the first '-') is checked
  against the code table; any subtag is ignored.
  """
  def validate(self):
    import iso639codes
    lang = self.value.split('-', 1)[0]
    if lang in iso639codes.isoLang:
      self.log(ValidLanguage({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(InvalidLanguage({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso8601 dateTime
#
class iso8601(text):
  """Content must be an ISO 8601 / W3C-DTF date-time.

  Accepts any truncation from a bare year down to fractional seconds
  with an optional timezone.  Returns 1 (truthy) on success so that
  subclasses can layer timezone-specific checks on top; returns None
  after logging an error otherwise.
  """
  iso8601_re = re.compile("\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
                          "(Z|([+-]\d\d:\d\d))?)?)?)?$")
  def validate(self):
    # Overall shape first; the checks below only refine field ranges.
    if not self.iso8601_re.match(self.value):
      self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    work=self.value.split('T')
    date=work[0].split('-')
    year=int(date[0])
    if len(date)>1:
      month=int(date[1])
      try:
        import calendar
        # monthrange raises for an out-of-range month, caught below.
        numdays=calendar.monthrange(year,month)[1]
      except:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      if len(date)>2 and int(date[2])>numdays:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
    if len(work) > 1:
      # Strip any timezone suffix before range-checking hh:mm:ss fields.
      time=work[1].split('Z')[0].split('+')[0].split('-')[0]
      time=time.split(':')
      if int(time[0])>23:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      # NOTE(review): minutes up to 60 are accepted here, presumably for
      # leap seconds -- confirm whether 60 should really be allowed.
      if len(time)>1 and int(time[1])>60:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      if len(time)>2 and float(time[2])>60.0:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
    self.log(ValidW3DTFDate({"parent":self.parent.name, "element":self.name}))
    return 1
class iso8601_z(iso8601):
  """W3C-DTF date that must carry a timezone, preferably UTC ('Z')."""
  tz_re = re.compile("Z|([+-]\d\d:\d\d)$")
  def validate(self):
    # Only run the timezone checks when the base date itself is valid.
    if not iso8601.validate(self):
      return
    if not self.tz_re.search(self.value):
      self.log(W3DTFDateNoTimezone({"parent":self.parent.name, "element":self.name, "value":self.value}))
    elif 'Z' not in self.value:
      self.log(W3DTFDateNonUTC({"parent":self.parent.name, "element":self.name, "value":self.value}))
class iso8601_l(iso8601):
  """W3C-DTF date expected to be in local time (no 'Z' designator)."""
  def validate(self):
    # Only run the local-time check when the base date itself is valid.
    if not iso8601.validate(self):
      return
    if 'Z' in self.value:
      self.log(W3DTFDateNonLocal({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
  """Content must be a fully qualified (non-relative) RFC 2396 URI.

  tag: and urn: URIs get scheme-specific validation; http:/ftp: URIs
  must additionally have exactly two slashes and a non-empty authority;
  anything else is checked against a general absolute-URI pattern.
  """
  # rfc2396_re = re.compile("(([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
  rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
  urn_re = re.compile(r"^urn:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
  tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,#]+$")
  def _report(self, logClass, extraParams):
    # Log logClass with the standard parent/element/value parameters
    # merged with any caller-supplied extras.  Factored out of validate()
    # where this triple was repeated six times.
    logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
    logparams.update(extraParams)
    self.log(logClass(logparams))
  def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
    success = 0
    if self.value.startswith('tag:'):
      if self.tag_re.match(self.value):
        success = 1
        self._report(ValidTAG, extraParams)
      else:
        self._report(InvalidTAG, extraParams)
    elif self.value.startswith('urn:'):
      if self.urn_re.match(self.value):
        success = 1
        self._report(ValidURN, extraParams)
      else:
        self._report(InvalidURN, extraParams)
    elif (not self.value) or (not self.rfc2396_re.match(self.value)):
      self._report(errorClass, extraParams)
    elif self.value.startswith('http:') or self.value.startswith('ftp:'):
      # Full http/ftp URIs require exactly two slashes and an authority.
      if re.match('^\w+://[^/].*', self.value):
        success = 1
      else:
        self._report(errorClass, extraParams)
    else:
      success = 1
    if success:
      self._report(successClass, extraParams)
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
  """Content must be an RFC 822 date-time (with the 4-digit-year extension)."""
  rfc822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), *)?" +
    "\d\d? +((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
    "(Nov)|(Dec)) +\d\d(\d\d)? +\d\d:\d\d(:\d\d)? +(([+-]?\d\d\d\d)|" +
    "(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|\w)$")
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.rfc822_re.match(self.value):
      self.log(ValidRFC2822Date(params))
    else:
      self.log(InvalidRFC2822Date(params))
#
# Decode html entityrefs
#
from htmlentitydefs import entitydefs
def decodehtml(data):
  """Decode html character and entity references in data.

  Decimal references (&#65; or the lax bare form &65;) become the
  corresponding character; named references are resolved through
  entitydefs; anything unrecognized is passed through unchanged.

  Fixes a bug in the previous version: the '#' was outside the capture
  group, so unrecognized numeric-style references (e.g. the hex form
  &#x41;) were reconstructed without it, corrupting the text to &x41;.
  """
  chunks=re.split('&(#?\w+);', data)
  # Odd indices hold the captured reference bodies, even indices the
  # literal text between them.
  for i in range(1,len(chunks),2):
    ref = chunks[i]
    if ref[:1] == '#':
      num = ref[1:]
    else:
      num = ref
    if num.isdigit():
      chunks[i]=chr(int(num))
    elif ref in entitydefs:
      chunks[i]=entitydefs[ref]
    else:
      # Unknown reference: reproduce it verbatim, keeping any '#'.
      chunks[i]='&' + ref +';'
  return "".join(map(str,chunks))
#
# Scan HTML for relative URLs
#
#class absUrlMixin:
# anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))\s*>', re.IGNORECASE)
# img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
# absref_re = re.compile("\w+:")
# def validateAbsUrl(self,value):
# refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
# for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
# if not self.absref_re.match(decodehtml(ref)):
# self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
  """Mixin that scans raw markup text for tags considered unsafe."""
  scriptTag_re = re.compile("<script[>\s]", re.IGNORECASE)
  metaTag_re = re.compile("<meta[>\s]", re.IGNORECASE)
  embedTag_re = re.compile("<embed[>\s]", re.IGNORECASE)
  objectTag_re = re.compile("<object[>\s]", re.IGNORECASE)
  def validateSafe(self,value):
    # Each unsafe tag found raises its corresponding event; more than one
    # kind of tag can be reported for the same value.
    checks = ((self.scriptTag_re, ContainsScript, "script"),
              (self.metaTag_re,   ContainsMeta,   "meta"),
              (self.embedTag_re,  ContainsEmbed,  "embed"),
              (self.objectTag_re, ContainsObject, "object"))
    for pattern, event, tag in checks:
      if pattern.search(value):
        self.log(event({"parent":self.parent.name, "element":self.name, "tag":tag}))
class safeHtml(text, safeHtmlMixin):#,absUrlMixin):
  """Text element whose content is additionally screened for unsafe tags."""
  def validate(self):
    self.validateSafe(self.value)
#    self.validateAbsUrl(self.value)
#
# Elements for which html is discouraged, also checks for relative URLs
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
  """Element in which html markup is discouraged.

  A closing tag anywhere in the value is taken as evidence of html.
  The value is also screened for unsafe tags.
  """
  htmlEndTag_re = re.compile("</\w+>")
  def validate(self):
    if self.htmlEndTag_re.search(self.value):
      self.log(ContainsHTML({"parent":self.parent.name, "element":self.name}))
    self.validateSafe(self.value)
class positiveInteger(text):
  """Content must parse as an integer strictly greater than zero."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      if int(self.value) > 0:
        self.log(ValidInteger(params))
      else:
        # Zero or negative: funnel into the same error path as non-numeric.
        raise ValueError
    except ValueError:
      self.log(InvalidInteger(params))
#
# mixin to validate URL in attribute
#
class httpURLMixin:
  """Mixin validating that a given attribute holds an http URL."""
  http_re = re.compile("http://", re.IGNORECASE)
  def validateHttpURL(self, ns, attr):
    value = self.attrs[(ns, attr)]
    if self.http_re.search(value):
      self.log(ValidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
    else:
      self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
class rdfResourceURI(rfc2396):
  """Requires an rdf:resource attribute whose value is a valid URI."""
  def validate(self):
    if (rdfNS, 'resource') in self.attrs.getNames():
      # Validate the attribute value using the inherited URI rules.
      self.value=self.attrs.getValue((rdfNS, 'resource'))
      rfc2396.validate(self)
    else:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
  """Requires an rdf:about attribute; its value is checked as a URI."""
  def startElementNS(self, name, qname, attrs):
    pass
  def validate(self):
    if (rdfNS, 'about') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
      return
    # Hand the attribute value to a throwaway rfc2396 checker posing as
    # this element, so errors are reported in the right place.
    checker=rfc2396()
    checker.parent=self
    checker.dispatcher=self.dispatcher
    checker.name=self.name
    checker.value=self.attrs.getValue((rdfNS, 'about'))
    checker.validate()
class nonblank(text):
  """Content must be non-empty."""
  def validate(self, errorClass=NotBlank, extraParams={}):
    if self.value:
      return
    logparams={"parent":self.parent.name,"element":self.name}
    logparams.update(extraParams)
    self.log(errorClass(logparams))
class unique(nonblank):
  """Non-blank value that must also be unique within a given scope.

  Previously seen values are stashed on the scope object under the
  pluralized element name (e.g. 'guid' -> scope.guids).
  """
  def __init__(self, name, scope):
    self.name=name
    self.scope=scope
    nonblank.__init__(self)
    self.scope.__dict__.setdefault(name+'s', [])
  def validate(self):
    nonblank.validate(self)
    seen = self.scope.__dict__[self.name+'s']
    if self.value in seen:
      self.log(DuplicateValue({"parent":self.parent.name, "element":self.name,"value":self.value}))
    else:
      seen.append(self.value)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.71 2003/12/13 21:39:48 f8dy
added test case for tags with dashes or digits
Revision 1.70 2003/12/12 20:37:05 f8dy
oops, URNs can contain letters after all
Revision 1.69 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.68 2003/12/12 11:25:56 rubys
Validate mime type in link tags
Revision 1.67 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.66 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.65 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.64 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.63 2003/12/11 06:00:51 f8dy
added tag: testcases, passed
Revision 1.62 2003/12/11 04:50:53 f8dy
added test cases for invalid letters in urn NSS, fixed RE to match
Revision 1.61 2003/10/16 15:54:41 rubys
Detect duplicate channels
Revision 1.60 2003/10/16 15:42:36 rubys
Fix regression, allowing the relative URL tests inside xhtml to pass
again.
Revision 1.59 2003/09/18 18:57:31 f8dy
fixed typo in htmlEater
Revision 1.58 2003/09/13 00:16:43 f8dy
change check for relative references to be compatible with pyxml
Revision 1.57 2003/08/24 00:05:34 f8dy
removed iframe tests, after further discussion this is not enough of a security risk to keep feeds from validating
Revision 1.56 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.55 2003/08/11 21:39:39 rubys
Support for rdf:About elements caused a regression whereby spurious
error messages were generated for missing titles for RSS 1.0 feeds.
Revision 1.54 2003/08/10 13:49:14 rubys
Add support for chanel and item level rdf:about. Ensure that http and
ftp URLs have exactly two slashes after the scheme.
Revision 1.53 2003/08/04 01:59:33 rubys
Full http and ftp URIs require two slashes
Revision 1.52 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.51 2003/08/04 00:03:14 rubys
Implement more strict email check for pie
Revision 1.50 2003/07/30 01:33:31 f8dy
tightened up test cases, added explicit parent checks, changed negative tests to positive
Revision 1.49 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.48 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.47 2003/07/29 16:44:56 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.46 2003/07/29 16:14:21 rubys
Validate urns
Revision 1.45 2003/07/29 15:46:31 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.44 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.43 2003/07/13 00:32:13 rubys
Don't bother checking for local/UTC unless the date is valid...
Revision 1.42 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.41 2003/07/07 20:33:50 rubys
Unicode in HTML problem
Revision 1.40 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.39 2003/07/07 02:44:13 rubys
Further progress towards pie
Revision 1.38 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.37 2003/02/25 22:50:20 rubys
allow urls to be html entity encoded
Revision 1.36 2002/11/10 14:32:53 rubys
it is foaf:Person (not foaf:person)
Revision 1.35 2002/11/03 23:33:44 rubys
Noduplicates validator was causing the handler stack to get
momentarily out of synch
Revision 1.34 2002/11/03 22:46:41 rubys
Patch from Christian Schmidt:
"According to RFC-822 section 3.4.2 multiple white-space characters are
treated as one."
Revision 1.33 2002/10/30 15:44:48 rubys
Improve error messages for relative references: error message should
be gramatically correct. Remove "hidden" fields prevented duplicate
errors from being flagged as such.
Revision 1.32 2002/10/30 09:18:08 rubys
Double encoded &'s in query strings cause mutlple '#' to exist in a URL
Revision 1.31 2002/10/27 22:09:41 rubys
src need not be the last attribute in an <img>
Revision 1.30 2002/10/27 18:54:30 rubys
Issue warnings for relative references in descriptions
Revision 1.29 2002/10/25 15:08:15 rubys
Minor cleanup. It is zero or one occurances of a double slash. Also make
it clear that this routine has been repurposed to be a non-relative URI.
Reinstated the original regex which includes relative URIs as a comment.
Revision 1.28 2002/10/24 18:24:36 rubys
Prevent mere mention of <scriptingNews> from causing an error to be flagged.
http://radio.weblogs.com/0001018/2002/10/24.html#a1760
Revision 1.27 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.26 2002/10/24 14:05:06 f8dy
refactored simpleText() to include list of RDF stuff to ignore
Revision 1.25 2002/10/23 14:47:18 f8dy
added test cases for email address in parentheses (and passed)
Revision 1.24 2002/10/22 20:11:19 f8dy
added test case for RFC 822 date with no seconds (and passed)
Revision 1.23 2002/10/22 19:20:54 f8dy
passed testcase for foaf:person within dc:creator (or any other text
element)
Revision 1.22 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.21 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.20 2002/10/22 13:06:41 f8dy
fixed bug with links containing commas
Revision 1.19 2002/10/20 13:36:59 rubys
Permit rdf:Description anywhere text is allowed
Revision 1.18 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.17 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.16 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.15 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
#$Id
####
# Copyright 2000,2001 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
"""Timeout Socket
This module enables a timeout mechanism on all TCP connections. It
does this by inserting a shim into the socket module. After this module
has been imported, all socket creation goes through this shim. As a
result, every TCP connection will support a timeout.
The beauty of this method is that it immediately and transparently
enables the entire python library to support timeouts on TCP sockets.
As an example, if you wanted to SMTP connections to have a 20 second
timeout:
import timeoutsocket
import smtplib
timeoutsocket.setDefaultSocketTimeout(20)
The timeout applies to the socket functions that normally block on
execution: read, write, connect, and accept. If any of these
operations exceeds the specified timeout, the exception Timeout
will be raised.
The default timeout value is set to None. As a result, importing
this module does not change the default behavior of a socket. The
timeout mechanism only activates when the timeout has been set to
a numeric value. (This behavior mimics the behavior of the
select.select() function.)
This module implements two classes: TimeoutSocket and TimeoutFile.
The TimeoutSocket class defines a socket-like object that attempts to
avoid the condition where a socket may block indefinitely. The
TimeoutSocket class raises a Timeout exception whenever the
current operation delays too long.
The TimeoutFile class defines a file-like object that uses the TimeoutSocket
class. When the makefile() method of TimeoutSocket is called, it returns
an instance of a TimeoutFile.
Each of these objects adds two methods to manage the timeout value:
get_timeout() --> returns the timeout of the socket or file
set_timeout() --> sets the timeout of the socket or file
As an example, one might use the timeout feature to create httplib
connections that will timeout after 30 seconds:
import timeoutsocket
import httplib
H = httplib.HTTP("www.python.org")
H.sock.set_timeout(30)
Note: When used in this manner, the connect() routine may still
block because it happens before the timeout is set. To avoid
this, use the 'timeoutsocket.setDefaultSocketTimeout()' function.
Good Luck!
"""
__version__ = "$Revision: 4 $"
__author__ = "Timothy O'Malley <timo@alum.mit.edu>"
#
# Imports
#
import select, string
import socket
if not hasattr(socket, "_no_timeoutsocket"):
_socket = socket.socket
else:
_socket = socket._no_timeoutsocket
#
# Set up constants to test for Connected and Blocking operations.
# We delete 'os' and 'errno' to keep our namespace clean(er).
# Thanks to Alex Martelli and G. Li for the Windows error codes.
#
import os
if os.name == "nt":
_IsConnected = ( 10022, 10056 )
_ConnectBusy = ( 10035, )
_AcceptBusy = ( 10035, )
else:
import errno
_IsConnected = ( errno.EISCONN, )
_ConnectBusy = ( errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK )
_AcceptBusy = ( errno.EAGAIN, errno.EWOULDBLOCK )
del errno
del os
#
# Default timeout value for ALL TimeoutSockets
#
_DefaultTimeout = None
def setDefaultSocketTimeout(timeout):
    """Set the timeout applied to all subsequently created TimeoutSockets."""
    global _DefaultTimeout
    _DefaultTimeout = timeout
def getDefaultSocketTimeout():
    """Return the module-wide default timeout (None means no timeout)."""
    return _DefaultTimeout
#
# Exceptions for socket errors and timeouts
#
Error = socket.error
class Timeout(Exception):
    """Raised when a blocking socket operation exceeds its timeout."""
    pass
#
# Factory function
#
from socket import AF_INET, SOCK_STREAM
def timeoutsocket(family=AF_INET, type=SOCK_STREAM, proto=None):
    """Drop-in replacement for socket.socket().

    TCP sockets (AF_INET/SOCK_STREAM) are wrapped in a TimeoutSocket
    carrying the module default timeout; anything else is returned as a
    plain socket, untouched.
    """
    if family == AF_INET and type == SOCK_STREAM:
        return TimeoutSocket( _socket(family, type), _DefaultTimeout )
    if proto:
        return _socket(family, type, proto)
    return _socket(family, type)
# end timeoutsocket
#
# The TimeoutSocket class definition
#
class TimeoutSocket:
    """TimeoutSocket object
    Implements a socket-like object that raises Timeout whenever
    an operation takes too long.
    The definition of 'too long' can be changed using the
    set_timeout() method.
    """
    # Number of outstanding makefile() copies; close() only closes the
    # real socket once the last copy is gone.
    _copies = 0
    # Mirrors the blocking mode requested via setblocking().
    _blocking = 1
    def __init__(self, sock, timeout):
        self._sock = sock
        self._timeout = timeout
    # end __init__
    def __getattr__(self, key):
        # Delegate everything not overridden here to the real socket.
        return getattr(self._sock, key)
    # end __getattr__
    def get_timeout(self):
        return self._timeout
    # end set_timeout
    def set_timeout(self, timeout=None):
        self._timeout = timeout
    # end set_timeout
    def setblocking(self, blocking):
        self._blocking = blocking
        return self._sock.setblocking(blocking)
    # end set_timeout
    def connect_ex(self, addr):
        # Like socket.connect_ex(): return an error code instead of raising.
        errcode = 0
        try:
            self.connect(addr)
        except Error, why:
            errcode = why[0]
        return errcode
    # end connect_ex
    def connect(self, addr, port=None, dumbhack=None):
        # Connect with a timeout: try non-blocking first, then wait in
        # select() and retry once (dumbhack marks the retry pass).
        # In case we were called as connect(host, port)
        if port != None: addr = (addr, port)
        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking
        # First, make a non-blocking call to connect
        try:
            sock.setblocking(0)
            sock.connect(addr)
            sock.setblocking(blocking)
            return
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)
            # If we are not blocking, re-raise
            if not blocking:
                raise
            # If we are already connected, then return success.
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if dumbhack and errcode in _IsConnected:
                return
            elif errcode not in _ConnectBusy:
                raise
        # Now, wait for the connect to happen
        # ONLY if dumbhack indicates this is pass number one.
        # If select raises an error, we pass it on.
        # Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([], [sock], [], timeout)
            if w:
                return self.connect(addr, dumbhack=1)
        # If we get here, then we should raise Timeout
        raise Timeout("Attempted connect to %s timed out." % str(addr) )
    # end connect
    def accept(self, dumbhack=None):
        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking
        # First, make a non-blocking call to accept
        #  If we get a valid result, then convert the
        #  accept'ed socket into a TimeoutSocket.
        # Be careful about the blocking mode of ourselves.
        try:
            sock.setblocking(0)
            newsock, addr = sock.accept()
            sock.setblocking(blocking)
            timeoutnewsock = self.__class__(newsock, timeout)
            timeoutnewsock.setblocking(blocking)
            return (timeoutnewsock, addr)
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)
            # If we are not supposed to block, then re-raise
            if not blocking:
                raise
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if errcode not in _AcceptBusy:
                raise
        # Now, wait for the accept to happen
        # ONLY if dumbhack indicates this is pass number one.
        # If select raises an error, we pass it on.
        # Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([sock], [], [], timeout)
            if r:
                return self.accept(dumbhack=1)
        # If we get here, then we should raise Timeout
        raise Timeout("Attempted accept timed out.")
    # end accept
    def send(self, data, flags=0):
        # Wait (up to the timeout) for the socket to become writable.
        sock = self._sock
        if self._blocking:
            r,w,e = select.select([],[sock],[], self._timeout)
            if not w:
                raise Timeout("Send timed out")
        return sock.send(data, flags)
    # end send
    def recv(self, bufsize, flags=0):
        # Wait (up to the timeout) for the socket to become readable.
        sock = self._sock
        if self._blocking:
            r,w,e = select.select([sock], [], [], self._timeout)
            if not r:
                raise Timeout("Recv timed out")
        return sock.recv(bufsize, flags)
    # end recv
    def makefile(self, flags="r", bufsize=-1):
        self._copies = self._copies +1
        return TimeoutFile(self, flags, bufsize)
    # end makefile
    def close(self):
        if self._copies <= 0:
            self._sock.close()
        else:
            self._copies = self._copies -1
    # end close
# end TimeoutSocket
class TimeoutFile:
    """TimeoutFile object
    Implements a file-like object on top of TimeoutSocket.
    """
    def __init__(self, sock, mode="r", bufsize=4096):
        self._sock = sock
        # Default chunk size; any positive bufsize argument overrides it.
        self._bufsize = 4096
        if bufsize > 0: self._bufsize = bufsize
        # The read-ahead buffer lives on the socket so all copies share it.
        if not hasattr(sock, "_inqueue"): self._sock._inqueue = ""
    # end __init__
    def __getattr__(self, key):
        # Delegate anything not defined here to the underlying TimeoutSocket.
        return getattr(self._sock, key)
    # end __getattr__
    def close(self):
        self._sock.close()
        self._sock = None
    # end close
    def write(self, data):
        self.send(data)
    # end write
    def read(self, size=-1):
        """Read up to size bytes (to EOF when size is negative)."""
        _sock = self._sock
        _bufsize = self._bufsize
        # Accumulate recv'd chunks in _inqueue until size bytes (or EOF).
        while 1:
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf
        data = _sock._inqueue
        _sock._inqueue = ""
        # Push any excess beyond size back into the shared buffer.
        if size > 0 and datalen > size:
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end read
    def readline(self, size=-1):
        """Read one line (newline kept), honoring the size limit like read()."""
        _sock = self._sock
        _bufsize = self._bufsize
        while 1:
            # Stop as soon as the buffer holds a newline.
            idx = string.find(_sock._inqueue, "\n")
            if idx >= 0:
                break
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf
        data = _sock._inqueue
        _sock._inqueue = ""
        # Return only up to (and including) the newline, or up to size;
        # the remainder goes back into the shared buffer.
        if idx >= 0:
            idx = idx + 1
            _sock._inqueue = data[idx:]
            data = data[:idx]
        elif size > 0 and datalen > size:
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end readline
    def readlines(self, sizehint=-1):
        """Read to EOF and return a list of lines, each keeping its newline."""
        result = []
        data = self.read()
        while data:
            idx = string.find(data, "\n")
            if idx >= 0:
                idx = idx + 1
                result.append( data[:idx] )
                data = data[idx:]
            else:
                result.append( data )
                data = ""
        return result
    # end readlines
    def flush(self):  pass
# end TimeoutFile
#
# Silently replace the socket() builtin function with
# our timeoutsocket() definition.
#
# Stash the original constructor once, so re-importing this module does
# not wrap the wrapper.
if not hasattr(socket, "_no_timeoutsocket"):
    socket._no_timeoutsocket = socket.socket
    socket.socket = timeoutsocket
# Rebind this module's name 'socket' from the module object to the
# factory, so 'from timeoutsocket import socket' also yields the shim.
del socket
socket = timeoutsocket
# Finis
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: base.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
namespaces = {
"http://webns.net/mvcb/": "admin",
"http://purl.org/rss/1.0/modules/aggregation/": "ag",
"http://purl.org/rss/1.0/modules/annotate/": "annotate",
"http://media.tangent.org/rss/1.0/": "audio",
"http://backend.userland.com/blogChannelModule": "blogChannel",
"http://web.resource.org/cc/": "cc",
"http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
"http://purl.org/rss/1.0/modules/company": "company",
"http://purl.org/rss/1.0/modules/content/": "content",
"http://my.theinfo.org/changed/1.0/rss/": "cp",
"http://purl.org/dc/elements/1.1/": "dc",
"http://purl.org/dc/terms/": "dcterms",
"http://purl.org/rss/1.0/modules/email/": "email",
"http://purl.org/rss/1.0/modules/event/": "ev",
"http://purl.org/rss/1.0/modules/image/": "image",
"http://xmlns.com/foaf/0.1/": "foaf",
"http://purl.org/rss/1.0/modules/link/": "l",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/2000/01/rdf-schema#": "rdfs",
"http://purl.org/rss/1.0/modules/reference/": "ref",
"http://purl.org/rss/1.0/modules/richequiv/": "reqv",
"http://purl.org/rss/1.0/modules/rss091#": "rss091",
"http://purl.org/rss/1.0/modules/search/": "search",
"http://purl.org/rss/1.0/modules/slash/": "slash",
"http://purl.org/rss/1.0/modules/servicestatus/": "ss",
"http://hacks.benhammersley.com/rss/streaming/": "str",
"http://purl.org/rss/1.0/modules/subscription/": "sub",
"http://purl.org/rss/1.0/modules/syndication/": "sy",
"http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
"http://purl.org/rss/1.0/modules/threading/": "thr",
"http://purl.org/rss/1.0/modules/wiki/": "wiki",
"http://schemas.xmlsoap.org/soap/envelope/": "soap",
"http://purl.org/atom/ns#": "atom",
"http://www.w3.org/1999/xhtml": "xhtml",
}
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
  """Top-level SAX content handler.

  Maintains a pushdown stack of the *real* content handlers and forwards
  each SAX event to every handler on the top of the stack.  Also records
  the last known parse position and accumulates validation events.
  """
  # when true, repeated occurrences of an equivalent event are folded into
  # one entry whose msgcount is incremented (see log())
  firstOccurrenceOnly = 0
  def __init__(self):
    from root import root
    ContentHandler.__init__(self)
    self.lastKnownLine = 0      # last line number seen from the locator
    self.lastKnownColumn = 0    # last column number seen from the locator
    self.loggedEvents = []      # all validation events logged so far
    self.feedType = 0           # set via setFeedType() once detected
    self.handler_stack=[[root(self)]]
  def setDocumentLocator(self, locator):
    # keep a reference so events can be tagged with line/column positions
    self.locator = locator
    ContentHandler.setDocumentLocator(self, self.locator)
  def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
    self.firstOccurrenceOnly = firstOccurrenceOnly
  def startPrefixMapping(self, prefix, uri):
    # warn when a well-known namespace is bound to a nonstandard prefix, or
    # when a conventional prefix is bound to some other namespace
    if namespaces.has_key(uri):
      if not namespaces[uri] == prefix and prefix:
        from logging import NonstdPrefix
        self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))
    elif prefix in namespaces.values():
      from logging import ReservedPrefix
      preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
      self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
  def startElementNS(self, name, qname, attrs):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    # SAX delivers name as a (namespaceURI, localname) tuple; unpack it
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.startElementNS(name, qname, attrs)
  def resolveEntity(self, publicId, systemId):
    # Only Netscape's RSS 0.91 DOCTYPE is tolerated; any other external
    # (SYSTEM) entity is logged as a problem.  Either way an empty stream
    # is returned so the entity is never actually fetched.
    if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
        systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
      from logging import ValidDoctype
      self.log(ValidDoctype({}))
    else:
      from logging import ContainsSystemEntity
      self.lastKnownLine = self.locator.getLineNumber()
      self.lastKnownColumn = self.locator.getColumnNumber()
      self.log(ContainsSystemEntity({}))
    from StringIO import StringIO
    return StringIO()
  def characters(self, string):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    for handler in iter(self.handler_stack[-1]):
      handler.characters(string)
  def endElementNS(self, name, qname):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.endElementNS(name, qname)
    # pop the handlers that were pushed for this element
    del self.handler_stack[-1]
  def push(self, handler):
    # normalize to a list: one element may be validated by several handlers
    try:
      iter(handler)
    except:
      handler = [handler]
    self.handler_stack.append(handler)
  def log(self, event):
    # Record a validation event, annotated with its parse position.  When
    # firstOccurrenceOnly is set, a repeat of an already-logged event just
    # bumps the original's msgcount instead of being appended again.
    def findDuplicate(self, event):
      # a duplicate is a previously logged event of the same class whose
      # params all match (the 'value' param is deliberately ignored)
      duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
      for dup in duplicates:
        for k, v in event.params.items():
          if k != 'value':
            if not k in dup.params or dup.params[k] != v: break
        else:
          return dup
    if event.params.has_key('element') and event.params['element']:
      # internal element names use '_' where the document used ':'; undo it
      event.params['element'] = event.params['element'].replace('_', ':')
    if self.firstOccurrenceOnly:
      dup = findDuplicate(self, event)
      if dup:
        dup.params['msgcount'] = dup.params['msgcount'] + 1
        return
      event.params['msgcount'] = 1
    try:
      line = self.locator.getLineNumber()
      backupline = self.lastKnownLine
      column = self.locator.getColumnNumber()
      backupcolumn = self.lastKnownColumn
    except AttributeError:
      # no usable locator (e.g. SAX raised UnicodeError with a broken
      # locator -- see revision history); fall back to zeros
      line = backupline = column = backupcolumn = 0
    event.params['line'] = line
    event.params['backupline'] = backupline
    event.params['column'] = column
    event.params['backupcolumn'] = backupcolumn
    self.loggedEvents.append(event)
  def error(self, exception):
    # record the parse error, then re-raise to abort parsing
    from logging import SAXError
    self.log(SAXError({'exception':str(exception)}))
    raise exception
  fatalError=error
  warning=error
  def setFeedType(self, feedType):
    self.feedType = feedType
  def getFeedType(self):
    return self.feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handing such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
class validatorBase(ContentHandler):
  """Base class for per-element content handlers.

  Tracks this element's name, its accumulated character data (value), and
  the names of child elements seen so far.  Subclasses declare do_<name>
  methods returning the handler(s) for each supported child element, and
  may override prevalidate() (called at the start tag) and validate()
  (called at the end tag).
  """
  # namespace URIs that are treated as "no namespace" for dispatch purposes
  defaultNamespaces = []
  def __init__(self):
    ContentHandler.__init__(self)
    self.value = ""        # accumulated character data for this element
    self.attrs = None      # this element's attributes (assigned by parent)
    self.children = []     # names of child elements processed so far
    self.isValid = 1       # cleared by log(); gates the ValidElement event
    self.name = None       # this element's (possibly prefix-mangled) name
  def unknown_starttag(self, name, qname, attrs):
    # default for elements in unknown namespaces: silently consume them
    from validators import eater
    return eater()
  def startElementNS(self, name, qname, attrs):
    from validators import eater
    if qname in self.defaultNamespaces: qname=None
    hasNS = (qname<>None)
    # elements from known extension modules are renamed "prefix_name" and
    # then dispatched like ordinary children via do_prefix_name methods
    if namespaces.has_key(qname):
      qname, name = None, namespaces[qname] + "_" + name
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    if qname:
      # element lives in an unrecognized namespace
      handler = self.unknown_starttag(name, qname, attrs)
    else:
      try:
        handler = getattr(self, "do_" + name)()
      except AttributeError:
        if name.find(':') != -1:
          from logging import MissingNamespace
          self.log(MissingNamespace({"parent":self.name, "element":name}))
        elif not hasNS:
          from logging import UndefinedElement
          self.log(UndefinedElement({"parent":self.name, "element":name}))
        handler = eater()
    # a do_* method may return either one handler or a sequence of them
    try:
      iter(handler)
    except TypeError:
      handler = [handler]
    for aHandler in iter(handler):
      aHandler.parent = self
      aHandler.dispatcher = self.dispatcher
      aHandler.value = ""
      aHandler.name = name
      aHandler.attrs = attrs
      aHandler.prevalidate()
    # MAP - always append name, even if already exists (we need this to
    # check for too many hour elements in skipHours, and it doesn't
    # hurt anything else)
    self.children.append(name)
    self.push(handler)
  def endElementNS(self, name, qname):
    self.value=self.value.strip()
    self.validate()
    # log() clears isValid, so reaching here with it set means no problems
    if self.isValid and self.name:
      from validators import ValidElement
      self.log(ValidElement({"parent":self.parent.name, "element":name}))
  def characters(self, string):
    self.value = self.value + string
  def log(self, event):
    # delegate to the dispatcher and mark this element as invalid
    self.dispatcher.log(event)
    self.isValid = 0
  def setFeedType(self, feedType):
    self.dispatcher.setFeedType(feedType)
  def push(self, handler):
    self.dispatcher.push(handler)
  def leaf(self):
    # convenience handler for simple text-only children
    from validators import text
    return text()
  def prevalidate(self):
    # hook: called after attributes are assigned, before any children
    pass
  def validate(self):
    # hook: called at the end tag, after self.value has been stripped
    pass
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.41 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.40 2003/08/23 23:25:14 rubys
Allow unprefixed elements (like xhtml) to pass through without warning
Revision 1.39 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.38 2003/08/12 02:02:26 rubys
Detect unknown elements even if they have underscores. Reported by
Brent Simmons.
Revision 1.37 2003/08/09 18:18:03 rubys
Permit NetScape's 0.91 DOCTYPE
Revision 1.36 2003/08/05 05:32:35 f8dy
0.2 snapshot - change version number and default namespace
Revision 1.35 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.34 2003/07/28 21:56:52 rubys
Check attributes for valid namespaces
Revision 1.33 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.32 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.31 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.30 2003/04/07 19:49:22 rubys
Handle ignorable whitespace in elements such as comments
Revision 1.29 2003/03/01 13:53:22 rubys
Improved duplicate checking
Revision 1.28 2002/12/20 13:26:00 rubys
CreativeCommons support
Revision 1.27 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.26 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.25 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.24 2002/10/24 13:55:58 f8dy
added rdfs namespace
Revision 1.23 2002/10/22 19:20:54 f8dy
passed testcase for foaf:person within dc:creator (or any other text
element)
Revision 1.22 2002/10/22 12:57:35 f8dy
fixed bug setting parameters for ReservedPrefix error
Revision 1.21 2002/10/18 20:31:28 f8dy
fixed namespace for mod_aggregation
Revision 1.20 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: content.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
class content(validatorBase,safeHtmlMixin):
  """Validates an Atom content construct: checks the mode and type
  attributes, then checks the payload against the declared mode (inline
  xml, escaped, or base64), including nested multipart/alternative parts.
  """
  from validators import mime_re
  htmlEndTag_re = re.compile("</\w+>")
  # MIME types treated as HTML content
  HTMLTYPES = ('text/html', 'application/xhtml+xml')
  def prevalidate(self):
    # defaults when the attributes are absent
    self.mode='xml'
    self.type='text/plain'
    self.mixed=0          # set when unexpected child markup is seen
    self.multitypes=[]    # content types seen inside multipart/alternative
    if self.attrs.has_key((None,"mode")):
      self.mode=self.attrs.getValue((None,"mode"))
    if self.attrs.has_key((None,"type")):
      self.type=self.attrs.getValue((None,"type"))
    if not self.mode in ['xml','escaped','base64']:
      self.log(InvalidContentMode({"parent":self.parent.name, "element":self.name, "mode":self.mode}))
    else:
      self.log(ValidContentMode({"parent":self.parent.name, "element":self.name, "mode":self.mode}))
    # if self.type == None:
    #   self.log(NoMIMEType({"parent":self.parent.name, "element":self.name}))
    if not self.mime_re.match(self.type):
      self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    else:
      self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
  def validate(self):
    if self.mode == 'base64':
      import base64
      try:
        base64.decodestring(self.value)
      except:
        self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
    elif self.mode == 'xml':
      import re
      # a literal end tag surviving in the collected text means the markup
      # was escaped rather than truly inline
      if self.htmlEndTag_re.search(self.value):
        if self.type in self.HTMLTYPES:
          self.log(NotInline({"parent":self.parent.name, "element":self.name,"value":self.value}))
        else:
          self.log(ContainsUndeclaredHTML({"parent":self.parent.name, "element":self.name, "value":self.value}))
    elif self.mode == 'escaped':
      if self.type in self.HTMLTYPES:
        # declared HTML: must be both safe and parseable
        self.validateSafe(self.value)
        from HTMLParser import HTMLParser, HTMLParseError
        try:
          p=HTMLParser()
          p.feed(self.value)
          p.close()
          self.log(ValidHtml({"parent":self.parent.name, "element":self.name,"value":self.value}))
        except HTMLParseError:
          import sys
          self.log(NotHtml({"parent":self.parent.name, "element":self.name,"value":self.value, "message": sys.exc_info()[1].msg}))
      else:
        # not declared as HTML, so it must not contain any
        if self.htmlEndTag_re.search(self.value):
          self.log(ContainsUndeclaredHTML({"parent":self.parent.name, "element":self.name, "value":self.value}))
    if self.type == 'multipart/alternative':
      if len(self.children)==0:
        self.log(MultipartMissing({"parent":self.parent.name, "element":self.name}))
  def startElementNS(self, name, qname, attrs):
    # multipart/alternative may only contain nested content elements, each
    # with a distinct, non-multipart type
    if self.type == 'multipart/alternative':
      if name<>'content':
        self.log(MultipartInvalid({"parent":self.parent.name, "element":self.name, "name":name}))
      else:
        validatorBase.startElementNS(self, name, qname, attrs)
        if attrs.has_key((None,'type')):
          type=attrs.getValue((None,'type'))
          if type=='multipart/alternative':
            self.log(MultipartRecursion({"parent":self.parent.name, "element":self.name, "name":name}))
          if type in self.multitypes:
            self.log(MultipartDuplicate({"parent":self.parent.name, "element":self.name, "type":type}))
          else:
            self.multitypes += [type]
      return
    # any other child markup means mixed content; an element that claims to
    # be escaped must not contain real markup
    self.mixed=1
    if self.attrs.has_key((None,"mode")):
      if self.attrs.getValue((None,"mode")) == 'escaped':
        self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
    handler=eater()
    handler.parent=self
    handler.dispatcher=self
    self.push(handler)
  def do_content(self):
    # nested content element (multipart/alternative part)
    return content()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.14 2003/12/12 11:25:55 rubys
Validate mime type in link tags
Revision 1.13 2003/12/12 01:24:36 rubys
Multipart/alternative tests
Revision 1.12 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.11 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.10 2003/12/11 15:18:51 f8dy
type is now optional
Revision 1.9 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.8 2003/08/23 00:28:04 rubys
Validate escaped text/HTML content
Revision 1.7 2003/08/05 15:03:19 rubys
Handle complex (nested) content. Remove copy/paste error in handing
of copyright.
Revision 1.6 2003/07/29 21:48:10 f8dy
tightened up test cases, added parent element check, changed negative test cases to positive
Revision 1.5 2003/07/11 16:36:08 rubys
Attempt to detect improper use of inline xml
Revision 1.4 2003/07/10 21:16:33 rubys
Get rssdemo back on its feet...
Revision 1.3 2003/07/10 21:02:16 rubys
Verify base64 and escaped
Revision 1.2 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.1 2003/07/07 02:44:13 rubys
Further progress towards pie
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import timeoutsocket
timeoutsocket.setDefaultSocketTimeout(10)
import urllib
from logging import *
from xml.sax import SAXParseException
from xml.sax.xmlreader import InputSource
import re
MAXDATALENGTH = 200000
class ValidatorURLopener(urllib.FancyURLopener):
  """URL opener that identifies the validator with its own User-Agent."""
  def __init__(self, *args):
    # NOTE(review): version is assigned before delegating to the superclass
    # __init__, presumably so the default request headers pick it up --
    # confirm before reordering
    self.version = "FeedValidator/1.21 +http://feeds.archive.org/validator/"
    urllib.FancyURLopener.__init__(self, *args)
def _validate(aString, firstOccurrenceOnly=0):
  """validate RSS from string, returns validator object

  Wires a SAXDispatcher up as content handler, error handler, and entity
  resolver of a namespace-aware SAX parser, then parses the string.  Parse
  errors have already been logged by the dispatcher's error handlers, so
  SAXParseException is swallowed here.
  """
  from xml.sax import make_parser, handler
  from base import SAXDispatcher
  from exceptions import UnicodeError
  from cStringIO import StringIO
  source = InputSource()
  source.setByteStream(StringIO(aString))
  validator = SAXDispatcher()
  validator.setFirstOccurrenceOnly(firstOccurrenceOnly)
  parser = make_parser()
  parser.setFeature(handler.feature_namespaces, 1)
  parser.setContentHandler(validator)
  parser.setErrorHandler(validator)
  parser.setEntityResolver(validator)
  if hasattr(parser, '_ns_stack'):
    # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
    # PyXML doesn't have this problem, and it doesn't have _ns_stack either
    parser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
  try:
    parser.parse(source)
  except SAXParseException:
    # already logged by the dispatcher's error()/fatalError() handlers
    pass
  except UnicodeError:
    # feed declared one encoding but contained bytes from another
    import sys
    exctype, value = sys.exc_info()[:2]
    import logging
    validator.log(logging.UnicodeError({"exception":value}))
  return validator
def validateStream(aFile, firstOccurrenceOnly=0):
  """validate RSS from a file-like object, returns results dict

  Bug fix: the previous implementation referenced a `validator` local that
  was never assigned, so every call raised NameError.  Read the stream and
  delegate to _validate, mirroring validateString/validateURL.
  """
  validator = _validate(aFile.read(), firstOccurrenceOnly)
  return {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
def validateString(aString, firstOccurrenceOnly=0):
  """validate RSS from a string, returns a dict with the detected feed
  type and the list of logged validation events"""
  results = _validate(aString, firstOccurrenceOnly)
  return {"feedType": results.feedType, "loggedEvents": results.loggedEvents}
def validateURL(url, firstOccurrenceOnly=1, wantRawData=0):
  """validate RSS from URL, returns events list, or (events, rawdata) tuple"""
  # fetch at most MAXDATALENGTH bytes, using the validator's own User-Agent
  stream = ValidatorURLopener().open(url)
  rawdata = stream.read(MAXDATALENGTH)
  rawdata = rawdata.replace('\r\n', '\n').replace('\r', '\n')  # normalize EOL
  stream.close()
  results = _validate(rawdata, firstOccurrenceOnly)
  response = {"feedType": results.feedType, "loggedEvents": results.loggedEvents}
  if wantRawData:
    response['rawdata'] = rawdata
  return response
# Public API of the feedvalidator package: submodules plus the validate*
# entry points.  Consistency fix: validateStream is a public entry point
# like validateURL/validateString but was previously not exported.
__all__ = ['base',
           'channel',
           'compatibility',
           'image',
           'item',
           'logging',
           'rdf',
           'root',
           'rss',
           'skipHours',
           'textInput',
           'util',
           'validators',
           'validateURL',
           'validateStream',
           'validateString']
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.24 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.23 2003/08/09 17:09:34 rubys
Remove misleading mapping of LookupError to UnicodeError
Revision 1.22 2003/08/06 05:40:00 f8dy
patch to send a real User-Agent on HTTP requests
Revision 1.21 2003/08/05 18:51:38 f8dy
added hack to work around bug in built-in SAX parser (doesn't recognize xml: namespace)
Revision 1.20 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.19 2002/12/22 19:01:17 rubys
Integrate in SOAP support
Revision 1.18 2002/11/04 01:06:43 rubys
Remove remaining call to preValidate
Revision 1.17 2002/11/04 00:28:55 rubys
Handle LookupError (e.g., unknown encoding)
Revision 1.16 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.15 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.14 2002/10/22 19:41:07 f8dy
normalize line endings before parsing (SAX parser is not Mac-CR-friendly)
Revision 1.13 2002/10/22 16:35:11 f8dy
commented out fallback except (caller handles it gracefully anyway)
Revision 1.12 2002/10/22 16:24:04 f8dy
added UnicodeError support for feeds that declare utf-8 but use 8-bit characters anyway
Revision 1.11 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: en.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
from feedvalidator.logging import *
line = "line %(line)s"
column = "column %(column)s"
occurances = " (%(msgcount)s occurrences)"
# Maps each event class to its English message template.  Templates are
# filled in with the event's params dict via the '%' operator.
# Bug fix: the EightDaysAWeek message misspelled "skipDays" as "skipDAys".
messages = {
  SAXError: "XML Parsing error: %(exception)s",
  NotHtml: "Invalid HTML: %(message)s",
  UnicodeError: "%(exception)s (maybe a high-bit character?)",
  UndefinedElement: "Undefined %(parent)s element: %(element)s",
  MissingNamespace: "Missing namespace for %(element)s",
  MissingElement: "Missing %(parent)s element: %(element)s",
  MissingOptionalElement: "%(parent)s should contain a %(element)s element",
  MissingRecommendedElement: "%(parent)s should contain a %(element)s element",
  MissingAttribute: "Missing %(element)s attribute: %(attr)s",
  NoBlink: "There is no blink element in RSS; use blogChannel:blink instead",
  InvalidValue: "Invalid value for %(element)s: \"%(value)s\"",
  InvalidWidth: "%(element)s must be between 1 and 144",
  InvalidHeight: "%(element)s must be between 1 and 400",
  InvalidHour: "%(element)s must be between 1 and 24",
  InvalidDay: "%(element)s must be Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday",
  InvalidInteger: "%(element)s must be a positive integer",
  InvalidHttpGUID: "guid must be a full URL, unless isPermaLink attribute is false",
  InvalidUpdatePeriod: "%(element)s must be hourly, daily, weekly, monthly, or yearly",
  RecommendedWidth: "%(element)s should be between 1 and 88",
  RecommendedHeight: "%(element)s should be between 1 and 31",
  NotBlank: "%(element)s can not be blank",
  AttrNotBlank: "The %(attr)s attribute of %(element)s can not be blank",
  DuplicateElement: "%(parent)s contains more than one %(element)s",
  DuplicateSemantics: "A channel must not include both %(core)s and %(ext)s",
  DuplicateItemSemantics: "An item must not include both %(core)s and %(ext)s",
  DuplicateValue: "%(element)s values must not be duplicated within a feed",
  NonstdPrefix: '"%(preferred)s" is the preferred prefix for the namespace "%(ns)s"',
  ReservedPrefix: 'The prefix "%(prefix)s" generally uses the namespace "%(ns)s"',
  UseModularEquivalent: "%(ext)s should be used instead of %(core)s",
  InvalidContact: "%(element)s must include an email address",
  InvalidLink: "%(element)s must be a full URL",
  InvalidW3DTFDate: "%(element)s must be an ISO-8601 date",
  InvalidRFC2822Date: "%(element)s must be an RFC-822 date",
  InvalidLanguage: "%(element)s must be an ISO-639 language code",
  InvalidURLAttribute: "%(attr)s attribute of %(element)s must be a full URL",
  InvalidIntegerAttribute: "%(attr)s attribute of %(element)s must be a positive integer",
  InvalidBooleanAttribute: "%(attr)s attribute of %(element)s must be 'true' or 'false'",
  InvalidMIMEAttribute: "%(attr)s attribute of %(element)s must be a valid MIME type",
  ItemMustContainTitleOrDescription: "item must contain either title or description",
  ContainsHTML: "%(element)s should not contain HTML",
  ContainsUndeclaredHTML: "%(element)s must not contain HTML unless declared in the type attribute",
  NotEnoughHoursInTheDay: "skipHours can not contain more than 24 hour elements",
  EightDaysAWeek: "skipDays can not contain more than 7 day elements",
  SecurityRisk: "%(element)s should not contain %(tag)s tag",
  ContainsRelRef: "%(element)s should not contain relative URL references",
  ContainsSystemEntity: "Feeds must not contain SYSTEM entities",
  InvalidContentMode: "mode must be 'xml', 'escaped', or 'base64'",
  InvalidMIMEType: "Not a valid MIME type",
  NoMIMEType: "%(element)s does not specify a MIME type",
  W3DTFDateNoTimezone: "Date should include a timezone",
  W3DTFDateNonUTC: "Date should be a UTC date",
  W3DTFDateNonLocal: "Date should not be a UTC date",
  NotEscaped: "%(element)s claims to be escaped, but isn't",
  NotInline: "%(element)s claims to be inline, but isn't",
  NotBase64: "%(element)s claims to be base64-encoded, but isn't",
  InvalidURN: "%(element)s is not a valid URN",
  InvalidTAG: "%(element)s is not a valid TAG",
  InvalidURI: "%(element)s is not a valid URI",
  ObsoleteVersion: "This feed is an obsolete version",
  ObsoleteNamespace: "This feed uses an obsolete namespace",
  InvalidNamespace: "%(element)s is in an invalid namespace: %(namespace)s",
  InvalidDoctype: "This feed contains conflicting DOCTYPE and version information",
  MultipartInvalid: "Multipart/alternative content can only contain other content elements",
  MultipartMissing: "Multipart/alternative content must contain at least one content element",
  MultipartRecursion: "Multipart/alternative content can not contain other multipart/alternative content elements",
  MultipartDuplicate: "Multipart/alternative content can not contain multiple content elements of the same type",
  DuplicateAtomLink: "Duplicate link with the same type and rel",
  MissingHref: "%(element)s must have an href attribute",
  AtomLinkNotEmpty: "%(element)s should not have text (all data is in attributes)",
  AtomLinkMissingRel: "%(element)s must have a rel attribute",
  MissingAlternateLink: '''%(parent)s must contain a link element with rel="alternate"'''
  }
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.54 2003/12/12 20:37:06 f8dy
oops, URNs can contain letters after all
Revision 1.53 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.52 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.51 2003/12/12 05:57:39 f8dy
added missing messages
Revision 1.50 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.49 2003/12/12 01:24:36 rubys
Multipart/alternative tests
Revision 1.48 2003/12/11 20:13:58 f8dy
feed title, copyright, and tagline may be blank
Revision 1.47 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.46 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.45 2003/12/11 06:00:51 f8dy
added tag: testcases, passed
Revision 1.44 2003/08/23 00:28:04 rubys
Validate escaped text/HTML content
Revision 1.43 2003/08/06 16:16:59 f8dy
added testcase for Netscape DOCTYPE
Revision 1.42 2003/08/05 22:09:03 f8dy
added automated message test to test output messages
Revision 1.41 2003/08/05 20:54:42 f8dy
Added message for InvalidNamespace error
Revision 1.40 2003/08/05 18:04:12 f8dy
added Atom 0.2-specific messages
Revision 1.39 2003/08/04 01:59:33 rubys
Full http and ftp URIs require two slashes
Revision 1.38 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.37 2003/07/29 16:14:21 rubys
Validate urns
Revision 1.36 2003/07/29 15:15:33 f8dy
added tests for invalid URNs (may be used in entry/id of Atom feeds)
Revision 1.35 2003/07/19 21:15:08 f8dy
added tests and logging classes for duplicate guid/id values within a feed (thanks AaronSw for this idea)
Revision 1.34 2003/07/09 19:28:39 f8dy
added test cases looking at actual content vs. mode (note: not passed)
Revision 1.33 2003/07/09 03:54:39 f8dy
yet more changes to the date messages
Revision 1.32 2003/07/09 03:48:04 f8dy
more changes to pie-specific messages
Revision 1.31 2003/07/09 03:31:36 f8dy
Updated pie-specific log messages
Revision 1.30 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.29 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.28 2002/10/30 23:02:30 f8dy
*** empty log message ***
Revision 1.27 2002/10/30 15:44:48 rubys
Improve error messages for relative references: error message should
be gramatically correct. Remove "hidden" fields prevented duplicate
errors from being flagged as such.
Revision 1.26 2002/10/27 18:54:30 rubys
Issue warnings for relative references in descriptions
Revision 1.25 2002/10/22 22:37:21 f8dy
tweaked ReservedPrefix message one last time
Revision 1.24 2002/10/22 19:32:19 f8dy
made friendlier messages for NonStdPrefix and ReservedPrefix
Revision 1.23 2002/10/22 16:24:04 f8dy
added UnicodeError support for feeds that declare utf-8 but use 8-bit characters anyway
Revision 1.22 2002/10/19 21:08:02 f8dy
added "special case" functionality for the web front end
Revision 1.21 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.20 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.19 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.3 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: author.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# author element.
#
class author(validatorBase):
  """Validates an author element: a name child is required; email and url
  children are optional, each allowed at most once."""
  def validate(self):
    if "name" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"name"}))
  def do_name(self):
    return nonhtml(), nonblank(), noduplicates()
  def do_email(self):
    return email(), noduplicates()
  def do_url(self):
    return nonblank(), rfc2396(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.5 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.4 2003/09/01 21:27:48 f8dy
remove weblog, homepage
Revision 1.3 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.2 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: image.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# image element.
#
class image(validatorBase):
  """Validates an RSS image element: url, title, and link are required;
  width, height, and description are optional."""
  # required child -> event class logged when it is absent
  REQUIRED = (("link", MissingLink), ("title", MissingTitle), ("url", MissingElement))
  def validate(self):
    # RSS 1.0 images carry an rdf:resource attribute and need no children
    if self.attrs.has_key((rdfNS,"resource")):
      return
    for childName, event in self.REQUIRED:
      if childName not in self.children:
        self.log(event({"parent":self.name, "element":childName}))
  def do_title(self):
    return title(), noduplicates()
  def do_link(self):
    return rfc2396(), noduplicates()
  def do_url(self):
    return rfc2396(), noduplicates()
  def do_width(self):
    return width(), noduplicates()
  def do_height(self):
    return height(), noduplicates()
  def do_description(self):
    return nonhtml(), noduplicates()
class title(text, noduplicates):
  """Validates an image title: must be non-blank."""
  def validate(self):
    if not self.value.strip():
      self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name}))
    # NOTE(review): validate() return values are ignored by the framework
    # (endElementNS discards them), so this return appears to have no
    # effect -- confirm before removing
    return nonhtml()
class width(text, noduplicates):
  """Validates an image width: an integer from 1 to 144; values above 88
  are valid but draw a recommendation warning."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      # not an integer at all
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 1 <= pixels <= 144:
      self.log(ValidWidth({"parent":self.parent.name, "element":self.name}))
      if pixels > 88:
        self.log(RecommendedWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
class height(text, noduplicates):
  """Validates an image height: an integer from 1 to 400; values above 31
  are valid but draw a recommendation warning."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      # not an integer at all
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 1 <= pixels <= 400:
      self.log(ValidHeight({"parent":self.parent.name, "element":self.name}))
      if pixels > 31:
        self.log(RecommendedHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.11 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.10 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.9 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.8 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.7 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.6 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.5 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: messagetest.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import unittest, new, os, sys, glob, re
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.application_test import Formatter
class TestCase(unittest.TestCase):
    """Base class for the generated message-formatting tests.

    For every logged validator event, the HTML-test formatter must be able
    to produce a non-empty message; a falsy result means a message template
    is missing for that event type.
    """
    def failIfNoMessage(self, theClass, params, theList, msg=None):
        """Fail if any event in theList cannot be formatted as a message.

        theClass, params and msg are accepted only for signature
        compatibility with the validtest helpers; they are not used here.
        """
        # hardcoded to the "AA" compatibility level, mirroring the online validator
        filterFunc = compatibility.AA
        events = filterFunc(theList)
        output = Formatter(events)
        for e in events:
            if not output.format(e):
                # fix: corrected "contruct" -> "construct" in the failure message
                raise self.failureException('could not construct message for %s' % e)
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
def getDescription(xmlfile):
    """Extract description and exception from XML file

    Each test case is an XML file containing a (possibly invalid) feed plus
    an XML comment describing the test: a human-readable description and
    the event class the validator is expected to log, e.g.

        <!--
        Description: channel must include title
        Expect: MissingTitle
        -->
    """
    handle = open(xmlfile)
    contents = handle.read()
    handle.close()
    match = desc_re.search(contents)
    if not match:
        raise RuntimeError("can't parse %s" % xmlfile)
    description, cond, excName, plist = match.groups()
    # message tests always apply the same check: every event must format cleanly
    method = TestCase.failIfNoMessage
    params = {}
    if plist:
        # plist looks like "key1:value1,key2:value2"
        for pair in plist.split(','):
            key, value = pair.lstrip().split(':', 1)
            params[key] = value
    exc = getattr(feedvalidator, excName)
    return method, xmlfile + ": " + description, params, exc
def buildTestCase(xmlfile, description, method, exc, params):
    """factory to create functions which validate `xmlfile`

    The returned function reads and validates `xmlfile` at test-run time,
    then applies `method` (a TestCase helper) to the logged events, looking
    for an instance of `exc` with `params`.
    """
    def testcase(self, xmlfile=xmlfile, exc=exc, params=params):
        data = open(xmlfile).read()
        return method(self, exc, params, feedvalidator.validateString(data)['loggedEvents'])
    # the docstring doubles as the test's human-readable description
    testcase.__doc__ = description
    return testcase
if __name__ == "__main__":
    # The shared "testcases" tree lives one directory above this script.
    curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
    basedir = os.path.split(curdir)[0]
    # Generate one test method per test case file and graft it onto TestCase.
    # NOTE(review): under Python 2's glob, '**' matches like '*', so this
    # finds files exactly two levels below "testcases" (not recursively).
    for xmlfile in glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')):
        method, description, params, exc = getDescription(xmlfile)
        testName = 'test_' + os.path.basename(xmlfile)
        testFunc = buildTestCase(xmlfile, description, method, exc, params)
        # Python 2 idiom: bind the plain function as an unbound method
        instanceMethod = new.instancemethod(testFunc, None, TestCase)
        setattr(TestCase, testName, instanceMethod)
    unittest.main()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.2 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.1 2003/08/05 22:09:03 f8dy
added automated message test to test output messages
"""
| Python |
"""$Id: validtest.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
import unittest, new, os, sys, glob, re
class TestCase(unittest.TestCase):
    """Base class for the generated per-feed validation tests.

    Supplies the two assertion helpers that getDescription() chooses
    between: one for "this event must be logged" and one for "this event
    must NOT be logged".
    """
    def failUnlessContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are no instances of theClass in theList with given params"""
        failure=(msg or 'no %s instances in %s' % (theClass.__name__, `theList`))
        for item in theList:
            if item.__class__.__name__ == theClass.__name__:
                # class name matches; with no params to check, that is enough
                if not params: return
                for k, v in params.items():
                    if item.params[k] <> v:
                        # record the mismatch but keep scanning later items
                        failure=("%s.%s value was %s, expected %s" %
                                 (theClass.__name__, k, item.params[k], v))
                        break
                else:
                    # for/else: every param matched on this item -> success
                    return
        raise self.failureException, failure
    def failIfContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are instances of theClass in theList with given params"""
        for item in theList:
            if item.__class__.__name__ == theClass.__name__:
                # with no params, any instance of the class is a failure
                if not params:
                    raise self.failureException, \
                          (msg or 'unexpected %s' % (theClass.__name__))
                allmatch = 1
                for k, v in params.items():
                    if item.params[k] != v:
                        allmatch = 0
                if allmatch:
                    # NOTE(review): the message reports only the last (k, v)
                    # pair from the loop above, even when several matched
                    raise self.failureException, \
                          "unexpected %s.%s with a value of %s" % \
                          (theClass.__name__, k, v)
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
def getDescription(xmlfile):
    """Extract description and exception from XML file

    Each test case is an XML file containing a (possibly invalid) feed plus
    an XML comment describing the test: a human-readable description and
    the event class the validator is expected to log (or, with a leading
    "!", expected NOT to log), e.g.

        <!--
        Description: channel must include title
        Expect: MissingTitle
        -->
    """
    handle = open(xmlfile)
    contents = handle.read()
    handle.close()
    match = desc_re.search(contents)
    if not match:
        raise RuntimeError("can't parse %s" % xmlfile)
    description, cond, excName, plist = match.groups()
    # "Expect: Foo" -> the event must appear; "Expect: !Foo" -> it must not
    if cond == "":
        method = TestCase.failUnlessContainsInstanceOf
    else:
        method = TestCase.failIfContainsInstanceOf
    params = {}
    if plist:
        # plist looks like "key1:value1,key2:value2"
        for pair in plist.split(','):
            key, value = pair.lstrip().split(':', 1)
            params[key] = value
    exc = getattr(feedvalidator, excName)
    return method, xmlfile + ": " + description, params, exc
def buildTestCase(xmlfile, description, method, exc, params):
    """factory to create functions which validate `xmlfile`

    The returned function reads and validates `xmlfile` at test-run time,
    then applies `method` (a TestCase helper) to the logged events, looking
    for an instance of `exc` with `params`.
    """
    def testcase(self, xmlfile=xmlfile, exc=exc, params=params):
        data = open(xmlfile).read()
        return method(self, exc, params, feedvalidator.validateString(data)['loggedEvents'])
    # the docstring doubles as the test's human-readable description
    testcase.__doc__ = description
    return testcase
if __name__ == "__main__":
    # The shared "testcases" tree lives one directory above this script.
    curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
    basedir = os.path.split(curdir)[0]
    # Generate one test method per test case file and graft it onto TestCase.
    # NOTE(review): under Python 2's glob, '**' matches like '*', so this
    # finds files exactly two levels below "testcases" (not recursively).
    for xmlfile in glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')):
        method, description, params, exc = getDescription(xmlfile)
        testName = 'test_' + os.path.basename(xmlfile)
        testFunc = buildTestCase(xmlfile, description, method, exc, params)
        # Python 2 idiom: bind the plain function as an unbound method
        instanceMethod = new.instancemethod(testFunc, None, TestCase)
        setattr(TestCase, testName, instanceMethod)
    unittest.main()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.2 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.1 2003/08/06 16:56:14 f8dy
combined pievalidtest and rssvalidtest, renamed rssdemo to demo
Revision 1.20 2003/07/20 00:25:41 rubys
Search for *any* instance of a matching class/params in the log
Revision 1.19 2003/07/19 22:25:10 f8dy
fixed bug in test case suite runner, if an element in the expected params didn't match the expected value, it would set the failure message but not raise the appropriate exception, so rssvalidtest.py would claim that certain test cases passed when they didn't (this affected 7 test cases out of 700)
Revision 1.18 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.17 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.16 2003/07/06 21:20:02 rubys
Refactor so test cases are organized by protocol
Revision 1.15 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.14 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: demo.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
import sys
if __name__ == '__main__':
    # arg 1 is URL to validate; defaults to the author's own feed
    link = sys.argv[1:] and sys.argv[1] or 'http://www.intertwingly.net/blog/index.rss2'
    print 'Validating %s' % link
    # fetch and validate, reporting only the first occurrence of each problem
    events = feedvalidator.validateURL(link, firstOccurrenceOnly=1)['loggedEvents']
    # (optional) arg 2 is compatibility level
    # "A" is most basic level
    # "AA" mimics online validator
    # "AAA" is experimental; these rules WILL change or disappear in future versions
    from feedvalidator import compatibility
    # NOTE: 'filter' shadows the builtin; harmless in this short script
    filter = sys.argv[2:] and sys.argv[2] or "AA"
    filterFunc = getattr(compatibility, filter)
    events = filterFunc(events)
    # render the surviving events as plain text, one message per line
    from feedvalidator.formatter.text_plain import Formatter
    output = Formatter(events)
    if output:
        print "\n".join(output)
    else:
        print "No errors or warnings"
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.3 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/06 16:56:14 f8dy
combined pievalidtest and rssvalidtest, renamed rssdemo to demo
Revision 1.13 2003/07/16 19:47:15 rubys
Remove debug statement
Revision 1.12 2003/07/10 21:16:33 rubys
Get rssdemo back on its feet...
Revision 1.11 2002/10/20 04:47:21 f8dy
*** empty log message ***
Revision 1.10 2002/10/20 04:41:21 f8dy
*** empty log message ***
Revision 1.9 2002/10/20 04:36:09 f8dy
cleaned up for public distribution
Revision 1.8 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
#!/usr/bin/env python
from config import *
import cgi, sys, os, urlparse, sys, re
import cgitb
cgitb.enable()
# Make the locally installed libraries (PYDIR), the CGI support scripts
# (WEBDIR) and the validator source (SRCDIR) importable, without adding
# duplicate entries to sys.path on repeated imports.
if PYDIR not in sys.path:
    sys.path.insert(0, PYDIR)
if WEBDIR not in sys.path:
    sys.path.insert(0, WEBDIR)
if SRCDIR not in sys.path:
    sys.path.insert(0, SRCDIR)
import feedvalidator
from feedvalidator.logging import FEEDTYPEDISPLAY, VALIDFEEDGRAPHIC
def applyTemplate(templateFile, params={}):
    """Load a template from WEBDIR/templates and %-interpolate params into it.

    templateFile -- file name within the templates directory
    params -- mapping used for %-interpolation (never mutated, so the
              shared default dict is safe here)

    Returns the interpolated template as a string.
    """
    fsock = open(os.path.join(WEBDIR, 'templates', templateFile))
    try:
        # fix: close the handle even if the read or interpolation raises
        # (the original leaked the file descriptor on error)
        data = fsock.read() % params
    finally:
        fsock.close()
    return data
def sanitizeURL(url):
    """Normalize a user-entered URL for validation.

    - strips surrounding whitespace
    - prepends 'http://' when no http scheme is present
    - strips any user:password@ credentials

    Returns the cleaned URL string.
    """
    # fix: strip whitespace *before* scheme detection; the original stripped
    # afterwards, so ' http://x' parsed as scheme-less and came out as
    # 'http:// http://x'
    url = url.strip()
    scheme, domain, path, u1, u2, u3 = urlparse.urlparse(url)
    if scheme.lower() != 'http':
        url = 'http://%s' % url
        scheme, domain, path, u1, u2, u3 = urlparse.urlparse(url)
    # strip user and password
    url = re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1' ,url)
    return url
def buildCodeListing(events, rawdata):
    """Render rawdata as an HTML code listing with error lines highlighted.

    Lines whose 1-based number appears in any event's 'line' param get
    linetype "b" (highlighted); all others get "a".

    NOTE(review): reads the module-level global `url` (set from the CGI
    form further down) -- confirm before reusing this function elsewhere.
    """
    # print feed
    codelines = []
    linenum = 1
    # 1-based line numbers that have at least one associated event
    linesWithErrors = [e.params.get('line', 0) for e in events]
    for line in rawdata.split('\n'):
        line = cgi.escape(line)
        # NOTE(review): probably originally '&nbsp;' -- the entity appears
        # to have been decoded in this copy of the source
        if not line: line = ' '
        linetype = linenum in linesWithErrors and "b" or "a"
        codelines.append(applyTemplate('code_listing_line.tmpl', {"line":line, "linenum":linenum, "linetype":linetype}))
        linenum += 1
    codelisting = "".join(codelines)
    return applyTemplate('code_listing.tmpl', {"codelisting":codelisting, "url":url})
def postvalidate(url, events, rawdata, feedType, autofind=1):
    """returns dictionary including 'url', 'events', 'rawdata', 'output', 'specialCase', 'feedType'

    Post-processes a validation run: filters events by compatibility
    level, formats them as HTML, and -- when the input turned out to be an
    HTML page rather than a feed -- attempts feed autodiscovery once.
    `autofind` guards the recursion so discovery happens at most one level.
    """
    # filter based on compatibility level
    from feedvalidator import compatibility
    filterFunc = compatibility.AA # hardcoded for now
    events = filterFunc(events)
    specialCase = None
    from feedvalidator.formatter.text_html import Formatter
    formattedOutput = Formatter(events, rawdata)
    if formattedOutput:
        # check for special cases
        specialCase = compatibility.analyze(events, rawdata)
        if (specialCase == 'html') and autofind:
            # the user gave us an HTML page; try to autodiscover its feed
            try:
                try:
                    import feedfinder
                    rssurls = feedfinder.getFeeds(url)
                except:
                    # feedfinder unavailable or failed: fall back to the page itself
                    rssurls = [url]
                if rssurls:
                    url = rssurls[0]
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    # re-run post-processing on the discovered feed, without
                    # autodiscovering again (prevents unbounded recursion)
                    return postvalidate(url, events, rawdata, feedType, autofind=0)
            except:
                # best-effort: on any discovery failure, report the
                # original results below
                pass
    return {"url":url, "events":events, "rawdata":rawdata, "output":formattedOutput, "specialCase":specialCase, "feedType":feedType}
# ---- CGI entry point: parse the request ----
fs = cgi.FieldStorage()
url = fs.getvalue("url") or ''
manual = fs.getvalue("manual") or 0
rawdata = fs.getvalue("rawdata") or ''
# cap pasted data at the validator's limit and normalize line endings
rawdata = rawdata[:feedvalidator.MAXDATALENGTH].replace('\r\n', '\n').replace('\r', '\n')
if (os.environ['REQUEST_METHOD'].lower() == 'post') and (not rawdata):
    # SOAP
    # a POST with no pasted form data is treated as a SOAP request with
    # the feed in the request body (stdin)
    try:
        # validate
        params = feedvalidator.validateStream(sys.stdin)
        events = params['loggedEvents']
        feedType = params['feedType']
        # filter based on compatibility level
        from feedvalidator import compatibility
        filterFunc = compatibility.AA # hardcoded for now
        events = filterFunc(events)
        # format as xml
        from feedvalidator.formatter.text_xml import Formatter
        output = Formatter(events)
        # output
        if output:
            body = applyTemplate('soap.tmpl', {'body':"\n".join(output)})
        else:
            body = applyTemplate('soap.tmpl' , {'body':''})
        print 'Content-type: text/xml\r\n\r\n' + body
    except:
        # return a SOAP fault carrying the traceback instead of crashing the CGI
        import traceback
        tb = ''.join(apply(traceback.format_exception, sys.exc_info()))
        from feedvalidator.formatter.text_xml import xmlEncode
        print 'Status: 500 Internal Error\r\nContent-type: text/xml\r\n'
        print applyTemplate('fault.tmpl', {'code':sys.exc_info()[0],
            'string':sys.exc_info()[1], 'traceback':xmlEncode(tb)})
else:
    # browser (HTML form) response
    print 'Content-type: text/html'
    print
    if url or rawdata:
        # validate
        goon = 0
        if rawdata:
            # validate raw data (from text form)
            try:
                params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
                events = params['loggedEvents']
                feedType = params['feedType']
                goon = 1
            except:
                # NOTE(review): passes url (not rawdata) back into the
                # manual-entry template -- confirm this is intentional
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        else:
            # validate by URL
            url = sanitizeURL(url)
            try:
                params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                events = params['loggedEvents']
                rawdata = params['rawdata']
                feedType = params['feedType']
                goon = 1
            except:
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        if goon:
            # post-validate (will do RSS autodiscovery if needed)
            validationData = postvalidate(url, events, rawdata, feedType)
            # write output header (postvalidate may have swapped in a
            # discovered feed URL and its raw data)
            url = validationData['url']
            feedType = validationData['feedType']
            rawdata = validationData['rawdata']
            print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
            if manual:
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(rawdata)})
            else:
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
            output = validationData.get('output', None)
            if output:
                # print special case, if any
                specialCase = validationData.get('specialCase', None)
                if specialCase:
                    print applyTemplate('%s.tmpl' % specialCase)
                # print validator output
                print applyTemplate('invalid.tmpl')
                for o in output:
                    print o
                print applyTemplate('invalid_footer.tmpl')
                # print code listing
                print buildCodeListing(validationData['events'], validationData['rawdata'])
            else:
                # valid
                print applyTemplate('valid.tmpl', {"url":cgi.escape(url), "feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType]})
    else:
        # nothing to validate, just write basic form
        print applyTemplate('header.tmpl', {'title':'Feed Validator for Atom and RSS'})
        if manual:
            print applyTemplate('manual.tmpl', {'rawdata':''})
        else:
            print applyTemplate('index.tmpl', {'value':'http://'})
    # common page footer for all browser responses
    print applyTemplate('special.tmpl', {})
    print applyTemplate('navbar.tmpl')
    print applyTemplate('footer.tmpl')
| Python |
# Site-specific configuration for the feed validator CGI installation.

# This following value is primarily used for setting up the other values...
HOMEDIR = r'/home/rubys'
# This is where local python libraries are installed. This may be useful
# for locating a locally installed libxml2 library, for example...
PYDIR = HOMEDIR + r'/lib/python/'
# This is where the CGI itself is... other supporting scripts (like
# feedfinder) may be placed here.
WEBDIR = HOMEDIR + r'/public_html/feedvalidator'
# This is where the feedvalidator code lives...
SRCDIR = WEBDIR + r'/src'
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 1.144 $"[11:16] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/. At the moment, it appears
# that there is a version incompatibility, so the import is replaced with
# a 'None'. Restoring the try/import/except/none will renable the MF tests.
BeautifulSoup = None
# ---------- don't touch these ----------
# Exception hierarchy used to flag conditions noticed while parsing;
# the ThingsNobodyCaresAboutButMe subclasses cover character-encoding
# and content-type surprises.
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Loosen sgmllib's lexer tables for real-world feed markup: tag names may
# contain '-', '_', '.' and ':' (namespace prefixes), and character
# references may be hexadecimal.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
if sgmllib.endbracket.search(' <').start(0):
    # NOTE(review): the guard above detects sgmllib versions whose
    # endbracket regex behaves differently on a leading '<'; this shim
    # skips over quoted attribute values when hunting for the closing
    # bracket -- confirm against the targeted Python versions.
    class EndBracketMatch:
        endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            self.match = self.endbracket.match(string,index)
            if self.match: return self
        def start(self,n):
            # mimic the re match API used by sgmllib (it calls start(0))
            return self.match.end(n)
    sgmllib.endbracket = EndBracketMatch()
# Map of internal feed-version codes to human-readable names; the empty
# string means the feed type could not be determined.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # minimal replacement: builds a mapping from a list of (key, value) pairs
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary with attribute-style access and legacy key aliasing.

    Old element names ('channel', 'items', 'modified', ...) are
    transparently mapped onto their current equivalents via `keymap`, so
    code written against earlier feedparser versions keeps working.
    """
    # legacy key -> current key; a list value means "first present wins" on read
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # synthesized views built from the stored 'tags' and 'links' entries
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # enclosures are the rel='enclosure' links, minus their 'rel' key
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            # alias maps to several candidates: return the first one present
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # prefer an exactly-stored key over the alias target
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes through a legacy alias are redirected to the canonical key
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # route through __getitem__ so aliases and synthesized keys work here too
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # attribute access falls back to item access: d.title == d['title']
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            # underscored names are never treated as feed data
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        if key.startswith('_') or key == 'data':
            # real attributes (and UserDict's backing store) stay attributes
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Globally replace the FeedParserDict class with a plain-dict factory.

    Presumably needed for Zope environments (per the name) that cannot
    handle the custom mapping type -- confirm.  After calling this,
    attribute-style access and legacy key aliasing are lost.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc
# Lazily-built 256-byte translation table for EBCDIC -> ASCII conversion.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to ASCII.

    Builds the translation table on first use and caches it in the module
    global.  Uses Python 2's string.maketrans (byte-string signature).
    """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point -> ASCII code point, indexed 0..255
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# Windows-1252 code points in the 0x80-0x9F range (where feeds are often
# mislabeled as ISO-8859-1) mapped to their real Unicode equivalents.
_cp1252 = {
    unichr(128): unichr(8364), # euro sign
    unichr(130): unichr(8218), # single low-9 quotation mark
    unichr(131): unichr( 402), # latin small letter f with hook
    unichr(132): unichr(8222), # double low-9 quotation mark
    unichr(133): unichr(8230), # horizontal ellipsis
    unichr(134): unichr(8224), # dagger
    unichr(135): unichr(8225), # double dagger
    unichr(136): unichr( 710), # modifier letter circumflex accent
    unichr(137): unichr(8240), # per mille sign
    unichr(138): unichr( 352), # latin capital letter s with caron
    unichr(139): unichr(8249), # single left-pointing angle quotation mark
    unichr(140): unichr( 338), # latin capital ligature oe
    unichr(142): unichr( 381), # latin capital letter z with caron
    unichr(145): unichr(8216), # left single quotation mark
    unichr(146): unichr(8217), # right single quotation mark
    unichr(147): unichr(8220), # left double quotation mark
    unichr(148): unichr(8221), # right double quotation mark
    unichr(149): unichr(8226), # bullet
    unichr(150): unichr(8211), # en dash
    unichr(151): unichr(8212), # em dash
    unichr(152): unichr( 732), # small tilde
    unichr(153): unichr(8482), # trade mark sign
    unichr(154): unichr( 353), # latin small letter s with caron
    unichr(155): unichr(8250), # single right-pointing angle quotation mark
    unichr(156): unichr( 339), # latin small ligature oe
    unichr(158): unichr( 382), # latin small letter z with caron
    unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
return urlparse.urljoin(base, uri)
except:
uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Initialize parser state.

        baseuri  -- document base URI for resolving relative references
        baselang -- inherited xml:lang value, if any
        encoding -- character encoding of the source document
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # lazily build the lowercased namespace lookup table (stored on
            # the class attribute, so it is shared across instances)
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            # normalize 'en_US'-style tags to the 'en-US' form
            self.feeddata['language'] = baselang.replace('_','-')
    def unknown_starttag(self, tag, attrs):
        '''Dispatch an opening tag to its _start_<prefix>_<name> handler,
        or push it as a generic element if no handler exists.  Also tracks
        xml:base, xml:lang, namespace declarations, and inline XHTML.'''
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        '''Dispatch a closing tag to its _end_<prefix>_<name> handler, or
        pop it generically; then unwind xml:base / xml:lang scope.'''
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # re-emit the end tag as literal data inside the inline XHTML
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # XML's own special characters are kept in entity form
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            # entity declared in the document's internal DTD subset
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # NOTE(review): this re-feeds the whole '&#nnn;' string as
                # the next ref, which cannot match any named entity --
                # confirm whether handle_charref was intended here
                return self.handle_entityref(text)
        else:
            # decode known named HTML entities to UTF-8 bytes;
            # unknown names are passed through verbatim as '&name;'
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data, so they are discarded
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions are ignored
        pass
    def handle_decl(self, text):
        # document type declarations are ignored
        pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # resolve a possibly-relative URI against the current xml:base
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses (the loose parser overrides this); strict
        # parsing needs no extra decoding, so pass the data through
        return data
    def strattrs(self, attrs):
        # serialize [(name, value), ...] back into ' name="value"' form,
        # escaping attribute values (double quotes become &quot;)
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        '''Close the current element and post-process its accumulated text.

        The pipeline (order matters): strip the wrapping <div> from Atom 1.0
        XHTML content, join pieces, optionally strip whitespace, base64-
        decode, resolve relative URIs, decode entities, sniff for HTML,
        resolve URIs inside markup, parse microformats, sanitize markup,
        repair encodings, then store the value in the entry/feed context.
        Returns the processed text.
        '''
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #    <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0: break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # loop ran to completion: the first <div> wraps everything
                    pieces = pieces[1:-1]
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        if self.lookslikehtml(output):
            self.contentparams['type']='text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard
        # sanitize embedded markup
        if is_htmlish:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding=='utf-8' and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass
        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
if self.version.startswith('atom'): return
if self.contentparams.get('type','text/html') != 'text/plain': return
# must have a close tag or a entity reference to qualify
if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',str)): return
# all entities must have been defined as valid HTML entities
from htmlentitydefs import entitydefs
if filter(lambda e: e not in entitydefs.keys(),
re.findall(r'&(\w+);',str)): return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # <dlhottitles> marks Netscape's "hot titles" format
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # entering feed-level metadata (RSS <channel> / CDF <feedinfo>)
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries modification date and link as attributes; replay
        # them through the normal element handlers by faking an element
        # whose text is the attribute value
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # entering an Atom feed; map its version attribute to our label
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                # unknown or missing version attribute
                self.version = 'atom'
    def _end_channel(self):
        # leaving feed-level metadata
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # open the image sub-dict; while inimage is set, _getContext()
        # redirects child-element writes into it
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # same pattern as image, for the RSS <textInput> box
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # while inauthor is set, <name>/<url>/<email> children are routed
        # into author_detail (see _end_name/_end_url/_end_email)
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # reconcile the author string with the collected detail fields
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> is stored under 'publisher' rather than 'author'
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # each <contributor> appends a fresh dict; child elements fill it
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor has no child elements; its text is the name,
        # so the element is pushed under 'name' directly
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever construct is currently open
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        # url/homepage/uri all collect into 'href'
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        # route the link to whichever person construct is open
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        value = self.pop('email')
        # route the address to whichever person construct is open
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        '''Keep context[key] (display string) and context[key+'_detail']
        (structured dict) consistent, deriving whichever one is missing.'''
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            # detail -> combined "Name (email)" display string
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            # string -> detail: pull an email address out of the free-form
            # author string; whatever remains is treated as the name
            author, email = context.get(key), None
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
            if emailmatch:
                email = emailmatch.group(0)
                # probably a better way to do the following, but it passes all the tests
                author = author.replace(email, '')
                author = author.replace('()', '')
                author = author.replace('<>', '')
                # NOTE(review): the line below duplicates the previous
                # replace and appears redundant -- confirm before removing
                author = author.replace('<>', '')
                author = author.strip()
                if author and (author[0] == '('):
                    author = author[1:]
                if author and (author[-1] == ')'):
                    author = author[:-1]
                author = author.strip()
            if author or email:
                context.setdefault('%s_detail' % key, FeedParserDict())
            if author:
                context['%s_detail' % key]['name'] = author
            if email:
                context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # subtitle/tagline/itunes:subtitle are plain-text content constructs
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # rights/dc:rights/copyright are plain-text content constructs
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # the declared language also becomes the current default language
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        # derive publisher_detail from the publisher string (or vice versa)
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # store the parsed form alongside the raw date string
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        # updated/modified/pubDate/dc:date all collect into 'updated'
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        # store the parsed form alongside the raw date string
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # cc:license carries its value in rdf:resource, so the element
        # is pushed and popped immediately with that attribute value
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        # creativeCommons:license carries its value as element text
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        # Atom uses term/scheme/label attributes; RSS 2.0 uses 'domain'
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is whitespace-separated; one tag per keyword
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        # the category name lives in the 'text' attribute
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # element text fills in the term of the tag opened by
        # _start_category when that tag had no 'term' attribute;
        # otherwise it becomes a new tag of its own
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # rssCloud is attribute-only; store the attributes verbatim
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # default rel/type per the Atom link conventions
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        # an enclosure link can double as the entry id
        if attrsD.get('rel')=='enclosure' and not context.get('id'):
            context['id'] = attrsD.get('href')
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            expectingText = 0
            # an alternate HTML link also becomes the main 'link' value
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            # no href attribute: the link URI arrives as element text
            self.push('link', expectingText)
    _start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
    def _start_guid(self, attrsD):
        # isPermaLink defaults to 'true', in which case the guid doubles
        # as the entry's link
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
def _start_title(self, attrsD):
if self.incontent: return self.unknown_starttag('title', attrsD)
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
if not value: return
context = self._getContext()
_end_dc_title = _end_title
_end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            # a summary already exists, so treat this description as
            # full content instead
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        # CDF abstract: like description, but nominally plain text
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        # atom:info / feedburner:browserFriendly, plain-text content
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        if attrsD:
            # normalize url/uri attributes to 'href' and resolve it
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            # the element text is the generator's human-readable name
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # admin:generatorAgent carries its value in rdf:resource; push
        # and pop immediately with that value
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # admin:errorReportsTo: same rdf:resource pattern
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            # a second summary-ish element is treated as content instead
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel']='enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href and not context.get('id'):
context['id'] = href
    def _start_source(self, attrsD):
        # while insource is set, _getContext() redirects into sourcedata
        self.insource = 1
    def _end_source(self):
        self.insource = 0
        # deep-copy so clearing sourcedata below doesn't gut the stored value
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        # Atom content may be out-of-line, referenced by a src attribute
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        # xhtml:body holds inline XHTML content
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # content:encoded holds escaped HTML
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # plain-text or HTML content also doubles as the description
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        # the image URL lives in the href attribute
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        # normalize 'yes'/anything-else to 1/0
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        # normalize 'yes'/anything-else to 1/0
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
    def characters(self, text):
        # SAX ContentHandler callback for character data; route it through
        # the same handler used for character data from the SGML parser.
        self.handle_data(text)
    def endElementNS(self, name, qname):
        # SAX callback for an end tag.  Mirrors startElementNS: recover a
        # usable namespace prefix so unknown_endtag sees the same
        # 'prefix:localname' string that the start-tag handler produced.
        namespace, localname = name
        lowernamespace = str(namespace or '').lower()
        if qname and qname.find(':') > 0:
            givenprefix = qname.split(':')[0]
        else:
            givenprefix = ''
        # prefer the canonical prefix for this namespace; fall back to
        # whatever prefix the document actually declared
        prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
        if prefix:
            localname = prefix + ':' + localname
        elif namespace and not qname: #Expat
            # Expat reports no qnames; look the prefix up among the
            # namespace declarations currently in scope
            for name,value in self.namespacesInUse.items():
                if name and value == namespace:
                    localname = name + ':' + localname
                    break
        localname = str(localname).lower()
        self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
    def fatalError(self, exc):
        # SAX ErrorHandler hook for unrecoverable errors: record the
        # exception (sets the bozo flag) and then re-raise so that
        # parsing stops immediately.
        self.error(exc)
        raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """Regenerating HTML parser.

    Feeds markup through sgmllib and re-emits it, lightly normalized,
    into self.pieces; subclasses override the handler callbacks to
    filter or rewrite markup on the way through.  Call feed(), then
    output() to retrieve the reconstructed document.

    NOTE: several string literals in this class had been corrupted by an
    HTML-entity-decoding pass (e.g. replace('>','>') no-ops); they are
    restored here to the intended entity forms ('&gt;', '&amp;', ...).
    """
    special = re.compile('''[<>'"]''')
    # an '&' that does not begin a decimal, hex, or named reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML elements with no end tag; serialized as '<tag />'
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding, type):
        self.encoding = encoding
        self.type = type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        # accumulated output fragments; joined by output()
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # expand XML-style '<tag/>' into a form sgmllib can digest
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def parse_starttag(self,i):
        j=sgmllib.SGMLParser.parse_starttag(self, i)
        if self.type == 'application/xhtml+xml':
            if j>2 and self.rawdata[j-2:j]=='/>':
                # in XHTML, '<tag/>' closes the element immediately
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # escape '<!' unless it begins a DOCTYPE, comment, or marked section
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs: return attrs
        # utility method to be called by descendants: lowercase attribute
        # names (dropping duplicates), lowercase 'rel'/'type' values, and
        # sort for a stable serialization
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # re-escape markup-significant characters in attribute values
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                uattrs.append((unicode(key, self.encoding), value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs=strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        # HTML allows both '&#x...;' and '&#X...;' hexadecimal forms.
        if ref.startswith('x') or ref.startswith('X'):
            value = unichr(int(ref[1:],16))
        else:
            value = unichr(int(ref))
        if value in _cp1252.keys():
            # remap Windows-1252 "smart" characters to their real code points
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # unknown entity: emit it with the ampersand escaped
            self.pieces.append('&amp;%(ref)s' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # override of sgmllib's name scanner with a more permissive pattern
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Feed parser for non-well-formed feeds, driven by sgmllib.

    NOTE: the entity strings in decodeEntities/strattrs had been
    corrupted by an HTML-entity-decoding pass (each replace() was a
    no-op like replace('<','<')); restored to the intended
    numeric-reference -> named-entity normalization.
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # normalize numeric character references for the five XML
        # predefined characters to their named-entity equivalents
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # for non-XML content types, fully decode the predefined entities
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # serialize (name, value) pairs as ' name="value"', escaping quotes
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
    """Extracts microformat data (hCard vCards, rel-tag tags, rel-enclosure
    enclosures, and XFN relationships) from an HTML document parsed with
    BeautifulSoup.  Results accumulate in self.tags, self.enclosures,
    self.xfn, and self.vcard."""
    # property-type codes accepted by getPropertyValue()
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5
    # rel values defined by the XHTML Friends Network (XFN) profile
    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
    # file extensions that isProbablyDownloadable() treats as likely media
    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
    def __init__(self, data, baseuri, encoding):
        """Parse data (an HTML string) and prepare result accumulators."""
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None
    def vcardEscape(self, s):
        """Escape commas, semicolons and newlines per RFC 2426."""
        if type(s) in (type(''), type(u'')):
            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
        return s
    def vcardFold(self, s):
        """Fold a vCard content line at 75 octets (continuations indented
        with one space), per RFC 2426 section 2.6."""
        s = re.sub(';+$', '', s)
        sFolded = ''
        iMax = 75
        sPrefix = ''
        while len(s) > iMax:
            sFolded += sPrefix + s[:iMax] + '\n'
            s = s[iMax:]
            sPrefix = ' '
            iMax = 74
        sFolded += sPrefix + s
        return sFolded
    def normalize(self, s):
        """Collapse runs of whitespace to single spaces and trim."""
        return re.sub(r'\s+', ' ', s).strip()
    def unique(self, aList):
        """Return aList with duplicates removed, preserving order."""
        results = []
        for element in aList:
            if element not in results:
                results.append(element)
        return results
    def toISO8601(self, dt):
        """Format a time tuple as an ISO 8601 UTC timestamp."""
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        """Find the value(s) of microformat property sProperty below elmRoot.

        iPropertyType selects how matching nodes are converted (STRING,
        DATE, URI, NODE, or EMAIL; default NODE).  bAllowMultiple returns
        a list of all matches instead of the first; bAutoEscape applies
        vCard escaping to string results.
        """
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        propertyMatch = re.compile(r'\b%s\b' % sProperty)
        if bAllowMultiple and (iPropertyType != self.NODE):
            # look for properties marked up as <ul>/<ol> lists of <li>
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            # 'value' may be marked up as <pre> (whitespace-significant)
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
            if not bFound:
                snapResults = [elmRoot]
                bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # exclude vcards nested inside other vcards
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # type-appropriate "empty" result
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return BeautifulSoup.Null
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return BeautifulSoup.Null
            else: return BeautifulSoup.Null
        arValues = []
        for elmResult in arResults:
            sValue = BeautifulSoup.Null
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # EMAIL: prefer the mailto: target of an <a>, minus any query
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # <abbr title="..."> carries the machine-readable value
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # URI: pull from the element's natural URI-bearing attribute
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                # fall back to the element's text content, stripped of tags
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        """Serialize every hCard under elmRoot as vCard 3.0 text.

        With bAgentParsing, elmRoot itself is treated as a single card
        (used for embedded AGENT vcards)."""
        sVCards = ''
        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]
        for elmCard in arCards:
            arLines = []
            def processSingleString(sProperty):
                # emit 'PROP:value' for a single string-valued property
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or ''
            def processSingleURI(sProperty):
                # emit a URI-valued property, inlining data: URIs as base64
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                # emit 'PROP;TYPE=a,b:value' entries (tel, email, label)
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    elmAgent['class'] = ''
                    elmAgent.contents = BeautifulSoup.Null
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
            # FN (full name)
            sFN = processSingleString('fn')
            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                              sGivenName + ';' +
                                              ','.join(arAdditionalNames) + ';' +
                                              ','.join(arHonorificPrefixes) + ';' +
                                              ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
            # PHOTO
            processSingleURI('photo')
            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                              sPostOfficeBox + ';' +
                                              sExtendedAddress + ';' +
                                              sStreetAddress + ';' +
                                              sLocality + ';' +
                                              sRegion + ';' +
                                              sPostalCode + ';' +
                                              sCountryName))
            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])
            # TEL (phone number)
            processTypeValue('tel', ['voice'])
            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])
            # MAILER
            processSingleString('mailer')
            # TZ (timezone)
            processSingleString('tz')
            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
            # TITLE
            processSingleString('title')
            # ROLE
            processSingleString('role')
            # LOGO
            processSingleURI('logo')
            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
            # NOTE
            processSingleString('note')
            # REV
            processSingleString('rev')
            # SOUND
            processSingleURI('sound')
            # UID
            processSingleString('uid')
            # URL
            processSingleURI('url')
            # CLASS
            processSingleString('class')
            # KEY
            processSingleURI('key')
            if arLines:
                arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
                sVCards += '\n'.join(arLines) + '\n'
        return sVCards.strip()
    def isProbablyDownloadable(self, elm):
        """Heuristic: does this element link to a downloadable media file?"""
        attrsD = elm.attrMap
        if not attrsD.has_key('href'): return 0
        linktype = attrsD.get('type', '').strip()
        if linktype.startswith('audio/') or \
           linktype.startswith('video/') or \
           (linktype.startswith('application/') and not linktype.endswith('xml')):
            return 1
        path = urlparse.urlparse(attrsD['href'])[2]
        if path.find('.') == -1: return 0
        fileext = path.split('.').pop().lower()
        return fileext in self.known_binary_extensions
    def findTags(self):
        """Collect rel="tag" links into self.tags as term/scheme/label."""
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
            href = elm.get('href')
            if not href: continue
            urlscheme, domain, path, params, query, fragment = \
                       urlparse.urlparse(_urljoin(self.baseuri, href))
            segments = path.split('/')
            tag = segments.pop()
            if not tag:
                # trailing slash: the tag is the second-to-last segment
                tag = segments.pop()
            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
            if not tagscheme.endswith('/'):
                tagscheme += '/'
            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
    def findEnclosures(self):
        """Collect rel="enclosure" (or probably-downloadable) links."""
        all = lambda x: 1
        enclosure_match = re.compile(r'\benclosure\b')
        for elm in self.document(all, {'href': re.compile(r'.+')}):
            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
            if elm.attrMap not in self.enclosures:
                self.enclosures.append(elm.attrMap)
                if elm.string and not elm.get('title'):
                    self.enclosures[-1]['title'] = elm.string
    def findXFN(self):
        """Collect links whose rel values are known XFN relationships."""
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
            rels = elm.get('rel', '').split()
            xfn_rels = []
            for rel in rels:
                if rel in self.known_xfn_relationships:
                    xfn_rels.append(rel)
            if xfn_rels:
                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Scan htmlSource for microformats.

    Returns a dict with 'tags', 'enclosures', 'xfn', and 'vcard' keys,
    or None when BeautifulSoup is not installed.
    """
    if not BeautifulSoup: return
    if _debug: sys.stderr.write('entering _parseMicroformats\n')
    parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    # findVCards is run first and its result stored on the parser
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures, "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """Regenerating HTML processor that rewrites relative URI references
    in known URI-bearing attributes against a base URI."""
    # (tag, attribute) pairs whose values are URI references
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]
    def __init__(self, baseuri, encoding, type):
        _BaseHTMLProcessor.__init__(self, encoding, type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        # join against the document base; stray whitespace is trimmed first
        return _urljoin(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        resolved = []
        for key, value in self.normalize_attrs(attrs):
            if (tag, key) in self.relative_uris:
                # 'or value' preserves the original attribute when the
                # join produces an empty result (matches prior behavior)
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
    """Return htmlSource with relative URI attributes resolved against baseURI."""
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, type)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Regenerating HTML processor that drops all elements, attributes,
    and CSS not on the whitelists below.  MathML and SVG subtrees are
    allowed only when opened with the proper xmlns declaration; content
    inside unacceptable_elements_with_end_tag is discarded entirely."""
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b',
      'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
      'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
      'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
      'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
      'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
      'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
      'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
      'ul', 'var']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding',
      'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
      'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
      'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
      'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
      'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
      'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
      'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
      'type', 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang']
    # elements whose entire content is dropped, not just the tags
    unacceptable_elements_with_end_tag = ['script', 'applet']
    acceptable_css_properties = ['azimuth', 'background-color',
      'border-bottom-color', 'border-collapse', 'border-color',
      'border-left-color', 'border-right-color', 'border-top-color', 'clear',
      'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
      'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
      'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
      'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
      'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
      'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
      'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
      'white-space', 'width']
    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
      'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
      'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
      'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
      'transparent', 'underline', 'white', 'yellow']
    # colors, percentages, and lengths with recognized units
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
      'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
      'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
      'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
      'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
      'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
      'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
      'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
      'xlink:type', 'xmlns', 'xmlns:xlink']
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
      'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image',
      'linearGradient', 'line', 'metadata', 'missing-glyph', 'mpath', 'path',
      'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg',
      'switch', 'text', 'title', 'use']
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
      'arabic-form', 'ascent', 'attributeName', 'attributeType',
      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
      'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd',
      'descent', 'display', 'dur', 'end', 'fill', 'fill-rule', 'font-family',
      'font-size', 'font-stretch', 'font-style', 'font-variant',
      'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'hanging',
      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'max',
      'min', 'name', 'offset', 'opacity', 'origin', 'overline-position',
      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
      'preserveAspectRatio', 'r', 'repeatCount', 'repeatDur',
      'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx',
      'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity',
      'strikethrough-position', 'strikethrough-thickness', 'stroke',
      'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
      'stroke-linejoin', 'stroke-miterlimit', 'stroke-width',
      'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type',
      'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode',
      'unicode-range', 'units-per-em', 'values', 'version', 'viewBox',
      'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2',
      'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role',
      'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang',
      'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']
    # lazily-built maps from lowercased SVG names back to camelCase;
    # populated on first SVG element seen (see unknown_starttag)
    svg_attr_map = None
    svg_elem_map = None
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
      'stroke-opacity']
    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # depth of nested elements whose content must be dropped entirely
        self.unacceptablestack = 0
        # nonzero while inside a properly-declared <math> / <svg> subtree
        self.mathmlOK = 0
        self.svgOK = 0
    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK = 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK = 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    # first SVG element: lowercase the whitelists once and
                    # remember how to map back to the camelCase originals
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            else:
                return
        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                clean_attrs.append((key,value))
            elif key=='style':
                # style survives only after CSS sanitization
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                # leaving the MathML subtree
                if tag == 'math': self.mathmlOK = 0
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                # leaving the SVG subtree
                if tag == 'svg': self.svgOK = 0
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped
        pass
    def handle_decl(self, text):
        # declarations (DOCTYPE etc.) are dropped
        pass
    def handle_data(self, text):
        # text is dropped while inside script/applet content
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        """Return style retaining only whitelisted CSS properties/keywords."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand properties pass only if every keyword is safe
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, type):
    """Strip unacceptable markup from htmlSource; optionally run the
    result through an installed Tidy binding to repair the HTML."""
    p = _HTMLSanitizer(encoding, type)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # binding not installed; try the next candidate
                pass
        if _tidy:
            # Tidy bindings want byte strings; remember if we must decode back
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the fragment inside <body>...</body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that records the HTTP status code on the response
    object and upgrades basic auth to digest auth when the server demands
    it, instead of raising on non-2xx responses."""
    def http_error_default(self, req, fp, code, msg, headers):
        # treat any 3xx other than 304 as a redirect; every other status
        # is wrapped so the caller can still read the body and .status
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # only follow the redirect if the server actually sent a Location
        # header; otherwise return the response as-is with its status code
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # same shape as http_error_302, delegating to urllib2's
        # permanent-redirect handling when Location is present
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # other redirect-class statuses reuse the 302 logic
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure (no basic credentials sent, old Python,
            # malformed headers) falls back to default error handling
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser.  Regardless, it MUST
    be in GMT (Greenwich Mean Time).  It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL; they are sent
                    # via the Authorization header below instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # iri support
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
        except:
            # best-effort IDNA encoding; non-IRI URLs pass through unchanged
            pass
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if type(modified) == type(''):
            modified = _parse_date(modified)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        # apply() is deprecated (and removed in Python 3); argument
        # unpacking with * is the equivalent modern form
        opener = urllib2.build_opener(*([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            # closing the opener does not close the returned response
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
# NOTE(review): the 'YYYY-0MM?-?DD' template looks garbled (stray '0' and
# '?') -- verify against upstream feedparser.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# expand each date template into a full regular expression; an optional
# time portion (T?HH:MM[:SS][TZ]) is appended to every template
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}(\.\d*)?))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl  # NOTE: relies on Python 2 list comprehensions leaking the loop variable
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex  # same Python 2 comprehension scope-leak assumption as above
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # try each precompiled template-derived pattern in order; the first
    # non-empty match wins (templates are ordered greediest-first)
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    # missing date components default to "now" (GMT)
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward GMT and
    # let mktime() normalize any overflow
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Unicode strings for the Korean words for year/month/day and am/pm,
# used to match dates emitted by Korean blog services.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# OnBlog: 'YYYY<year> MM<month> DD<day> HH:MM:SS'
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
# Nate: 'YYYY-MM-DD <am/pm> H:M:S' (12-hour clock, fields may be 1-2 digits)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    # OnBlog dates are Korean-language timestamps with an implicit
    # +09:00 (KST) offset; convert to W3DTF and delegate.
    match = _korean_onblog_date_re.match(dateString)
    if not match: return
    year, month, day, hour, minute, second = match.groups()
    w3dtfdate = '%s-%s-%sT%s:%s:%s%s' % (year, month, day,
                                         hour, minute, second, '+09:00')
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    # Nate dates use a Korean 12-hour am/pm clock with an implicit
    # +09:00 (KST) offset; normalize to 24-hour W3DTF and delegate.
    match = _korean_nate_date_re.match(dateString)
    if not match: return
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    hour = '%02d' % hour  # zero-pad to two digits
    w3dtfdate = '%s-%s-%sT%s:%s:%s%s' % \
        (match.group(1), match.group(2), match.group(3),
         hour, match.group(6), match.group(7), '+09:00')
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# MS SQL-style timestamp: 'YYYY-MM-DD HH:MM:SS[.fff]' (fraction discarded)
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # NOTE(review): hard-codes a +09:00 (KST) offset -- this handler was
    # apparently written for Korean feeds; confirm before reuse elsewhere
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
        {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
         'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
         'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# maps Greek month abbreviations to their English RFC 822 equivalents
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9efeb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }
# maps Greek weekday abbreviations to their English RFC 822 equivalents
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# RFC 822-shaped date with Greek weekday/month names:
# '<wday>, DD <month> YYYY HH:MM:SS <zone>'
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.

    Translates the Greek weekday and month abbreviations to English
    and delegates to the RFC 822 parser.  Returns None if the string
    does not match or uses unknown Greek names.
    '''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # unknown weekday or month name; not a Greek date after all
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
        {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
         'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
         'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.

    Translates the Hungarian month name to a month number, zero-pads
    the day and hour fields, and delegates to the W3DTF parser.
    Returns None if the string does not match or the month name is
    not recognized.
    '''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
    except KeyError:
        # unrecognized Hungarian month name
        return
    # zero-pad single-digit day and hour for W3DTF
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
        {'year': m.group(1), 'month': month, 'day': day,\
         'hour': hour, 'minute': m.group(5),\
         'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.  Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        # returns (year, month, day); (0, 0, 0) means "unusable date"
        year = int(m.group('year'))
        if year < 100:
            # two-digit years are taken as the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # convert a day-of-year ordinal to month/day: start from an
            # estimate and nudge it until gmtime() agrees on the ordinal.
            # NOTE: 'julian / 30' relies on Python 2 integer division.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # returns (hours, minutes, seconds); all zero if time is absent
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            # NOTE(review): the regex allows fractional seconds
            # ('\d\d(?:[.,]\d+)?') but int() would raise on them --
            # confirm against upstream whether fractions ever reach here
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # a '+HH:MM' zone is ahead of UTC, so subtract to reach GMT
        if tzd[0] == '+':
            return -offset
        return offset
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the whole string must be a date; partial matches are rejected
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets in local time, so compensate with time.timezone
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    # normalize the string into something rfc822.parsedate_tz accepts:
    # drop a leading weekday, split a 'HH:MM:SS+ZZZZ' token into time and
    # zone, and default a missing time to midnight GMT
    data = dateString.split()
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc. (offsets use rfc822's hours*100 form)
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # try each registered handler (most recently registered first) and
    # return the first valid 9-tuple; any handler exception just moves
    # on to the next handler
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # validate that every element is numeric (raises otherwise)
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/

    Returns a 5-tuple: (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Each branch matches a BOM or the byte pattern of '<?' in a
    # particular encoding and transcodes the data to UTF-8 so that the
    # XML declaration can be read below.
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # undecodable data; pretend there was no XML declaration
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # a declared generic UTF-16/32 name is refined to the byte order
        # actually sniffed from the document
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    # apply the RFC 3023 precedence rules described in the docstring
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    # NOTE(review): 'or' binds looser than 'and', so the next condition
    # parses as A or (B and C) -- text/xml types, or any text/*+xml type.
    # The stray parentheses are misleading but the behavior is correct.
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns the data re-encoded as UTF-8 with the XML declaration
    rewritten (or prepended) to say encoding='utf-8'.  Raises if the
    data cannot be decoded with the given (or BOM-corrected) encoding.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); if the BOM contradicts the
    # declared encoding, trust the BOM and switch encodings
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # replace any existing XML declaration, or prepend one if absent
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data, entities)
    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    entities is a dict mapping the names of the 'safe' inline entity
    definitions that were preserved to their replacement text
    '''
    # remove all inline entity definitions and the DOCTYPE itself
    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    entity_results=entity_pattern.findall(data)
    data = entity_pattern.sub('', data)
    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    doctype_results = doctype_pattern.findall(data)
    doctype = doctype_results and doctype_results[0] or ''
    # a Netscape DOCTYPE identifies the non-standard RSS 0.91 variant
    if doctype.lower().count('netscape'):
        version = 'rss091n'
    else:
        version = None
    # only allow in 'safe' inline entity definitions: a name followed by
    # a quoted value that is a character reference or contains no '&'/'"'
    replacement=''
    if len(doctype_results)==1 and entity_results:
        safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
        safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
        if safe_entities:
            replacement='<!DOCTYPE feed [\n  <!ENTITY %s>\n]>' % '>\n  <!ENTITY '.join(safe_entities)
    data = doctype_pattern.sub(replacement, data)
    return version, data, dict(replacement and safe_pattern.findall(replacement))
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    # NOTE(review): handlers=[] is a mutable default argument; it is
    # never mutated here (only concatenated in _open_resource), so it is
    # harmless, but a None default would be the safer idiom.
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    # fetch the raw feed data; any failure marks the result as bozo
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data, entities = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): message typo -- 'documented declared' should read
        # 'document declared'; left unchanged to preserve runtime output
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        # (the local name 'feedparser' shadows the module name here)
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        # fall back to the forgiving sgmllib-based parser
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    """Base class for output serializers; holds the parse() results dict."""
    def __init__(self, results):
        # results is the FeedParserDict (or plain dict) returned by parse().
        self.results = results
class TextSerializer(Serializer):
    """Serialize the results as flat 'dotted.path=value' lines, one per leaf."""
    def write(self, stream=sys.stdout):
        # Walk the whole results tree starting with an empty key prefix.
        self._writer(stream, self.results, '')
    def _writer(self, stream, node, prefix):
        """Recursively emit node under the given dotted-key prefix."""
        if not node: return
        if hasattr(node, 'keys'):
            # Dict-like node: recurse into each key in sorted order.
            keys = node.keys()
            keys.sort()
            for k in keys:
                # 'description' and 'link' are skipped; their *_detail
                # counterparts carry the same data in structured form.
                if k in ('description', 'link'): continue
                # Skip keys shadowed by a structured twin (k_detail/k_parsed).
                if node.has_key(k + '_detail'): continue
                if node.has_key(k + '_parsed'): continue
                self._writer(stream, node[k], prefix + k + '.')
        elif type(node) == types.ListType:
            # List node: recurse per element, rendering the index as [n]
            # in place of the trailing dot of the current prefix.
            index = 0
            for n in node:
                self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
                index += 1
        else:
            # Leaf node: emit "prefix=value" with backslashes and newlines
            # escaped so each leaf stays on a single physical line.
            try:
                s = str(node).encode('utf-8')
                s = s.replace('\\', '\\\\')
                s = s.replace('\r', '')
                s = s.replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(s)
                stream.write('\n')
            except:
                # Best-effort: values that cannot be stringified/encoded are
                # silently dropped rather than aborting the dump.
                pass
class PprintSerializer(Serializer):
    """Serialize the results by pretty-printing the entire dictionary."""
    def write(self, stream=sys.stdout):
        from pprint import pprint
        # Header line: the canonical URL of the parsed feed, then a blank line.
        header = self.results['href']
        stream.write(header + '\n\n')
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line driver: parse each URL/filename argument and serialize the
    # results in the requested format (text or pprint).
    try:
        from optparse import OptionParser
    except:
        # optparse is unavailable before Python 2.3; fall back to a bare
        # argv-based mode below.
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            # We are at module level here, so this rebinds the module-global
            # _debug flag used throughout the parser.
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        # Minimal stand-in for the optparse options object: all HTTP options
        # default to None, output format defaults to pprint.
        class _Options:
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    # pprint renders plain dicts better than FeedParserDict, so downgrade.
    zopeCompatibilityHack()
    # Map 'text' -> TextSerializer, 'pprint' -> PprintSerializer; unknown
    # formats fall back to the (no-op) Serializer base class.
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
#4.2 - MAP - added support for parsing microformats within content elements:
# currently supports rel-tag (maps to 'tags'), rel-enclosure (maps to
# 'enclosures'), XFN links within content elements (maps to 'xfn'),
# and hCard (parses as vCard); bug [ 1481975 ] Misencoded utf-8/win-1252
| Python |
#!/usr/bin/env python
# Test runner: discovers tests/test_*.py modules and runs them with unittest.
import glob, unittest, os, sys
from trace import fullmodname
try:
    from tests.utils import cleanup
except:
    # No project-level cleanup helper available; substitute a no-op.
    def cleanup():
        pass
# try to start in a consistent, predictable location
if sys.path[0]: os.chdir(sys.path[0])
sys.path[0] = os.getcwd()
# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))
# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message by importing each test
    # module individually; the first broken one raises with a clear traceback
    for module in modules:
        __import__(module)
    raise
# Verbosity: 1 by default, 0 with -q/--quiet, 2 with -v/--verbose.
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2
# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
from gettext import gettext as _
class Reportable:
    """Common base for validator reports (errors, warnings, suggestions).

    Subclasses set a class-level ``text`` message; an optional ``pace``
    class attribute tags the report with the Pace proposal it relates to.
    """
    text = ""

    def __init__(self, extra=None):
        self.context = ""
        self.extra = extra
        # Reports tied to a Pace proposal advertise it in the extra text.
        if hasattr(self, 'pace'):
            self.extra += "\n [Pace%s]" % self.pace

    def tostring(self):
        """Multi-line rendering: context, message text, then extra detail."""
        return "\n".join([self.context, self.text, self.extra])

    def toshortstring(self):
        """Single-line rendering: context and message text only."""
        return "%s : %s" % (self.context, self.text)
# Every report should subclass one of these three classes,
# which will make filtering of results easy.
# NOTE: "Warning" deliberately shadows the builtin Warning in this module;
# callers filter reports by these class names, so keep them as-is.
class Error(Reportable): pass
class Warning(Reportable): pass
class Suggestion(Reportable): pass
# Suggestion raised when posted i18n content does not round-trip intact.
class ServerShouldHandleI18NContent(Suggestion):
    text = _('Server has discarded or been unable to handle i18n content.')
class ShouldSupportCacheValidators(Suggestion):
    # Typo fix in user-facing message: "Last-Modifed" -> "Last-Modified"
    # (the HTTP header name). Note this also changes the gettext msgid.
    text = _('GET should support the use of ETags and/or Last-Modified cache validators.')
class ShouldSupportCompression(Suggestion):
    # Grammar fix in user-facing message: "to speed of transfers" ->
    # "to speed up transfers". Note this also changes the gettext msgid.
    text = _('GET should support the use of compression to speed up transfers.')
class MustUseValidAtom(Error):
    text = _('Atom entries and feeds MUST be valid. [RFC 4287]')
class AtomShouldViolation(Warning):
    text = _('Violation of a SHOULD directive of [RFC 4287]')
# NOTE(review): the class name says "Must" but the message cites a SHOULD and
# the class is only a Warning -- confirm the intended severity.
class MustRejectNonWellFormedAtom(Warning):
    text = _('A server SHOULD reject non-wellformed content. [XML 1.0 Section 5.1 Validating and Non-Validating Processors]')
class InternalErrorEncountered(Error):
    # Typo fix in user-facing message: "non-welllformed" -> "non-wellformed".
    # Note this also changes the gettext msgid.
    text = _('Internal error encountered. This error can occur if the site returned non-wellformed XML.')
# Reports about the entry create/update/delete lifecycle of an APP server.
class EntryCreationMustReturn201(Warning):
    text = _('When an entry is successfully created the server SHOULD return an HTTP status code of 201. [RFC 2616 Section 9.5 POST]')
class EntryCreationMustReturnLocationHeader(Error):
    text = _('When an entry is successfully created the server MUST return a Location: HTTP header. [APP-08 Section 8.1 Creating Resource with POST]')
class EntryCreationMustBeReflectedInFeed(Error):
    text = _('When an entry is successfully created it must be added to the associated feed. [APP-08 Section 8.1 Creating Resources.]')
class EntryDeletionFailed(Error):
    text = _('The status returned does not reflect a successful deletion.')
class EntryUpdateFailed(Error):
    text = _('The status returned does not reflect a successful update.')
# The 'pace' attribute below is picked up by Reportable.__init__, which
# appends "[Pace<name>]" to the report's extra text.
class EntryDeletionMustBeReflectedInFeed(Error):
    text = _('When an entry is successfully deleted, the Member URI MUST be removed from the collection. ')
    pace = 'PaperTrail'
class LocationHeaderMustMatchLinkRelEdit(Error):
    text = _('The link/@rel="edit" URI must match the URI returned via the Location: HTTP header during creation.')
    pace = 'PaperTrail'
class GetFailedOnMemberResource(Error):
    text = _('Could not dereference the Member URI.')
| Python |
# py2exe build script: bundles appeditor.py into a Windows console executable.
from distutils.core import setup
import py2exe
setup(console=["appeditor.py"])
#!/usr/bin/env python
# Test runner: discovers tests/test_*.py modules and runs them with unittest.
import glob, unittest, os, sys
from trace import fullmodname
try:
    from tests.utils import cleanup
except:
    # No project-level cleanup helper available; substitute a no-op.
    def cleanup():
        pass
# try to start in a consistent, predictable location
if sys.path[0]: os.chdir(sys.path[0])
sys.path[0] = os.getcwd()
# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))
# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message by importing each test
    # module individually; the first broken one raises with a clear traceback
    for module in modules:
        __import__(module)
    raise
# Verbosity: 1 by default, 0 with -q/--quiet, 2 with -v/--verbose.
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2
# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
#!/usr/bin/env python2.4
import httplib2
import apptools
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
import sha
import cStringIO
from urlparse import urljoin
import os
import anydbm
# Namespace templates: fill in a local name to get the Clark-notation tag
# used by ElementTree, e.g. ATOM % "title".
ATOM = "{http://www.w3.org/2005/Atom}%s"
APP = "{http://purl.org/atom/app#}%s"
# The flattened entry keys exposed by Entry; "x__type" carries the @type
# attribute of the corresponding Atom text construct.
ENTRY_ELEMENTS = ["title", "title__type", "summary", "summary__type", "content", "content__type"]
# Skeleton entry used when an Entry has no member URI to fetch from.
DEFAULT_ENTRY = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
    <title type="text">Title goes here.</title>
    <id>http://bitworking.org/foo/app/main/third</id>
    <author>
        <name>anonymous</name>
    </author>
    <updated>2006-08-04T15:52:00-05:00</updated>
    <summary type="xhtml">
        <div xmlns="http://www.w3.org/1999/xhtml">
        </div>
    </summary>
    <content type="xhtml">
        <div xmlns="http://www.w3.org/1999/xhtml">
        </div>
    </content>
</entry>"""
# Placeholder entry shown when fetching a member resource fails; the %d slot
# takes the HTTP status code. (Typo fix: "occured" -> "occurred".)
ERROR_ENTRY = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
    <title type="text">An Error Occurred.</title>
    <id>http://bitworking.org/foo/app/main/third</id>
    <author>
        <name>anonymous</name>
    </author>
    <updated>2006-08-04T15:52:00-05:00</updated>
    <summary type="xhtml">
        <div xmlns="http://www.w3.org/1999/xhtml">
        An error occurred trying to access this entry.
        Received a status code of: %d
        </div>
    </summary>
    <content type="xhtml">
        <div xmlns="http://www.w3.org/1999/xhtml">
        An error occurred trying to access this entry.
        </div>
    </content>
</entry>"""
class Entry(object):
    """A single APP member resource, exposing its Atom fields dict-style.

    h is an httplib2.Http-style object; edit is the member (edit) URI, or a
    false value for a brand-new, not-yet-posted entry.
    """
    def __init__(self, h, edit, title="", title__type="text", updated="", published="", **kwargs):
        self.h = h
        self.member_uri = edit
        # Will be filled in with an ElementTree of the entry
        # once self.get() is called.
        self.element = None
        # Flattened view of the entry's text constructs; "x__type" carries
        # the @type attribute for field x.
        self._values = {
            "title" : title,
            "title__type" : title__type,
            "updated" : updated,
            "published" : published,
            "summary": "",
            "summary__type": "text",
            "content": "",
            "content__type": "text"
        }
        self._values.update(kwargs)
    # def get/set text element (takes both text and its type)
    # def get/set link (rel and optional type).
    def __getitem__(self, name):
        # Unknown keys fall back to 'text' for *__type keys, '' otherwise.
        return self._values.get(name, name.endswith("__type") and 'text' or '')
    def __setitem__(self, name, value):
        # Only keys that already exist may be assigned; no new keys.
        if name in self._values:
            self._values[name] = value
        else:
            raise IndexError, "index '%s' not found" % name
    def get(self):
        """Fetch the member resource and refresh element/_values from it.

        On HTTP failure the ERROR_ENTRY placeholder (with the status code)
        is parsed instead; without a member URI, DEFAULT_ENTRY is used.
        """
        if self.member_uri:
            (resp, content) = self.h.request(self.member_uri)
            if resp.status != 200:
                content = ERROR_ENTRY % resp.status
        else:
            content = DEFAULT_ENTRY
        #validate_atom(content, self.member_uri)
        self.element = fromstring(content)
        d = apptools.parse_atom_entry(self.member_uri, self.element)
        self._values.update(d)
    def put(self):
        # loop over the values in self._values, update self.element
        # then serialize the element into a PUT
        self.h.request(self.member_uri, method="PUT", body=self.tostring(), headers={
            'content-type': 'application/atom+xml'
            }
        )
    def delete(self):
        """Delete the member resource from the server."""
        self.h.request(self.member_uri, method="DELETE")
    def tostring(self):
        # Push the current _values back into the ElementTree, then serialize.
        apptools.unparse_atom_entry(self.element, self._values)
        return tostring(self.element)
class _EntryIterator(object):
    """Iterate the entries of a paged collection feed, newest page first.

    hit_map is a persistent (dbm-style) mapping of entry hashes already seen;
    with onlynew=True, iteration stops at the first previously-seen entry.
    """
    def __init__(self, h, collection_uri, hit_map, onlynew=False):
        self.h = h
        self.collection_uri = collection_uri
        self.local_hit_map = {}
        # URI of the next feed page to fetch; starts at the collection itself.
        self.page_uri = collection_uri
        self.hit_map = hit_map
        self.entries = []
        # Set once we encounter an entry already present in hit_map; from
        # then on pages are requested cache-first.
        self.old_stuff = False
        self.onlynew = onlynew
    def __iter__(self):
        return self
    def __del__(self):
        # Flush the persistent hit map even on abandoned iteration.
        self.hit_map.sync()
    def next(self):
        # Once we've seen a 304 we should probably try "Cache-control: only-if-cached" first
        # and only hit the web if we get a cache miss
        if not self.entries:
            # Current page exhausted; fetch the next one (if any).
            if not self.page_uri:
                self.hit_map.sync()
                raise StopIteration
            # If we have already hit an entry we've seen before, in the hit_map,
            # then try all requests first from the cache. Cache-control: only-if-cached.
            # If that fails then request over the net.
            if self.old_stuff:
                (resp, content) = self.h.request(self.page_uri, headers={'cache-control': 'only-if-cached'})
                if resp.status != 304:
                    (resp, content) = self.h.request(self.page_uri)
                else:
                    # Cache hit: treat the cached copy as a fresh 200.
                    resp.status = 200
            else:
                (resp, content) = self.h.request(self.page_uri)
            if resp.status != 200:
                self.hit_map.sync()
                raise StopIteration
            (self.entries, self.page_uri) = apptools.parse_collection_feed(self.page_uri, content)
        if len(self.entries):
            entry = self.entries[0]
            del self.entries[0]
            # Compute the hit hash from the "edit" URI and the app:edited/atom:updated
            # Do we skip entries that do not have an "edit" URI?!?
            hash = sha.sha(entry["edit"] + entry["edited"]).hexdigest()
            if hash in self.hit_map:
                self.old_stuff = True
                self.hit_map.sync()
                if self.onlynew:
                    raise StopIteration
            else:
                self.hit_map[hash] = entry["edit"]
            return Entry(self.h, **entry)
        else:
            self.hit_map.sync()
            raise StopIteration
class Collection(object):
    """A single APP collection, with a persistent record of seen entries."""
    def __init__(self, h, cachedir, href, title, workspace, accept):
        self.h = h
        self.href = href
        self.title = title
        self.workspace = workspace
        self.accept = accept
        # Per-collection cache directory, named after a filesystem-safe
        # transformation of the collection URI.
        self.cachedir = os.path.join(cachedir, httplib2.safename(href))
        if not os.path.exists(self.cachedir):
            os.makedirs(self.cachedir)
        # Persistent dbm map of entry hashes this client has already seen.
        self.hitmap = anydbm.open(os.path.join(self.cachedir, "hitmap.db"), "c")
    def post(self, entry):
        """Create a new member by POSTing the entry to the collection URI."""
        headers = {'content-type': 'application/atom+xml;type=entry'}
        (resp, content) = self.h.request(self.href, method="POST",
                                         body=entry.tostring(), headers=headers)
    def iter_entries(self):
        """Iterate over every entry in the collection."""
        return _EntryIterator(self.h, self.href, self.hitmap)
    def iter_new_entries(self):
        """Iterate only until the first previously-seen entry."""
        return _EntryIterator(self.h, self.href, self.hitmap, onlynew=True)
class Service:
    """Client for an APP service document: discovers workspaces/collections."""
    def __init__(self, service_uri, cachedir, username, password):
        self.h = httplib2.Http(os.path.join(cachedir, ".httplib2_cache"))
        self.h.follow_all_redirects = True
        self.h.add_credentials(username, password)
        # A list of tuples, each a name and a list of Collection objects.
        self._workspaces = []
        (resp, content) = self.h.request(service_uri)
        # Anything but a 200 leaves the workspace list empty (no error raised).
        if resp.status == 200:
            service = fromstring(content)
            workspaces = service.findall(APP % "workspace")
            for w in workspaces:
                wstitle = w.find(ATOM % "title")
                # NOTE: the and/or idiom also yields "No title" when the
                # title element exists but its text is empty/None.
                wsname = (wstitle != None) and wstitle.text or "No title"
                collections = []
                collection_elements = w.findall(APP % "collection")
                for c in collection_elements:
                    # cp collects the keyword arguments for Collection().
                    cp = {}
                    title = c.find(ATOM % "title")
                    cp['title'] = (title != None) and title.text or "No title"
                    # @href may be relative; resolve against the service URI.
                    cp['href'] = urljoin(service_uri, c.get('href', ''))
                    cp['workspace'] = wsname
                    accepts = c.findall(APP % "accept")
                    cp['accept'] = [node.text for node in accepts]
                    collections.append(Collection(self.h, cachedir, **cp))
                self._workspaces.append( (wsname, collections) )
    def collections(self):
        """Return all collections across all workspaces as one flat list."""
        return sum([collections for (wsname, collections) in self._workspaces], [])
    def workspaces(self):
        """Returns a list of tuples, (workspacename, collections), where
        collections is a list of Collection objects, and workspacename is the
        name of the workspace"""
        return self._workspaces
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 1.144 $"[11:16] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
# Debug flag: set to 1 (or run the CLI with -v) to print parser traces
# and exception details to stderr.
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    from xml.sax.saxutils import escape as _xmlescape
    _XML_AVAILABLE = 1
except:
    _XML_AVAILABLE = 0
    def _xmlescape(data,entities={}):
        """Escape &, <, and > in a string of data; additionally replace
        each key of `entities` with its value (mirrors the interface of
        xml.sax.saxutils.escape, used when xml.sax is available)."""
        # '&' must be escaped first, or the other escapes get re-escaped
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        # entities is a dict of {character: replacement}; iterating the
        # dict directly would yield bare keys and fail to unpack
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# Reversible htmlentitydefs mappings for Python 2.2 and earlier, which ship
# entitydefs but not the name2codepoint/codepoint2name tables.
try:
    from htmlentitydefs import name2codepoint, codepoint2name
except:
    import htmlentitydefs
    name2codepoint={}
    codepoint2name={}
    for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
        # entitydefs values are either a raw character or a '&#ddd;' reference
        if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
        name2codepoint[name]=ord(codepoint)
        codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/. At the moment, it appears
# that there is a version incompatibility, so the import is replaced with
# a 'None'. Restoring the try/import/except/none will re-enable the MF tests.
BeautifulSoup = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Monkeypatch sgmllib's lexer tables so it tolerates the tag names, numeric
# and hex character references, and bare '<!' declarations found in real feeds.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
if sgmllib.endbracket.search(' <').start(0):
    # This sgmllib's endbracket pattern stops too early on input like ' <';
    # swap in a quote-aware matcher that presents the same search()/start()
    # interface sgmllib expects.
    class EndBracketMatch:
        endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            self.match = self.endbracket.match(string,index)
            if self.match: return self
        def start(self,n):
            return self.match.end(n)
    sgmllib.endbracket = EndBracketMatch()
# Human-readable names for each feed flavor this parser can detect;
# the parser's version attribute holds one of these keys.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    # Python 2.2+: UserDict can simply be the builtin dict type
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # minimal dict() replacement: build a mapping from (key, value) pairs
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary that transparently maps legacy feedparser key names
    (e.g. 'channel', 'modified', 'tagline') onto their modern equivalents,
    synthesizes a few derived keys ('category', 'enclosures', 'categories'),
    and also exposes its entries as attributes."""
    # old key -> new key; a list value means: try each alias in order
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # derived keys are computed from the 'tags' and 'links' structures
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # enclosure links, minus their 'rel' attribute
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        # try the mapped (modern) key first, then the key as given
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes always land on the modern key name (first alias for lists)
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # real attributes (and keymap aliases, via __getattr__) count too
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # attribute access falls back to item access for non-underscore names
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # underscore names and UserDict's 'data' slot are real attributes;
        # everything else is stored as a dictionary entry
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Replace FeedParserDict with a factory that returns plain dicts,
    for environments (such as Zope) that cannot cope with dict subclasses."""
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        # plain dictionary, optionally seeded from aDict (mapping or pairs)
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Lazily-built 256-byte translation table for str.translate; None until
# the first call to _ebcdic_to_ascii.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to its ASCII equivalent."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # emap[i] is the ASCII/latin-1 code point for EBCDIC byte i
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# Map of Windows-1252 "smart punctuation" code points (0x80-0x9f) to the
# proper Unicode characters; feeds frequently contain cp1252 text that is
# mislabeled as iso-8859-1, where these positions are control characters.
_cp1252 = {
  unichr(128): unichr(8364), # euro sign
  unichr(130): unichr(8218), # single low-9 quotation mark
  unichr(131): unichr( 402), # latin small letter f with hook
  unichr(132): unichr(8222), # double low-9 quotation mark
  unichr(133): unichr(8230), # horizontal ellipsis
  unichr(134): unichr(8224), # dagger
  unichr(135): unichr(8225), # double dagger
  unichr(136): unichr( 710), # modifier letter circumflex accent
  unichr(137): unichr(8240), # per mille sign
  unichr(138): unichr( 352), # latin capital letter s with caron
  unichr(139): unichr(8249), # single left-pointing angle quotation mark
  unichr(140): unichr( 338), # latin capital ligature oe
  unichr(142): unichr( 381), # latin capital letter z with caron
  unichr(145): unichr(8216), # left single quotation mark
  unichr(146): unichr(8217), # right single quotation mark
  unichr(147): unichr(8220), # left double quotation mark
  unichr(148): unichr(8221), # right double quotation mark
  unichr(149): unichr(8226), # bullet
  unichr(150): unichr(8211), # en dash
  unichr(151): unichr(8212), # em dash
  unichr(152): unichr( 732), # small tilde
  unichr(153): unichr(8482), # trade mark sign
  unichr(154): unichr( 353), # latin small letter s with caron
  unichr(155): unichr(8250), # single right-pointing angle quotation mark
  unichr(156): unichr( 339), # latin small ligature oe
  unichr(158): unichr( 382), # latin small letter z with caron
  unichr(159): unichr( 376)} # latin capital letter y with diaeresis
# Matches 'scheme://' plus any run of extra slashes after it. Scheme
# characters per RFC 3986 are ALPHA / DIGIT / '+' / '-' / '.'; note the
# class is written [+.-] so '-' is literal (the former '+-.' range
# accidentally included ',').
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+.-]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join a possibly-relative uri against base, after collapsing any
    redundant slashes right after the scheme (http:////x -> http://x)."""
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        # malformed URI: percent-quote each component and retry
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
    """Element-handling state machine shared by the concrete feed parser
    classes defined elsewhere in this module."""
    # namespace URI -> canonical prefix used when building handler method
    # names ('' means the core RSS/Atom vocabulary, handled unprefixed)
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/XML/1998/namespace': 'xml',
                  'http://www.w3.org/1999/xlink': 'xlink',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
                  }
    # lowercased-URI index of `namespaces`, built lazily by __init__
    _matchnamespaces = {}
    # elements whose text content may itself be a relative URI to resolve
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    # elements whose embedded markup may contain relative URIs
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # elements whose embedded markup is sanitized before being returned
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # content types treated as HTML for resolving/sanitizing purposes
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Initialize per-document parse state.

        baseuri  -- base URI for resolving relative references
        baselang -- default language code, if already known
        encoding -- character encoding of the document
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # first instance populates the shared lowercase namespace index
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')
    def unknown_starttag(self, tag, attrs):
        """Generic start-tag handler: tracks xml:base/xml:lang and namespace
        declarations, passes inline XHTML content through as markup, and
        otherwise dispatches to a _start_<prefix><tag> handler if defined."""
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """Generic end-tag handler: dispatches to _end_<prefix><tag> (or
        pop()), closes inline XHTML, and restores the enclosing
        xml:base/xml:lang scope."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        # keep the XML-special characters (quote, amp, apos, lt, gt) as
        # numeric references; decode everything else to utf-8 text
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        # the five XML-predefined entities stay escaped
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            # entity defined by the document (self.entities is populated
            # elsewhere); numeric definitions are re-processed as references
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            # known HTML entity -> utf-8 text; unknown -> leave escaped
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->; ignored
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>; ignored
        pass
    def handle_decl(self, text):
        # called for each declaration (e.g. doctype); ignored
        pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses to decode character entities in embedded
        # markup; the base implementation returns the data unchanged
        return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
    def push(self, element, expectingText):
        # open a new element: [name, whether text content is expected, pieces]
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the innermost open element and post-process its text.

        Runs the accumulated text through base64 decoding, relative-URI
        resolution, entity decoding, microformat extraction, sanitizing,
        and encoding fixups as appropriate, stores the result in the
        current feed/entry structure, and returns it.
        """
        if not self.elementstack: return
        # ignore a mismatched close tag
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #    <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0: break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # loop completed without the wrapper closing early:
                    # strip the enclosing <div>...</div>
                    pieces = pieces[1:-1]
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # nominally plain text that looks like escaped HTML is promoted
        if self.lookslikehtml(output):
            self.contentparams['type']='text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard
        # sanitize embedded markup
        if is_htmlish:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding=='utf-8' and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass
        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
    def popContent(self, tag):
        # close a content-bearing element and return its accumulated value,
        # clearing the content parameters for the next one
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
if self.version.startswith('atom'): return
if self.contentparams.get('type','text/html') != 'text/plain': return
# must have a close tag or a entity reference to qualify
if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',str)): return
# all entities must have been defined as valid HTML entities
from htmlentitydefs import entitydefs
if filter(lambda e: e not in entitydefs.keys(),
re.findall(r'&(\w+);',str)): return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
    def _getAttribute(self, attrsD, name):
        # look up an attribute by its prefix-normalized qualified name
        return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value):
        # store value in the current context, but never overwrite an
        # existing value for the same key
        context = self._getContext()
        context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # <dlhottitles> marks the feed as Hot RSS
        self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries modification date and link as attributes; replay them
        # through the regular start/end handlers by injecting the attribute
        # value as the element's text content
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # Store one field ('name'/'href'/'email') of a structured
        # author/publisher record, then resynchronize the plain string form.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # Store one field on the most recently started contributor.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # Subtitle is a full content construct (may carry type/base/lang).
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # Copyright/rights statement, also a content construct.
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Begin a new entry; until the matching end tag, _getContext()
        # routes parsed values into this entry's dict.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        # RSS 1.0 items carry their identifier in rdf:about.
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # The declared language also becomes the current document language.
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        # Rebuild publisher/publisher_detail from the collected fields.
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # pop() stores the raw string; additionally save the parsed form.
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # RDF-style license: the value lives in the rdf:resource attribute,
        # so inject it and pop immediately (no element text expected).
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        # creativeCommons:license carries the URL as element text.
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
    def _addXFN(self, relationships, href, name):
        # Record an XFN relationship, skipping exact duplicates.
        context = self._getContext()
        xfn = context.setdefault('xfn', [])
        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
        if value not in xfn:
            xfn.append(value)
    def _addTag(self, term, scheme, label):
        # Record a category/tag, skipping fully-empty and duplicate tags.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(value)
    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        # Atom categories use term/scheme/label attributes; RSS 2.0 uses a
        # 'domain' attribute as the scheme with the term in element text
        # (filled in later by _end_category).
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list; one tag apiece.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # Element text completes the tag opened by _start_category if that
        # tag had no 'term' attribute; otherwise it is a tag of its own.
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # rssCloud: all information is carried in the attributes.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Atom defaults: rel=alternate; rel=self links default to the
        # Atom media type, everything else to text/html.
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        # An enclosure link can stand in for a missing entry id.
        if attrsD.get('rel')=='enclosure' and not context.get('id'):
            context['id'] = attrsD.get('href')
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            # href present: no element text expected; the alternate HTML
            # link also becomes the canonical 'link'.
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        # RSS-style <link>text</link> content is captured as a side effect
        # of pop(); nothing further is stored here.
        value = self.pop('link')
        context = self._getContext()
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # isPermaLink defaults to 'true' per RSS 2.0.
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # Inside another content construct, <title> is just inline markup.
        if self.incontent: return self.unknown_starttag('title', attrsD)
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        # popContent() stores the title; empty titles are discarded.
        value = self.popContent('title')
        if not value: return
        context = self._getContext()
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        # If a summary was already captured, treat this description as the
        # full content rather than overwriting the summary.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        # Like description, but declared plain text.
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        if attrsD:
            # Atom generator may carry a uri/url attribute alongside the
            # text name; normalize it to 'href' and resolve it.
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # admin:generatorAgent carries its value in rdf:resource; inject it
        # into the element stack and pop immediately.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # Same rdf:resource push/pop pattern as generatorAgent.
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # A second summary-like element is treated as full content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # Enclosures are stored as links with rel='enclosure'.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel']='enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        # An enclosure can stand in for a missing entry id.
        if href and not context.get('id'):
            context['id'] = href
    def _start_source(self, attrsD):
        self.insource = 1
    def _end_source(self):
        self.insource = 0
        # Deep-copy so later parsing cannot mutate the saved source dict.
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            # Atom out-of-line content: remember where the body lives.
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain/HTML content doubles as the description.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        # Normalize the 'yes'/'no' flag to 1/0.
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX ContentHandler that feeds namespace-aware start/end/text
        events into _FeedParserMixin.  On any parse error it records bozo
        status so the caller can fall back to the loose parser."""
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            # was '<>': the deprecated inequality operator, replaced by '!='
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                # was old-style 'raise E, msg' (a syntax error on Python 3)
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # Non-fatal parse error: mark the feed as bozo but keep going.
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that regenerates the HTML it parses into self.pieces.

    Subclasses override the handle_*/unknown_* callbacks to filter or
    rewrite markup; this base class reconstructs each construct verbatim
    (with well-formedness fixups such as self-closing empty elements).
    """
    special = re.compile('''[<>'"]''')
    # matches an '&' that does not begin a character or entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding, type):
        self.encoding = encoding
        self.type = type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        # self.pieces accumulates the regenerated output fragments
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # normalize XML-style '<tag/>': self-close void elements, expand
        # everything else to an explicit start/end pair
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def parse_starttag(self,i):
        j=sgmllib.SGMLParser.parse_starttag(self, i)
        if self.type == 'application/xhtml+xml':
            # in XHTML mode a self-closing tag also ends the element
            if j>2 and self.rawdata[j-2:j]=='/>':
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # escape '<!' unless it begins a declaration, comment, or marked
        # section, so sgmllib doesn't choke on it
        # (was a no-op r'<!\1' replacement: the '&lt;' escape had been lost)
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        # decode numeric quote references so sgmllib sees literal quotes
        # (both replaces had degenerated into no-ops)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs: return attrs
        # utility method to be called by descendants
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # re-escape special characters for serialization back into
                # key="value" form (these replaces had degenerated into no-ops)
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                uattrs.append((unicode(key, self.encoding), value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs=strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:],16))
        else:
            value = unichr(int(ref))
        if value in _cp1252.keys():
            # remap cp1252 "smart" characters to their Unicode equivalents
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # unknown entity: escape the ampersand so the output stays valid
            # (was '&%(ref)s', which re-emitted the bogus bare reference)
            self.pieces.append('&amp;%(ref)s' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # replacement for sgmllib's _scan_name that tolerates ':' in names
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser built on sgmllib for ill-formed XML."""
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # Normalize numeric character references for the five XML special
        # characters to their named-entity forms.  (These replaces had
        # degenerated into no-ops like replace('<', '<') when the entity
        # escapes were lost from the literals.)
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # Non-XML content types get the named entities fully decoded.
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # serialize attributes, escaping embedded double quotes
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
    """Extract microformats (hCard, rel-tag, rel-enclosure, XFN) from an
    HTML document using BeautifulSoup.

    Results are accumulated in self.tags, self.enclosures, self.xfn, and
    self.vcard (a vCard 3.0 string built by findVCards).
    """
    # property-type constants for getPropertyValue
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5

    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']

    def __init__(self, data, baseuri, encoding):
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        # NOTE(review): data is encoded *after* it was already handed to
        # BeautifulSoup above, and the encoded value is never used again —
        # verify intent.
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None

    def vcardEscape(self, s):
        # escape vCard-special characters per RFC 2426
        if type(s) in (type(''), type(u'')):
            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
        return s

    def vcardFold(self, s):
        # fold long vCard lines at 75 octets with a one-space continuation
        # indent (RFC 2426 section 2.6)
        s = re.sub(';+$', '', s)
        sFolded = ''
        iMax = 75
        sPrefix = ''
        while len(s) > iMax:
            sFolded += sPrefix + s[:iMax] + '\n'
            s = s[iMax:]
            sPrefix = ' '
            # continuation lines hold one less character because of the
            # leading space
            iMax = 74
        sFolded += sPrefix + s
        return sFolded

    def normalize(self, s):
        # collapse runs of whitespace and trim
        return re.sub(r'\s+', ' ', s).strip()

    def unique(self, aList):
        # order-preserving deduplication
        results = []
        for element in aList:
            if element not in results:
                results.append(element)
        return results

    def toISO8601(self, dt):
        # dt is a time 9-tuple (as returned by the date parsers)
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)

    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        """Find microformat property values below elmRoot.

        sProperty is the class name to look for; iPropertyType is one of
        the STRING/DATE/URI/NODE/EMAIL constants; bAllowMultiple returns a
        list of all matches instead of the first; bAutoEscape applies
        vcardEscape to string results.
        """
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        propertyMatch = re.compile(r'\b%s\b' % sProperty)
        if bAllowMultiple and (iPropertyType != self.NODE):
            # multiple values may be expressed as <li> items in a list
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            # special case: a bare <pre> can carry the 'value'
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
            if not bFound:
                snapResults = [elmRoot]
                bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # exclude vcards nested inside other vcards
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # type-appropriate "empty" result
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return BeautifulSoup.Null
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return BeautifulSoup.Null
            else: return BeautifulSoup.Null
        arValues = []
        for elmResult in arResults:
            sValue = BeautifulSoup.Null
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # value extraction, in decreasing order of specificity:
            # mailto: href, abbr title, URI-bearing attribute, img alt,
            # finally the rendered text content
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues

    def findVCards(self, elmRoot, bAgentParsing=0):
        """Assemble all hCards below elmRoot into a vCard 3.0 string.

        bAgentParsing=1 means elmRoot itself is the card (used when
        recursing into embedded AGENT vcards).
        """
        sVCards = ''

        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]

        for elmCard in arCards:
            arLines = []

            def processSingleString(sProperty):
                # emit 'PROP:value' for a single string property
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or ''

            def processSingleURI(sProperty):
                # emit a URI property, handling inline data: URIs as
                # base64-encoded values with a content type
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))

            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                # emit 'PROP;TYPE=a,b:value' entries for typed properties
                # such as TEL and EMAIL
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))

            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    elmAgent['class'] = ''
                    elmAgent.contents = BeautifulSoup.Null
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))

            # FN (full name)
            sFN = processSingleString('fn')

            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                         sGivenName + ';' +
                                         ','.join(arAdditionalNames) + ';' +
                                         ','.join(arHonorificPrefixes) + ';' +
                                         ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))

            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))

            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))

            # PHOTO
            processSingleURI('photo')

            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))

            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                         sPostOfficeBox + ';' +
                                         sExtendedAddress + ';' +
                                         sStreetAddress + ';' +
                                         sLocality + ';' +
                                         sRegion + ';' +
                                         sPostalCode + ';' +
                                         sCountryName))

            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])

            # TEL (phone number)
            processTypeValue('tel', ['voice'])

            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])

            # MAILER
            processSingleString('mailer')

            # TZ (timezone)
            processSingleString('tz')

            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))

            # TITLE
            processSingleString('title')

            # ROLE
            processSingleString('role')

            # LOGO
            processSingleURI('logo')

            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))

            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))

            # NOTE
            processSingleString('note')

            # REV
            processSingleString('rev')

            # SOUND
            processSingleURI('sound')

            # UID
            processSingleString('uid')

            # URL
            processSingleURI('url')

            # CLASS
            processSingleString('class')

            # KEY
            processSingleURI('key')

            if arLines:
                arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
                sVCards += '\n'.join(arLines) + '\n'

        return sVCards.strip()

    def isProbablyDownloadable(self, elm):
        # heuristic: an href pointing at audio/video/binary content types
        # or a known binary file extension is treated as an enclosure
        attrsD = elm.attrMap
        if not attrsD.has_key('href'): return 0
        linktype = attrsD.get('type', '').strip()
        if linktype.startswith('audio/') or \
           linktype.startswith('video/') or \
           (linktype.startswith('application/') and not linktype.endswith('xml')):
            return 1
        path = urlparse.urlparse(attrsD['href'])[2]
        if path.find('.') == -1: return 0
        fileext = path.split('.').pop().lower()
        return fileext in self.known_binary_extensions

    def findTags(self):
        # rel-tag: the tag term is the last non-empty path segment of the
        # link target; everything before it is the tag scheme
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
            href = elm.get('href')
            if not href: continue
            urlscheme, domain, path, params, query, fragment = \
                       urlparse.urlparse(_urljoin(self.baseuri, href))
            segments = path.split('/')
            tag = segments.pop()
            if not tag:
                tag = segments.pop()
            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
            if not tagscheme.endswith('/'):
                tagscheme += '/'
            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))

    def findEnclosures(self):
        # rel-enclosure, plus the isProbablyDownloadable heuristic
        all = lambda x: 1
        enclosure_match = re.compile(r'\benclosure\b')
        for elm in self.document(all, {'href': re.compile(r'.+')}):
            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
            if elm.attrMap not in self.enclosures:
                self.enclosures.append(elm.attrMap)
                if elm.string and not elm.get('title'):
                    self.enclosures[-1]['title'] = elm.string

    def findXFN(self):
        # collect rel values that are known XFN relationship names
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
            rels = elm.get('rel', '').split()
            xfn_rels = []
            for rel in rels:
                if rel in self.known_xfn_relationships:
                    xfn_rels.append(rel)
            if xfn_rels:
                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    # Run the microformats parser over an HTML document and return the
    # harvested data; returns None when BeautifulSoup is unavailable.
    if not BeautifulSoup:
        return
    if _debug:
        sys.stderr.write('entering _parseMicroformats\n')
    parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures,
            "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites relative URI attributes as absolute,
    resolving them against a base URI supplied at construction time."""

    # (tag, attribute) pairs whose values are URIs and should be resolved
    relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'),
        ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'),
        ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'),
        ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'),
        ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'),
        ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'),
        ('link', 'href'), ('object', 'classid'), ('object', 'codebase'),
        ('object', 'data'), ('object', 'usemap'), ('q', 'cite'),
        ('script', 'src')]

    def __init__(self, baseuri, encoding, type):
        _BaseHTMLProcessor.__init__(self, encoding, type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # Resolve a (possibly relative) URI against the stored base URI.
        return _urljoin(self.baseuri, uri.strip())

    def unknown_starttag(self, tag, attrs):
        # Rewrite any URI-bearing attributes on this tag, then hand the
        # tag through to the base processor.
        rewritten = []
        for name, value in self.normalize_attrs(attrs):
            if (tag, name) in self.relative_uris:
                value = self.resolveURI(value) or value
            rewritten.append((name, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, rewritten)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
    # Rewrite every relative URI in htmlSource as absolute, using baseURI.
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, type)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Strip unsafe markup from HTML using whitelists of elements,
    attributes, and inline CSS.  Embedded MathML and SVG islands are
    allowed through their own whitelists once the corresponding xmlns
    declaration has been seen."""
    # HTML elements that survive sanitization
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b',
      'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
      'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
      'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
      'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
      'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
      'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
      'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
      'ul', 'var']
    # HTML attributes that survive sanitization
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding',
      'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
      'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
      'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
      'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
      'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
      'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
      'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
      'type', 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang']
    # elements whose entire content (not just the tags) is dropped
    unacceptable_elements_with_end_tag = ['script', 'applet']
    # CSS properties allowed through sanitize_style()
    acceptable_css_properties = ['azimuth', 'background-color',
      'border-bottom-color', 'border-collapse', 'border-color',
      'border-left-color', 'border-right-color', 'border-top-color', 'clear',
      'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
      'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
      'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
      'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
      'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
      'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
      'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
      'white-space', 'width']
    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
      'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
      'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
      'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
      'transparent', 'underline', 'white', 'yellow']
    # colors, lengths and simple functional notation allowed as CSS values
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    # MathML element whitelist (active only after an xmlns declaration)
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
      'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
      'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
      'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none']
    # MathML attribute whitelist
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
      'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
      'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
      'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
      'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
      'xlink:type', 'xmlns', 'xmlns:xlink']
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
      'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image',
      'linearGradient', 'line', 'metadata', 'missing-glyph', 'mpath', 'path',
      'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg',
      'switch', 'text', 'title', 'use']
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
      'arabic-form', 'ascent', 'attributeName', 'attributeType',
      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
      'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd',
      'descent', 'display', 'dur', 'end', 'fill', 'fill-rule', 'font-family',
      'font-size', 'font-stretch', 'font-style', 'font-variant',
      'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'hanging',
      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'max',
      'min', 'name', 'offset', 'opacity', 'origin', 'overline-position',
      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
      'preserveAspectRatio', 'r', 'repeatCount', 'repeatDur',
      'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx',
      'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity',
      'strikethrough-position', 'strikethrough-thickness', 'stroke',
      'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
      'stroke-linejoin', 'stroke-miterlimit', 'stroke-width',
      'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type',
      'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode',
      'unicode-range', 'units-per-em', 'values', 'version', 'viewBox',
      'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2',
      'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role',
      'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang',
      'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']
    # lazily-built maps from lowercased SVG names back to their camelCase
    # forms (populated on first SVG element seen; see unknown_starttag)
    svg_attr_map = None
    svg_elem_map = None
    # SVG presentation properties allowed through sanitize_style()
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
      'stroke-opacity']
    def reset(self):
        """Reset parser state: depth inside dropped elements, and whether
        we are currently inside a declared MathML or SVG island."""
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0
    def unknown_starttag(self, tag, attrs):
        """Emit the tag only if it is whitelisted (as HTML, or as MathML/SVG
        once the matching xmlns declaration has been seen), keeping only
        whitelisted attributes and sanitized inline style."""
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        # inside an SVG island even HTML-acceptable names go through the
        # MathML/SVG branch, so camelCase restoration still applies
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK = 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK = 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    # build lowercase lists once, plus maps that restore the
                    # original camelCase spellings on output
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            else:
                return
        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        # keep only whitelisted attributes; 'style' is special-cased through
        # the CSS sanitizer
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        """Emit the end tag only for whitelisted elements; track exits from
        dropped elements and from MathML/SVG islands."""
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math': self.mathmlOK = 0
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg': self.svgOK = 0
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # declarations (e.g. doctype) are dropped entirely
        pass
    def handle_data(self, text):
        # suppress text inside dropped elements (script/applet)
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        """Return a sanitized version of an inline CSS declaration list,
        or '' when the input cannot be safely cleaned."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand properties pass only if every keyword/value is safe
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, type):
    # Strip unsafe markup from htmlSource with _HTMLSanitizer; when a Tidy
    # interface is installed and TIDY_MARKUP is set, additionally
    # re-serialize the result as clean XHTML and return only the <body>
    # contents.  'type' is the media type string passed through to the
    # sanitizer (NB: it shadows the builtin type() inside this function).
    p = _HTMLSanitizer(encoding, type)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            # BUGFIX: the 'type' parameter shadows the builtin type(), so
            # the original "type(data) == type(u'')" raised
            # "TypeError: 'str' object is not callable" whenever Tidy was
            # enabled; isinstance() avoids the shadowed name.
            utf8 = isinstance(data, unicode)
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the body contents of the tidied document
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that follows redirects, records the HTTP status on
    the returned file-like object, and retries basic-auth failures with
    digest auth when the server demands it."""
    def http_error_default(self, req, fp, code, msg, headers):
        # treat any 3xx except 304 (Not Modified) as a redirect; everything
        # else is returned as-is with the status attached
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # follow the redirect only when a Location header is present;
        # otherwise return the response itself, tagged with the status
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # same handling as 302, delegating to the 301-specific base hook
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # 300/303/307 get identical treatment to 302
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure above means digest retry is not possible; fall
            # back to the default (non-redirect) error handling
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: return it unchanged
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials from the URL and carry them as a
                    # base64-encoded Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # iri support
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
        except:
            # not IDNA-encodable; use the URL as given
            pass
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if type(modified) == type(''):
            modified = _parse_date(modified)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise whichever decompression schemes this interpreter supports
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        # not a readable local file; fall through and treat it as raw data
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
# registered date-parsing handlers, most recently registered first
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # prepend so that the most recently registered handler is tried first
    _date_handlers[:0] = [func]
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# expand each template into a real regular expression: the date part built
# from the template placeholders, plus an optional time-of-day and timezone
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}(\.\d*)?))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# clean up the loop variable leaked by the list comprehension (Python 2)
del tmpl
# pre-bound match methods, tried in order by _parse_date_iso8601()
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.
    Returns a 9-tuple (struct_time) in local time, or None when no
    ISO-8601 template matches.'''
    m = None
    # try each precompiled template in order; order matters (greedy match)
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    # BUGFIX: time.mktime() requires a tuple (or struct_time); passing the
    # bare list raised TypeError, which _parse_date()'s broad except
    # silently swallowed, so ISO-8601 dates never parsed via this handler.
    return time.localtime(time.mktime(tuple(tm)))
# register the ISO-8601 handler with the date-parsing framework
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Korean words for year/month/day and AM/PM markers, used by the OnBlog
# and Nate date formats below.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# e.g. "2004년 05월 28일  01:02:03" (groups: y m d H M S)
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
# e.g. "2004-05-28 오후 11:24:23" (groups: y m d am/pm H M S)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if not match:
        return
    year, month, day, hour, minute, second = match.groups()
    # OnBlog dates are Korean local time (KST, UTC+9)
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': year, 'month': month, 'day': day,
                 'hour': hour, 'minute': minute, 'second': second,
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match:
        return
    # convert the 12-hour clock plus Korean AM/PM marker to 24-hour
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    # Nate dates are Korean local time (KST, UTC+9)
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
                 'hour': hour, 'minute': match.group(6), 'second': match.group(7),
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# e.g. "2004-05-28 01:02:03.456" (fractional seconds optional and ignored)
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # NOTE(review): the timezone is hard-coded to +09:00 (KST), matching the
    # Korean handlers above -- presumably this handler was written for
    # Korean-hosted feeds; confirm before relying on it for other locales.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# maps Greek month abbreviations (several spelling variants) to English
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }
# maps Greek weekday abbreviations to English
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# RFC-822-shaped date with Greek weekday/month names
# (groups: wday, day, month, year, H, M, S, zone)
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    match = _greek_date_format_re.match(dateString)
    if not match:
        return
    wday_name, day, month_name, year, hour, minute, second, zonediff = match.groups()
    try:
        # translate Greek weekday/month names to English equivalents
        wday = _greek_wdays[wday_name]
        month = _greek_months[month_name]
    except:
        # unrecognized weekday or month name
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': day, 'month': month, 'year': year,
                  'hour': hour, 'minute': minute, 'second': second,
                  'zonediff': zonediff}
    if _debug:
        sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# maps Hungarian month names to two-digit month numbers for W3DTF output
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2; NOTE(review): key has a trailing 'i' ("februari" form, not "februar") -- confirm the source data really uses this spelling
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2; NOTE(review): looks like a misspelling of "majus" -- confirm against real feed data before changing
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }
# e.g. "2004-december-03T10:05+01:00" (groups: y, month-name, d, H, M, zone)
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    match = _hungarian_date_format_re.match(dateString)
    if not match:
        return
    try:
        # translate the Hungarian month name to its two-digit number
        month = _hungarian_months[match.group(2)]
    except:
        # month name not recognized
        return
    # zero-pad single-digit day and hour
    day = match.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = match.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': match.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': match.group(5),
                 'zonediff': match.group(6)}
    if _debug:
        sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3CDTF (ISO-8601 profile) string into a 9-tuple in GMT.
    Returns None when the string does not match in its entirety or the
    year is unusable.'''
    def __extract_date(m):
        # Return (year, month, day); (0, 0, 0) signals an unusable year.
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: converge on month/day by
            # successive approximation through mktime/gmtime
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Return (hours, minutes, seconds); all zero when absent.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # negated: the offset is what must be ADDED to reach UTC
        if tzd[0] == '+':
            return -offset
        return offset
    # date part: year, then optional ordinal (julian) or month[-day]
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    # time part: hh:mm[:ss[.frac]] followed by a mandatory zone designator
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets local time; compensate with the zone designator
    # and the local timezone offset to produce GMT
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # drop a leading weekday token ("Tue," / "Tue.") if present
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # "day month year time+zone" with the zone glued on: split it out
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        # date only, no time: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Branch order matters: the UTF-16 BOM checks must reject trailing
    # \x00\x00 so that UTF-32 BOMs fall through to the UTF-32 branches.
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # xml_data is now ASCII-compatible: look for the encoding attribute
        # inside the XML declaration.
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # Any decode failure here just means we could not sniff; proceed
        # with no declared XML encoding.
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # A concrete sniffed byte order overrides a byte-order-agnostic
        # family name (utf-16, ucs-2, ...) declared inside the document.
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # RFC 3023 precedence rules (see docstring): application/* honors the
    # in-document encoding as a fallback, text/* ignores it entirely.
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        # Non-HTTP source or no Content-Type header at all.
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    # If a BOM contradicts the caller-supplied encoding, the BOM wins --
    # it is the more direct evidence. The len>=4 / \x00\x00 guards keep
    # UTF-32 BOMs from matching the two-byte UTF-16 patterns.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # Decode with the (possibly corrected) encoding; a wrong guess raises
    # here and the caller treats that as "try the next encoding".
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # Rewrite (or insert) the XML declaration so downstream parsers see utf-8.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    '''
    # Lift all inline <!ENTITY ...> declarations out of the document.
    entity_rx = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    found_entities = entity_rx.findall(data)
    data = entity_rx.sub('', data)
    # Locate the DOCTYPE (if any); a Netscape DOCTYPE marks RSS 0.91.
    doctype_rx = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    found_doctypes = doctype_rx.findall(data)
    if found_doctypes:
        doctype = found_doctypes[0]
    else:
        doctype = ''
    if doctype.lower().find('netscape') != -1:
        version = 'rss091n'
    else:
        version = None
    # only allow in 'safe' inline entity definitions
    replacement = ''
    if len(found_doctypes) == 1 and found_entities:
        safe_rx = re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
        safe_entities = [e for e in found_entities if safe_rx.match(e)]
        if safe_entities:
            replacement = '<!DOCTYPE feed [\n  <!ENTITY %s>\n]>' % '>\n  <!ENTITY '.join(safe_entities)
    data = doctype_rx.sub(replacement, data)
    # Hand the surviving entity name/value pairs back to the caller.
    if replacement:
        safe_definitions = dict(safe_rx.findall(replacement))
    else:
        safe_definitions = {}
    return version, data, safe_definitions
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    # NOTE(review): the mutable default 'handlers=[]' is shared across calls;
    # it is only rebound (never mutated) below, so this is safe as written.
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # Accept a single (old-style) handler instance as well as a list.
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        # Downloading failed: record the exception via bozo and continue
        # with empty data so the caller still gets a result dict.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage. Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data, entities = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    # Re-encode the document to UTF-8, trying progressively wilder guesses;
    # the first _toUTF8 call that does not raise wins.
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        # We parsed successfully, but not with the declared encoding.
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    # First pass: a real SAX parser for well-formed feeds; any parse error
    # flips use_strict_parser off and we fall back to the loose parser below.
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    '''Base class for output serializers; subclasses provide write().'''
    def __init__(self, results):
        # results: the results dict returned by parse()
        self.results = results
class TextSerializer(Serializer):
    '''Writes parse results as flat "dotted.path=value" lines, one per leaf.'''
    def write(self, stream=sys.stdout):
        self._writer(stream, self.results, '')
    def _writer(self, stream, node, prefix):
        # Recursively walk dicts and lists, emitting scalar leaf values.
        if not node: return
        if hasattr(node, 'keys'):
            names = node.keys()
            names.sort()
            for name in names:
                # Skip keys that are shadowed by richer companion keys.
                if name in ('description', 'link'): continue
                if node.has_key(name + '_detail'): continue
                if node.has_key(name + '_parsed'): continue
                self._writer(stream, node[name], prefix + name + '.')
        elif type(node) == types.ListType:
            for position in range(len(node)):
                self._writer(stream, node[position], prefix[:-1] + '[' + str(position) + '].')
        else:
            try:
                # Escape backslashes and fold newlines so each value stays
                # on a single output line.
                text = str(node).encode('utf-8')
                text = text.replace('\\', '\\\\')
                text = text.replace('\r', '')
                text = text.replace('\n', r'\n')
                stream.write(prefix[:-1] + '=' + text + '\n')
            except:
                # Values that cannot be stringified/encoded are skipped.
                pass
class PprintSerializer(Serializer):
    '''Dumps the full results dict with pprint, preceded by the feed URL.'''
    def write(self, stream=sys.stdout):
        from pprint import pprint
        stream.write(self.results['href'] + '\n\n')
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line driver: parse each URL/file argument and serialize the
    # result with the requested serializer.
    try:
        from optparse import OptionParser
    except:
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        # optparse unavailable (very old Python): positional args only,
        # with a minimal stand-in options object.
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        class _Options:
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    # Turns FeedParserDict results into plain dicts (see the 3.3 revision
    # notes below), which also makes pprint output nicer.
    zopeCompatibilityHack()
    # 'text' -> TextSerializer, 'pprint' -> PprintSerializer; unknown
    # formats fall back to the Serializer base class.
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
#4.2 - MAP - added support for parsing microformats within content elements:
# currently supports rel-tag (maps to 'tags'), rel-enclosure (maps to
# 'enclosures'), XFN links within content elements (maps to 'xfn'),
# and hCard (parses as vCard); bug [ 1481975 ] Misencoded utf-8/win-1252
| Python |
#!/usr/bin/env python2.4
import httplib2
import apptools
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
import sha
import cStringIO
from urlparse import urljoin
import os
import anydbm
# Clark-notation templates for namespace-qualified ElementTree tag names:
# e.g. ATOM % "title" -> "{http://www.w3.org/2005/Atom}title".
ATOM = "{http://www.w3.org/2005/Atom}%s"
APP = "{http://purl.org/atom/app#}%s"
# Keys tracked for each entry; the companion "<name>__type" key carries the
# Atom text-construct type ("text", "html" or "xhtml") for element <name>.
ENTRY_ELEMENTS = ["title", "title__type", "summary", "summary__type", "content", "content__type"]
DEFAULT_ENTRY = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title type="text">Title goes here.</title>
<id>http://bitworking.org/foo/app/main/third</id>
<author>
<name>anonymous</name>
</author>
<updated>2006-08-04T15:52:00-05:00</updated>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
</div>
</summary>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
</div>
</content>
</entry>"""
ERROR_ENTRY = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title type="text">An Error Occured.</title>
<id>http://bitworking.org/foo/app/main/third</id>
<author>
<name>anonymous</name>
</author>
<updated>2006-08-04T15:52:00-05:00</updated>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
An error occured trying to access this entry.
Received a status code of: %d
</div>
</summary>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
An error occured trying to access this entry.
</div>
</content>
</entry>"""
class Entry(object):
    """A single member of an APP collection.

    Holds an httplib2.Http client plus the member's edit URI, and keeps a
    flat dict of Atom text-construct values ("title", "title__type", ...)
    that can be read/written with the [] operator.
    """
    def __init__(self, h, edit, title="", title__type="text", updated="", published="", **kwargs):
        self.h = h
        self.member_uri = edit
        # Will hold an ElementTree of the entry once self.get() is called.
        self.element = None
        self._values = dict(
            title=title,
            title__type=title__type,
            updated=updated,
            published=published,
            summary="",
            summary__type="text",
            content="",
            content__type="text",
        )
        # Extra keys (e.g. "edited" from the collection feed) ride along.
        self._values.update(kwargs)
    def __getitem__(self, name):
        """Look up a value; unknown "*__type" keys default to 'text',
        any other unknown key to the empty string."""
        if name.endswith("__type"):
            default = 'text'
        else:
            default = ''
        return self._values.get(name, default)
    def __setitem__(self, name, value):
        """Assign to an already-known key; reject anything else."""
        if name not in self._values:
            raise IndexError("index '%s' not found" % name)
        self._values[name] = value
    def get(self):
        """Fetch the entry from its member URI (or synthesize a default),
        parse it, and refresh the local value dict."""
        if not self.member_uri:
            content = DEFAULT_ENTRY
        else:
            (resp, content) = self.h.request(self.member_uri)
            if resp.status != 200:
                # Substitute a canned error entry carrying the status code.
                content = ERROR_ENTRY % resp.status
        self.element = fromstring(content)
        self._values.update(apptools.parse_atom_entry(self.member_uri, self.element))
    def put(self):
        """Serialize the current values and PUT them back to the member URI."""
        headers = {'content-type': 'application/atom+xml'}
        self.h.request(self.member_uri, method="PUT", body=self.tostring(), headers=headers)
    def delete(self):
        """DELETE the member resource."""
        self.h.request(self.member_uri, method="DELETE")
    def tostring(self):
        """Push the value dict back into the ElementTree and serialize it."""
        apptools.unparse_atom_entry(self.element, self._values)
        return tostring(self.element)
class _EntryIterator(object):
    """Iterates over every entry in a collection, following paging links.

    A persistent dbm 'hit map', keyed on sha1(edit URI + app:edited date),
    remembers entries from previous runs.  Once a previously-seen entry is
    encountered, subsequent pages are requested cache-first, and in
    'onlynew' mode iteration stops entirely.
    """
    def __init__(self, h, collection_uri, hit_map, onlynew=False):
        self.h = h
        self.collection_uri = collection_uri
        self.local_hit_map = {}
        # URI of the next feed page to fetch; a falsy value ends iteration.
        self.page_uri = collection_uri
        self.hit_map = hit_map
        # Entries parsed from the current page, consumed front-to-back.
        self.entries = []
        # Set once an entry already present in hit_map has been seen.
        self.old_stuff = False
        self.onlynew = onlynew
    def __iter__(self):
        return self
    def __del__(self):
        # Flush the hit map even if the caller abandons the iterator.
        self.hit_map.sync()
    def next(self):
        # Once we've seen a 304 we should probably try "Cache-control: only-if-cached" first
        # and only hit the web if we get a cache miss
        if not self.entries:
            if not self.page_uri:
                self.hit_map.sync()
                raise StopIteration
            # If we have already hit an entry we've seen before, in the hit_map,
            # then try all requests first from the cache. Cache-control: only-if-cached.
            # If that fails then request over the net.
            if self.old_stuff:
                (resp, content) = self.h.request(self.page_uri, headers={'cache-control': 'only-if-cached'})
                if resp.status != 304:
                    (resp, content) = self.h.request(self.page_uri)
                else:
                    # A 304 here means the cache satisfied us; treat as success.
                    resp.status = 200
            else:
                (resp, content) = self.h.request(self.page_uri)
            if resp.status != 200:
                self.hit_map.sync()
                raise StopIteration
            (self.entries, self.page_uri) = apptools.parse_collection_feed(self.page_uri, content)
        if len(self.entries):
            entry = self.entries[0]
            del self.entries[0]
            # Compute the hit hash from the "edit" URI and the app:edited/atom:updated
            # Do we skip entries that do not have an "edit" URI?!?
            hash = sha.sha(entry["edit"] + entry["edited"]).hexdigest()
            if hash in self.hit_map:
                self.old_stuff = True
                self.hit_map.sync()
                if self.onlynew:
                    raise StopIteration
            else:
                self.hit_map[hash] = entry["edit"]
            return Entry(self.h, **entry)
        else:
            self.hit_map.sync()
            raise StopIteration
class Collection(object):
    """An APP collection: knows its URI, title and workspace, and keeps a
    small on-disk dbm ('hit map') of entries already seen so iteration can
    stop early on previously-fetched content."""
    def __init__(self, h, cachedir, href, title, workspace, accept):
        self.h = h
        # Per-collection cache directory, derived from the collection URI.
        self.cachedir = os.path.join(cachedir, httplib2.safename(href))
        if not os.path.exists(self.cachedir):
            os.makedirs(self.cachedir)
        # dbm mapping sha1(edit-uri + edited-date) -> edit-uri; "c" creates
        # the database on first use.
        self.hitmap = anydbm.open(os.path.join(self.cachedir, "hitmap.db"), "c")
        self.href = href
        self.title = title
        self.workspace = workspace
        self.accept = accept
    def post(self, entry):
        """POST a new member entry to the collection URI."""
        headers = {'content-type': 'application/atom+xml;type=entry'}
        (resp, content) = self.h.request(self.href, method="POST",
                                         body=entry.tostring(), headers=headers)
    def iter_entries(self):
        """Iterate over every entry in the collection."""
        return _EntryIterator(self.h, self.href, self.hitmap)
    def iter_new_entries(self):
        """Iterate, stopping at the first entry seen on a previous run."""
        return _EntryIterator(self.h, self.href, self.hitmap, onlynew=True)
class Service:
    """Client-side view of an APP service document.

    Fetches the service document at construction time and builds the
    workspace/collection structure from it; failures leave the structure
    empty rather than raising.
    """
    def __init__(self, service_uri, cachedir, username, password):
        self.h = httplib2.Http(os.path.join(cachedir, ".httplib2_cache"))
        self.h.follow_all_redirects = True
        self.h.add_credentials(username, password)
        # A list of tuples, each a name and a list of Collection objects.
        self._workspaces = []
        (resp, content) = self.h.request(service_uri)
        if resp.status == 200:
            service = fromstring(content)
            workspaces = service.findall(APP % "workspace")
            for w in workspaces:
                wstitle = w.find(ATOM % "title")
                # NOTE(review): the and/or idiom also yields "No title" when
                # the title element exists but its text is empty/None.
                wsname = (wstitle != None) and wstitle.text or "No title"
                collections = []
                collection_elements = w.findall(APP % "collection")
                for c in collection_elements:
                    cp = {}
                    title = c.find(ATOM % "title")
                    cp['title'] = (title != None) and title.text or "No title"
                    # Collection href may be relative to the service document.
                    cp['href'] = urljoin(service_uri, c.get('href', ''))
                    cp['workspace'] = wsname
                    accepts = c.findall(APP % "accept")
                    cp['accept'] = [node.text for node in accepts]
                    collections.append(Collection(self.h, cachedir, **cp))
                self._workspaces.append( (wsname, collections) )
    def collections(self):
        """Flat list of every Collection across all workspaces."""
        return sum([collections for (wsname, collections) in self._workspaces], [])
    def workspaces(self):
        """Returns a list of tuples, (workspacename, collections), where
        collections is a list of Collection objects, and workspacename is the
        name of the workspace"""
        return self._workspaces
| Python |
__author__ = "Joe Gregorio <http://bitworking.org/>"
__version__ = "$Revision: 150 $"
__copyright__ = "Copyright (c) 2006 Joe Gregorio"
__license__ = "MIT"
# Atom and APP namespace URIs, plus pre-built Clark-notation tag names
# for use with (c)ElementTree find/findall.
ATOM = "http://www.w3.org/2005/Atom"
ATOM_LINK = "{%s}link" % ATOM
ATOM_ENTRY = "{%s}entry" % ATOM
ATOM_TITLE= "{%s}title" % ATOM
APP = "http://purl.org/atom/app#"
APP_COLL = "{%s}collection" % APP
APP_MEMBER_TYPE = "{%s}accept" % APP
# By default we'll check the bitworking collection
INTROSPECTION_URI = "http://bitworking.org/projects/pyapp/collection.cgi?introspection=1"
import httplib2
import unittest
import cElementTree
import urlparse
import cStringIO
import sys
import getopt
import time
import feedvalidator
from feedvalidator import compatibility
from gettext import gettext as _
from ErrorReporting import *
# Optional "Paces" (proposed-spec behaviors) switchable from the command
# line; reports tagged with a pace are only kept when that pace is enforced.
PACES = {'PaperTrail': False}
def usage(option=""):
    """Print the command-line help text; if 'option' is non-empty it is
    echoed afterwards (used to flag a bad option)."""
    print """Usage: appclienttest [OPTION] IntrospectionURI
    -h, --help Display this help message then exit.
    --name=<name> User name to use for authentication.
    --password=<pw> Password to use for authentication.
    --debug=<n> Print debugging information for n > 0.
    --<PaceName> Where PaceName is one of [%s]
""" % ", ".join(PACES.keys())
    if option:
        print """!! %s !!""" % option
def validate_atom(testcase, content, baseuri):
retval = True
try:
events = feedvalidator.validateStream(cStringIO.StringIO(content), firstOccurrenceOnly=1,base=baseuri)['loggedEvents']
except feedvalidator.logging.ValidationFailure, vf:
events = [vf.event]
filterFunc = getattr(compatibility, "A")
events = filterFunc(events)
if len(events):
from feedvalidator.formatter.text_plain import Formatter
output = Formatter(events)
testcase.report(MustUseValidAtom("\n".join(output) + content))
retval = False
return retval
class Test:
    """Base class for all the tests. Adds basic
    functionality of recording reports as they
    are generated. Also has a 'run' member
    function which runs over all member functions
    that begin with 'test' and executes them.
    """
    def __init__(self):
        self.reports = []
        self.context = ""
    def report(self, r):
        """Record report 'r', tagging it with the current test context.
        Pace-specific reports are kept only when their pace is enforced."""
        r.context = self.context
        if not hasattr(r, 'pace') or (hasattr(r, 'pace') and PACES[r.pace]):
            self.reports.append(r)
    def run(self):
        """Find and execute every method whose name starts with 'test';
        unexpected exceptions become InternalErrorEncountered reports."""
        methods = [ method for method in dir(self) if callable(getattr(self, method)) and method.startswith("test")]
        for method in methods:
            # Progress indicator: one dot per test method.
            print ".",
            sys.stdout.flush()
            test_member_function = getattr(self, method)
            try:
                self.description = str(test_member_function.__doc__)
                self.context = method
                test_member_function()
            except Exception, e:
                import traceback
                self.report(InternalErrorEncountered(str(e) + traceback.format_exc()))
CONTENT_WITH_SRC = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title>This is a title</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<summary>Hi!</summary>
<author>
<name>Joe Gregorio</name>
</author>
<content
type="image/png"
src="http://bitworking.org/projects/atompubtest/client/helloworld.png" />
</entry>
"""
CONTENT_WITH_OTHER = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title>This should not be blank</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<author>
<name>Joe Gregorio</name>
</author>
<summary>Hi!</summary>
<content type="image/png" >
iVBORw0KGgoAAAANSUhEUgAAAEYAAAALCAIAAABNimxhAAAAAXNSR0IArs4c6QAAAARnQU1BAACx
jwv8YQUAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAThJREFU
SEvdlD0OgzAMhdvbcgiOwAm4ADs7M2tXRkY2bkA/9UmW5SRQ2kpV6wEZx3bei3+u27Zd/kyMEkrQ
ZfEi7oduCqmqCudlWdDneUYfxxF9XVd0TtP8OzdmYWQzRHwGOu99hlLXdUaj73v0tm1JCzF0Tr9A
SZgMii+OZ25uAeU0Tbg1TUNgXddWGSzonGJPY5UZf8TfSLVVdr2OmuWgSn6g7DISIYYsbTxhuj0k
fXv5q9PERNkEKBurUxpVcM1Z4djVw09RCik8w5RJaskOmIojNCJG7/ENFSjVv2R/i1Ko7A63LKVh
GBSiKRJDhOYJ/tk3+jAlPSej/E7jWZNo12kxIH6QQtOGCtiv8BCoEX2l8fzsasTPrgcfQtfx6wdJ
p6X1YN1h6M+th9Lq+FF7cRX+KB9g3wGfjZVSUSpSSwAAAABJRU5ErkJggg==
</content>
</entry>
"""
MIXED_TEXT_TYPES = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title type="html">
This <b>is</b> a title.
</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<author>
<name>Joe Gregorio</name>
</author>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
Hello <b>World</b>!
</div>
</summary>
<content type="text" >This is just plain text content.
</content>
</entry>
"""
# This entry is not well-formed XML: the <entry> element is never closed
# (it also lacks an author, which alone would only make it invalid).
NON_WELL_FORMED_ENTRY = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title>This is a title</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<summary>Hi!</summary>
"""
def absolutize(baseuri, uri):
    """Resolve 'uri' against 'baseuri' unless it is already absolute
    (i.e. it carries an authority component)."""
    (scheme, authority, path, query, fragment) = httplib2.parse_uri(uri)
    if authority is None:
        return urlparse.urljoin(baseuri, uri)
    return uri
class EntryCollectionTests(Test):
    """Conformance tests run against a single APP entry collection:
    HTTP caching/compression practice, entry create/update/delete
    round-trips, i18n handling, duplicate atom:id handling, and
    rejection of non-well-formed input."""
    def __init__(self, entry_coll_uri, http):
        Test.__init__(self)
        self.entry_coll_uri = entry_coll_uri
        self.http = http
        print "Testing <%s>\n" % entry_coll_uri
    def setUp(self):
        # Be polite to the server under test between test methods.
        time.sleep(1)
    def enumerate_collection(self):
        """Walk the collection feed, following rel='next' paging links.
        Returns a dict mapping absolute edit URIs to entry elements,
        or {} as soon as any page fails Atom validation."""
        relnext = [self.entry_coll_uri]
        retval = {}
        while relnext:
            uri = absolutize(self.entry_coll_uri, relnext[0])
            (response, content) = self.http.request(uri, "GET", headers = {"Cache-Control": "max-age=0"})
            if not validate_atom(self, content, uri):
                return {}
            tree = cElementTree.fromstring(content)
            for e in tree.findall(ATOM_ENTRY):
                reledit = [l.get('href', '') for l in e.findall(ATOM_LINK) if l.get('rel', '') == 'edit']
                for t in reledit:
                    retval[absolutize(self.entry_coll_uri, t)] = e
            relnext = [l.get('href', '') for l in tree.findall(ATOM_LINK) if l.get('rel', '') == 'next']
        return retval
    def testHttpConformance(self):
        """Do a simple GET on a collection
        feed and look for suggested HTTP
        practice."""
        (response, content) = self.http.request(self.entry_coll_uri)
        if not response.has_key('etag'):
            self.report(ShouldSupportCacheValidators("No ETag: header was sent with the response."))
        if not response.has_key('last-modified'):
            self.report(ShouldSupportCacheValidators("No Last-Modified: header was sent with the response."))
        if not response.has_key('content-encoding'):
            self.report(ShouldSupportCompression("No Content-Encoding: header was sent with the response indicating that a compressed entity body was not returned."))
    def testContentWithSrc(self):
        """POST a good Atom Entry with a content/@src
        attribute set and with the right mime-type.
        Ensure that the entry is added to the collection.
        """
        toc = self.enumerate_collection()
        startnum = len(toc)
        # Add a new entry
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=CONTENT_WITH_SRC, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        if response.status != 201:
            self.report(EntryCreationMustReturn201("Actually returned an HTTP status code %d" % response.status))
        if not response.has_key('location'):
            self.report(EntryCreationMustReturnLocationHeader("Header is completely missing"))
            return
        toc = self.enumerate_collection()
        # Make sure it was added to the collection
        if startnum >= len(toc):
            self.report(EntryCreationMustBeReflectedInFeed("Number of entries went from %d before to %d entries after the entry was created." % (startnum, len(toc))))
        # The location header should match the link/@rel="edit"
        edituri = response['location']
        if edituri not in toc:
            self.report(LocationHeaderMustMatchLinkRelEdit("The Location: header value %s can't be found in %s" % (response['location'], str(toc))))
        (response, content) = self.http.request(edituri, "GET", headers = {"Cache-Control": "max-age=0"})
        if response.status != 200:
            self.report(GetFailedOnMemberResource("Expected an HTTP status code of 200 but instead received %d" % response.status))
        validate_atom(self, content, edituri)
        # Cleanup
        (response, content) = self.http.request(edituri, "DELETE")
        if response.status >= 400:
            self.report(EntryDeletionFailed("HTTP Status %d" % response.status))
        toc = self.enumerate_collection()
        if startnum != len(toc):
            self.report(EntryDeletionMustBeReflectedInFeed("Number of entries went from %d before to %d entries after the entry was deleted." % (startnum, len(toc))))
        if edituri in toc:
            self.report(EntryDeletionMustBeReflectedInFeed("The URI for the entry just deleted <%s> must not appear in the feed after the entry is deleted." % edituri))
    def testContentWithBase64Content(self):
        """ POST a good Atom Entry with an entry
        whose atom:content is a base64 encoded png.
        """
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=CONTENT_WITH_OTHER, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        if response.status != 201:
            self.report(EntryCreationMustReturn201("Actually returned an HTTP status code %d" % response.status))
        # Cleanup: delete the entry we just created.
        edituri = response['location']
        (response, content) = self.http.request(edituri, "DELETE")
        if response.status >= 400:
            self.report(EntryDeletionFailed("HTTP Status %d" % response.status))
    def testMixedTextConstructs(self):
        """ POST a good Atom Entry with an entry
        whose Text Constructs contain a mix of types.
        """
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=MIXED_TEXT_TYPES, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        if response.status != 201:
            self.report(EntryCreationMustReturn201("Actually returned an HTTP status code %d" % response.status))
        # Cleanup: delete the entry we just created.
        edituri = response['location']
        (response, content) = self.http.request(edituri, "DELETE")
        if response.status >= 400:
            self.report(EntryDeletionFailed("HTTP Status %d" % response.status))
    def testNonWellFormedEntry(self):
        """ POST an invalid Atom Entry
        """
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=NON_WELL_FORMED_ENTRY, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        # Any 4xx/5xx status counts as a rejection.
        if response.status < 400:
            self.report(MustRejectNonWellFormedAtom("Actually returned an HTTP status code %d" % response.status))
    def testI18n(self):
        """ POST a fully utf-8 Atom Entry
        """
        # i18n.atom must sit next to this script; its title is the probe value.
        i18n = file("i18n.atom", "r").read()
        tree = cElementTree.fromstring(i18n)
        title_sent = tree.findall(".//" + ATOM_TITLE)[0].text
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=i18n, headers={'Content-Type': 'application/atom+xml', 'Content-Length' : str(len(i18n)), 'Accept': '*/*'})
        if response.status >= 400:
            self.report(ServerShouldHandleI18NContent("Actually returned an HTTP status code %d" % response.status))
        if response.status != 201:
            self.report(EntryCreationMustReturn201("Actually returned an HTTP status code %d" % response.status))
        # Round-trip check: the title must come back unchanged.
        location = response['location']
        (response, content) = self.http.request(location, "GET")
        tree = cElementTree.fromstring(content)
        title_received = tree.findall(".//" + ATOM_TITLE)[0].text
        tree.findall(".//" + ATOM_TITLE)[0].text = "Non Internationalized"
        if title_sent != title_received:
            self.report(ServerShouldHandleI18NContent(u"%s != %s" % (title_sent, title_received)))
        # Now PUT an updated (ASCII) title and confirm the update sticks.
        ni18n = cElementTree.tostring(tree)
        (response, content) = self.http.request(location, "PUT", body=ni18n, headers={'Content-Length' : str(len(ni18n)), 'Content-Type' : 'application/atom+xml'})
        if response.status != 200:
            self.report(EntryUpdateFailed("Actually returned an HTTP status code %d" % response.status))
        (response, content) = self.http.request(location, "GET")
        tree = cElementTree.fromstring(content)
        title_received = tree.findall(".//" + ATOM_TITLE)[0].text
        if title_received != "Non Internationalized":
            self.report(EntryUpdateFailed("Title not updated. %s != %s" % (title_received, "Non Internationalized")))
        (response, content) = self.http.request(location, "DELETE")
    def testDoubleAddWithSameAtomId(self):
        """POST two Atom entries with the same atom:id
        to the collection. The response for both MUST be
        201 and two new entries must be created."""
        toc = self.enumerate_collection()
        startnum = len(toc)
        # Add a new entry
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=CONTENT_WITH_SRC, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        if response.status != 201:
            self.report(EntryCreationMustReturn201("Actually returned an HTTP status code %d" % response.status))
            return
        toc = self.enumerate_collection()
        # Make sure it was added to the collection
        if startnum >= len(toc):
            self.report(EntryCreationMustBeReflectedInFeed("Number of entries went from %d before to %d entries after the entry was created." % (startnum, len(toc))))
            return
        # The location header should match the link/@rel="edit"
        edituri = response['location']
        time.sleep(2)
        # POST the identical entry (same atom:id) a second time.
        (response, content) = self.http.request(self.entry_coll_uri, "POST", body=CONTENT_WITH_SRC, headers={'Content-Type': 'application/atom+xml', 'Accept': '*/*'})
        if response.status != 201:
            self.report(EntryCreationMustReturn201("When POSTing a second entry with the same atom:id of an entry we just POSTed the server returned an HTTP status code %d" % response.status))
            return
        toc = self.enumerate_collection()
        # Make sure it was added to the collection
        if startnum+1 >= len(toc):
            self.report(EntryCreationMustBeReflectedInFeed("Number of entries went from %d before to %d entries after the entry was created." % (startnum+1, len(toc))))
        edituri2 = response['location']
        if edituri == edituri2:
            self.report(EntryCreationMustReturnLocationHeader("Non unique Location: header value returned in a 201 response. <%s>" % edituri))
        # Cleanup
        (response, content) = self.http.request(edituri, "DELETE")
        if response.status >= 400:
            self.report(EntryDeletionFailed("HTTP Status %d" % response.status))
        (response, content) = self.http.request(edituri2, "DELETE")
        if response.status >= 400:
            self.report(EntryDeletionFailed("HTTP Status %d" % response.status))
class TestIntrospection(Test):
    """Fetches an APP introspection (service) document and runs the entry
    collection test suite against every entry collection it lists."""
    def __init__(self, uri, http):
        Test.__init__(self)
        self.http = http
        self.introspection_uri = uri
    def testEachEntryCollection(self):
        """Run over each entry collection listed
        in an Introspection document and
        run the Entry collection tests
        against it."""
        response, content = self.http.request(self.introspection_uri)
        # TODO: validate the introspection document itself as well.
        tree = cElementTree.fromstring(content)
        for coll in tree.findall(".//" + APP_COLL):
            # An "entry" collection is one whose app:accept says "entry".
            accepts_entries = [node for node in coll.findall(APP_MEMBER_TYPE) if node.text == "entry"]
            if not accepts_entries:
                continue
            suite = EntryCollectionTests(coll.get('href'), self.http)
            suite.run()
            self.reports.extend(suite.reports)
def format(r):
    """Render one report for console output: a divider, the report class
    name, its message text, the test-method context, and extra details."""
    return """----------------------------------------
%s:
%s
Context:
%s
Details:
%s
""" % (r.__class__.__name__, r.text, r.context, r.extra)
def print_report(reports, reportclass):
    """Print every report that is an instance of 'reportclass', or a
    placeholder line when there are none."""
    matching = [r for r in reports if isinstance(r, reportclass)]
    if matching:
        print "\n".join([format(r) for r in matching])
    else:
        print "  No problems found."
def main():
options = ["help", "name=", "password=", "debug="]
options.extend(PACES.keys())
try:
opts, args = getopt.getopt(sys.argv[1:], "h", options )
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
name = password = None
for o, a in opts:
if o.split("--")[-1] in PACES:
PACES[o.split("--")[-1]] = True
if o == "--name":
name = a
if o == "--password":
password = a
if o == "--debug":
print "debug level"
httplib2.debuglevel = int(a)
if o in ["h", "--help"]:
usage()
sys.exit()
http = httplib2.Http(".cache")
if name:
print "%s: %s" % (name, password)
http.add_credentials(name, password)
if not args:
args = [INTROSPECTION_URI]
enforced_paces = [name for name in PACES.keys() if PACES[name]]
for target_uri in args:
print "Atom Client Tests"
print "-----------------"
print ""
print "Testing the service at <%s>" % target_uri
print ""
if enforced_paces:
print "The following Paces are being enforced <%s>" % ", ".join(enforced_paces)
else:
print "No Paces are being enforced."
print ""
print "Running: ",
test = TestIntrospection(target_uri, http)
test.run()
reports = test.reports
print ""
print "== Errors =="
print_report(reports, Error)
print "== Warnings =="
print_report(reports, Warning)
print "== Suggestions =="
print_report(reports, Suggestion)
if not reports:
print "Success!"
if __name__ == '__main__':
main()
| Python |
import logging
try:
from xml.etree.ElementTree import fromstring, tostring, SubElement
except:
from elementtree.ElementTree import fromstring, tostring, SubElement
import re
from urlparse import urljoin
from StringIO import StringIO
# Clark-notation templates for namespace-qualified tag names:
# ATOM % "title" -> "{http://www.w3.org/2005/Atom}title".
ATOM = "{http://www.w3.org/2005/Atom}%s"
APP = "{http://purl.org/atom/app#}%s"
def get_element(name, entry):
    """Return {name: text} for the first matching Atom child of 'entry',
    or {name: ""} when no such child exists.  (The text may itself be
    None for an empty element.)"""
    found = entry.findall(ATOM % name)
    text = ""
    if found:
        text = found[0].text
    return {name: text}
def get_text(name, entry):
    """Extract the Atom text construct 'name' (title/summary/content) from
    'entry'.  Returns {name: value, name+"__type": type} where type is one
    of "text", "html" or "xhtml"."""
    value = ""
    texttype = "text"
    l = entry.findall(ATOM % name)
    if l:
        value = l[0].text
        texttype = mime2atom(l[0].get('type', 'text'))
        if texttype in ["text", "html"]:
            pass
        elif texttype == "xhtml":
            div = l[0].findall("{http://www.w3.org/1999/xhtml}div")[0]
            # Strip the serialized wrapper <div>'s open/close tags by fixed
            # character offsets -- fragile: assumes the exact prefix/suffix
            # ElementTree's tostring emits for this namespace.  TODO confirm.
            value = tostring(div).strip()[52:-11]
        else:
            # Unknown/unsupported type: discard the content.
            value = ""
    if value == None:
        value = ""
    return {name: value, (name + "__type"): texttype}
def set_text(name, entry, values):
    """Write the text construct 'name' (title/summary/content) from
    'values' into 'entry', honouring the companion "<name>__type" key.

    xhtml content is parsed into a real div element; if parsing fails the
    raw string is stored with type="html" instead.
    """
    elements = entry.findall(ATOM % name)
    if not elements:
        element = SubElement(entry, ATOM % name)
    else:
        element = elements[0]
    element.set('type', values[name + "__type"])
    # Drop any previous children before writing the new content.
    # (Was a side-effect list comprehension; a plain loop is clearer.)
    for child in list(element):
        element.remove(child)
    # Renamed from 'type', which shadowed the builtin.
    text_type = values[name + "__type"]
    if text_type in ["html", "text"]:
        element.text = values[name]
    elif text_type == "xhtml":
        element.text = ""
        try:
            # For now if we don't have valid XHTML then just push it up
            # as html. In the future we can use the 1812 normalization
            # code to convert it into xhtml.
            div = fromstring((u"<div xmlns='http://www.w3.org/1999/xhtml'>%s</div>" % values[name]).encode('utf-8'))
            element.append(div)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; any parse error falls back to html.
            element.text = values[name]
            element.set('type', 'html')
# MIME type -> Atom text-construct type name.
mime_to_atom = {
    "application/xhtml+xml": "xhtml",
    "text/html": "html",
    "text/plain": "text"
    }
def mime2atom(t):
    """Map a MIME type to its Atom text-construct name; unknown values
    (including already-Atom names like "xhtml") pass through unchanged."""
    return mime_to_atom.get(t, t)
def parse_atom_entry(uri, entry):
    """Extract the content/title/summary text constructs from an Atom
    entry element into a flat dict (with companion "*__type" keys)."""
    values = {}
    for field in ('content', 'title', 'summary'):
        values.update(get_text(field, entry))
    return values
def unparse_atom_entry(entry, values):
    """Write the title/summary/content text constructs from 'values'
    back into the entry element tree."""
    for field in ('title', 'summary', 'content'):
        set_text(field, entry, values)
def parse_collection_feed(uri, src):
    """Parse one page of an APP collection feed.

    Returns (entries, next): entries is a list of dicts with 'edit',
    'title', 'updated' and 'edited' keys (edit is absolutized against
    'uri'); next is the absolutized rel="next" link, or '' when this is
    the last page.
    """
    # loop over the entries and pull out the title, link/@rel="edit", updated and published.
    entries = []
    feed = fromstring(src)
    for e in feed.findall(ATOM % "entry"):
        entry = {}
        try:
            edit_links = [l.attrib['href'] for l in e.findall(ATOM % "link") if 'rel' in l.attrib and l.attrib['rel'] == "edit"]
        except KeyError:
            # Narrowed from a bare except: the only plausible failure is a
            # link element carrying rel="edit" but no href attribute.
            edit_links = []
        entry['edit'] = urljoin(uri, edit_links and edit_links[0] or '')
        entry['title'] = e.find(ATOM % "title").text
        entry['updated'] = e.find(ATOM % "updated").text
        entry['edited'] = e.find(APP % "edited").text
        entries.append(entry)
    next_links = [l.attrib['href'] for l in feed.findall(ATOM % "link") if 'rel' in l.attrib and l.attrib['rel'] == "next"]
    if next_links:
        next = urljoin(uri, next_links[0])
    else:
        next = ''
    return (entries, next)
def wrap(text, width):
l = 0
ret = []
for s in text.split(' '):
ret.append(s)
l += len(s)
nl = s.find('\n') >= 0
if l > width or nl:
l = 0
if not nl:
ret.append('\n')
else:
ret.append(' ')
return "".join(ret)
| Python |
#!/usr/bin/env python
from config import *
import cgi, sys, os, urlparse, sys, re
import cgitb
cgitb.enable()
if PYDIR not in sys.path:
sys.path.insert(0, PYDIR)
if WEBDIR not in sys.path:
sys.path.insert(0, WEBDIR)
if SRCDIR not in sys.path:
sys.path.insert(0, SRCDIR)
import feedvalidator
from feedvalidator.logging import FEEDTYPEDISPLAY, VALIDFEEDGRAPHIC
def applyTemplate(templateFile, params=None):
    """Read templates/<templateFile> under WEBDIR and interpolate 'params'
    into it with the % operator.

    params -- mapping for %(name)s substitutions (default: empty).
    Fixes: mutable default argument, and the file handle leaking when
    read()/interpolation raised.
    """
    if params is None:
        params = {}
    fsock = open(os.path.join(WEBDIR, 'templates', templateFile))
    try:
        data = fsock.read() % params
    finally:
        fsock.close()
    return data
def sanitizeURL(url):
    """Normalize a user-supplied feed URL: trim surrounding whitespace,
    default the scheme to http://, and strip any user:password@
    credentials.

    NOTE(review): anything without an exact 'http' scheme (including
    https) gets 'http://' prepended -- behavior preserved from the
    original; confirm that is still intended.
    """
    # Strip first so surrounding whitespace cannot confuse the scheme
    # check below (previously stripping happened after the check).
    url = url.strip()
    scheme, domain, path, u1, u2, u3 = urlparse.urlparse(url)
    if scheme.lower() != 'http':   # was the deprecated '<>' operator
        url = 'http://%s' % url
        scheme, domain, path, u1, u2, u3 = urlparse.urlparse(url)
    # strip user and password
    url = re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1' ,url)
    return url
def buildCodeListing(events, rawdata):
    """Render the feed source as an HTML code listing, styling the lines
    on which validator events were logged differently."""
    # print feed
    codelines = []
    linenum = 1
    # Line numbers mentioned by any logged event get the "b" (error) style.
    linesWithErrors = [e.params.get('line', 0) for e in events]
    for line in rawdata.split('\n'):
        line = cgi.escape(line)
        if not line: line = ' '
        linetype = linenum in linesWithErrors and "b" or "a"
        codelines.append(applyTemplate('code_listing_line.tmpl', {"line":line, "linenum":linenum, "linetype":linetype}))
        linenum += 1
    codelisting = "".join(codelines)
    # NOTE(review): 'url' is the module-level global set by the request
    # handling below, not a parameter -- confirm before reusing elsewhere.
    return applyTemplate('code_listing.tmpl', {"codelisting":codelisting, "url":url})
def postvalidate(url, events, rawdata, feedType, autofind=1):
    """returns dictionary including 'url', 'events', 'rawdata', 'output', 'specialCase', 'feedType'"""
    # filter based on compatibility level
    from feedvalidator import compatibility
    filterFunc = compatibility.AA # hardcoded for now
    events = filterFunc(events)
    specialCase = None
    from feedvalidator.formatter.text_html import Formatter
    formattedOutput = Formatter(events, rawdata)
    if formattedOutput:
        # check for special cases
        specialCase = compatibility.analyze(events, rawdata)
        if (specialCase == 'html') and autofind:
            # The URL pointed at an HTML page: try RSS autodiscovery and
            # re-validate the first feed found.  autofind=0 in the recursive
            # call prevents more than one discovery hop.
            try:
                try:
                    import feedfinder
                    rssurls = feedfinder.getFeeds(url)
                except:
                    # feedfinder unavailable or failed: fall back to the URL itself.
                    rssurls = [url]
                if rssurls:
                    url = rssurls[0]
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    return postvalidate(url, events, rawdata, feedType, autofind=0)
            except:
                # Deliberate best-effort: on any failure, report the original results.
                pass
    return {"url":url, "events":events, "rawdata":rawdata, "output":formattedOutput, "specialCase":specialCase, "feedType":feedType}
# ---- CGI request handling --------------------------------------------------
# Three entry modes: (1) POST with no form rawdata -> SOAP-style XML API;
# (2) 'url' or 'rawdata' form field present -> validate and render HTML;
# (3) neither -> render the plain input form.
fs = cgi.FieldStorage()
url = fs.getvalue("url") or ''
manual = fs.getvalue("manual") or 0
rawdata = fs.getvalue("rawdata") or ''
# Cap the submitted data and normalize line endings to '\n'.
rawdata = rawdata[:feedvalidator.MAXDATALENGTH].replace('\r\n', '\n').replace('\r', '\n')
if (os.environ['REQUEST_METHOD'].lower() == 'post') and (not rawdata):
    # SOAP
    try:
        # validate: the feed document arrives on stdin.
        params = feedvalidator.validateStream(sys.stdin)
        events = params['loggedEvents']
        feedType = params['feedType']
        # filter based on compatibility level
        from feedvalidator import compatibility
        filterFunc = compatibility.AA # hardcoded for now
        events = filterFunc(events)
        # format as xml
        from feedvalidator.formatter.text_xml import Formatter
        output = Formatter(events)
        # output
        if output:
            body = applyTemplate('soap.tmpl', {'body':"\n".join(output)})
        else:
            body = applyTemplate('soap.tmpl' , {'body':''})
        print 'Content-type: text/xml\r\n\r\n' + body
    except:
        # Any failure becomes a SOAP fault with the traceback included.
        import traceback
        tb = ''.join(apply(traceback.format_exception, sys.exc_info()))
        from feedvalidator.formatter.text_xml import xmlEncode
        print 'Status: 500 Internal Error\r\nContent-type: text/xml\r\n'
        print applyTemplate('fault.tmpl', {'code':sys.exc_info()[0],
            'string':sys.exc_info()[1], 'traceback':xmlEncode(tb)})
else:
    print 'Content-type: text/html'
    print
    if url or rawdata:
        # validate
        goon = 0
        if rawdata:
            # validate raw data (from text form)
            try:
                params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
                events = params['loggedEvents']
                feedType = params['feedType']
                goon = 1
            except:
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        else:
            # validate by URL (after scheme/credential cleanup)
            url = sanitizeURL(url)
            try:
                params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                events = params['loggedEvents']
                rawdata = params['rawdata']
                feedType = params['feedType']
                goon = 1
            except:
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        if goon:
            # post-validate (will do RSS autodiscovery if needed)
            validationData = postvalidate(url, events, rawdata, feedType)
            # write output header (postvalidate may have changed the URL)
            url = validationData['url']
            feedType = validationData['feedType']
            rawdata = validationData['rawdata']
            print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
            if manual:
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(rawdata)})
            else:
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
            output = validationData.get('output', None)
            if output:
                # print special case, if any
                specialCase = validationData.get('specialCase', None)
                if specialCase:
                    print applyTemplate('%s.tmpl' % specialCase)
                # print validator output
                print applyTemplate('invalid.tmpl')
                for o in output:
                    print o
                print applyTemplate('invalid_footer.tmpl')
                # print code listing
                print buildCodeListing(validationData['events'], validationData['rawdata'])
            else:
                # valid
                print applyTemplate('valid.tmpl', {"url":cgi.escape(url), "feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType]})
    else:
        # nothing to validate, just write basic form
        print applyTemplate('header.tmpl', {'title':'Feed Validator for Atom and RSS'})
        if manual:
            print applyTemplate('manual.tmpl', {'rawdata':''})
        else:
            print applyTemplate('index.tmpl', {'value':'http://'})
        print applyTemplate('special.tmpl', {})
    print applyTemplate('navbar.tmpl')
    print applyTemplate('footer.tmpl')
| Python |
"""$Id: compatibility.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from logging import *
def _must(event):
  """True if *event* is a MUST-level problem (an Error)."""
  return isinstance(event, Error)
def _should(event):
  """True if *event* is a SHOULD-level problem (a Warning from this
  project's logging module, which shadows the builtin)."""
  return isinstance(event, Warning)
def _may(event):
  """True if *event* is a MAY-level observation (an Info)."""
  return isinstance(event, Info)
def _count(events, eventclass):
return len([e for e in events if isinstance(e, eventclass)])
def A(events):
  """Compatibility level A: keep only MUST-level (Error) events."""
  kept = []
  for event in events:
    if _must(event):
      kept.append(event)
  return kept
def AA(events):
  """Compatibility level AA: keep MUST- and SHOULD-level events."""
  kept = []
  for event in events:
    if _must(event) or _should(event):
      kept.append(event)
  return kept
def AAA(events):
  """Compatibility level AAA: keep MUST-, SHOULD-, and MAY-level events."""
  kept = []
  for event in events:
    if _must(event) or _should(event) or _may(event):
      kept.append(event)
  return kept
def AAAA(events):
  """Compatibility level AAAA: every logged event, unfiltered."""
  return events
def analyze(events, rawdata):
  """Fingerprint known classes of broken feeds from the validation events.

  Returns "oldmt" (presumably an old Movable Type template -- TODO confirm),
  "html" (looks like an HTML page rather than a feed), or None.
  """
  if _count(events, InvalidContact):
    if _count(events, InvalidRFC2822Date) > 1:
      return "oldmt"
  if _count(events, UndefinedElement) and rawdata.count('<html'):
    return "html"
  return None
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.4 2002/10/22 02:18:33 f8dy
added RSS autodiscovery support
Revision 1.3 2002/10/19 21:08:02 f8dy
added "special case" functionality for the web front end
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: root.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
#
# Main document.
# Supports rss, rdf, pie, and ffkar
#
class root(validatorBase):
  """Validator for the document root.

  Dispatches to the handlers for rss, rdf:RDF, and Atom feed elements,
  and unwraps SOAP envelopes by returning a fresh root validator.
  """
  purl1_namespace = 'http://purl.org/rss/1.0/'
  purl2_namespace = 'http://purl.org/rss/2.0/'
  soap_namespace = 'http://feeds.archive.org/validator/'
  pie_namespace = 'http://purl.org/atom/ns#'

  def __init__(self, parent):
    validatorBase.__init__(self)
    self.parent = self.dispatcher = parent
    self.name = "root"

  def startElementNS(self, name, qname, attrs):
    # Register the default namespace implied by the root element before
    # delegating to the shared dispatch logic.
    defaults = validatorBase.defaultNamespaces
    if name == 'rss':
      defaults.append(qname)
    if name == 'channel':
      defaults.append(self.purl2_namespace)
    if name == 'feed':
      if not qname:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":"root", "element":name}))
      defaults.append(self.pie_namespace)
    validatorBase.startElementNS(self, name, qname, attrs)

  def unknown_starttag(self, name, qname, attrs):
    from logging import ObsoleteNamespace, InvalidNamespace, UndefinedElement
    from validators import eater
    if qname in ['http://example.com/newformat#']:
      self.log(ObsoleteNamespace({"element":name, "namespace":qname}))
    elif name == 'feed':
      self.log(InvalidNamespace({"element":name, "namespace":qname}))
    else:
      self.log(UndefinedElement({"parent":"root", "element":name}))
    return eater()

  def do_rss(self):
    from rss import rss
    return rss()

  def do_feed(self):
    from feed import feed
    return feed()

  def do_rdf_RDF(self):
    from rdf import rdf
    validatorBase.defaultNamespaces.append(self.purl1_namespace)
    return rdf()

  def do_channel(self):
    from channel import channel
    return channel()

  def do_soap_Envelope(self):
    # SOAP wrapper: the payload is itself a complete document
    return root(self)

  def do_soap_Body(self):
    validatorBase.defaultNamespaces.append(self.soap_namespace)
    return root(self)

  def do_request(self):
    return root(self)

  def do_xhtml_html(self):
    # an (X)HTML page is not a feed at all
    from logging import UndefinedElement
    self.log(UndefinedElement({"parent":"root", "element":"xhtml:html"}))
    from validators import eater
    return eater()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.18 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.17 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.16 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.15 2003/08/05 05:32:35 f8dy
0.2 snapshot - change version number and default namespace
Revision 1.14 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.13 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.12 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.11 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.10 2002/12/22 23:56:09 rubys
Adjust names, add a WSDL
Revision 1.9 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: item.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
class item(validatorBase):
  """Validator for an RSS <item> element and its children."""

  def _core_dc_overlap(self, core, ext, advice=None):
    # A core RSS element and its Dublin Core counterpart are both present:
    # flag the duplicated semantics and, when given, recommend the modular
    # (Dublin Core) equivalent.
    self.log(DuplicateItemSemantics({"core": core, "ext": ext}))
    if advice is not None:
      self.log(advice({"core": core, "ext": ext}))

  def validate(self):
    # link and title are only recommended on an item, but at least one of
    # title or description must be present.
    if "link" not in self.children:
      self.log(MissingItemLink({"parent":self.name, "element":"link"}))
    if "title" not in self.children:
      self.log(MissingItemTitle({"parent":self.name, "element":"title"}))
    if "title" not in self.children and "description" not in self.children:
      self.log(ItemMustContainTitleOrDescription({}))

  def do_link(self):
    return rfc2396(), noduplicates()

  def do_comments(self):
    return rfc2396(), noduplicates()

  def do_annotate_reference(self):
    return annotate_reference(), noduplicates()

  def do_title(self):
    return nonhtml(), noduplicates()

  def do_description(self):
    return nonhtml(), noduplicates()

  def do_enclosure(self):
    return enclosure()

  def do_pubDate(self):
    if "dc_date" in self.children:
      self._core_dc_overlap("pubDate", "dc:date", UseDCDate)
    return rfc822(), noduplicates()

  def do_author(self):
    if "dc_creator" in self.children:
      self._core_dc_overlap("author", "dc:creator", UseDCCreator)
    return email_lax(), noduplicates()

  def do_category(self):
    if "dc_subject" in self.children:
      self._core_dc_overlap("category", "dc:subject", UseDCSubject)
    return text()

  def do_dc_subject(self):
    if "category" in self.children:
      self.log(DuplicateItemSemantics({"core":"category", "ext":"dc:subject"}))
    return text()

  def do_dc_creator(self):
    if "author" in self.children:
      self.log(DuplicateItemSemantics({"core":"author", "ext":"dc:creator"}))
    return text()

  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"dc:date"}))
    return iso8601(), noduplicates()

  def do_source(self):
    if "dc_source" in self.children:
      self._core_dc_overlap("source", "dc:source", UseDCSource)
    return source(), noduplicates()

  def do_dc_source(self):
    if "source" in self.children:
      self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
    return text(), noduplicates()

  def do_guid(self):
    # guids must also be unique across the whole feed
    return guid(), noduplicates(), unique('guid',self.parent)

  def do_content_encoded(self):
    return safeHtml(), noduplicates()

  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()

  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396()

  def do_xhtml_body(self):
    return htmlEater(self,'xhtml:body')
class source(text, httpURLMixin):
  # <source>: text content naming the originating channel; the url
  # attribute is required (validated as an HTTP URL by the mixin).
  def prevalidate(self):
    try:
      self.validateHttpURL(None, 'url')  # raises KeyError when url is absent
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'url'}))
    return text.prevalidate(self)
class enclosure(validatorBase, httpURLMixin):
  """Validator for <enclosure>: requires a positive integer length, a
  MIME type, and an HTTP url attribute."""
  from validators import mime_re

  def prevalidate(self):
    where = {"parent": self.parent.name, "element": self.name}

    # length: required, must parse as a positive integer
    try:
      length = int(self.attrs.getValue((None, 'length')))
    except KeyError:
      self.log(MissingAttribute(dict(where, attr='length')))
    except ValueError:
      self.log(InvalidIntegerAttribute(dict(where, attr='length')))
    else:
      if length <= 0:
        self.log(InvalidIntegerAttribute(dict(where, attr='length')))
      else:
        self.log(ValidIntegerAttribute(dict(where, attr='length')))

    # type: required, must look like a MIME type
    try:
      mimetype = self.attrs.getValue((None, 'type'))
    except KeyError:
      self.log(MissingAttribute(dict(where, attr='type')))
    else:
      if not self.mime_re.match(mimetype):
        self.log(InvalidMIMEAttribute(dict(where, attr='type')))
      else:
        self.log(ValidMIMEAttribute(dict(where, attr='type')))

    # url: required, validated as an HTTP URL by the mixin
    try:
      self.validateHttpURL(None, 'url')
    except KeyError:
      self.log(MissingAttribute(dict(where, attr='url')))

    return validatorBase.prevalidate(self)
class guid(rfc2396, noduplicates):
  """Validator for <guid>: validated as a URL unless isPermaLink="false"."""
  def validate(self):
    treat_as_url = 1  # attribute absent => defaults to a permalink
    try:
      flag = self.attrs.getValue((None, 'isPermaLink'))
    except KeyError:
      pass
    else:
      if flag not in ('true', 'false'):
        self.log(InvalidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
      else:
        self.log(ValidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
      treat_as_url = (flag == 'true')
    if treat_as_url:
      return rfc2396.validate(self, InvalidHttpGUID, ValidHttpGUID)
    self.log(ValidHttpGUID({"parent":self.parent.name, "element":self.name}))
    return noduplicates.validate(self)
class annotate_reference(rdfResourceURI): pass
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.29 2003/12/12 11:25:56 rubys
Validate mime type in link tags
Revision 1.28 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.27 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.26 2003/08/04 00:03:14 rubys
Implement more strict email check for pie
Revision 1.25 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.24 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.23 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.22 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.21 2003/01/11 03:59:26 rubys
dashes are legal in MIME types
Revision 1.20 2002/12/20 13:26:00 rubys
CreativeCommons support
Revision 1.19 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.18 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.17 2002/10/18 15:46:35 f8dy
added (and passed) rule for no multiple content:encoded
Revision 1.16 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.15 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: rdf.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
from validators import rdfAbout, noduplicates
#
# rdf:RDF element. The valid children include "channel", "item", "textinput", "image"
#
class rdf(validatorBase):
  """Validator for rdf:RDF, the RSS 1.0 document root.

  Valid children: channel (required, no duplicates), item, textinput, image.
  """
  def prevalidate(self):
    self.setFeedType(TYPE_RSS1)

  def validate(self):
    if "channel" not in self.children:
      self.log(MissingChannel({"parent":self.name, "element":"channel"}))

  def do_channel(self):
    from channel import channel
    return rdfAbout(), channel(), noduplicates()

  def do_item(self):
    from item import item
    return rdfAbout(), item()

  def do_textinput(self):
    from textInput import textInput
    return textInput()

  def do_image(self):
    from image import image
    return image()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.10 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.9 2003/10/16 15:54:41 rubys
Detect duplicate channels
Revision 1.8 2003/08/12 00:26:30 rubys
Misleading error message if a channel is missing in an RSS 1.0 feed
Revision 1.7 2003/08/10 13:49:14 rubys
Add support for chanel and item level rdf:about. Ensure that http and
ftp URLs have exactly two slashes after the scheme.
Revision 1.6 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.5 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.4 2002/10/22 14:11:36 f8dy
initial attempts to handle RSS 1.0 vs. 2.0 images and textinputs; test
cases still fail
Revision 1.3 2002/10/22 13:16:03 f8dy
passed lowercase textinput test
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: iso639codes.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
# ISO 639-1 two-letter language codes mapped to English names; used by the
# language validation tests ('x'/'xx' additionally accept user-defined codes).
isoLang = \
  {'aa': 'Afar',
   'ab': 'Abkhazian',
   'af': 'Afrikaans',
   'am': 'Amharic',
   'ar': 'Arabic',
   'as': 'Assamese',
   'ay': 'Aymara',
   'az': 'Azerbaijani',
   'ba': 'Bashkir',
   'be': 'Byelorussian',
   'bg': 'Bulgarian',
   'bh': 'Bihari',
   'bi': 'Bislama',
   'bn': 'Bengali;Bangla',
   'bo': 'Tibetan',
   'br': 'Breton',
   'ca': 'Catalan',
   'co': 'Corsican',
   'cs': 'Czech',
   'cy': 'Welsh',
   'da': 'Danish',
   'de': 'German',
   'dz': 'Bhutani',
   'el': 'Greek',
   'en': 'English',
   'eo': 'Esperanto',
   'es': 'Spanish',
   'et': 'Estonian',
   'eu': 'Basque',
   'fa': 'Persian (Farsi)',
   'fi': 'Finnish',
   'fj': 'Fiji',
   'fo': 'Faroese',
   'fr': 'French',
   'fy': 'Frisian',
   'ga': 'Irish',
   'gd': 'Scots Gaelic',
   'gl': 'Galician',
   'gn': 'Guarani',
   'gu': 'Gujarati',
   'ha': 'Hausa',
   'he': 'Hebrew',
   'hi': 'Hindi',
   'hr': 'Croatian',
   'hu': 'Hungarian',
   'hy': 'Armenian',
   'ia': 'Interlingua',
   'id': 'Indonesian',
   'ie': 'Interlingue',
   'ik': 'Inupiak',
   'is': 'Icelandic',
   'it': 'Italian',
   'iu': 'Inuktitut',
   'ja': 'Japanese',
   'jv': 'Javanese',
   'ka': 'Georgian',
   'kk': 'Kazakh',
   'kl': 'Greenlandic',
   'km': 'Cambodian',
   'kn': 'Kannada',
   'ko': 'Korean',
   'ks': 'Kashmiri',
   'ku': 'Kurdish',
   'ky': 'Kirghiz',
   'la': 'Latin',
   'ln': 'Lingala',
   'lo': 'Laothian',
   'lt': 'Lithuanian',
   'lv': 'Latvian;Lettish',
   'mg': 'Malagasy',
   'mi': 'Maori',
   'mk': 'Macedonian',
   'ml': 'Malayalam',
   'mn': 'Mongolian',
   'mo': 'Moldavian',
   'mr': 'Marathi',
   'ms': 'Malay',
   'mt': 'Maltese',
   'my': 'Burmese',
   'na': 'Nauru',
   'ne': 'Nepali',
   'nl': 'Dutch',
   'no': 'Norwegian',
   'oc': 'Occitan',
   'om': 'Afan (Oromo)',
   'or': 'Oriya',
   'pa': 'Punjabi',
   'pl': 'Polish',
   'ps': 'Pashto;pushto',
   'pt': 'Portuguese',
   'qu': 'Quechua',
   'rm': 'Rhaeto-Romance',
   'rn': 'Kurundi',
   'ro': 'Romanian',
   'ru': 'Russian',
   'rw': 'Kinyarwanda',
   'sa': 'Sanskrit',
   'sd': 'Sindhi',
   'sg': 'Sangho',
   'sh': 'Serbo-Croatian',
   'si': 'Singhalese',
   'sk': 'Slovak',
   'sl': 'Slovenian',
   'sm': 'Samoan',
   'sn': 'Shona',
   'so': 'Somali',
   'sq': 'Albanian',
   'sr': 'Serbian',
   'ss': 'Siswati',
   'st': 'Sesotho',
   'su': 'Sundanese',
   'sv': 'Swedish',
   'sw': 'Swahili',
   'ta': 'Tamil',
   'te': 'Telugu',
   'tg': 'Tajik',
   'th': 'Thai',
   'ti': 'Tigrinya',
   'tk': 'Turkmen',
   'tl': 'Tagalog',
   'tn': 'Setswana',
   'to': 'Tonga',
   'tr': 'Turkish',
   'ts': 'Tsonga',
   'tt': 'Tatar',
   'tw': 'Twi',
   'ug': 'Uigur',
   'uk': 'Ukrainian',
   'ur': 'Urdu',
   'uz': 'Uzbek',
   'vi': 'Vietnamese',
   'vo': 'Volapuk',
   'wo': 'Wolof',
   'xh': 'Xhosa',
   'yi': 'Yiddish',
   'yo': 'Yoruba',
   'za': 'Zhuang',
   'zh': 'Chinese',
   'zu': 'Zulu',
   'x' : 'a user-defined language',
   'xx': 'a user-defined language'}
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2002/10/18 14:19:07 f8dy
added code mapping for language tests
"""
| Python |
"""$Id: logging.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
# feed types
TYPE_UNKNOWN = 0
TYPE_RSS1 = 1
TYPE_RSS2 = 2
TYPE_ATOM = 3

# Display name and "valid feed" badge graphic, indexed by feed type.
FEEDTYPEDISPLAY = {0:"(unknown type)", 1:"RSS", 2:"RSS", 3:"Atom"}
VALIDFEEDGRAPHIC = {0:"", 1:"valid-rss.png", 2:"valid-rss.png", 3:"valid-atom.png"}

#
# logging support
#
# Every validation finding is an instance of LoggedEvent; its class encodes
# the specific condition and, via the inheritance hierarchy, the severity:
# Error (MUST), Warning (SHOULD), Info (MAY), Success.  Reporting code
# filters these events with isinstance checks, so class identity is the API.
#
class LoggedEvent:
  def __init__(self, params):
    self.params = params  # context values, e.g. "parent", "element", "attr"

# Severity roots.  NOTE: Warning (and UnicodeError below) deliberately
# shadow the Python builtins of the same name within this module.
class Info(LoggedEvent): pass
class Warning(LoggedEvent): pass
class Error(LoggedEvent): pass

###################### error ######################

class SAXError(Error): pass
class UnicodeError(Error): pass
class UndefinedElement(Error): pass
class MissingNamespace(UndefinedElement): pass
class NoBlink(UndefinedElement): pass
class MissingAttribute(Error): pass
class DuplicateElement(Error): pass
class NotEnoughHoursInTheDay(Error): pass
class EightDaysAWeek(Error): pass
class InvalidValue(Error): pass
class InvalidContact(InvalidValue): pass
class InvalidLink(InvalidValue): pass
class InvalidW3DTFDate(InvalidValue): pass
class InvalidRFC2822Date(InvalidValue): pass
class InvalidURLAttribute(InvalidValue): pass
class InvalidIntegerAttribute(InvalidValue): pass
class InvalidBooleanAttribute(InvalidValue): pass
class InvalidMIMEAttribute(InvalidValue): pass
class NotBlank(InvalidValue): pass
class AttrNotBlank(InvalidValue): pass
class InvalidInteger(InvalidValue): pass
class InvalidWidth(InvalidValue): pass
class InvalidHeight(InvalidValue): pass
class InvalidHour(InvalidValue): pass
class InvalidDay(InvalidValue): pass
class InvalidHttpGUID(InvalidValue): pass
class InvalidLanguage(InvalidValue): pass
class InvalidUpdatePeriod(InvalidValue): pass
class ContainsUndeclaredHTML(InvalidValue): pass
class MissingElement(Error): pass
class MissingChannel(MissingElement): pass
class MissingDescription(MissingElement): pass
class MissingLink(MissingElement): pass
class MissingTitle(MissingElement): pass
class ItemMustContainTitleOrDescription(MissingElement): pass
class FatalSecurityRisk(Error): pass
class ContainsSystemEntity(FatalSecurityRisk): pass
class DuplicateValue(InvalidValue): pass
class InvalidDoctype(Error): pass
class MultipartInvalid(Error): pass
class MultipartMissing(Error): pass
class MultipartRecursion(Error): pass
class MultipartDuplicate(Error): pass
class DuplicateAtomLink(Error): pass
class MissingHref(Error): pass
class AtomLinkNotEmpty(Error): pass
class AtomLinkMissingRel(Error): pass
class MissingAlternateLink(Error): pass

###################### warning ######################

class DuplicateSemantics(Warning): pass
class DuplicateItemSemantics(DuplicateSemantics): pass
class ContainsRelRef(Warning): pass
class ReservedPrefix(Warning): pass
class SecurityRisk(Warning): pass
class ContainsScript(SecurityRisk): pass
class ContainsMeta(SecurityRisk): pass
class ContainsEmbed(SecurityRisk): pass
class ContainsObject(SecurityRisk): pass

###################### info ######################

class ContainsHTML(Info): pass
class MissingOptionalElement(Info): pass
class MissingItemLink(MissingOptionalElement): pass
class MissingItemTitle(MissingOptionalElement): pass
class BestPractices(Info): pass
class MissingRecommendedElement(BestPractices): pass
class MissingDCLanguage(MissingRecommendedElement): pass
class MissingDCRights(MissingRecommendedElement): pass
class MissingDCDate(MissingRecommendedElement): pass
class UseModularEquivalent(BestPractices): pass
class UseDCRights(UseModularEquivalent): pass
class UseAdminGeneratorAgent(UseModularEquivalent): pass
class UseDCCreator(UseModularEquivalent): pass
class UseDCSubject(UseModularEquivalent): pass
class UseDCDate(UseModularEquivalent): pass
class UseDCSource(UseModularEquivalent): pass
class UseDCLanguage(UseModularEquivalent): pass
class UseDCTermsModified(UseModularEquivalent): pass
class UseDCPublisher(UseModularEquivalent): pass
class UseSyndicationModule(UseModularEquivalent): pass
class UseAnnotateReference(UseModularEquivalent): pass
class RecommendedWidth(BestPractices): pass
class RecommendedHeight(BestPractices): pass
class NonstdPrefix(BestPractices): pass

## Atom-specific errors
class ObsoleteVersion(Error): pass
class ObsoleteNamespace(Error): pass
class InvalidURI(InvalidValue): pass
class InvalidURN(InvalidValue): pass
class InvalidTAG(InvalidValue): pass
class InvalidContentMode(InvalidValue): pass
class InvalidMIMEType(InvalidValue): pass
class InvalidNamespace(Error): pass
class NoMIMEType(MissingAttribute): pass
class NotEscaped(InvalidValue): pass
class NotBase64(InvalidValue): pass
class NotInline(Warning): pass # this one can never be sure...
class NotHtml(Error): pass
class W3DTFDateNoTimezone(Warning): pass
class W3DTFDateNonUTC(Info): pass
class W3DTFDateNonLocal(Warning): pass

############## non-errors (logging successes) ###################

class Success(LoggedEvent): pass
class ValidValue(Success): pass
class ValidCloud(Success): pass
class ValidURI(ValidValue): pass
class ValidHttpGUID(ValidURI): pass
class ValidURLAttribute(ValidURI): pass
class ValidURN(ValidValue): pass
class ValidTAG(ValidValue): pass
class ValidTitle(ValidValue): pass
class ValidDate(ValidValue): pass
class ValidW3DTFDate(ValidDate): pass
class ValidRFC2822Date(ValidDate): pass
class ValidAttributeValue(ValidValue): pass
class ValidBooleanAttribute(ValidAttributeValue): pass
class ValidLanguage(ValidValue): pass
class ValidHeight(ValidValue): pass
class ValidWidth(ValidValue): pass
# (a second, redundant "class ValidTitle(ValidValue)" definition that used
# to appear here silently rebound the name and has been removed)
class ValidContact(ValidValue): pass
class ValidIntegerAttribute(ValidValue): pass
class ValidMIMEAttribute(ValidValue): pass
class ValidDay(ValidValue): pass
class ValidHour(ValidValue): pass
class ValidInteger(ValidValue): pass
class ValidUpdatePeriod(ValidValue): pass
class ValidContentMode(ValidValue): pass
class ValidElement(ValidValue): pass
class ValidCopyright(ValidValue): pass
class ValidGeneratorName(ValidValue): pass
class OptionalValueMissing(ValidValue): pass
class ValidDoctype(ValidValue): pass
class ValidHtml(ValidValue): pass
class ValidAtomLinkRel(ValidValue): pass
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.63 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.62 2003/12/12 14:23:19 f8dy
ValidAtomLinkRel should inherit from ValidValue
Revision 1.61 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.60 2003/12/12 01:24:36 rubys
Multipart/alternative tests
Revision 1.59 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.58 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.57 2003/12/11 06:00:51 f8dy
added tag: testcases, passed
Revision 1.56 2003/08/24 00:05:34 f8dy
removed iframe tests, after further discussion this is not enough of a security risk to keep feeds from validating
Revision 1.55 2003/08/23 01:45:22 f8dy
added ContainsIframe
Revision 1.54 2003/08/23 00:28:04 rubys
Validate escaped text/HTML content
Revision 1.53 2003/08/06 16:16:59 f8dy
added testcase for Netscape DOCTYPE
Revision 1.52 2003/08/06 16:10:04 f8dy
added testcase for Netscape DOCTYPE
Revision 1.51 2003/08/05 18:01:37 f8dy
*** empty log message ***
Revision 1.50 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.49 2003/08/05 05:37:42 f8dy
0.2 snapshot - add test for obsolete 0.1 version
Revision 1.48 2003/08/04 01:05:33 rubys
Check for HTML in titles
Revision 1.47 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.46 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.45 2003/07/29 21:48:10 f8dy
tightened up test cases, added parent element check, changed negative test cases to positive
Revision 1.44 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.43 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.42 2003/07/29 17:13:17 f8dy
more urn tests
Revision 1.41 2003/07/29 16:44:56 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.40 2003/07/29 15:46:31 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.39 2003/07/29 15:15:33 f8dy
added tests for invalid URNs (may be used in entry/id of Atom feeds)
Revision 1.38 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.37 2003/07/19 21:15:08 f8dy
added tests and logging classes for duplicate guid/id values within a feed (thanks AaronSw for this idea)
Revision 1.36 2003/07/11 17:47:04 rubys
not-inline can only be a warning as one can never be totally sure...
Revision 1.35 2003/07/09 19:28:39 f8dy
added test cases looking at actual content vs. mode (note: not passed)
Revision 1.34 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.33 2003/07/09 03:31:36 f8dy
Updated pie-specific log messages
Revision 1.32 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.31 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.30 2003/07/06 21:20:02 rubys
Refactor so test cases are organized by protocol
Revision 1.29 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.28 2002/10/27 18:54:30 rubys
Issue warnings for relative references in descriptions
Revision 1.27 2002/10/22 16:24:04 f8dy
added UnicodeError support for feeds that declare utf-8 but use 8-bit characters anyway
Revision 1.26 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.25 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.24 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: rss.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
from validators import noduplicates
#
# Rss element. The only valid child element is "channel"
#
class rss(validatorBase):
  """Validator for the <rss> element (RSS 0.9x/2.0 document root).

  The only valid child element is a single <channel>; the version
  attribute is required.
  """
  def do_channel(self):
    from channel import channel
    return channel(), noduplicates()

  def prevalidate(self):
    # could be anything in the 0.9x family, don't really care
    self.setFeedType(TYPE_RSS2)

  def validate(self):
    if not "channel" in self.children:
      self.log(MissingChannel({"parent":self.name, "element":"channel"}))
    if (None,'version') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    elif [e for e in self.dispatcher.loggedEvents if e.__class__==ValidDoctype]:
      # A DOCTYPE was seen earlier: only legitimate on RSS 0.91 feeds.
      # FIX: replaced the long-obsolete `<>` operator with `!=`
      # (identical semantics, valid in both Python 2 and 3).
      if self.attrs[(None,'version')] != '0.91':
        self.log(InvalidDoctype({"parent":self.parent.name, "element":self.name, "attr":"version"}))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2003/10/16 15:54:41 rubys
Detect duplicate channels
Revision 1.7 2003/08/09 18:32:27 rubys
Only allow NetScape DocType on RSS 0.91 feeds
Revision 1.6 2003/08/09 17:21:01 rubys
Fix misleading message when rss channel is missing
Revision 1.5 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.4 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: textInput.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from validators import *
#
# textInput element.
#
class textInput(validatorBase):
  """Validator for <textInput>: title, link, description, and name are
  all required children."""
  def validate(self):
    required = (("title", MissingTitle),
                ("link", MissingLink),
                ("description", MissingDescription),
                ("name", MissingElement))
    for child, event in required:
      if child not in self.children:
        self.log(event({"parent": self.name, "element": child}))

  def do_title(self):
    return nonhtml(), noduplicates()

  def do_description(self):
    return nonhtml(), noduplicates()

  def do_name(self):
    return nonhtml(), noduplicates()

  def do_link(self):
    return rfc2396(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.4 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: link.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# Atom link element
#
class link(nonblank,rfc2396):
  """Validator for the Atom <link> element.

  The element itself must be empty; rel, type, title, and href are
  attributes.  href is required and is validated as a non-blank URI
  reference.
  """
  def prevalidate(self):
    # remember interesting attribute values for later inspection
    self.type = ""
    self.rel = ""
    self.title = ""

  def validate(self):
    if self.attrs.has_key((None, "rel")):
      self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel"}))
      self.value = self.rel = self.attrs.getValue((None, "rel"))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "rel"})
    else:
      self.log(AtomLinkMissingRel({"parent":self.parent.name, "element":self.name, "attr":"rel"}))
    if self.attrs.has_key((None, "type")):
      self.value = self.type = self.attrs.getValue((None, "type"))
      if not mime_re.match(self.type):
        self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
      else:
        self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    else:
      # BUG FIX: this event previously reported {"attr":"rel"}; the
      # attribute that is actually missing here is "type".
      self.log(NoMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type"}))
    if self.attrs.has_key((None, "title")):
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name, "attr":"title"}))
      self.value = self.title = self.attrs.getValue((None, "title"))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "title"})
    if self.attrs.has_key((None, "href")):
      self.value = self.attrs.getValue((None, "href"))
      rfc2396.validate(self, extraParams={"attr": "href"})
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "href"})
    else:
      self.log(MissingHref({"parent":self.parent.name, "element":self.name}))

  def characters(self, text):
    # any character data inside an Atom <link> is an error (must be empty)
    self.log(AtomLinkNotEmpty({"parent":self.parent.name, "element":self.name}))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.5 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.4 2003/12/12 11:25:56 rubys
Validate mime type in link tags
Revision 1.3 2003/12/12 06:24:05 rubys
link type validation
Revision 1.2 2003/12/12 06:10:58 rubys
link rel/type checking
Revision 1.1 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
"""
| Python |
"""$Id: channel.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
from validators import *
#
# channel element.
#
class channel(validatorBase):
  """Validates an RSS <channel> element.

  validate() checks required children, Dublin Core metadata, and
  singleton elements that appear more than once; the do_* methods
  dispatch each child element to its validator.
  """
  def validate(self):
    # Required RSS children.
    if not "description" in self.children:
      self.log(MissingDescription({"parent":self.name,"element":"description"}))
    if not "link" in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
    if not "title" in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    # Dublin Core metadata checks (severity is determined by the
    # corresponding logging classes, not here).
    if not "dc_date" in self.children:
      self.log(MissingDCDate({"parent":self.name, "element":"dc:date"}))
    if not "dc_rights" in self.children:
      self.log(MissingDCRights({"parent":self.name, "element":"dc:rights"}))
    if not "dc_language" in self.children:
      self.log(MissingDCLanguage({"parent":self.name, "element":"dc:language"}))
    # Elements that may appear at most once per channel.
    if self.children.count("image") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"image"}))
    if self.children.count("textInput") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
    if self.children.count("skipHours") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
    if self.children.count("skipDays") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
  def do_image(self):
    from image import image
    return image()
  def do_item(self):
    from item import item
    return item()
  def do_items(self): # this actually should be from the rss1.0 ns
    return eater()
  def do_textInput(self):
    from textInput import textInput
    return textInput()
  def do_textinput(self):
    if not self.attrs.has_key((rdfNS,"about")):
      # optimize for RSS 2.0. If it is not valid RDF, assume that it is
      # a simple misspelling (in other words, the error message will be
      # less than helpful on RSS 1.0 feeds).
      self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
    return eater()
  def do_category(self):
    return text()
  def do_cloud(self):
    return cloud()
  do_rating = validatorBase.leaf # TODO test cases?!?
  def do_ttl(self):
    return ttl(), noduplicates()
  def do_docs(self):
    return rfc2396(), noduplicates()
  def do_link(self):
    return rfc2396(), noduplicates()
  def do_title(self):
    return nonhtml(), noduplicates()
  def do_description(self):
    return nonhtml(), noduplicates()
  # The handlers below log DuplicateSemantics when a core element and its
  # extension-module counterpart are both present.
  def do_generator(self):
    if "admin_generatorAgent" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    self.log(UseAdminGeneratorAgent({"core":"generator", "ext":"admin:generatorAgent"}))
    return text(), noduplicates()
  def do_pubDate(self):
    if "dc_date" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    self.log(UseDCDate({"core":"pubDate", "ext":"dc:date"}))
    return rfc822(), noduplicates()
  def do_managingEditor(self):
    if "dc_creator" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    self.log(UseDCCreator({"core":"managingEditor", "ext":"dc:creator"}))
    return email_lax(), noduplicates()
  def do_webMaster(self):
    if "dc_publisher" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    self.log(UseDCPublisher({"core":"webMaster", "ext":"dc:publisher"}))
    return email_lax(), noduplicates()
  def do_dc_creator(self):
    if "managingEditor" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return text() # duplicates allowed
  def do_dc_language(self):
    if "language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_language(self):
    if "dc_language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    self.log(UseDCLanguage({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_dcterms_modified(self):
    if "lastBuildDate" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return iso8601(), noduplicates()
  def do_dc_publisher(self):
    if "webMaster" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return text() # duplicates allowed
  def do_copyright(self):
    if "dc_rights" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    self.log(UseDCRights({"core":"copyright", "ext":"dc:rights"}))
    return text(), noduplicates()
  def do_dc_rights(self):
    if "copyright" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return text(), noduplicates()
  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return iso8601(), noduplicates()
  def do_admin_generatorAgent(self):
    if "generator" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return admin_generatorAgent(), noduplicates()
  def do_admin_errorReportsTo(self):
    return admin_errorReportsTo(), noduplicates()
  def do_lastBuildDate(self):
    if "dcterms_modified" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    self.log(UseDCTermsModified({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return rfc822(), noduplicates()
  def do_skipHours(self):
    from skipHours import skipHours
    return skipHours()
  def do_skipDays(self):
    from skipDays import skipDays
    return skipDays()
  def do_blogChannel_blogRoll(self):
    return rfc2396(), noduplicates()
  def do_blogChannel_mySubscriptions(self):
    return rfc2396(), noduplicates()
  def do_blogChannel_blink(self):
    return rfc2396(), noduplicates()
  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()
  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396()
  def do_blink(self):
    return blink(), noduplicates()
  def do_sy_updatePeriod(self):
    return sy_updatePeriod(), noduplicates()
  def do_sy_updateFrequency(self):
    return sy_updateFrequency(), noduplicates()
  def do_sy_updateBase(self):
    return iso8601(), noduplicates()
class blink(validatorBase):
  """A <blink> element is never valid; always log NoBlink."""
  def validate(self):
    self.log(NoBlink({}))
class cloud(validatorBase):
  """Validates the attributes of an RSS <cloud> element."""
  def prevalidate(self):
    def requireAttr(attr):
      # Log MissingAttribute when absent, ValidCloud when present.
      if (None, attr) not in self.attrs.getNames():
        self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":attr}))
    requireAttr("domain")
    # 'port' additionally has to parse as a positive integer.
    try:
      if int(self.attrs.getValue((None, 'port'))) <= 0:
        self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except ValueError:
      self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    for requiredAttr in ("path", "registerProcedure", "protocol"):
      requireAttr(requiredAttr)
    ## TODO - is there a list of accepted protocols for this thing?
    return validatorBase.prevalidate(self)
# <ttl>: validated as a positive integer.
class ttl(positiveInteger): pass
# admin:generatorAgent: validated as an RDF resource URI.
class admin_generatorAgent(rdfResourceURI): pass
# admin:errorReportsTo: validated as an RDF resource URI.
class admin_errorReportsTo(rdfResourceURI): pass
# sy:updateFrequency: validated as a positive integer.
class sy_updateFrequency(positiveInteger): pass
class sy_updatePeriod(text):
  """sy:updatePeriod must be one of the five fixed period names."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
      self.log(ValidUpdatePeriod(params))
    else:
      self.log(InvalidUpdatePeriod(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.30 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.29 2003/08/04 00:03:14 rubys
Implement more strict email check for pie
Revision 1.28 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.27 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.26 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.25 2003/07/29 16:44:56 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.24 2002/12/20 13:26:00 rubys
CreativeCommons support
Revision 1.23 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.22 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.21 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.20 2002/10/22 14:11:36 f8dy
initial attempts to handle RSS 1.0 vs. 2.0 images and textinputs; test
cases still fail
Revision 1.19 2002/10/22 13:16:03 f8dy
passed lowercase textinput test
Revision 1.18 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.17 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.16 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.15 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: generator.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# Atom generator element
#
class generator(rfc2396):
  """Atom generator element; its optional url attribute must be a URI."""
  def validate(self):
    urlKey = (None, "url")
    if self.attrs.has_key(urlKey):
      self.value = self.attrs.getValue(urlKey)
      rfc2396.validate(self, extraParams={"attr": "url"})
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.3 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/03 22:39:40 rubys
Add generator element
Revision 1.2 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: skipHours.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
#
# skipHours element
#
class skipHours(validatorBase):
  """Validates a <skipHours> element and its <hour> children."""
  def prevalidate(self):
    # Always point the author at the syndication module instead.
    self.log(UseSyndicationModule({"core":self.name, "ext":"syndication module"}))
  def validate(self):
    if self.children.count("hour") == 0:
      self.log(MissingElement({"parent":self.name, "element":"hour"}))
    if len(self.children) > 24:
      self.log(NotEnoughHoursInTheDay({}))
  def do_hour(self):
    return hour()
class hour(validatorBase):
  """Validates an <hour>: an integer in the inclusive range 0..24."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      # int() raises ValueError for non-numeric text; out-of-range
      # values are funnelled through the same handler.
      if 0 <= int(self.value) <= 24:
        self.log(ValidHour(params))
      else:
        raise ValueError
    except ValueError:
      self.log(InvalidHour(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.4 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.3 2002/11/11 19:12:17 rubys
Allow zero for hours
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: skipDays.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
#
# skipDays element
#
class skipDays(validatorBase):
  """Validates a <skipDays> element and its <day> children."""
  def prevalidate(self):
    # Always point the author at the syndication module instead.
    self.log(UseSyndicationModule({"core":self.name, "ext":"syndication module"}))
  def validate(self):
    if self.children.count("day") == 0:
      self.log(MissingElement({"parent":self.name, "element":"day"}))
    if len(self.children) > 7:
      self.log(EightDaysAWeek({}))
  def do_day(self):
    return day()
class day(validatorBase):
  """Validates a <day>: must be a capitalized English weekday name."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'):
      self.log(ValidDay(params))
    else:
      self.log(InvalidDay(params))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.5 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.4 2003/07/30 01:54:59 f8dy
tighten test cases, add explicit params
Revision 1.3 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: text_plain.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
class Formatter(BaseFormatter):
  """Renders each logged event as a single line of plain text."""
  def format(self, event):
    position = self.getLineAndColumn(event)
    message = self.getMessage(event)
    return '%s %s%s' % (position, message, self.getCount(event))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.7 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.6 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.5 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: text_html.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
import cgi
class Formatter(BaseFormatter):
  """Renders logged events as HTML list items, each with a fragment of
  the offending source line and a marker under the error column."""
  FRAGMENTLEN = 80   # maximum number of source characters displayed
  DOCSURL = 'docs/'  # base URL for the per-message help pages
  def __init__(self, events, rawdata):
    BaseFormatter.__init__(self, events)
    self.rawdata = rawdata
  def getRootClass(self, aClass):
    # Walk up the first-base chain to the class derived directly from
    # LoggedEvent; used to group help pages by event family.
    base = aClass.__bases__[0]
    if base.__name__.split('.')[-1] == 'LoggedEvent':
      return aClass
    else:
      return self.getRootClass(base)
  def getHelpURL(self, event):
    """Build a help URL of the form DOCSURL + rootclass + '/' + messageclass."""
    rootClass = self.getRootClass(event.__class__).__name__
    rootClass = rootClass.split('.')[-1]
    rootClass = rootClass.lower()
    # messageClass = self.getMessageClass(event).__name__.split('.')[-1]
    messageClass = event.__class__.__name__.split('.')[-1]
    return self.DOCSURL + rootClass + '/' + messageClass
  def format(self, event):
    if event.params.has_key('line'):
      line = event.params['line']
      if line >= len(self.rawdata.split('\n')):
        # For some odd reason, UnicodeErrors tend to trigger a bug
        # in the SAX parser that misrepresents the current line number.
        # We try to capture the last known good line number/column as
        # we go along, and now it's time to fall back to that.
        line = event.params['line'] = event.params['backupline']
        column = event.params['column'] = event.params['backupcolumn']
      column = event.params['column']
      codeFragment = self.rawdata.split('\n')[line-1]
      markerColumn = column
      # Keep the fragment (and the marker within it) inside FRAGMENTLEN.
      if column > self.FRAGMENTLEN:
        codeFragment = '... ' + codeFragment[column-(self.FRAGMENTLEN/2):]
        markerColumn = 5 + (self.FRAGMENTLEN/2)
      if len(codeFragment) > self.FRAGMENTLEN:
        codeFragment = codeFragment[:(self.FRAGMENTLEN-4)] + ' ...'
    else:
      codeFragment = ''
    return """<li>
<p><a href="#l%s">%s</a>, %s: <span class="message">%s</span>%s [<a title="more information about this error" href="%s">help</a>]</p>
<blockquote><p><code>%s<br />%s<span class="marker">%s</span></code></p></blockquote>
</li>
""" % (line, self.getLine(event),
       self.getColumn(event),
       self.getMessage(event),
       self.getCount(event),
       self.getHelpURL(event),
       cgi.escape(codeFragment),
       '&nbsp;' * (markerColumn - 1),  # bug fix: plain spaces collapse in HTML
       '^')
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.14 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.13 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.12 2003/09/01 21:28:03 f8dy
changes related to new server
Revision 1.11 2003/09/01 21:20:44 f8dy
changes related to new server
Revision 1.10 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.9 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.8 2002/10/30 06:07:18 f8dy
version 1.0.5
Revision 1.7 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: application_test.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for testing that all output messages are defined properly"""
from base import BaseFormatter
import feedvalidator
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class Formatter(BaseFormatter):
  """Formatter for the automated message tester: resolves each event to
  its translated message, or None when no translation exists."""
  def getMessage(self, event):
    # Breadth-first search of the event's class hierarchy for the most
    # specific class with a registered message.
    queue = [event.__class__]
    while queue:
      cls = queue.pop(0)
      if lang.messages.has_key(cls):
        return lang.messages[cls] % event.params
      queue.extend(cls.__bases__)
    return None
  def format(self, event):
    """returns the formatted representation of a single event"""
    return self.getMessage(event)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.3 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/05 22:09:24 f8dy
added automated message tester
"""
| Python |
"""$Id: text_xml.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Output class for xml output"""
from base import BaseFormatter
from feedvalidator.logging import *
import feedvalidator
def xmlEncode(value):
  """Escape the five predefined XML entities in value.

  '&' must be replaced first so already-escaped text is not
  double-escaped. (The previous replacement targets had been corrupted
  into identity replacements by entity decoding.)
  """
  value = value.replace('&', '&amp;')
  value = value.replace('<', '&lt;')
  value = value.replace('>', '&gt;')
  value = value.replace('"', '&quot;')
  value = value.replace("'", '&apos;')
  return value
class Formatter(BaseFormatter):
  """Renders each logged event as a <message> XML fragment."""
  def format(self, event):
    # NOTE(review): this assigns 'type', 'text', and 'level' keys
    # directly into event.params, mutating the event in place.
    params = event.params
    params['type'] = event.__class__.__name__
    params['text'] = self.getMessage(event)
    # determine the level of severity
    level = 'unknown'
    if isinstance(event,Info): level = 'info'
    if isinstance(event,Warning): level = 'warning'
    if isinstance(event,Error): level = 'error'
    params['level'] = level
    # organize fixed elements into a known order, ahead of the
    # alphabetically-sorted remaining keys
    order = params.keys()
    order.sort()
    for key in ['msgcount', 'text', 'column', 'line', 'type', 'level']:
      if key in order:
        order.remove(key)
        order.insert(0,key)
    # output the elements
    result = "<message>\n"
    for key in order:
      value = xmlEncode(str(params[key]))
      result = result + (" <%s>%s</%s>\n" % (key, value, key))
    result = result + "</message>\n"
    return result
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.5 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.4 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: base.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
"""Base class for output classes"""
from UserList import UserList
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class BaseFormatter(UserList):
  """Base class for output formatters.

  A list-like wrapper over logged events; indexing formats the event at
  that position via the subclass-provided format() method.
  """
  def __getitem__(self, i):
    # Format lazily, on access.
    return self.format(self.data[i])
  def getLine(self, event):
    # '' when the event carries no position information.
    if not event.params.has_key('line'): return ''
    return lang.line % event.params
  def getColumn(self, event):
    if not event.params.has_key('column'): return ''
    return lang.column % event.params
  def getLineAndColumn(self, event):
    line = self.getLine(event)
    if not line: return ''
    column = self.getColumn(event)
    return '%s, %s:' % (line, column)
  def getCount(self, event):
    # Only report a count when the event occurred more than once.
    if not event.params.has_key('msgcount'): return ''
    count = int(event.params['msgcount'])
    if count <= 1: return ''
    return lang.occurances % event.params
  def getMessageClass(self, event):
    # Breadth-first search of the event's class hierarchy for the most
    # specific class with a translated message; returns a descriptive
    # string (not a class) when nothing matches.
    classes = [event.__class__]
    while len(classes):
      if lang.messages.has_key(classes[0]):
        return classes[0]
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def getMessage(self, event):
    # Same search as getMessageClass, but returns the message text with
    # the event's parameters substituted in.
    classes = [event.__class__]
    while len(classes):
      if lang.messages.has_key(classes[0]):
        return lang.messages[classes[0]] % event.params
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def format(self, event):
    """returns the formatted representation of a single event"""
    return `event`
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.10 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.7 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
# Public submodules of the output-formatter package.
__all__ = ['base', 'text_plain', 'text_html']
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.4 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.3 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: entry.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# pie/echo entry element.
#
class entry(validatorBase):
  """Validates an Atom (pie/echo) entry element."""
  def prevalidate(self):
    # Link validators are collected here and re-examined in validate().
    self.links=[]
  def validate(self):
    if not 'title' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    # author may be supplied by the enclosing feed instead of the entry
    if not 'author' in self.children and not 'author' in self.parent.children:
      self.log(MissingElement({"parent":self.name, "element":"author"}))
    if not 'modified' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"modified"}))
    if not 'issued' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"issued"}))
    if not 'id' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"id"}))
    # must have an alternate
    if [link for link in self.links if link.rel == u'alternate']:
      self.log(ValidAtomLinkRel({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    else:
      self.log(MissingAlternateLink({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    # link/type pair must be unique
    types={}
    for link in self.links:
      if not link.type in types: types[link.type]=[]
      if link.rel in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link"}))
      else:
        types[link.type] += [link.rel]
  def do_id(self):
    # id must be a URI, appear only once, and be unique within the feed
    return rfc2396(), noduplicates(), unique('id',self.parent)
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_title(self):
    from content import content
    return content(), noduplicates()
  def do_summary(self):
    from content import content
    return content(), noduplicates()
  def do_author(self):
    from author import author
    return author(), noduplicates()
  def do_contributor(self):
    from author import author
    return author()
  def do_content(self):
    from content import content
    return content()
  def do_created(self):
    return iso8601_z(), noduplicates()
  def do_issued(self):
    return iso8601(), noduplicates()
  def do_modified(self):
    return iso8601_z(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.13 2003/12/12 14:35:08 f8dy
fixed link rel=alternate logic to pass new "link not missing" tests
Revision 1.12 2003/12/12 06:10:58 rubys
link rel/type checking
Revision 1.11 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.10 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2003/08/05 14:28:26 rubys
Allow author to be omitted from entries when present on the feed
Revision 1.7 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.6 2003/07/20 17:48:50 rubys
Validate that titles are present
Revision 1.5 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.4 2003/07/20 16:35:57 rubys
Ensure that issued and modified are present exactly once
Revision 1.3 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.2 2003/07/07 02:44:13 rubys
Further progress towards pie
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: feed.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# Atom root element
#
class feed(validatorBase):
  """Validates an Atom feed root element."""
  def prevalidate(self):
    self.setFeedType(TYPE_ATOM)
    # Link validators are collected here and re-examined in validate().
    self.links = []
  def validate(self):
    try:
      version = self.attrs.getValue((None,'version'))
      if not version:
        self.log(MissingAttribute({"element":self.name, "attr":"version"}))
      elif version in ['0.1', '0.2', '0.2.1']:
        self.log(ObsoleteVersion({"element":self.name, "version":version}))
    except KeyError:
      # getValue raises KeyError when the attribute is absent; catching
      # only that (instead of the previous bare except) avoids masking
      # unrelated bugs.
      self.log(MissingAttribute({"element":self.name, "attr":"version"}))
    if not 'title' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    # must have an alternate
    if [link for link in self.links if link.rel == u'alternate']:
      self.log(ValidAtomLinkRel({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    else:
      self.log(MissingAlternateLink({"parent":self.name, "element":"link", "attr":"rel", "attrvalue":"alternate"}))
    # link/type pair must be unique
    types={}
    for link in self.links:
      if not link.type in types: types[link.type]=[]
      if link.rel in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link"}))
      else:
        types[link.type] += [link.rel]
  def do_entry(self):
    from entry import entry
    return entry()
  def do_title(self):
    from content import content
    return content(), noduplicates()
  def do_tagline(self):
    from content import content
    return content(), noduplicates()
  def do_info(self):
    from content import content
    return content(), noduplicates()
  def do_id(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_modified(self):
    return iso8601_z(), noduplicates()
  def do_author(self):
    from author import author
    return author(), noduplicates()
  def do_contributor(self):
    from author import author
    return author(), noduplicates()
  def do_copyright(self):
    from content import content
    return content(), noduplicates()
  def do_generator(self):
    from generator import generator
    return generator(), noduplicates()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.15 2003/12/12 14:35:08 f8dy
fixed link rel=alternate logic to pass new "link not missing" tests
Revision 1.14 2003/12/12 11:30:39 rubys
Validate feed links
Revision 1.13 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.12 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.11 2003/12/11 20:13:58 f8dy
feed title, copyright, and tagline may be blank
Revision 1.10 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.9 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.8 2003/12/11 04:50:53 f8dy
added test cases for invalid letters in urn NSS, fixed RE to match
Revision 1.7 2003/08/05 15:03:19 rubys
Handle complex (nested) content. Remove copy/paste error in handing
of copyright.
Revision 1.6 2003/08/05 14:03:23 rubys
Tagline is optional
Revision 1.5 2003/08/05 07:59:04 rubys
Add feed(id,tagline,contributor)
Drop feed(subtitle), entry(subtitle)
Check for obsolete version, namespace
Check for incorrect namespace on feed element
Revision 1.4 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.3 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.2 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: validators.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from logging import *
import re
# Namespace URI for RDF; used when looking up rdf:resource / rdf:about
# attributes below.
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# Valid mime type: "type/subtype" where neither part may contain whitespace
# or tspecials (loosely following the RFC 2045 token syntax).  Anchored only
# at the end.
#
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+$')
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
  """Silently consume an element and everything nested inside it.

  Pushing a fresh eater for every child means the entire subtree is
  swallowed without producing further validation events.
  """
  def startElementNS(self, name, qname, attrs):
    child = eater()
    child.parent = self
    child.dispatcher = self.dispatcher
    self.push(child)
#
# This class simply eats html events, identifying unsafe ones along the way.
#
class htmlEater(validatorBase):
  """Consume embedded (X)HTML content, flagging tags considered unsafe."""
  # tag name -> event class logged when that tag is encountered
  _unsafe = {'script': ContainsScript,
             'meta': ContainsMeta,
             'embed': ContainsEmbed,
             'object': ContainsObject}
  def __init__(self, parent, element):
    self.parent = parent
    self.element = element
    validatorBase.__init__(self)
  def startElementNS(self, name, qname, attrs):
    # Recurse first so nested markup is swallowed either way.
    child = htmlEater(self.parent, self.element)
    child.parent = self
    child.dispatcher = self.dispatcher
    self.push(child)
    eventClass = self._unsafe.get(name)
    if eventClass:
      self.log(eventClass({"parent":self.parent.name, "element":self.element, "tag":name}))
  def endElementNS(self, name, qname):
    pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
# (prefix, localname) pairs that may legitimately appear inside an otherwise
# text-only element (RSS 1.0 feeds embed these RDF/FOAF wrappers); the text
# validator below skips them instead of flagging an undefined element.
_rdfStuffToIgnore = (('rdf', 'Description'),
                     ('foaf', 'Person'),
                     ('foaf', 'name'),
                     ('rdfs', 'seeAlso'))
class text(validatorBase):
  """Element expected to contain only character data.

  Child elements (apart from the RDF/FOAF wrappers in _rdfStuffToIgnore)
  are reported as undefined -- or as missing a namespace declaration when
  the name still carries a colon -- and then swallowed.
  """
  def startElementNS(self, name, qname, attrs):
    from base import namespaces
    prefix = namespaces.get(qname, '')
    if (prefix, name) not in _rdfStuffToIgnore:
      if name.find(':') != -1:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":name}))
      else:
        self.log(UndefinedElement({"parent":self.name, "element":name}))
    # Eat the child subtree either way to prevent cascading errors.
    child = eater()
    child.parent = self
    child.dispatcher = self.dispatcher
    self.push(child)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
  """Allow no child elements and no duplicate siblings of the same name."""
  def startElementNS(self, name, qname, attrs):
    pass
  def prevalidate(self):
    siblings = self.parent.children
    if self.name in siblings:
      self.log(DuplicateElement({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addresses - lax
#
class email_lax(text):
  """Loosely validate an e-mail address (trailing text is tolerated)."""
  email_re = re.compile('''([a-zA-Z0-9\_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)''')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    # search(), not match(): an address anywhere in the value is enough.
    if self.email_re.search(self.value):
      self.log(ValidContact(params))
    else:
      self.log(InvalidContact(params))
#
# valid e-mail addresses
#
class email(text):
  """Strictly validate an e-mail address (whole value, anchored match)."""
  email_re = re.compile('''([a-zA-Z0-9_\-\+\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$''')
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.email_re.match(self.value):
      self.log(ValidContact(params))
    else:
      self.log(InvalidContact(params))
#
# iso639 language code
#
class iso639(text):
  """Validate an ISO 639 language code, optionally with a country suffix."""
  def validate(self):
    import iso639codes
    # Only the primary subtag (before any '-') is checked against the table.
    lang = self.value.split('-', 1)[0]
    if iso639codes.isoLang.has_key(lang):
      self.log(ValidLanguage({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(InvalidLanguage({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso8601 dateTime
#
class iso8601(text):
  """Validate a W3C-DTF (profile of ISO 8601) date-time.

  Returns 1 on success and None on failure; the iso8601_z / iso8601_l
  subclasses rely on that return value.
  """
  # Any truncation of YYYY-MM-DDThh:mm:ss.s(Z|+hh:mm) is acceptable in shape.
  iso8601_re = re.compile("\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
    "(Z|([+-]\d\d:\d\d))?)?)?)?$")
  def validate(self):
    if not self.iso8601_re.match(self.value):
      self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    # The regex only checks shape; now range-check the individual components.
    work=self.value.split('T')
    date=work[0].split('-')
    year=int(date[0])
    if len(date)>1:
      month=int(date[1])
      try:
        import calendar
        # monthrange raises for an out-of-range month (e.g. 13).
        numdays=calendar.monthrange(year,month)[1]
      except:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      if len(date)>2 and int(date[2])>numdays:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
    if len(work) > 1:
      # Strip any timezone suffix before splitting hh:mm:ss.
      time=work[1].split('Z')[0].split('+')[0].split('-')[0]
      time=time.split(':')
      if int(time[0])>23:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      # NOTE(review): a minute value of exactly 60 slips through here;
      # presumably this should be >59 -- confirm before tightening.
      if len(time)>1 and int(time[1])>60:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      # >60.0 admits 60 exactly (leap second) but rejects 60.5 and above.
      if len(time)>2 and float(time[2])>60.0:
        self.log(InvalidW3DTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
    self.log(ValidW3DTFDate({"parent":self.parent.name, "element":self.name}))
    return 1
class iso8601_z(iso8601):
  """W3C-DTF date that must carry an explicit timezone, preferably UTC."""
  tz_re = re.compile("Z|([+-]\d\d:\d\d)$")
  def validate(self):
    # Base-class validate returns 1 only for a well-formed date.
    if not iso8601.validate(self):
      return
    if not self.tz_re.search(self.value):
      self.log(W3DTFDateNoTimezone({"parent":self.parent.name, "element":self.name, "value":self.value}))
    elif not 'Z' in self.value:
      self.log(W3DTFDateNonUTC({"parent":self.parent.name, "element":self.name, "value":self.value}))
class iso8601_l(iso8601):
  """W3C-DTF date expected to be in local time (no 'Z' marker)."""
  def validate(self):
    if iso8601.validate(self) and 'Z' in self.value:
      self.log(W3DTFDateNonLocal({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
  """Validate a fully qualified (non-relative) URI per RFC 2396.

  tag: and urn: URIs get scheme-specific checks; everything else must match
  the generic absolute-URI pattern, and http/ftp URIs must additionally have
  an authority component (exactly two slashes after the scheme).
  """
  rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
  urn_re = re.compile(r"^urn:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
  tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,#]+$")
  def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
    def report(eventClass):
      # Every log entry carries the same context plus any caller extras.
      params = {"parent":self.parent.name, "element":self.name, "value":self.value}
      params.update(extraParams)
      self.log(eventClass(params))
    success = 0
    if self.value.startswith('tag:'):
      if self.tag_re.match(self.value):
        success = 1
        report(ValidTAG)
      else:
        report(InvalidTAG)
    elif self.value.startswith('urn:'):
      if self.urn_re.match(self.value):
        success = 1
        report(ValidURN)
      else:
        report(InvalidURN)
    elif (not self.value) or (not self.rfc2396_re.match(self.value)):
      report(errorClass)
    elif self.value.startswith('http:') or self.value.startswith('ftp:'):
      # These schemes require an authority: scheme://host...
      if re.match('^\w+://[^/].*',self.value):
        success = 1
      else:
        report(errorClass)
    else:
      success = 1
    if success:
      report(successClass)
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
  """Validate an RFC 822 date-time (with the 4-digit-year Y2K extension)."""
  rfc822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), *)?" +
    "\d\d? +((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
    "(Nov)|(Dec)) +\d\d(\d\d)? +\d\d:\d\d(:\d\d)? +(([+-]?\d\d\d\d)|" +
    "(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|\w)$")
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.rfc822_re.match(self.value):
      self.log(ValidRFC2822Date(params))
    else:
      self.log(InvalidRFC2822Date(params))
#
# Decode html entityrefs
#
from htmlentitydefs import entitydefs
def decodehtml(data):
  """Expand HTML entity and character references found in data.

  Handles named entities (&amp;), decimal character references with or
  without the leading '#' (&#65; and the historically accepted &65;), and
  hexadecimal references (&#x41;).  Unrecognized references are passed
  through unchanged -- including their '#', which the previous version
  accidentally dropped (it split on '&#?(\w+);' and so lost the '#',
  turning e.g. '&#bogus;' into '&bogus;').
  """
  # Capture the optional '#' so passthrough can reconstruct the reference.
  chunks=re.split(r'&(#?\w+);',data)
  for i in range(1,len(chunks),2):
    ref=chunks[i]
    if ref.startswith('#'):
      num=ref[1:]
    else:
      num=ref
    if num.isdigit():
      chunks[i]=chr(int(num))
    elif ref.startswith('#') and num[:1] in 'xX':
      # Hexadecimal character reference, e.g. &#x41;
      try:
        chunks[i]=chr(int(num[1:],16))
      except ValueError:
        chunks[i]='&' + ref + ';'
    elif ref in entitydefs:
      chunks[i]=entitydefs[ref]
    else:
      chunks[i]='&' + ref + ';'
  return "".join(map(str,chunks))
#
# Scan HTML for relative URLs
#
#class absUrlMixin:
# anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))\s*>', re.IGNORECASE)
# img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
# absref_re = re.compile("\w+:")
# def validateAbsUrl(self,value):
# refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
# for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
# if not self.absref_re.match(decodehtml(ref)):
# self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
  """Mixin that scans literal HTML text for tags considered unsafe."""
  scriptTag_re = re.compile("<script[>\s]", re.IGNORECASE)
  metaTag_re = re.compile("<meta[>\s]", re.IGNORECASE)
  embedTag_re = re.compile("<embed[>\s]", re.IGNORECASE)
  objectTag_re = re.compile("<object[>\s]", re.IGNORECASE)
  def validateSafe(self,value):
    # Run every check; a value may trigger more than one event.
    checks = ((self.scriptTag_re, ContainsScript, "script"),
              (self.metaTag_re, ContainsMeta, "meta"),
              (self.embedTag_re, ContainsEmbed, "embed"),
              (self.objectTag_re, ContainsObject, "object"))
    for pattern, eventClass, tag in checks:
      if pattern.search(value):
        self.log(eventClass({"parent":self.parent.name, "element":self.name, "tag":tag}))
class safeHtml(text, safeHtmlMixin):#,absUrlMixin):
  # HTML is permitted in this element; only scan it for unsafe tags.
  def validate(self):
    self.validateSafe(self.value)
#    self.validateAbsUrl(self.value)
#
# Elements for which html is discouraged, also checks for relative URLs
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
  # Crude heuristic: any end tag in the text suggests embedded HTML.
  htmlEndTag_re = re.compile("</\w+>")
  def validate(self):
    # HTML is discouraged (not forbidden) here, so flag rather than reject.
    if self.htmlEndTag_re.search(self.value):
      self.log(ContainsHTML({"parent":self.parent.name, "element":self.name}))
    self.validateSafe(self.value)
#    self.validateAbsUrl(self.value)
class positiveInteger(text):
  """Validate that the element's content is an integer greater than zero."""
  def validate(self):
    try:
      number = int(self.value)
    except ValueError:
      # Non-numeric content is treated the same as a non-positive value.
      number = 0
    if number > 0:
      self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# mixin to validate URL in attribute
#
class httpURLMixin:
  """Mixin that checks an attribute value is an http URL."""
  http_re = re.compile("http://", re.IGNORECASE)
  def validateHttpURL(self, ns, attr):
    value = self.attrs[(ns, attr)]
    if self.http_re.search(value):
      self.log(ValidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
    else:
      self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
class rdfResourceURI(rfc2396):
  """Validate an element's rdf:resource attribute as an absolute URI."""
  def validate(self):
    if (rdfNS, 'resource') in self.attrs.getNames():
      self.value=self.attrs.getValue((rdfNS, 'resource'))
      rfc2396.validate(self)
    else:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
  """Validate an element's rdf:about attribute as an absolute URI."""
  def startElementNS(self, name, qname, attrs):
    # Child elements are of no interest here.
    pass
  def validate(self):
    if (rdfNS, 'about') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
      return
    # Delegate the URI check to a throwaway rfc2396 validator wired to us.
    checker=rfc2396()
    checker.parent=self
    checker.dispatcher=self.dispatcher
    checker.name=self.name
    checker.value=self.attrs.getValue((rdfNS, 'about'))
    checker.validate()
class nonblank(text):
  """Text element whose content must not be empty."""
  def validate(self, errorClass=NotBlank, extraParams={}):
    if self.value:
      return
    params={"parent":self.parent.name,"element":self.name}
    params.update(extraParams)
    self.log(errorClass(params))
class unique(nonblank):
  """Nonblank value that must also be unique within the given scope."""
  def __init__(self, name, scope):
    self.name=name
    self.scope=scope
    nonblank.__init__(self)
    # Per-scope registry of values already seen, e.g. scope.guids for 'guid'.
    self.scope.__dict__.setdefault(name+'s', [])
  def validate(self):
    nonblank.validate(self)
    seen=self.scope.__dict__[self.name+'s']
    if self.value in seen:
      self.log(DuplicateValue({"parent":self.parent.name, "element":self.name,"value":self.value}))
    else:
      seen.append(self.value)
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.71 2003/12/13 21:39:48 f8dy
added test case for tags with dashes or digits
Revision 1.70 2003/12/12 20:37:05 f8dy
oops, URNs can contain letters after all
Revision 1.69 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.68 2003/12/12 11:25:56 rubys
Validate mime type in link tags
Revision 1.67 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.66 2003/12/11 23:16:32 f8dy
passed new generator test cases
Revision 1.65 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.64 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.63 2003/12/11 06:00:51 f8dy
added tag: testcases, passed
Revision 1.62 2003/12/11 04:50:53 f8dy
added test cases for invalid letters in urn NSS, fixed RE to match
Revision 1.61 2003/10/16 15:54:41 rubys
Detect duplicate channels
Revision 1.60 2003/10/16 15:42:36 rubys
Fix regression, allowing the relative URL tests inside xhtml to pass
again.
Revision 1.59 2003/09/18 18:57:31 f8dy
fixed typo in htmlEater
Revision 1.58 2003/09/13 00:16:43 f8dy
change check for relative references to be compatible with pyxml
Revision 1.57 2003/08/24 00:05:34 f8dy
removed iframe tests, after further discussion this is not enough of a security risk to keep feeds from validating
Revision 1.56 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.55 2003/08/11 21:39:39 rubys
Support for rdf:About elements caused a regression whereby spurious
error messages were generated for missing titles for RSS 1.0 feeds.
Revision 1.54 2003/08/10 13:49:14 rubys
Add support for chanel and item level rdf:about. Ensure that http and
ftp URLs have exactly two slashes after the scheme.
Revision 1.53 2003/08/04 01:59:33 rubys
Full http and ftp URIs require two slashes
Revision 1.52 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.51 2003/08/04 00:03:14 rubys
Implement more strict email check for pie
Revision 1.50 2003/07/30 01:33:31 f8dy
tightened up test cases, added explicit parent checks, changed negative tests to positive
Revision 1.49 2003/07/29 20:57:39 f8dy
tightened up test cases, check for parent element, explicitly test for success
Revision 1.48 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.47 2003/07/29 16:44:56 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.46 2003/07/29 16:14:21 rubys
Validate urns
Revision 1.45 2003/07/29 15:46:31 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.44 2003/07/20 17:44:27 rubys
Detect duplicate ids and guids
Revision 1.43 2003/07/13 00:32:13 rubys
Don't bother checking for local/UTC unless the date is valid...
Revision 1.42 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.41 2003/07/07 20:33:50 rubys
Unicode in HTML problem
Revision 1.40 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.39 2003/07/07 02:44:13 rubys
Further progress towards pie
Revision 1.38 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.37 2003/02/25 22:50:20 rubys
allow urls to be html entity encoded
Revision 1.36 2002/11/10 14:32:53 rubys
it is foaf:Person (not foaf:person)
Revision 1.35 2002/11/03 23:33:44 rubys
Noduplicates validator was causing the handler stack to get
momentarily out of synch
Revision 1.34 2002/11/03 22:46:41 rubys
Patch from Christian Schmidt:
"According to RFC-822 section 3.4.2 multiple white-space characters are
treated as one."
Revision 1.33 2002/10/30 15:44:48 rubys
Improve error messages for relative references: error message should
be gramatically correct. Remove "hidden" fields prevented duplicate
errors from being flagged as such.
Revision 1.32 2002/10/30 09:18:08 rubys
Double encoded &'s in query strings cause mutlple '#' to exist in a URL
Revision 1.31 2002/10/27 22:09:41 rubys
src need not be the last attribute in an <img>
Revision 1.30 2002/10/27 18:54:30 rubys
Issue warnings for relative references in descriptions
Revision 1.29 2002/10/25 15:08:15 rubys
Minor cleanup. It is zero or one occurances of a double slash. Also make
it clear that this routine has been repurposed to be a non-relative URI.
Reinstated the original regex which includes relative URIs as a comment.
Revision 1.28 2002/10/24 18:24:36 rubys
Prevent mere mention of <scriptingNews> from causing an error to be flagged.
http://radio.weblogs.com/0001018/2002/10/24.html#a1760
Revision 1.27 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.26 2002/10/24 14:05:06 f8dy
refactored simpleText() to include list of RDF stuff to ignore
Revision 1.25 2002/10/23 14:47:18 f8dy
added test cases for email address in parentheses (and passed)
Revision 1.24 2002/10/22 20:11:19 f8dy
added test case for RFC 822 date with no seconds (and passed)
Revision 1.23 2002/10/22 19:20:54 f8dy
passed testcase for foaf:person within dc:creator (or any other text
element)
Revision 1.22 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.21 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.20 2002/10/22 13:06:41 f8dy
fixed bug with links containing commas
Revision 1.19 2002/10/20 13:36:59 rubys
Permit rdf:Description anywhere text is allowed
Revision 1.18 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.17 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.16 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.15 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
#$Id
####
# Copyright 2000,2001 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
"""Timeout Socket
This module enables a timeout mechanism on all TCP connections. It
does this by inserting a shim into the socket module. After this module
has been imported, all socket creation goes through this shim. As a
result, every TCP connection will support a timeout.
The beauty of this method is that it immediately and transparently
enables the entire python library to support timeouts on TCP sockets.
As an example, if you wanted SMTP connections to have a 20 second
timeout:
import timeoutsocket
import smtplib
timeoutsocket.setDefaultSocketTimeout(20)
The timeout applies to the socket functions that normally block on
execution: read, write, connect, and accept. If any of these
operations exceeds the specified timeout, the exception Timeout
will be raised.
The default timeout value is set to None. As a result, importing
this module does not change the default behavior of a socket. The
timeout mechanism only activates when the timeout has been set to
a numeric value. (This behavior mimics the behavior of the
select.select() function.)
This module implements two classes: TimeoutSocket and TimeoutFile.
The TimeoutSocket class defines a socket-like object that attempts to
avoid the condition where a socket may block indefinitely. The
TimeoutSocket class raises a Timeout exception whenever the
current operation delays too long.
The TimeoutFile class defines a file-like object that uses the TimeoutSocket
class. When the makefile() method of TimeoutSocket is called, it returns
an instance of a TimeoutFile.
Each of these objects adds two methods to manage the timeout value:
get_timeout() --> returns the timeout of the socket or file
set_timeout() --> sets the timeout of the socket or file
As an example, one might use the timeout feature to create httplib
connections that will timeout after 30 seconds:
import timeoutsocket
import httplib
H = httplib.HTTP("www.python.org")
H.sock.set_timeout(30)
Note: When used in this manner, the connect() routine may still
block because it happens before the timeout is set. To avoid
this, use the 'timeoutsocket.setDefaultSocketTimeout()' function.
Good Luck!
"""
__version__ = "$Revision: 4 $"
__author__ = "Timothy O'Malley <timo@alum.mit.edu>"
#
# Imports
#
import select, string
import socket
# Remember the real socket constructor so it is wrapped exactly once; a
# prior import of this module leaves the original in _no_timeoutsocket
# (see the shim installation at the bottom of the file).
if not hasattr(socket, "_no_timeoutsocket"):
    _socket = socket.socket
else:
    _socket = socket._no_timeoutsocket
#
# Set up constants to test for Connected and Blocking operations.
# We delete 'os' and 'errno' to keep our namespace clean(er).
# Thanks to Alex Martelli and G. Li for the Windows error codes.
#
import os
if os.name == "nt":
    # Winsock error numbers (hard-coded; presumably WSAEINVAL/WSAEISCONN
    # era codes per the credit above -- confirm against winerror.h).
    _IsConnected = ( 10022, 10056 )
    _ConnectBusy = ( 10035, )
    _AcceptBusy = ( 10035, )
else:
    import errno
    _IsConnected = ( errno.EISCONN, )
    _ConnectBusy = ( errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK )
    _AcceptBusy = ( errno.EAGAIN, errno.EWOULDBLOCK )
    del errno
del os
#
# Default timeout value for ALL TimeoutSockets
#
_DefaultTimeout = None

def setDefaultSocketTimeout(timeout):
    """Set the timeout (seconds, or None for no timeout) given to every
    TimeoutSocket created after this call."""
    global _DefaultTimeout
    _DefaultTimeout = timeout

def getDefaultSocketTimeout():
    """Return the timeout currently applied to new TimeoutSockets."""
    return _DefaultTimeout
#
# Exceptions for socket errors and timeouts
#
# Errors raised by the wrapped socket propagate unchanged.
Error = socket.error
class Timeout(Exception):
    """Raised when a socket operation exceeds the configured timeout."""
    pass
#
# Factory function
#
from socket import AF_INET, SOCK_STREAM
def timeoutsocket(family=AF_INET, type=SOCK_STREAM, proto=None):
    """Drop-in replacement for socket.socket().

    TCP/IP sockets are wrapped in a TimeoutSocket carrying the module-wide
    default timeout; any other family/type is created unwrapped.
    """
    if family == AF_INET and type == SOCK_STREAM:
        return TimeoutSocket( _socket(family, type), _DefaultTimeout )
    if proto:
        return _socket(family, type, proto)
    return _socket(family, type)
# end timeoutsocket
#
# The TimeoutSocket class definition
#
class TimeoutSocket:
    """TimeoutSocket object
    Implements a socket-like object that raises Timeout whenever
    an operation takes too long.
    The definition of 'too long' can be changed using the
    set_timeout() method.
    """
    # makefile() increments _copies; close() only closes the underlying
    # socket once every file-like copy has been closed too.
    _copies = 0
    _blocking = 1
    def __init__(self, sock, timeout):
        # sock: the real socket being wrapped; timeout: seconds or None.
        self._sock = sock
        self._timeout = timeout
    # end __init__
    def __getattr__(self, key):
        # Delegate anything not overridden here to the wrapped socket.
        return getattr(self._sock, key)
    # end __getattr__
    def get_timeout(self):
        return self._timeout
    # end get_timeout
    def set_timeout(self, timeout=None):
        self._timeout = timeout
    # end set_timeout
    def setblocking(self, blocking):
        # Track blocking mode locally; connect()/accept() must restore it
        # after their temporary non-blocking dance.
        self._blocking = blocking
        return self._sock.setblocking(blocking)
    # end setblocking
    def connect_ex(self, addr):
        # Like socket.connect_ex: return an errno instead of raising.
        errcode = 0
        try:
            self.connect(addr)
        except Error, why:
            errcode = why[0]
        return errcode
    # end connect_ex
    def connect(self, addr, port=None, dumbhack=None):
        # In case we were called as connect(host, port)
        if port != None: addr = (addr, port)
        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking
        # First, make a non-blocking call to connect
        try:
            sock.setblocking(0)
            sock.connect(addr)
            sock.setblocking(blocking)
            return
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)
            # If we are not blocking, re-raise
            if not blocking:
                raise
            # If we are already connected, then return success.
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if dumbhack and errcode in _IsConnected:
                return
            elif errcode not in _ConnectBusy:
                raise
        # Now, wait for the connect to happen
        # ONLY if dumbhack indicates this is pass number one.
        # If select raises an error, we pass it on.
        # Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([], [sock], [], timeout)
            if w:
                # Second pass (dumbhack=1) distinguishes "already
                # connected" from a genuine failure.
                return self.connect(addr, dumbhack=1)
        # If we get here, then we should raise Timeout
        raise Timeout("Attempted connect to %s timed out." % str(addr) )
    # end connect
    def accept(self, dumbhack=None):
        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking
        # First, make a non-blocking call to accept
        # If we get a valid result, then convert the
        # accept'ed socket into a TimeoutSocket.
        # Be careful about the blocking mode of ourselves.
        try:
            sock.setblocking(0)
            newsock, addr = sock.accept()
            sock.setblocking(blocking)
            # Wrap the accepted socket so it inherits our timeout.
            timeoutnewsock = self.__class__(newsock, timeout)
            timeoutnewsock.setblocking(blocking)
            return (timeoutnewsock, addr)
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)
            # If we are not supposed to block, then re-raise
            if not blocking:
                raise
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if errcode not in _AcceptBusy:
                raise
        # Now, wait for the accept to happen
        # ONLY if dumbhack indicates this is pass number one.
        # If select raises an error, we pass it on.
        # Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([sock], [], [], timeout)
            if r:
                return self.accept(dumbhack=1)
        # If we get here, then we should raise Timeout
        raise Timeout("Attempted accept timed out.")
    # end accept
    def send(self, data, flags=0):
        # Wait (up to the timeout) for the socket to become writable.
        sock = self._sock
        if self._blocking:
            r,w,e = select.select([],[sock],[], self._timeout)
            if not w:
                raise Timeout("Send timed out")
        return sock.send(data, flags)
    # end send
    def recv(self, bufsize, flags=0):
        # Wait (up to the timeout) for the socket to become readable.
        sock = self._sock
        if self._blocking:
            r,w,e = select.select([sock], [], [], self._timeout)
            if not r:
                raise Timeout("Recv timed out")
        return sock.recv(bufsize, flags)
    # end recv
    def makefile(self, flags="r", bufsize=-1):
        # Each file handle shares the socket; see close() for refcounting.
        self._copies = self._copies +1
        return TimeoutFile(self, flags, bufsize)
    # end makefile
    def close(self):
        # Only close the real socket when the last copy is closed.
        if self._copies <= 0:
            self._sock.close()
        else:
            self._copies = self._copies -1
    # end close
# end TimeoutSocket
class TimeoutFile:
    """TimeoutFile object
    Implements a file-like object on top of TimeoutSocket.
    """
    def __init__(self, sock, mode="r", bufsize=4096):
        # NOTE(review): mode is accepted for file-API compatibility but is
        # not stored or used anywhere in this class -- confirm intended.
        self._sock = sock
        self._bufsize = 4096
        if bufsize > 0: self._bufsize = bufsize
        # _inqueue lives on the shared socket so all copies see one buffer.
        if not hasattr(sock, "_inqueue"): self._sock._inqueue = ""
    # end __init__
    def __getattr__(self, key):
        # Delegate everything else (send, recv, close bookkeeping, ...) to
        # the underlying TimeoutSocket.
        return getattr(self._sock, key)
    # end __getattr__
    def close(self):
        self._sock.close()
        self._sock = None
    # end close
    def write(self, data):
        self.send(data)
    # end write
    def read(self, size=-1):
        # Fill _inqueue until we have `size` bytes (or EOF; size<0 = all).
        _sock = self._sock
        _bufsize = self._bufsize
        while 1:
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf
        # Hand back the requested prefix, keeping any excess buffered.
        data = _sock._inqueue
        _sock._inqueue = ""
        if size > 0 and datalen > size:
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end read
    def readline(self, size=-1):
        # Like read(), but also stop as soon as a newline is buffered.
        _sock = self._sock
        _bufsize = self._bufsize
        while 1:
            idx = string.find(_sock._inqueue, "\n")
            if idx >= 0:
                break
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf
        data = _sock._inqueue
        _sock._inqueue = ""
        if idx >= 0:
            # Return through the newline; keep the remainder buffered.
            idx = idx + 1
            _sock._inqueue = data[idx:]
            data = data[:idx]
        elif size > 0 and datalen > size:
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end readline
    def readlines(self, sizehint=-1):
        # NOTE(review): sizehint is ignored; the whole stream is read.
        result = []
        data = self.read()
        while data:
            idx = string.find(data, "\n")
            if idx >= 0:
                idx = idx + 1
                result.append( data[:idx] )
                data = data[idx:]
            else:
                result.append( data )
                data = ""
        return result
    # end readlines
    def flush(self):  pass
# end TimeoutFile
#
# Silently replace the socket() builtin function with
# our timeoutsocket() definition.
#
if not hasattr(socket, "_no_timeoutsocket"):
    socket._no_timeoutsocket = socket.socket
    socket.socket = timeoutsocket
# Rebind this module's own `socket` name to the factory as well, so code
# importing it from here gets the shim directly.
del socket
socket = timeoutsocket
# Finis
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:16 rubys
Initial revision
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: base.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
# Map from namespace URI to its conventional (preferred) prefix.  Used by
# SAXDispatcher.startPrefixMapping to flag nonstandard or reserved prefixes,
# and imported by element validators to resolve a qname to its prefix.
namespaces = {
  "http://webns.net/mvcb/": "admin",
  "http://purl.org/rss/1.0/modules/aggregation/": "ag",
  "http://purl.org/rss/1.0/modules/annotate/": "annotate",
  "http://media.tangent.org/rss/1.0/": "audio",
  "http://backend.userland.com/blogChannelModule": "blogChannel",
  "http://web.resource.org/cc/": "cc",
  "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
  "http://purl.org/rss/1.0/modules/company": "company",
  "http://purl.org/rss/1.0/modules/content/": "content",
  "http://my.theinfo.org/changed/1.0/rss/": "cp",
  "http://purl.org/dc/elements/1.1/": "dc",
  "http://purl.org/dc/terms/": "dcterms",
  "http://purl.org/rss/1.0/modules/email/": "email",
  "http://purl.org/rss/1.0/modules/event/": "ev",
  "http://purl.org/rss/1.0/modules/image/": "image",
  "http://xmlns.com/foaf/0.1/": "foaf",
  "http://purl.org/rss/1.0/modules/link/": "l",
  "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
  "http://www.w3.org/2000/01/rdf-schema#": "rdfs",
  "http://purl.org/rss/1.0/modules/reference/": "ref",
  "http://purl.org/rss/1.0/modules/richequiv/": "reqv",
  "http://purl.org/rss/1.0/modules/rss091#": "rss091",
  "http://purl.org/rss/1.0/modules/search/": "search",
  "http://purl.org/rss/1.0/modules/slash/": "slash",
  "http://purl.org/rss/1.0/modules/servicestatus/": "ss",
  "http://hacks.benhammersley.com/rss/streaming/": "str",
  "http://purl.org/rss/1.0/modules/subscription/": "sub",
  "http://purl.org/rss/1.0/modules/syndication/": "sy",
  "http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
  "http://purl.org/rss/1.0/modules/threading/": "thr",
  "http://purl.org/rss/1.0/modules/wiki/": "wiki",
  "http://schemas.xmlsoap.org/soap/envelope/": "soap",
  "http://purl.org/atom/ns#": "atom",
  "http://www.w3.org/1999/xhtml": "xhtml",
}
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
  """Top-level SAX handler.

  Maintains a pushdown stack of the *real* content handlers and delegates
  every SAX event to each handler in the topmost stack entry.  Also acts as
  the error handler and entity resolver, and collects validation events
  (loggedEvents) with line/column information."""
  # Class-level default; set per instance via setFirstOccurrenceOnly().
  firstOccurrenceOnly = 0
  def __init__(self):
    from root import root
    ContentHandler.__init__(self)
    # Last position reported by the locator; used as a fallback in log()
    # when the locator itself is missing or broken.
    self.lastKnownLine = 0
    self.lastKnownColumn = 0
    self.loggedEvents = []
    self.feedType = 0
    # Stack of *lists* of handlers; events go to every handler in the top
    # list.  The stack starts with the document-root handler.
    self.handler_stack=[[root(self)]]
  def setDocumentLocator(self, locator):
    self.locator = locator
    ContentHandler.setDocumentLocator(self, self.locator)
  def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
    # When set, repeats of an identical event are collapsed into the first
    # occurrence, incrementing its 'msgcount' param (see log()).
    self.firstOccurrenceOnly = firstOccurrenceOnly
  def startPrefixMapping(self, prefix, uri):
    if namespaces.has_key(uri):
      # Known namespace bound to something other than its preferred prefix.
      if not namespaces[uri] == prefix and prefix:
        from logging import NonstdPrefix
        self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))
    elif prefix in namespaces.values():
      # A well-known prefix bound to an unexpected namespace URI.
      from logging import ReservedPrefix
      preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
      self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
  def startElementNS(self, name, qname, attrs):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    # SAX delivers name as a (namespaceURI, localname) tuple; after this
    # swap, 'qname' holds the namespace URI and 'name' the local name.
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.startElementNS(name, qname, attrs)
  def resolveEntity(self, publicId, systemId):
    # Permit Netscape's RSS 0.91 DOCTYPE; any other SYSTEM entity is
    # reported as a security problem.  Either way the entity is replaced
    # with an empty stream so external content is never fetched.
    if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
      systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
      from logging import ValidDoctype
      self.log(ValidDoctype({}))
    else:
      from logging import ContainsSystemEntity
      self.lastKnownLine = self.locator.getLineNumber()
      self.lastKnownColumn = self.locator.getColumnNumber()
      self.log(ContainsSystemEntity({}))
    from StringIO import StringIO
    return StringIO()
  def characters(self, string):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    for handler in iter(self.handler_stack[-1]):
      handler.characters(string)
  def endElementNS(self, name, qname):
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.endElementNS(name, qname)
    # Pop the handler list that was pushed when this element started.
    del self.handler_stack[-1]
  def push(self, handler):
    # Accept either a single handler or an iterable of handlers; normalize
    # to a list so the stack always holds lists.
    try:
      iter(handler)
    except:
      handler = [handler]
    self.handler_stack.append(handler)
  def log(self, event):
    def findDuplicate(self, event):
      # A prior event is a duplicate if it has the same class and the same
      # params (the 'value' param is ignored in the comparison).
      duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
      for dup in duplicates:
        for k, v in event.params.items():
          if k != 'value':
            if not k in dup.params or dup.params[k] != v: break
        else:
          return dup
    # Element names were folded to "prefix_local" for dispatch (see
    # validatorBase.startElementNS); restore "prefix:local" for display.
    if event.params.has_key('element') and event.params['element']:
      event.params['element'] = event.params['element'].replace('_', ':')
    if self.firstOccurrenceOnly:
      dup = findDuplicate(self, event)
      if dup:
        dup.params['msgcount'] = dup.params['msgcount'] + 1
        return
    event.params['msgcount'] = 1
    try:
      # The locator may be absent or unusable (e.g. after a UnicodeError
      # in the parser); fall back to zeros in that case.
      line = self.locator.getLineNumber()
      backupline = self.lastKnownLine
      column = self.locator.getColumnNumber()
      backupcolumn = self.lastKnownColumn
    except AttributeError:
      line = backupline = column = backupcolumn = 0
    event.params['line'] = line
    event.params['backupline'] = backupline
    event.params['column'] = column
    event.params['backupcolumn'] = backupcolumn
    self.loggedEvents.append(event)
  def error(self, exception):
    # Record the parse error, then re-raise so parsing stops.
    from logging import SAXError
    self.log(SAXError({'exception':str(exception)}))
    raise exception
  fatalError=error
  warning=error
  def setFeedType(self, feedType):
    self.feedType = feedType
  def getFeedType(self):
    return self.feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handling such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
class validatorBase(ContentHandler):
  """Base class for per-element content handlers.

  Tracks the element's name, its attributes, the names of children seen so
  far, and the accumulated character data; delegates log and push events
  back up to the dispatcher.  Subclasses declare do_NAME methods that
  return the handler (or sequence of handlers) for each supported child
  element, and may override the prevalidate()/validate() hooks."""
  # Namespace URIs treated as "no namespace" for dispatch (see below).
  defaultNamespaces = []
  def __init__(self):
    ContentHandler.__init__(self)
    self.value = ""       # accumulated character data
    self.attrs = None     # attributes of this element (set by the parent)
    self.children = []    # local names of child elements seen so far
    self.isValid = 1      # cleared by log(); checked in endElementNS
    self.name = None      # local name of this element (set by the parent)
  def unknown_starttag(self, name, qname, attrs):
    # Default behavior for elements in an unknown namespace: silently
    # consume them.  Subclasses may override.
    from validators import eater
    return eater()
  def startElementNS(self, name, qname, attrs):
    from validators import eater
    if qname in self.defaultNamespaces: qname=None
    # Remember whether the element carried any namespace at all.
    hasNS = (qname<>None)
    if namespaces.has_key(qname):
      # Known namespace: fold into "prefix_local" so it can be dispatched
      # to a do_prefix_local method.
      qname, name = None, namespaces[qname] + "_" + name
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    if qname:
      # Element in an unrecognized namespace.
      handler = self.unknown_starttag(name, qname, attrs)
    else:
      try:
        handler = getattr(self, "do_" + name)()
      except AttributeError:
        if name.find(':') != -1:
          # A colon in the local name means the prefix was never declared.
          from logging import MissingNamespace
          self.log(MissingNamespace({"parent":self.name, "element":name}))
        elif not hasNS:
          from logging import UndefinedElement
          self.log(UndefinedElement({"parent":self.name, "element":name}))
        handler = eater()
    # do_* methods may return a single handler or a sequence of handlers;
    # normalize to a list.
    try:
      iter(handler)
    except TypeError:
      handler = [handler]
    for aHandler in iter(handler):
      aHandler.parent = self
      aHandler.dispatcher = self.dispatcher
      aHandler.value = ""
      aHandler.name = name
      aHandler.attrs = attrs
      aHandler.prevalidate()
    # MAP - always append name, even if already exists (we need this to
    # check for too many hour elements in skipHours, and it doesn't
    # hurt anything else)
    self.children.append(name)
    self.push(handler)
  def endElementNS(self, name, qname):
    self.value=self.value.strip()
    self.validate()
    # log() clears isValid, so this only fires when no events were logged
    # for this element.
    if self.isValid and self.name:
      from validators import ValidElement
      self.log(ValidElement({"parent":self.parent.name, "element":name}))
  def characters(self, string):
    self.value = self.value + string
  def log(self, event):
    self.dispatcher.log(event)
    self.isValid = 0
  def setFeedType(self, feedType):
    self.dispatcher.setFeedType(feedType)
  def push(self, handler):
    self.dispatcher.push(handler)
  def leaf(self):
    # Default handler for text-only children.
    from validators import text
    return text()
  def prevalidate(self):
    # Hook: called by the parent before this element's children are seen.
    pass
  def validate(self):
    # Hook: called when this element ends.
    pass
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.41 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.40 2003/08/23 23:25:14 rubys
Allow unprefixed elements (like xhtml) to pass through without warning
Revision 1.39 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.38 2003/08/12 02:02:26 rubys
Detect unknown elements even if they have underscores. Reported by
Brent Simmons.
Revision 1.37 2003/08/09 18:18:03 rubys
Permit NetScape's 0.91 DOCTYPE
Revision 1.36 2003/08/05 05:32:35 f8dy
0.2 snapshot - change version number and default namespace
Revision 1.35 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.34 2003/07/28 21:56:52 rubys
Check attributes for valid namespaces
Revision 1.33 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.32 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
Revision 1.31 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.30 2003/04/07 19:49:22 rubys
Handle ignorable whitespace in elements such as comments
Revision 1.29 2003/03/01 13:53:22 rubys
Improved duplicate checking
Revision 1.28 2002/12/20 13:26:00 rubys
CreativeCommons support
Revision 1.27 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.26 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.25 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.24 2002/10/24 13:55:58 f8dy
added rdfs namespace
Revision 1.23 2002/10/22 19:20:54 f8dy
passed testcase for foaf:person within dc:creator (or any other text
element)
Revision 1.22 2002/10/22 12:57:35 f8dy
fixed bug setting parameters for ReservedPrefix error
Revision 1.21 2002/10/18 20:31:28 f8dy
fixed namespace for mod_aggregation
Revision 1.20 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: content.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
class content(validatorBase,safeHtmlMixin):
  """Handler for Atom content constructs.

  Validates the 'mode' attribute (xml/escaped/base64) and the 'type' MIME
  attribute, then checks that the element's payload is consistent with the
  declared mode.  Supports nested content elements for
  multipart/alternative."""
  from validators import mime_re
  # Heuristic for embedded markup: any closing tag in the text.
  htmlEndTag_re = re.compile("</\w+>")
  HTMLTYPES = ('text/html', 'application/xhtml+xml')
  def prevalidate(self):
    # Defaults when the attributes are absent.
    self.mode='xml'
    self.type='text/plain'
    self.mixed=0        # set when non-content child elements are seen
    self.multitypes=[]  # 'type' values seen under multipart/alternative
    if self.attrs.has_key((None,"mode")):
      self.mode=self.attrs.getValue((None,"mode"))
    if self.attrs.has_key((None,"type")):
      self.type=self.attrs.getValue((None,"type"))
    if not self.mode in ['xml','escaped','base64']:
      self.log(InvalidContentMode({"parent":self.parent.name, "element":self.name, "mode":self.mode}))
    else:
      self.log(ValidContentMode({"parent":self.parent.name, "element":self.name, "mode":self.mode}))
#    if self.type == None:
#      self.log(NoMIMEType({"parent":self.parent.name, "element":self.name}))
    if not self.mime_re.match(self.type):
      self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    else:
      self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
  def validate(self):
    if self.mode == 'base64':
      import base64
      try:
        base64.decodestring(self.value)
      except:
        self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
    elif self.mode == 'xml':
      # NOTE: this import is redundant (the class body already uses re),
      # but it is harmless and left in place.
      import re
      # In 'xml' mode, markup should have arrived as child elements; any
      # tags left in the character data mean the content was escaped.
      if self.htmlEndTag_re.search(self.value):
        if self.type in self.HTMLTYPES:
          self.log(NotInline({"parent":self.parent.name, "element":self.name,"value":self.value}))
        else:
          self.log(ContainsUndeclaredHTML({"parent":self.parent.name, "element":self.name, "value":self.value}))
    elif self.mode == 'escaped':
      if self.type in self.HTMLTYPES:
        # Declared HTML: verify it is safe and actually parses as HTML.
        self.validateSafe(self.value)
        from HTMLParser import HTMLParser, HTMLParseError
        try:
          p=HTMLParser()
          p.feed(self.value)
          p.close()
          self.log(ValidHtml({"parent":self.parent.name, "element":self.name,"value":self.value}))
        except HTMLParseError:
          import sys
          self.log(NotHtml({"parent":self.parent.name, "element":self.name,"value":self.value, "message": sys.exc_info()[1].msg}))
      else:
        # Not declared as HTML, so it must not contain markup.
        if self.htmlEndTag_re.search(self.value):
          self.log(ContainsUndeclaredHTML({"parent":self.parent.name, "element":self.name, "value":self.value}))
    # multipart/alternative requires at least one nested content element.
    if self.type == 'multipart/alternative':
      if len(self.children)==0:
        self.log(MultipartMissing({"parent":self.parent.name, "element":self.name}))
  def startElementNS(self, name, qname, attrs):
    if self.type == 'multipart/alternative':
      # Only nested content elements are permitted inside multipart, each
      # with a distinct, non-multipart type.
      if name<>'content':
        self.log(MultipartInvalid({"parent":self.parent.name, "element":self.name, "name":name}))
      else:
        validatorBase.startElementNS(self, name, qname, attrs)
        if attrs.has_key((None,'type')):
          type=attrs.getValue((None,'type'))
          if type=='multipart/alternative':
            self.log(MultipartRecursion({"parent":self.parent.name, "element":self.name, "name":name}))
          if type in self.multitypes:
            self.log(MultipartDuplicate({"parent":self.parent.name, "element":self.name, "type":type}))
          else:
            self.multitypes += [type]
      return
    # Non-multipart: mark mixed content and complain if the mode claimed
    # the content was escaped (child elements contradict that).
    self.mixed=1
    if self.attrs.has_key((None,"mode")):
      if self.attrs.getValue((None,"mode")) == 'escaped':
        self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
    handler=eater()
    handler.parent=self
    handler.dispatcher=self
    self.push(handler)
  def do_content(self):
    # Nested content element (used by multipart/alternative).
    return content()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.14 2003/12/12 11:25:55 rubys
Validate mime type in link tags
Revision 1.13 2003/12/12 01:24:36 rubys
Multipart/alternative tests
Revision 1.12 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.11 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.10 2003/12/11 15:18:51 f8dy
type is now optional
Revision 1.9 2003/08/23 21:01:00 rubys
Validate that content, content:encoded, and xhtml:body are safe
Revision 1.8 2003/08/23 00:28:04 rubys
Validate escaped text/HTML content
Revision 1.7 2003/08/05 15:03:19 rubys
Handle complex (nested) content. Remove copy/paste error in handing
of copyright.
Revision 1.6 2003/07/29 21:48:10 f8dy
tightened up test cases, added parent element check, changed negative test cases to positive
Revision 1.5 2003/07/11 16:36:08 rubys
Attempt to detect improper use of inline xml
Revision 1.4 2003/07/10 21:16:33 rubys
Get rssdemo back on its feet...
Revision 1.3 2003/07/10 21:02:16 rubys
Verify base64 and escaped
Revision 1.2 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.1 2003/07/07 02:44:13 rubys
Further progress towards pie
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import timeoutsocket
timeoutsocket.setDefaultSocketTimeout(10)
import urllib
from logging import *
from xml.sax import SAXParseException
from xml.sax.xmlreader import InputSource
import re
MAXDATALENGTH = 200000
class ValidatorURLopener(urllib.FancyURLopener):
  """URL opener that identifies itself with the validator's User-Agent."""
  def __init__(self, *args):
    # self.version is the User-Agent string urllib openers send; it must
    # be assigned before delegating to the base constructor.
    self.version = "FeedValidator/1.21 +http://feeds.archive.org/validator/"
    urllib.FancyURLopener.__init__(self, *args)
def _validate(aString, firstOccurrenceOnly=0):
  """validate RSS from string, returns validator object"""
  from xml.sax import make_parser, handler
  from base import SAXDispatcher
  from exceptions import UnicodeError
  from cStringIO import StringIO
  source = InputSource()
  source.setByteStream(StringIO(aString))
  validator = SAXDispatcher()
  validator.setFirstOccurrenceOnly(firstOccurrenceOnly)
  parser = make_parser()
  parser.setFeature(handler.feature_namespaces, 1)
  # The dispatcher plays all three roles: content handler, error handler,
  # and (for security) entity resolver.
  parser.setContentHandler(validator)
  parser.setErrorHandler(validator)
  parser.setEntityResolver(validator)
  if hasattr(parser, '_ns_stack'):
    # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
    # PyXML doesn't have this problem, and it doesn't have _ns_stack either
    parser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
  try:
    parser.parse(source)
  except SAXParseException:
    # Already recorded: SAXDispatcher.error logs the event before raising.
    pass
  except UnicodeError:
    # The feed declared one encoding but contained bytes from another;
    # log it against the validator so the caller sees it in loggedEvents.
    import sys
    exctype, value = sys.exc_info()[:2]
    import logging
    validator.log(logging.UnicodeError({"exception":value}))
  return validator
def validateStream(aFile, firstOccurrenceOnly=0):
  """validate RSS from a file-like object, returns results dict

  Bug fix: this previously returned fields of an unbound 'validator'
  name (a guaranteed NameError) and never read the stream at all.  It
  now mirrors validateString: read the stream, validate its contents,
  and report the detected feed type plus the logged events."""
  validator = _validate(aFile.read(), firstOccurrenceOnly)
  return {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
def validateString(aString, firstOccurrenceOnly=0):
  """Validate a feed supplied as a string.

  Returns a dict holding the detected feed type and the list of logged
  validation events."""
  result = _validate(aString, firstOccurrenceOnly)
  return {"feedType": result.feedType, "loggedEvents": result.loggedEvents}
def validateURL(url, firstOccurrenceOnly=1, wantRawData=0):
  """Validate the feed at `url`.

  Returns a dict with 'feedType' and 'loggedEvents'; when wantRawData is
  true, the EOL-normalized (and possibly truncated) feed text is also
  included under 'rawdata'."""
  conn = ValidatorURLopener().open(url)
  rawdata = conn.read(MAXDATALENGTH)
  conn.close()
  # Normalize all line endings to \n before parsing (the SAX parser does
  # not cope with bare carriage returns).
  rawdata = rawdata.replace('\r\n', '\n').replace('\r', '\n')
  result = _validate(rawdata, firstOccurrenceOnly)
  response = {"feedType": result.feedType, "loggedEvents": result.loggedEvents}
  if wantRawData:
    response['rawdata'] = rawdata
  return response
# Public names for "from feedvalidator import *".
# NOTE(review): validateStream is defined above but not listed here --
# possibly an oversight; confirm before changing.
__all__ = ['base',
           'channel',
           'compatibility',
           'image',
           'item',
           'logging',
           'rdf',
           'root',
           'rss',
           'skipHours',
           'textInput',
           'util',
           'validators',
           'validateURL',
           'validateString']
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.24 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.23 2003/08/09 17:09:34 rubys
Remove misleading mapping of LookupError to UnicodeError
Revision 1.22 2003/08/06 05:40:00 f8dy
patch to send a real User-Agent on HTTP requests
Revision 1.21 2003/08/05 18:51:38 f8dy
added hack to work around bug in built-in SAX parser (doesn't recognize xml: namespace)
Revision 1.20 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.19 2002/12/22 19:01:17 rubys
Integrate in SOAP support
Revision 1.18 2002/11/04 01:06:43 rubys
Remove remaining call to preValidate
Revision 1.17 2002/11/04 00:28:55 rubys
Handle LookupError (e.g., unknown encoding)
Revision 1.16 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.15 2002/10/30 23:03:01 f8dy
security fix: external (SYSTEM) entities
Revision 1.14 2002/10/22 19:41:07 f8dy
normalize line endings before parsing (SAX parser is not Mac-CR-friendly)
Revision 1.13 2002/10/22 16:35:11 f8dy
commented out fallback except (caller handles it gracefully anyway)
Revision 1.12 2002/10/22 16:24:04 f8dy
added UnicodeError support for feeds that declare utf-8 but use 8-bit characters anyway
Revision 1.11 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: en.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
from feedvalidator.logging import *
# Templates for rendering event positions and repeat counts.
line = "line %(line)s"
column = "column %(column)s"
# NOTE: the name 'occurances' is misspelled, but it is part of this
# module's interface to the formatters, so it is left unchanged.
occurances = " (%(msgcount)s occurrences)"
# English message template for each logged event class.  Each template is
# interpolated with the event's params dict.
# Fix: corrected the user-visible typo "skipDAys" -> "skipDays" in the
# EightDaysAWeek message.
messages = {
  SAXError: "XML Parsing error: %(exception)s",
  NotHtml: "Invalid HTML: %(message)s",
  UnicodeError: "%(exception)s (maybe a high-bit character?)",
  UndefinedElement: "Undefined %(parent)s element: %(element)s",
  MissingNamespace: "Missing namespace for %(element)s",
  MissingElement: "Missing %(parent)s element: %(element)s",
  MissingOptionalElement: "%(parent)s should contain a %(element)s element",
  MissingRecommendedElement: "%(parent)s should contain a %(element)s element",
  MissingAttribute: "Missing %(element)s attribute: %(attr)s",
  NoBlink: "There is no blink element in RSS; use blogChannel:blink instead",
  InvalidValue: "Invalid value for %(element)s: \"%(value)s\"",
  InvalidWidth: "%(element)s must be between 1 and 144",
  InvalidHeight: "%(element)s must be between 1 and 400",
  InvalidHour: "%(element)s must be between 1 and 24",
  InvalidDay: "%(element)s must be Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday",
  InvalidInteger: "%(element)s must be a positive integer",
  InvalidHttpGUID: "guid must be a full URL, unless isPermaLink attribute is false",
  InvalidUpdatePeriod: "%(element)s must be hourly, daily, weekly, monthly, or yearly",
  RecommendedWidth: "%(element)s should be between 1 and 88",
  RecommendedHeight: "%(element)s should be between 1 and 31",
  NotBlank: "%(element)s can not be blank",
  AttrNotBlank: "The %(attr)s attribute of %(element)s can not be blank",
  DuplicateElement: "%(parent)s contains more than one %(element)s",
  DuplicateSemantics: "A channel must not include both %(core)s and %(ext)s",
  DuplicateItemSemantics: "An item must not include both %(core)s and %(ext)s",
  DuplicateValue: "%(element)s values must not be duplicated within a feed",
  NonstdPrefix: '"%(preferred)s" is the preferred prefix for the namespace "%(ns)s"',
  ReservedPrefix: 'The prefix "%(prefix)s" generally uses the namespace "%(ns)s"',
  UseModularEquivalent: "%(ext)s should be used instead of %(core)s",
  InvalidContact: "%(element)s must include an email address",
  InvalidLink: "%(element)s must be a full URL",
  InvalidW3DTFDate: "%(element)s must be an ISO-8601 date",
  InvalidRFC2822Date: "%(element)s must be an RFC-822 date",
  InvalidLanguage: "%(element)s must be an ISO-639 language code",
  InvalidURLAttribute: "%(attr)s attribute of %(element)s must be a full URL",
  InvalidIntegerAttribute: "%(attr)s attribute of %(element)s must be a positive integer",
  InvalidBooleanAttribute: "%(attr)s attribute of %(element)s must be 'true' or 'false'",
  InvalidMIMEAttribute: "%(attr)s attribute of %(element)s must be a valid MIME type",
  ItemMustContainTitleOrDescription: "item must contain either title or description",
  ContainsHTML: "%(element)s should not contain HTML",
  ContainsUndeclaredHTML: "%(element)s must not contain HTML unless declared in the type attribute",
  NotEnoughHoursInTheDay: "skipHours can not contain more than 24 hour elements",
  EightDaysAWeek: "skipDays can not contain more than 7 day elements",
  SecurityRisk: "%(element)s should not contain %(tag)s tag",
  ContainsRelRef: "%(element)s should not contain relative URL references",
  ContainsSystemEntity: "Feeds must not contain SYSTEM entities",
  InvalidContentMode: "mode must be 'xml', 'escaped', or 'base64'",
  InvalidMIMEType: "Not a valid MIME type",
  NoMIMEType: "%(element)s does not specify a MIME type",
  W3DTFDateNoTimezone: "Date should include a timezone",
  W3DTFDateNonUTC: "Date should be a UTC date",
  W3DTFDateNonLocal: "Date should not be a UTC date",
  NotEscaped: "%(element)s claims to be escaped, but isn't",
  NotInline: "%(element)s claims to be inline, but isn't",
  NotBase64: "%(element)s claims to be base64-encoded, but isn't",
  InvalidURN: "%(element)s is not a valid URN",
  InvalidTAG: "%(element)s is not a valid TAG",
  InvalidURI: "%(element)s is not a valid URI",
  ObsoleteVersion: "This feed is an obsolete version",
  ObsoleteNamespace: "This feed uses an obsolete namespace",
  InvalidNamespace: "%(element)s is in an invalid namespace: %(namespace)s",
  InvalidDoctype: "This feed contains conflicting DOCTYPE and version information",
  MultipartInvalid: "Multipart/alternative content can only contain other content elements",
  MultipartMissing: "Multipart/alternative content must contain at least one content element",
  MultipartRecursion: "Multipart/alternative content can not contain other multipart/alternative content elements",
  MultipartDuplicate: "Multipart/alternative content can not contain multiple content elements of the same type",
  DuplicateAtomLink: "Duplicate link with the same type and rel",
  MissingHref: "%(element)s must have an href attribute",
  AtomLinkNotEmpty: "%(element)s should not have text (all data is in attributes)",
  AtomLinkMissingRel: "%(element)s must have a rel attribute",
  MissingAlternateLink: '''%(parent)s must contain a link element with rel="alternate"'''
}
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.54 2003/12/12 20:37:06 f8dy
oops, URNs can contain letters after all
Revision 1.53 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.52 2003/12/12 15:00:22 f8dy
changed blank link attribute tests to new error AttrNotBlank to distinguish them from elements that can not be blank
Revision 1.51 2003/12/12 05:57:39 f8dy
added missing messages
Revision 1.50 2003/12/12 05:42:05 rubys
Rough in some support for the new link syntax
Revision 1.49 2003/12/12 01:24:36 rubys
Multipart/alternative tests
Revision 1.48 2003/12/11 20:13:58 f8dy
feed title, copyright, and tagline may be blank
Revision 1.47 2003/12/11 18:20:46 f8dy
passed all content-related testcases
Revision 1.46 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.45 2003/12/11 06:00:51 f8dy
added tag: testcases, passed
Revision 1.44 2003/08/23 00:28:04 rubys
Validate escaped text/HTML content
Revision 1.43 2003/08/06 16:16:59 f8dy
added testcase for Netscape DOCTYPE
Revision 1.42 2003/08/05 22:09:03 f8dy
added automated message test to test output messages
Revision 1.41 2003/08/05 20:54:42 f8dy
Added message for InvalidNamespace error
Revision 1.40 2003/08/05 18:04:12 f8dy
added Atom 0.2-specific messages
Revision 1.39 2003/08/04 01:59:33 rubys
Full http and ftp URIs require two slashes
Revision 1.38 2003/08/04 00:54:35 rubys
Log every valid element (for better self validation in test cases)
Revision 1.37 2003/07/29 16:14:21 rubys
Validate urns
Revision 1.36 2003/07/29 15:15:33 f8dy
added tests for invalid URNs (may be used in entry/id of Atom feeds)
Revision 1.35 2003/07/19 21:15:08 f8dy
added tests and logging classes for duplicate guid/id values within a feed (thanks AaronSw for this idea)
Revision 1.34 2003/07/09 19:28:39 f8dy
added test cases looking at actual content vs. mode (note: not passed)
Revision 1.33 2003/07/09 03:54:39 f8dy
yet more changes to the date messages
Revision 1.32 2003/07/09 03:48:04 f8dy
more changes to pie-specific messages
Revision 1.31 2003/07/09 03:31:36 f8dy
Updated pie-specific log messages
Revision 1.30 2003/06/26 18:03:04 f8dy
add workaround for case where SAX throws UnicodeError but locator.getLineNumber() is screwy
Revision 1.29 2002/10/31 00:52:21 rubys
Convert from regular expressions to EntityResolver for detecting
system entity references
Revision 1.28 2002/10/30 23:02:30 f8dy
*** empty log message ***
Revision 1.27 2002/10/30 15:44:48 rubys
Improve error messages for relative references: error message should
be gramatically correct. Remove "hidden" fields prevented duplicate
errors from being flagged as such.
Revision 1.26 2002/10/27 18:54:30 rubys
Issue warnings for relative references in descriptions
Revision 1.25 2002/10/22 22:37:21 f8dy
tweaked ReservedPrefix message one last time
Revision 1.24 2002/10/22 19:32:19 f8dy
made friendlier messages for NonStdPrefix and ReservedPrefix
Revision 1.23 2002/10/22 16:24:04 f8dy
added UnicodeError support for feeds that declare utf-8 but use 8-bit characters anyway
Revision 1.22 2002/10/19 21:08:02 f8dy
added "special case" functionality for the web front end
Revision 1.21 2002/10/18 19:28:43 f8dy
added testcases for mod_syndication and passed them
Revision 1.20 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.19 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: __init__.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:17 rubys
Initial revision
Revision 1.3 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.2 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: author.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# author element.
#
class author(validatorBase):
  """Validates an author construct.

  A name child is required; email and url children are optional and are
  validated when present."""
  def validate(self):
    if "name" not in self.children:
      self.log(MissingElement({"parent": self.name, "element": "name"}))
  def do_name(self):
    return nonhtml(), nonblank(), noduplicates()
  def do_email(self):
    return email(), noduplicates()
  def do_url(self):
    return nonblank(), rfc2396(), noduplicates()
  # The weblog and homepage children were dropped from the format (see
  # rev 1.4 in the history below); their do_ handlers were removed.
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.5 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.4 2003/09/01 21:27:48 f8dy
remove weblog, homepage
Revision 1.3 2003/08/03 18:46:04 rubys
support author(url,email) and feed(author,copyright,generator)
Revision 1.2 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.1 2003/07/07 00:54:00 rubys
Rough in some pie/echo support
"""
| Python |
"""$Id: image.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
#
# image element.
#
class image(validatorBase):
  """Validates an RSS image element.

  url, title, and link children are required -- unless the element is an
  RSS 1.0 rdf:resource reference, in which case the image body is defined
  elsewhere in the document."""
  def validate(self):
    if self.attrs.has_key((rdfNS,"resource")):
      # RSS 1.0 style reference; nothing to check here.
      return
    # Report each missing required child, in the traditional order.
    required = (("link", MissingLink), ("title", MissingTitle), ("url", MissingElement))
    for childName, event in required:
      if childName not in self.children:
        self.log(event({"parent": self.name, "element": childName}))
  def do_title(self):
    return title(), noduplicates()
  def do_link(self):
    return rfc2396(), noduplicates()
  def do_url(self):
    return rfc2396(), noduplicates()
  def do_width(self):
    return width(), noduplicates()
  def do_height(self):
    return height(), noduplicates()
  def do_description(self):
    return nonhtml(), noduplicates()
class title(text, noduplicates):
  # Image title: must be non-blank.
  def validate(self):
    if not self.value.strip():
      self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name}))
    # NOTE(review): this return value appears to be unused -- the base
    # class ignores validate()'s result.  Confirm before removing.
    return nonhtml()
class width(text, noduplicates):
  """Validates an RSS image width: an integer from 1 to 144, with widths
  above 88 flagged as larger than recommended."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      # Not an integer at all.
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 0 < pixels <= 144:
      self.log(ValidWidth({"parent":self.parent.name, "element":self.name}))
      if pixels > 88:
        # Legal, but wider than the recommended maximum.
        self.log(RecommendedWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
class height(text, noduplicates):
  """Validates an RSS image height: an integer from 1 to 400, with heights
  above 31 flagged as larger than recommended."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      # Not an integer at all.
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 0 < pixels <= 400:
      self.log(ValidHeight({"parent":self.parent.name, "element":self.name}))
      if pixels > 31:
        # Legal, but taller than the recommended maximum.
        self.log(RecommendedHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:15 rubys
Initial revision
Revision 1.11 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.10 2003/07/29 19:38:07 f8dy
changed test cases to explicitly test for success (rather than the absence of failure)
Revision 1.9 2002/10/24 14:47:33 f8dy
decoupled "no duplicates" check from individual validator classes,
allow handlers to return multiple validator classes
Revision 1.8 2002/10/22 17:29:52 f8dy
loosened restrictions on link/docs/url protocols; RSS now allows any
IANA protocol, not just http:// and ftp://
Revision 1.7 2002/10/22 16:43:55 rubys
textInput vs textinput: don't reject valid 1.0 feeds, but don't allow
invalid textinput fields in RSS 2.0 either...
Revision 1.6 2002/10/18 15:41:33 f8dy
added (and passed) testcases for unallowed duplicates of the same element
Revision 1.5 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: messagetest.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import unittest, new, os, sys, glob, re
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.application_test import Formatter
class TestCase(unittest.TestCase):
  """Base test case for message tests.

  failIfNoMessage asserts that every logged validation event surviving
  the compatibility filter can be rendered into a human-readable message
  by the formatter."""
  def failIfNoMessage(self, theClass, params, theList, msg=None):
    # Apply the compatibility.AA filter, then format each surviving event.
    filterFunc = compatibility.AA
    events = filterFunc(theList)
    output = Formatter(events)
    for e in events:
      if not output.format(e):
        # Fixes the typo "contruct" -> "construct"; also raises an
        # exception instance instead of using the comma raise form.
        raise self.failureException('could not construct message for %s' % e)
# Parses the test-case header comment:
#   <!-- Description: <text> Expect: [!]<ExceptionName>{k:v,...} -->
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
def getDescription(xmlfile):
    """Pull the test description and expected exception out of an XML file.

    Each test case is an XML file containing a (possibly invalid) RSS feed
    plus an XML comment naming the test: a human-readable description and
    the exception the validator is expected to raise (or not), e.g.

    <!--
    Description: channel must include title
    Expect: MissingTitle
    -->

    Returns (method, description, params, exception_class).
    """
    stream = open(xmlfile)
    xmldoc = stream.read()
    stream.close()
    match = desc_re.search(xmldoc)
    if match is None:
        raise RuntimeError("can't parse %s" % xmlfile)
    description, cond, excName, plist = match.groups()
    # optional {k:v,...} clause gives expected event parameters
    params = {}
    if plist:
        for entry in plist.split(','):
            name, value = entry.lstrip().split(':', 1)
            params[name] = value
    return (TestCase.failIfNoMessage,
            xmlfile + ": " + description,
            params,
            getattr(feedvalidator, excName))
def buildTestCase(xmlfile, description, method, exc, params):
    """factory to create functions which validate `xmlfile`

    the returned function asserts that validating `xmlfile` (an XML file)
    will return a list of exceptions that include an instance of
    `exc` (an Exception class)
    """
    def func(self, xmlfile=xmlfile, exc=exc, params=params):
        # fix: close the test-case file explicitly instead of leaking the
        # descriptor until garbage collection (open(...).read() idiom)
        stream = open(xmlfile)
        try:
            data = stream.read()
        finally:
            stream.close()
        return method(self, exc, params, feedvalidator.validateString(data)['loggedEvents'])
    func.__doc__ = description
    return func
if __name__ == "__main__":
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
basedir = os.path.split(curdir)[0]
for xmlfile in glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')):
method, description, params, exc = getDescription(xmlfile)
testName = 'test_' + os.path.basename(xmlfile)
testFunc = buildTestCase(xmlfile, description, method, exc, params)
instanceMethod = new.instancemethod(testFunc, None, TestCase)
setattr(TestCase, testName, instanceMethod)
unittest.main()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.2 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.1 2003/08/05 22:09:03 f8dy
added automated message test to test output messages
"""
| Python |
"""$Id: validtest.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
import unittest, new, os, sys, glob, re
class TestCase(unittest.TestCase):
    """Assertions over the list of events logged by the feed validator."""
    def failUnlessContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are no instances of theClass in theList with given params"""
        # Default failure message; replaced below with a more specific one
        # if an instance is found whose params have the wrong values.
        failure=(msg or 'no %s instances in %s' % (theClass.__name__, `theList`))
        for item in theList:
            # compare by class *name*, not identity
            if item.__class__.__name__ == theClass.__name__:
                if not params: return
                for k, v in params.items():
                    if item.params[k] <> v:
                        failure=("%s.%s value was %s, expected %s" %
                                 (theClass.__name__, k, item.params[k], v))
                        break
                else:
                    # for/else: every expected param matched on this event
                    return
        raise self.failureException, failure
    def failIfContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are instances of theClass in theList with given params"""
        for item in theList:
            if item.__class__.__name__ == theClass.__name__:
                if not params:
                    raise self.failureException, \
                          (msg or 'unexpected %s' % (theClass.__name__))
                allmatch = 1
                for k, v in params.items():
                    if item.params[k] != v:
                        allmatch = 0
                if allmatch:
                    # NOTE(review): k and v here are whichever pair the loop
                    # iterated last, not necessarily a matching one — the
                    # message may name an arbitrary param; confirm intent.
                    raise self.failureException, \
                          "unexpected %s.%s with a value of %s" % \
                          (theClass.__name__, k, v)
# Parses the test-case header comment:
#   <!-- Description: <text> Expect: [!]<ExceptionName>{k:v,...} -->
# Group 2 ("!") selects the negative assertion in getDescription below.
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
def getDescription(xmlfile):
    """Pull the test description and expected exception out of an XML file.

    Each test case is an XML file containing a (possibly invalid) RSS feed
    plus an XML comment naming the test: a human-readable description and
    the exception the validator is expected to raise (or not), e.g.

    <!--
    Description: channel must include title
    Expect: MissingTitle
    -->

    Returns (method, description, params, exception_class).
    """
    stream = open(xmlfile)
    xmldoc = stream.read()
    stream.close()
    match = desc_re.search(xmldoc)
    if match is None:
        raise RuntimeError("can't parse %s" % xmlfile)
    description, cond, excName, plist = match.groups()
    # A leading "!" in the Expect: clause means the event must NOT occur.
    if cond == "":
        method = TestCase.failUnlessContainsInstanceOf
    else:
        method = TestCase.failIfContainsInstanceOf
    # optional {k:v,...} clause gives expected event parameters
    params = {}
    if plist:
        for entry in plist.split(','):
            name, value = entry.lstrip().split(':', 1)
            params[name] = value
    return (method,
            xmlfile + ": " + description,
            params,
            getattr(feedvalidator, excName))
def buildTestCase(xmlfile, description, method, exc, params):
    """factory to create functions which validate `xmlfile`

    the returned function asserts that validating `xmlfile` (an XML file)
    will return a list of exceptions that include an instance of
    `exc` (an Exception class)
    """
    def func(self, xmlfile=xmlfile, exc=exc, params=params):
        # fix: close the test-case file explicitly instead of leaking the
        # descriptor until garbage collection (open(...).read() idiom)
        stream = open(xmlfile)
        try:
            data = stream.read()
        finally:
            stream.close()
        return method(self, exc, params, feedvalidator.validateString(data)['loggedEvents'])
    func.__doc__ = description
    return func
if __name__ == "__main__":
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
basedir = os.path.split(curdir)[0]
for xmlfile in glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')):
method, description, params, exc = getDescription(xmlfile)
testName = 'test_' + os.path.basename(xmlfile)
testFunc = buildTestCase(xmlfile, description, method, exc, params)
instanceMethod = new.instancemethod(testFunc, None, TestCase)
setattr(TestCase, testName, instanceMethod)
unittest.main()
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.2 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.1 2003/08/06 16:56:14 f8dy
combined pievalidtest and rssvalidtest, renamed rssdemo to demo
Revision 1.20 2003/07/20 00:25:41 rubys
Search for *any* instance of a matching class/params in the log
Revision 1.19 2003/07/19 22:25:10 f8dy
fixed bug in test case suite runner, if an element in the expected params didn't match the expected value, it would set the failure message but not raise the appropriate exception, so rssvalidtest.py would claim that certain test cases passed when they didn't (this affected 7 test cases out of 700)
Revision 1.18 2003/07/09 16:24:30 f8dy
added global feed type support
Revision 1.17 2003/07/07 10:35:50 rubys
Complete first pass of echo/pie tests
Revision 1.16 2003/07/06 21:20:02 rubys
Refactor so test cases are organized by protocol
Revision 1.15 2002/10/18 14:17:30 f8dy
added tests for language/dc:language (must be valid ISO-639 language code
plus optional country code) and passed them
Revision 1.14 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
"""$Id: demo.py 4 2004-02-03 17:31:11Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 4 $"
__date__ = "$Date: 2004-02-03 17:31:11 +0000 (Tue, 03 Feb 2004) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
import feedvalidator
import sys
if __name__ == '__main__':
    # arg 1 is URL to validate
    link = sys.argv[1:] and sys.argv[1] or 'http://www.intertwingly.net/blog/index.rss2'
    print 'Validating %s' % link
    events = feedvalidator.validateURL(link, firstOccurrenceOnly=1)['loggedEvents']
    # (optional) arg 2 is compatibility level
    # "A" is most basic level
    # "AA" mimics online validator
    # "AAA" is experimental; these rules WILL change or disappear in future versions
    from feedvalidator import compatibility
    filter = sys.argv[2:] and sys.argv[2] or "AA"  # NOTE: shadows the builtin 'filter'
    filterFunc = getattr(compatibility, filter)
    events = filterFunc(events)
    # render the filtered events as plain text, one message per line
    from feedvalidator.formatter.text_plain import Formatter
    output = Formatter(events)
    if output:
        print "\n".join(output)
    else:
        print "No errors or warnings"
__history__ = """
$Log$
Revision 1.1 2004/02/03 17:33:14 rubys
Initial revision
Revision 1.3 2003/12/12 15:53:42 f8dy
renamed source directories
Revision 1.2 2003/12/11 16:32:08 f8dy
fixed id tags in header
Revision 1.1 2003/08/06 16:56:14 f8dy
combined pievalidtest and rssvalidtest, renamed rssdemo to demo
Revision 1.13 2003/07/16 19:47:15 rubys
Remove debug statement
Revision 1.12 2003/07/10 21:16:33 rubys
Get rssdemo back on its feet...
Revision 1.11 2002/10/20 04:47:21 f8dy
*** empty log message ***
Revision 1.10 2002/10/20 04:41:21 f8dy
*** empty log message ***
Revision 1.9 2002/10/20 04:36:09 f8dy
cleaned up for public distribution
Revision 1.8 2002/10/18 13:06:57 f8dy
added licensing information
"""
| Python |
#!/usr/bin/env python
from config import *
import cgi, sys, os, urlparse, sys, re
import cgitb
cgitb.enable()
if PYDIR not in sys.path:
sys.path.insert(0, PYDIR)
if WEBDIR not in sys.path:
sys.path.insert(0, WEBDIR)
if SRCDIR not in sys.path:
sys.path.insert(0, SRCDIR)
import feedvalidator
from feedvalidator.logging import FEEDTYPEDISPLAY, VALIDFEEDGRAPHIC
def applyTemplate(templateFile, params={}):
    """Load templates/<templateFile> from WEBDIR and return it with
    %-style substitutions applied from params.

    Fix: the template file is now closed even if the substitution raises
    (e.g. a KeyError/ValueError for a placeholder missing from params);
    previously the descriptor leaked on that path.
    """
    fsock = open(os.path.join(WEBDIR, 'templates', templateFile))
    try:
        data = fsock.read() % params
    finally:
        fsock.close()
    return data
def sanitizeURL(url):
    """Normalize a user-supplied URL: trim whitespace, force an http://
    scheme when none is present, and strip user:password@ credentials.
    """
    # Fix: strip whitespace *before* parsing. Previously the strip happened
    # after the scheme check, so a URL with leading whitespace was not
    # recognized as having a scheme and got an http:// prefix glued onto
    # the unstripped value.
    url = url.strip()
    scheme, domain, path, u1, u2, u3 = urlparse.urlparse(url)
    if scheme.lower() != 'http':
        url = 'http://%s' % url
    # strip user and password
    url = re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1' ,url)
    return url
def buildCodeListing(events, rawdata):
    """Render the raw feed as an HTML code listing, highlighting the
    lines that triggered validation events."""
    flagged = [e.params.get('line', 0) for e in events]
    codelines = []
    linenum = 1
    for rawline in rawdata.split('\n'):
        text = cgi.escape(rawline) or '&nbsp;'
        if linenum in flagged:
            linetype = "b"  # highlighted: an event points at this line
        else:
            linetype = "a"
        codelines.append(applyTemplate('code_listing_line.tmpl',
                                       {"line":text, "linenum":linenum, "linetype":linetype}))
        linenum += 1
    # 'url' is the module-level global set from the current request
    return applyTemplate('code_listing.tmpl',
                         {"codelisting":"".join(codelines), "url":url})
def postvalidate(url, events, rawdata, feedType, autofind=1):
    """returns dictionary including 'url', 'events', 'rawdata', 'output', 'specialCase', 'feedType'"""
    # filter based on compatibility level
    from feedvalidator import compatibility
    filterFunc = compatibility.AA # hardcoded for now
    events = filterFunc(events)
    specialCase = None
    from feedvalidator.formatter.text_html import Formatter
    formattedOutput = Formatter(events, rawdata)
    if formattedOutput:
        # check for special cases
        specialCase = compatibility.analyze(events, rawdata)
        # If the URL pointed at an HTML page, try feed autodiscovery once;
        # autofind=0 on the recursive call prevents looping.
        if (specialCase == 'html') and autofind:
            try:
                try:
                    import feedfinder
                    rssurls = feedfinder.getFeeds(url)
                except:
                    # feedfinder unavailable or failed: fall back to the URL itself
                    rssurls = [url]
                if rssurls:
                    url = rssurls[0]
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    return postvalidate(url, events, rawdata, feedType, autofind=0)
            except:
                # best-effort: any autodiscovery failure falls through to
                # the original validation results
                pass
    return {"url":url, "events":events, "rawdata":rawdata, "output":formattedOutput, "specialCase":specialCase, "feedType":feedType}
# ---- request dispatch (module level: this file runs as a CGI script) ----
fs = cgi.FieldStorage()
url = fs.getvalue("url") or ''
manual = fs.getvalue("manual") or 0
rawdata = fs.getvalue("rawdata") or ''
# Cap pasted data and normalize all line endings to '\n'.
rawdata = rawdata[:feedvalidator.MAXDATALENGTH].replace('\r\n', '\n').replace('\r', '\n')
if (os.environ['REQUEST_METHOD'].lower() == 'post') and (not rawdata):
    # SOAP
    try:
        # validate
        params = feedvalidator.validateStream(sys.stdin)
        events = params['loggedEvents']
        feedType = params['feedType']
        # filter based on compatibility level
        from feedvalidator import compatibility
        filterFunc = compatibility.AA # hardcoded for now
        events = filterFunc(events)
        # format as xml
        from feedvalidator.formatter.text_xml import Formatter
        output = Formatter(events)
        # output
        if output:
            body = applyTemplate('soap.tmpl', {'body':"\n".join(output)})
        else:
            body = applyTemplate('soap.tmpl' , {'body':''})
        print 'Content-type: text/xml\r\n\r\n' + body
    except:
        # any failure becomes a SOAP fault with the traceback XML-encoded
        import traceback
        tb = ''.join(apply(traceback.format_exception, sys.exc_info()))
        from feedvalidator.formatter.text_xml import xmlEncode
        print 'Status: 500 Internal Error\r\nContent-type: text/xml\r\n'
        print applyTemplate('fault.tmpl', {'code':sys.exc_info()[0],
            'string':sys.exc_info()[1], 'traceback':xmlEncode(tb)})
else:
    # interactive HTML form / results page
    print 'Content-type: text/html'
    print
    if url or rawdata:
        # validate
        goon = 0
        if rawdata:
            # validate raw data (from text form)
            try:
                params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
                events = params['loggedEvents']
                feedType = params['feedType']
                goon = 1
            except:
                # NOTE(review): 'rawdata' here is filled from the *url*
                # variable, not the pasted data — looks like it should be
                # cgi.escape(rawdata); confirm.
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        else:
            url = sanitizeURL(url)
            try:
                params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                events = params['loggedEvents']
                rawdata = params['rawdata']
                feedType = params['feedType']
                goon = 1
            except:
                print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
                print applyTemplate('error.tmpl')
        if goon:
            # post-validate (will do RSS autodiscovery if needed)
            validationData = postvalidate(url, events, rawdata, feedType)
            # write output header
            url = validationData['url']
            feedType = validationData['feedType']
            rawdata = validationData['rawdata']
            print applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % cgi.escape(url)})
            if manual:
                print applyTemplate('manual.tmpl', {'rawdata':cgi.escape(rawdata)})
            else:
                print applyTemplate('index.tmpl', {'value':cgi.escape(url)})
            output = validationData.get('output', None)
            if output:
                # print special case, if any
                specialCase = validationData.get('specialCase', None)
                if specialCase:
                    print applyTemplate('%s.tmpl' % specialCase)
                # print validator output
                print applyTemplate('invalid.tmpl')
                for o in output:
                    print o
                print applyTemplate('invalid_footer.tmpl')
                # print code listing
                print buildCodeListing(validationData['events'], validationData['rawdata'])
            else:
                # valid
                print applyTemplate('valid.tmpl', {"url":cgi.escape(url), "feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType]})
    else:
        # nothing to validate, just write basic form
        print applyTemplate('header.tmpl', {'title':'Feed Validator for Atom and RSS'})
        if manual:
            print applyTemplate('manual.tmpl', {'rawdata':''})
        else:
            print applyTemplate('index.tmpl', {'value':'http://'})
        print applyTemplate('special.tmpl', {})
    # common page furniture for all HTML responses
    print applyTemplate('navbar.tmpl')
    print applyTemplate('footer.tmpl')
| Python |
# Installation paths for the feed validator web frontend; edit per deployment.
# This following value is primarily used for setting up the other values...
HOMEDIR = r'/home/rubys'
# This is where local python libraries are installed. This may be useful
# for locating a locally installed libxml2 library, for example...
PYDIR = HOMEDIR + r'/lib/python/'
# This is where the CGI itself is... other supporting scripts (like
# feedfinder) may be placed here.
WEBDIR = HOMEDIR + r'/public_html/feedvalidator'
# This is where the feedvalidator code lives...
SRCDIR = WEBDIR + r'/src'
| Python |
#!/usr/bin/env python
# Thin CGI wrapper around the index() handler from index.py.
from config import *
import cgi, cgitb, sys
cgitb.enable()
import codecs
ENCODING='UTF-8'
# Re-encode everything written to stdout as UTF-8.
sys.stdout = codecs.getwriter(ENCODING)(sys.stdout)
if SRCDIR not in sys.path:
    sys.path.insert(0, SRCDIR)
class request:
    # Minimal stand-in for a server request object; presumably index()
    # may replace content_type before the header is emitted below.
    content_type = "text/html"
from index import index
fs = cgi.FieldStorage()
req = request()
url = fs.getvalue('url') or ''
out = fs.getvalue('out') or 'xml'
result=index(req,url,out)
print "Content-type: %s\r\n\r\n%s" % (req.content_type, result)
| Python |
# Test modules to aggregate into a single suite; each must expose a
# module-level buildTestSuite() factory.
modules = [
    'testUri',
    'testXmlEncoding',
    'testXmlEncodingDecode',
    'testMediaTypes',
    'testHowtoNs',
    'validtest',
    'mkmsgs',
    ]
if __name__ == '__main__':
    import os, sys, unittest
    # Make src/, src/tests/ and docs-xml/ importable before loading the
    # test modules listed above.
    srcdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'src')
    testdir = os.path.join(srcdir,'tests')
    xdocsdir = os.path.join(os.path.dirname(srcdir),'docs-xml')
    sys.path.insert(0,srcdir)
    sys.path.insert(0,testdir)
    sys.path.insert(0,xdocsdir)
    suite = unittest.TestSuite()
    for module in modules:
        suite.addTest(__import__(module).buildTestSuite())
    unittest.TextTestRunner().run(suite)
| Python |
# Copyright (c) 2002, 2003, 2005 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id: fcgi.py 1828 2005-11-28 17:17:29Z asaddi $
"""
fcgi - a FastCGI/WSGI gateway.
For more information about FastCGI, see <http://www.fastcgi.com/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
from myapplication import app # Assume app is your WSGI application object
from fcgi import WSGIServer
WSGIServer(app).run()
See the documentation for WSGIServer/Server for more information.
On most platforms, fcgi will fallback to regular CGI behavior if run in a
non-FastCGI context. If you want to force CGI behavior, set the environment
variable FCGI_FORCE_CGI to "Y" or "y".
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision: 1828 $'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
try:
import thread
import threading
thread_available = True
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
thread_available = False
# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
    socket.SHUT_WR = 1
__all__ = ['WSGIServer']
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
# Begin-request flags.
FCGI_KEEP_CONN = 1
# Roles.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
# End-request protocol statuses.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
# Names used in FCGI_GET_VALUES queries.
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
# struct formats for the fixed-size record bodies.
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
    import time
    # Set non-zero to write debug output to a file.
    DEBUG = 0
    DEBUGLOG = '/tmp/fcgi.log'
    def _debug(level, msg):
        # Append msg to DEBUGLOG when DEBUG >= level; failures to write
        # the log are deliberately ignored.
        if DEBUG < level:
            return
        try:
            f = open(DEBUGLOG, 'a')
            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
            f.close()
        except:
            pass
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.
    """
    def __init__(self, conn):
        self._conn = conn
        # See Server.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
        # _buf holds already-merged data; _bufList holds chunks from
        # add_data() that have not yet been merged into _buf.
        self._buf = ''
        self._bufList = []
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.
        self._eof = False # True when server has sent EOF notification.
    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0
            assert self._avail >= 0
    def _waitForData(self):
        """Waits for more data to become available."""
        # Single-threaded case: pump the connection until it feeds this
        # stream via add_data(). Overridden in MultiplexedInputStream.
        self._conn.process_input()
    def read(self, n=-1):
        # Read up to n bytes (all remaining data if n < 0), blocking until
        # enough data has arrived or EOF is seen.
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += ''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r
    def readline(self, length=None):
        # Read one line (capped at length bytes, if given), blocking as needed.
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            # Unfortunately, we need to merge the buffer list early.
            if self._bufList:
                self._buf += ''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find('\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        if length is not None:
            if self._pos + length < newPos:
                newPos = self._pos + length
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r
    def readlines(self, sizehint=0):
        # Collect lines until EOF, or until at least sizehint bytes were read.
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: yield lines until EOF.
        r = self.readline()
        if not r:
            raise StopIteration
        return r
    def add_data(self, data):
        # Producer hook called by the Connection: an empty chunk marks EOF,
        # anything else is queued for later merging.
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class MultiplexedInputStream(InputStream):
    """
    A version of InputStream meant to be used with MultiplexedConnections.
    Assumes the MultiplexedConnection (the producer) and the Request
    (the consumer) are running in different threads.
    """
    def __init__(self, conn):
        super(MultiplexedInputStream, self).__init__(conn)
        # Arbitrates access to this InputStream (it's used simultaneously
        # by a Request and its owning Connection object).
        lock = threading.RLock()
        # Notifies Request thread that there is new data available.
        self._lock = threading.Condition(lock)
    def _waitForData(self):
        # Wait for notification from add_data().
        # NOTE: Condition.wait() requires the lock to be held; callers
        # reach here via read()/readline(), which acquire it below.
        self._lock.wait()
    def read(self, n=-1):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).read(n)
        finally:
            self._lock.release()
    def readline(self, length=None):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).readline(length)
        finally:
            self._lock.release()
    def add_data(self, data):
        # Producer side: append data (or mark EOF) and wake the consumer.
        self._lock.acquire()
        try:
            super(MultiplexedInputStream, self).add_data(data)
            self._lock.notify()
        finally:
            self._lock.release()
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). Unless constructed
    with buffered=True, every write()/writelines() immediately emits
    Records back to the server; buffering belongs at a higher level.
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type
        self._buffered = buffered
        self._bufList = []  # accumulates chunks when buffered is True
        self.dataWritten = False
        self.closed = False
    def _write(self, data):
        # Split data into Records no larger than the server's maxwrite
        # (minus the record-header overhead) and send each one.
        remaining = len(data)
        while remaining:
            chunk = min(remaining, self._req.server.maxwrite - FCGI_HEADER_LEN)
            rec = Record(self._type, self._req.requestId)
            rec.contentLength = chunk
            rec.contentData = data[:chunk]
            self._conn.writeRecord(rec)
            data = data[chunk:]
            remaining -= chunk
    def write(self, data):
        assert not self.closed
        if not data:
            return
        self.dataWritten = True
        if self._buffered:
            self._bufList.append(data)
        else:
            self._write(data)
    def writelines(self, lines):
        assert not self.closed
        for line in lines:
            self.write(line)
    def flush(self):
        # A no-op unless this stream is actually buffered.
        if not self._buffered:
            return
        pending = ''.join(self._bufList)
        self._bufList = []
        self._write(pending)
    # Though available, the following should NOT be called by WSGI apps.
    def close(self):
        """Sends end-of-stream notification, if necessary."""
        if self.closed or not self.dataWritten:
            return
        self.flush()
        rec = Record(self._type, self._req.requestId)
        self._conn.writeRecord(rec)
        self.closed = True
class TeeOutputStream(object):
    """
    Fan-out wrapper: duplicates every write across each of the underlying
    file-like objects.
    """
    def __init__(self, streamList):
        self._streamList = streamList
    def write(self, data):
        for stream in self._streamList:
            stream.write(data)
    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)
    def flush(self):
        for stream in self._streamList:
            stream.flush()
class StdoutWrapper(object):
    """
    Delegating wrapper around sys.stdout that records whether any
    non-empty data has actually been written.
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False
    def write(self, data):
        # Flag only non-empty writes, but always pass the call through.
        if data:
            self.dataWritten = True
        self._file.write(data)
    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)
    def __getattr__(self, name):
        # Everything else (flush, fileno, ...) delegates to the real file.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decode one FastCGI name/value pair from s starting at pos.

    Returns (new_pos, (name, value)). Each length is a single byte when
    below 128; otherwise the high bit of the first byte is set and the
    length is a four-byte big-endian integer with that bit masked off.
    """
    lengths = []
    for _ in (0, 1):  # name length, then value length
        if ord(s[pos]) & 128:
            lengths.append(struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff)
            pos += 4
        else:
            lengths.append(ord(s[pos]))
            pos += 1
    nameLength, valueLength = lengths
    name = s[pos:pos+nameLength]
    pos += nameLength
    value = s[pos:pos+valueLength]
    pos += valueLength
    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encode a FastCGI name/value pair and return the encoded string.

    Lengths below 128 are encoded as a single byte; longer ones as a
    four-byte big-endian integer with the high bit set.
    """
    def _encode_length(n):
        if n < 128:
            return chr(n)
        # high bit flags the long (four-byte) form
        return struct.pack('!L', n | 0x80000000)
    return _encode_length(len(name)) + _encode_length(len(value)) + name + value
class Record(object):
    """
    A FastCGI Record.
    Used for encoding/decoding records.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''
    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)
        Returns (data, bytes_received); a short count indicates EOF.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket with nothing ready: wait until readable.
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen
    _recvall = staticmethod(_recvall)
    def read(self, sock):
        """Read and decode a Record from a socket."""
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError
        if length < FCGI_HEADER_LEN:
            # Short read on the fixed-size header: peer went away.
            raise EOFError
        self.version, self.type, self.requestId, self.contentLength, \
            self.paddingLength = struct.unpack(FCGI_Header, header)
        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))
        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError
            if length < self.contentLength:
                raise EOFError
        if self.paddingLength:
            try:
                # Padding bytes are read and discarded.
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError
    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                if e[0] == errno.EPIPE:
                    return # Don't bother raising an exception. Just ignore.
                elif e[0] == errno.EAGAIN:
                    # Non-blocking socket not writable yet: wait.
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent
    _sendall = staticmethod(_sendall)
    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad content to a multiple of 8 bytes.
        self.paddingLength = -self.contentLength & 7
        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))
        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00'*self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.
    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn
        self.server = conn.server
        self.params = {}  # CGI-style environment, populated from FCGI_PARAMS
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
        self.data = inputStreamClass(conn)  # FCGI_DATA stream
    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except:
            # An unhandled handler exception is reported on stderr and,
            # if nothing was written yet, via the server's error page.
            traceback.print_exc(file=self.stderr)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)
            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
                             (protocolStatus, appStatus))
        self._flush()
        self._end(appStatus, protocolStatus)
    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # Delegate to the owning Connection to emit FCGI_END_REQUEST.
        self._conn.end_request(self, appStatus, protocolStatus)
    def _flush(self):
        self.stdout.close()
        self.stderr.close()
class CGIRequest(Request):
    """A normal CGI request disguised as a FastCGI request."""
    def __init__(self, server):
        # These are normally filled in by Connection.
        self.requestId = 1
        self.role = FCGI_RESPONDER
        self.flags = 0
        self.aborted = False
        self.server = server
        self.params = dict(os.environ)
        self.stdin = sys.stdin
        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
        self.stderr = sys.stderr
        self.data = StringIO.StringIO()  # no separate FCGI_DATA stream in plain CGI
    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # Plain CGI: the process exit code stands in for end-request.
        sys.exit(appStatus)
    def _flush(self):
        # Not buffered, do nothing.
        pass
class Connection(object):
"""
A Connection with the web server.
Each Connection is associated with a single socket (which is
connected to the web server) and is responsible for handling all
the FastCGI message processing for that socket.
"""
_multiplexed = False
_inputStreamClass = InputStream
    def __init__(self, sock, addr, server):
        self._sock = sock
        self._addr = addr  # peer address, kept for subclasses/debugging
        self.server = server
        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}
    def _cleanupSocket(self):
        """Close the Connection's socket."""
        try:
            self._sock.shutdown(socket.SHUT_WR)
        except:
            # NOTE(review): if shutdown fails (peer already gone?) we return
            # without calling close() — confirm the descriptor is reclaimed
            # elsewhere.
            return
        try:
            # Drain remaining inbound data so the peer sees a clean close;
            # recv() returning '' means EOF.
            while True:
                r, w, e = select.select([self._sock], [], [])
                if not r or not self._sock.recv(1024):
                    break
        except:
            pass
        self._sock.close()
def run(self):
"""Begin processing data from the socket."""
self._keepGoing = True
while self._keepGoing:
try:
self.process_input()
except EOFError:
break
except (select.error, socket.error), e:
if e[0] == errno.EBADF: # Socket was closed by Request.
break
raise
self._cleanupSocket()
def process_input(self):
"""Attempt to read a single Record from the socket and process it."""
# Currently, any children Request threads notify this Connection
# that it is no longer needed by closing the Connection's socket.
# We need to put a timeout on select, otherwise we might get
# stuck in it indefinitely... (I don't like this solution.)
while self._keepGoing:
try:
r, w, e = select.select([self._sock], [], [], 1.0)
except ValueError:
# Sigh. ValueError gets thrown sometimes when passing select
# a closed socket.
raise EOFError
if r: break
if not self._keepGoing:
return
rec = Record()
rec.read(self._sock)
if rec.type == FCGI_GET_VALUES:
self._do_get_values(rec)
elif rec.type == FCGI_BEGIN_REQUEST:
self._do_begin_request(rec)
elif rec.type == FCGI_ABORT_REQUEST:
self._do_abort_request(rec)
elif rec.type == FCGI_PARAMS:
self._do_params(rec)
elif rec.type == FCGI_STDIN:
self._do_stdin(rec)
elif rec.type == FCGI_DATA:
self._do_data(rec)
elif rec.requestId == FCGI_NULL_REQUEST_ID:
self._do_unknown_type(rec)
else:
# Need to complain about this.
pass
def writeRecord(self, rec):
"""
Write a Record to the socket.
"""
rec.write(self._sock)
def end_request(self, req, appStatus=0L,
protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
"""
End a Request.
Called by Request objects. An FCGI_END_REQUEST Record is
sent to the web server. If the web server no longer requires
the connection, the socket is closed, thereby ending this
Connection (run() returns).
"""
rec = Record(FCGI_END_REQUEST, req.requestId)
rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
protocolStatus)
rec.contentLength = FCGI_EndRequestBody_LEN
self.writeRecord(rec)
if remove:
del self._requests[req.requestId]
if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)
if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
self._cleanupSocket()
self._keepGoing = False
def _do_get_values(self, inrec):
"""Handle an FCGI_GET_VALUES request from the web server."""
outrec = Record(FCGI_GET_VALUES_RESULT)
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
cap = self.server.capability.get(name)
if cap is not None:
outrec.contentData += encode_pair(name, str(cap))
outrec.contentLength = len(outrec.contentData)
self.writeRecord(outrec)
def _do_begin_request(self, inrec):
"""Handle an FCGI_BEGIN_REQUEST from the web server."""
role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
req = self.server.request_class(self, self._inputStreamClass)
req.requestId, req.role, req.flags = inrec.requestId, role, flags
req.aborted = False
if not self._multiplexed and self._requests:
# Can't multiplex requests.
self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
else:
self._requests[inrec.requestId] = req
def _do_abort_request(self, inrec):
"""
Handle an FCGI_ABORT_REQUEST from the web server.
We just mark a flag in the associated Request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
req.aborted = True
def _start_request(self, req):
"""Run the request."""
# Not multiplexed, so run it inline.
req.run()
def _do_params(self, inrec):
"""
Handle an FCGI_PARAMS Record.
If the last FCGI_PARAMS Record is received, start the request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
if inrec.contentLength:
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
req.params[name] = value
else:
self._start_request(req)
def _do_stdin(self, inrec):
"""Handle the FCGI_STDIN stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.stdin.add_data(inrec.contentData)
def _do_data(self, inrec):
"""Handle the FCGI_DATA stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.data.add_data(inrec.contentData)
def _do_unknown_type(self, inrec):
"""Handle an unknown request type. Respond accordingly."""
outrec = Record(FCGI_UNKNOWN_TYPE)
outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
outrec.contentLength = FCGI_UnknownTypeBody_LEN
self.writeRecord(rec)
class MultiplexedConnection(Connection):
    """
    A version of Connection capable of handling multiple requests
    simultaneously.

    Each request is run in its own thread; the request table and the
    outgoing socket are guarded by a single Condition lock.
    """
    _multiplexed = True
    _inputStreamClass = MultiplexedInputStream

    def __init__(self, sock, addr, server):
        super(MultiplexedConnection, self).__init__(sock, addr, server)

        # Used to arbitrate access to self._requests.
        lock = threading.RLock()

        # Notification is posted everytime a request completes, allowing us
        # to quit cleanly.
        self._lock = threading.Condition(lock)

    def _cleanupSocket(self):
        # Wait for any outstanding requests before closing the socket.
        self._lock.acquire()
        while self._requests:
            self._lock.wait()
        self._lock.release()

        super(MultiplexedConnection, self)._cleanupSocket()

    def writeRecord(self, rec):
        # Must use locking to prevent intermingling of Records from different
        # threads.
        self._lock.acquire()
        try:
            # Probably faster than calling super. ;)
            rec.write(self._sock)
        finally:
            self._lock.release()

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        # Serialize the bookkeeping, then wake _cleanupSocket's wait().
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self).end_request(req, appStatus,
                                                           protocolStatus,
                                                           remove)
            self._lock.notify()
        finally:
            self._lock.release()

    def _do_begin_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_begin_request(inrec)
        finally:
            self._lock.release()

    def _do_abort_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_abort_request(inrec)
        finally:
            self._lock.release()

    def _start_request(self, req):
        # Unlike the base class, run each request in its own thread.
        thread.start_new_thread(req.run, ())

    def _do_params(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_params(inrec)
        finally:
            self._lock.release()

    def _do_stdin(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_stdin(inrec)
        finally:
            self._lock.release()

    def _do_data(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_data(inrec)
        finally:
            self._lock.release()
class Server(object):
    """
    The FastCGI server.

    Waits for connections from the web server, processing each
    request.

    If run in a normal CGI context, it will instead instantiate a
    CGIRequest and run the handler through there.
    """
    request_class = Request
    cgirequest_class = CGIRequest

    # Limits the size of the InputStream's string buffer to this size + the
    # server's maximum Record size. Since the InputStream is not seekable,
    # we throw away already-read data once this certain amount has been read.
    inputStreamShrinkThreshold = 102400 - 8192

    def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
                 multiplexed=False):
        """
        handler, if present, must reference a function or method that
        takes one argument: a Request object. If handler is not
        specified at creation time, Server *must* be subclassed.
        (The handler method below is abstract.)

        maxwrite is the maximum number of bytes (per Record) to write
        to the server. I've noticed mod_fastcgi has a relatively small
        receive buffer (8K or so).

        bindAddress, if present, must either be a string or a 2-tuple. If
        present, run() will open its own listening socket. You would use
        this if you wanted to run your application as an 'external' FastCGI
        app. (i.e. the webserver would no longer be responsible for starting
        your app) If a string, it will be interpreted as a filename and a UNIX
        socket will be opened. If a tuple, the first element, a string,
        is the interface name/IP to bind to, and the second element (an int)
        is the port number.

        Set multiplexed to True if you want to handle multiple requests
        per connection. Some FastCGI backends (namely mod_fastcgi) don't
        multiplex requests at all, so by default this is off (which saves
        on thread creation/locking overhead). If threads aren't available,
        this keyword is ignored; it's not possible to multiplex requests
        at all.
        """
        if handler is not None:
            self.handler = handler
        self.maxwrite = maxwrite
        if thread_available:
            try:
                import resource
                # Attempt to glean the maximum number of connections
                # from the OS.
                # NOTE(review): getrlimit may return RLIM_INFINITY (-1)
                # here; the value is only advertised to the web server
                # via FCGI_GET_VALUES -- confirm before relying on it.
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            except ImportError:
                maxConns = 100 # Just some made up number.
            maxReqs = maxConns
            if multiplexed:
                self._connectionClass = MultiplexedConnection
                maxReqs *= 5 # Another made up number.
            else:
                self._connectionClass = Connection
            self.capability = {
                FCGI_MAX_CONNS: maxConns,
                FCGI_MAX_REQS: maxReqs,
                FCGI_MPXS_CONNS: multiplexed and 1 or 0
                }
        else:
            self._connectionClass = Connection
            self.capability = {
                # If threads aren't available, these are pretty much correct.
                FCGI_MAX_CONNS: 1,
                FCGI_MAX_REQS: 1,
                FCGI_MPXS_CONNS: 0
                }
        self._bindAddress = bindAddress

    def _setupSocket(self):
        """Return the listening socket: either the one inherited from the
        web server (fd 0, classic FastCGI) or a freshly bound one."""
        if self._bindAddress is None: # Run as a normal FastCGI?
            isFCGI = True
            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                                 socket.SOCK_STREAM)
            # A FastCGI listen socket is unconnected; a CGI stdin is not a
            # socket at all. Probe with getpeername() to tell them apart.
            try:
                sock.getpeername()
            except socket.error, e:
                if e[0] == errno.ENOTSOCK:
                    # Not a socket, assume CGI context.
                    isFCGI = False
                elif e[0] != errno.ENOTCONN:
                    raise
            # FastCGI/CGI discrimination is broken on Mac OS X.
            # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
            # if you want to run your app as a simple CGI. (You can do
            # this with Apache's mod_env [not loaded by default in OS X
            # client, ha ha] and the SetEnv directive.)
            if not isFCGI or \
               os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
                # Plain CGI: run the one request and exit the process.
                req = self.cgirequest_class(self)
                req.run()
                sys.exit(0)
        else:
            # Run as a server
            if type(self._bindAddress) in [ str, unicode]:
                # Unix socket
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                try:
                    # Remove a stale socket file from a previous run.
                    os.unlink(self._bindAddress)
                except OSError:
                    pass
            else:
                # INET socket
                assert type(self._bindAddress) is tuple
                assert len(self._bindAddress) == 2
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(self._bindAddress)
            sock.listen(socket.SOMAXCONN)
        return sock

    def _cleanupSocket(self, sock):
        """Closes the main socket."""
        sock.close()

    def _installSignalHandlers(self):
        # Remember the current handlers so run() can restore them on exit.
        self._oldSIGs = [(x,signal.getsignal(x)) for x in
                         (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
        signal.signal(signal.SIGHUP, self._hupHandler)
        signal.signal(signal.SIGINT, self._intHandler)
        signal.signal(signal.SIGTERM, self._intHandler)

    def _restoreSignalHandlers(self):
        for signum,handler in self._oldSIGs:
            signal.signal(signum, handler)

    def _hupHandler(self, signum, frame):
        # SIGHUP: stop the loop and report a reload request to the caller.
        self._hupReceived = True
        self._keepGoing = False

    def _intHandler(self, signum, frame):
        # SIGINT/SIGTERM: just stop the loop.
        self._keepGoing = False

    def run(self, timeout=1.0):
        """
        The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
        SIGHUP was received, False otherwise.
        """
        # Optional access control: FCGI_WEB_SERVER_ADDRS is a
        # comma-separated list of client IPs allowed to connect.
        web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
        if web_server_addrs is not None:
            web_server_addrs = map(lambda x: x.strip(),
                                   web_server_addrs.split(','))

        sock = self._setupSocket()

        self._keepGoing = True
        self._hupReceived = False

        # Install signal handlers.
        self._installSignalHandlers()

        while self._keepGoing:
            try:
                r, w, e = select.select([sock], [], [], timeout)
            except select.error, e:
                if e[0] == errno.EINTR:
                    continue
                raise

            if r:
                try:
                    clientSock, addr = sock.accept()
                except socket.error, e:
                    if e[0] in (errno.EINTR, errno.EAGAIN):
                        continue
                    raise

                # Reject peers not in the allow list (UNIX sockets have a
                # non-2-tuple addr and are rejected whenever a list is set).
                if web_server_addrs and \
                   (len(addr) != 2 or addr[0] not in web_server_addrs):
                    clientSock.close()
                    continue

                # Instantiate a new Connection and begin processing FastCGI
                # messages (either in a new thread or this thread).
                conn = self._connectionClass(clientSock, addr, self)
                thread.start_new_thread(conn.run, ())

            self._mainloopPeriodic()

        # Restore signal handlers.
        self._restoreSignalHandlers()

        self._cleanupSocket(sock)

        return self._hupReceived

    def _mainloopPeriodic(self):
        """
        Called with just about each iteration of the main loop. Meant to
        be overridden.
        """
        pass

    def _exit(self, reload=False):
        """
        Protected convenience method for subclasses to force an exit. Not
        really thread-safe, which is why it isn't public.
        """
        if self._keepGoing:
            self._keepGoing = False
            self._hupReceived = reload

    def handler(self, req):
        """
        Default handler, which just raises an exception. Unless a handler
        is passed at initialization time, this must be implemented by
        a subclass.
        """
        raise NotImplementedError, self.__class__.__name__ + '.handler'

    def error(self, req):
        """
        Called by Request if an exception occurs within the handler. May and
        should be overridden.
        """
        import cgitb
        req.stdout.write('Content-Type: text/html\r\n\r\n' +
                         cgitb.html(sys.exc_info()))
class WSGIServer(Server):
    """
    FastCGI server that supports the Web Server Gateway Interface. See
    <http://www.python.org/peps/pep-0333.html>.
    """
    def __init__(self, application, environ=None, multithreaded=True, **kw):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        Set multithreaded to False if your application is not MT-safe.
        """
        if kw.has_key('handler'):
            del kw['handler'] # Doesn't make sense to let this through
        super(WSGIServer, self).__init__(**kw)

        if environ is None:
            environ = {}

        self.application = application
        self.environ = environ
        self.multithreaded = multithreaded

        # Used to force single-threadedness
        self._app_lock = thread.allocate_lock()

    def handler(self, req):
        """Special handler for WSGI."""
        if req.role != FCGI_RESPONDER:
            return FCGI_UNKNOWN_ROLE, 0

        # Mostly taken from example CGI gateway.
        environ = req.params
        # Overlay caller-supplied variables on top of the FastCGI params.
        environ.update(self.environ)

        environ['wsgi.version'] = (1,0)
        environ['wsgi.input'] = req.stdin
        if self._bindAddress is None:
            stderr = req.stderr
        else:
            # External server: also copy app errors to our own stderr,
            # since the web server isn't capturing them.
            stderr = TeeOutputStream((sys.stderr, req.stderr))
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
                                      thread_available and self.multithreaded
        # Rationale for the following: If started by the web server
        # (self._bindAddress is None) in either FastCGI or CGI mode, the
        # possibility of being spawned multiple times simultaneously is quite
        # real. And, if started as an external server, multiple copies may be
        # spawned for load-balancing/redundancy. (Though I don't think
        # mod_fastcgi supports this?)
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = isinstance(req, CGIRequest)

        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'

        self._sanitizeEnv(environ)

        # Shared state between the write/start_response closures below,
        # per the PEP 333 example gateway: lists so the closures can
        # mutate them in place.
        headers_set = []
        headers_sent = []
        result = None

        def write(data):
            assert type(data) in [str, unicode], 'write() argument must be string'
            assert headers_set, 'write() before start_response()'

            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header,value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                # If the app's iterable has exactly one item, this first
                # write is the whole body, so we can set Content-Length.
                if not found and result is not None:
                    try:
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                req.stdout.write(s)

            req.stdout.write(data)
            req.stdout.flush()

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'

            assert type(status) in [str, unicode], 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if __debug__:
                for name,val in response_headers:
                    assert type(name) in [str, unicode], 'Header names must be strings'
                    assert type(val) in [str, unicode], 'Header values must be strings'

            headers_set[:] = [status, response_headers]
            return write

        if not self.multithreaded:
            self._app_lock.acquire()
        try:
            result = self.application(environ, start_response)
            try:
                for data in result:
                    if data:
                        write(data)
                if not headers_sent:
                    write('') # in case body was empty
            finally:
                # Per PEP 333: always close the app's iterable if it has
                # a close() method, even on error.
                if hasattr(result, 'close'):
                    result.close()
        finally:
            if not self.multithreaded:
                self._app_lock.release()

        return FCGI_REQUEST_COMPLETE, 0

    def _sanitizeEnv(self, environ):
        """Ensure certain values are present, if required by WSGI."""
        if not environ.has_key('SCRIPT_NAME'):
            environ['SCRIPT_NAME'] = ''
        if not environ.has_key('PATH_INFO'):
            environ['PATH_INFO'] = ''

        # If any of these are missing, it probably signifies a broken
        # server...
        for name,default in [('REQUEST_METHOD', 'GET'),
                             ('SERVER_NAME', 'localhost'),
                             ('SERVER_PORT', '80'),
                             ('SERVER_PROTOCOL', 'HTTP/1.0')]:
            if not environ.has_key(name):
                environ['wsgi.errors'].write('%s: missing FastCGI param %s '
                                             'required by WSGI!\n' %
                                             (self.__class__.__name__, name))
                environ[name] = default
if __name__ == '__main__':
    # Demo entry point: serve a trivial WSGI app that echoes the environ
    # and any submitted form data.
    def test_app(environ, start_response):
        """Probably not the most efficient example."""
        import cgi
        start_response('200 OK', [('Content-Type', 'text/html')])
        yield '<html><head><title>Hello World!</title></head>\n' \
              '<body>\n' \
              '<p>Hello World!</p>\n' \
              '<table border="1">'
        names = environ.keys()
        names.sort()
        for name in names:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                name, cgi.escape(`environ[name]`))

        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
                                keep_blank_values=1)
        if form.list:
            yield '<tr><th colspan="2">Form data</th></tr>'

        for field in form.list:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                field.name, field.value)

        yield '</table>\n' \
              '</body></html>\n'

    # Run as an external FastCGI server on localhost:3000.
    WSGIServer(test_app, bindAddress=("127.0.0.1", 3000)).run()
| Python |
#!/usr/bin/env python
from config import *
import cgi, sys, os, urlparse, sys, re, urllib
import cgitb
cgitb.enable()
import codecs
ENCODING='UTF-8'
sys.stdout = codecs.getwriter(ENCODING)(sys.stdout)
# Used for CGI parameters
decUTF8 = codecs.getdecoder('utf-8')
decW1252 = codecs.getdecoder('windows-1252')
if PYDIR not in sys.path:
sys.path.insert(0, PYDIR)
if WEBDIR not in sys.path:
sys.path.insert(0, WEBDIR)
if SRCDIR not in sys.path:
sys.path.insert(0, SRCDIR)
import feedvalidator
from feedvalidator.logging import FEEDTYPEDISPLAY, VALIDFEEDGRAPHIC
from feedvalidator.logging import Info, Warning, Error, ValidationFailure
from feedvalidator.logging import TYPE_ATOM_ENTRY, TYPE_OPENSEARCH, TYPE_XRD
from feedvalidator.logging import TYPE_APP_SERVICE, TYPE_APP_CATEGORIES
def applyTemplate(templateFile, params=None):
    """Render a template from WEBDIR/templates with %-substitution of
    params and return the result encoded as UTF-8.

    Fixes over the original:
    - `params={}` was a shared mutable default; every no-params call
      mutated the same dict. Now defaults to a fresh dict per call.
      (When a dict IS passed, the CSSURL assignment still mutates it,
      exactly as before.)
    - The template file handle leaked if read() or the %-substitution
      raised; it is now closed via try/finally. (try/finally rather
      than `with` to keep old-Python compatibility.)
    """
    if params is None:
        params = {}
    params['CSSURL'] = CSSURL
    fsock = open(os.path.join(WEBDIR, 'templates', templateFile))
    try:
        data = fsock.read() % params
    finally:
        fsock.close()
    return data.encode('utf-8')
def sanitizeURL(url):
    """Normalize a user-supplied URL: strip whitespace, unwrap feed:
    URIs, default the scheme to http, and drop any userinfo.

    BUGFIX: whitespace is now stripped *before* the scheme checks. The
    original only stripped at the very end, so ' example.com' failed
    the scheme test and became 'http:// example.com'.
    """
    url = url.strip()
    # Allow feed: URIs, as described by draft-obasanjo-feed-URI-scheme-02
    if url.lower().startswith('feed:'):
        url = url[5:]
        if url.startswith('//'):
            url = 'http:' + url
    if not url.split(':')[0].lower() in ['http','https']:
        url = 'http://%s' % url
    # strip user and password
    url = re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1' ,url)
    return url
def escapeURL(url):
    """Make a URL safe for embedding in HTML template output.

    Each urlparse component is unquoted then re-quoted with a
    per-component set of safe characters, the reassembled URL is
    HTML-escaped, and finally an idna decode is attempted so punycode
    hostnames display readably.
    """
    parts = list(urlparse.urlparse(url))
    # Safe chars per component: scheme, netloc, path, params, query, fragment.
    safe = ['/', '/:@', '/', '/', '/?&=;', '/']
    for i in range(0,len(parts)):
        parts[i] = urllib.quote(urllib.unquote(parts[i]),safe[i])
    url = cgi.escape(urlparse.urlunparse(parts))
    try:
        # NOTE(review): decoding the *whole* escaped string as idna is
        # best-effort; any failure falls back to the escaped URL.
        return url.decode('idna')
    except:
        return url
import feedvalidator.formatter.text_html
def buildCodeListing(events, rawdata, url):
    """Render the raw feed source as an HTML code listing, flagging the
    lines on which validation events were reported."""
    flagged = [e.params.get('line', 0) for e in events]
    rendered = []
    linenum = 0
    for rawline in rawdata.split('\n'):
        linenum += 1
        markedup = feedvalidator.formatter.text_html.escapeAndMark(rawline)
        if not markedup:
            markedup = ' '
        # Line type selects the template's normal vs. highlighted style.
        if linenum in flagged:
            linetype = "b"
        else:
            linetype = "a"
        rendered.append(applyTemplate('code_listing_line.tmpl',
            {"line":markedup, "linenum":linenum,
             "linetype":linetype}).decode('utf-8'))
    return applyTemplate('code_listing.tmpl',
        {"codelisting":"".join(rendered), "url":escapeURL(url)})
def yieldEventList(output):
    """Yield the formatted error and warning sections for a Formatter.

    When both errors and warnings exist, the error list is closed and an
    'and N warning(s)' joiner plus a fresh list header is emitted before
    the warnings.
    """
    errors, warnings = output.getErrors(), output.getWarnings()
    yield output.header()
    for o in output.getErrors():
        yield o.encode('utf-8')
    if errors and warnings:
        # Close the error list, announce the warning count, reopen.
        yield output.footer()
        if len(warnings) == 1:
            yield applyTemplate('andwarn1.tmpl')
        else:
            yield applyTemplate('andwarn2.tmpl')
        yield output.header()
    for o in output.getWarnings():
        yield o.encode('utf-8')
    yield output.footer()
from feedvalidator.formatter.text_html import Formatter
def postvalidate(url, events, rawdata, feedType, autofind=1):
    """returns dictionary including 'url', 'events', 'rawdata', 'output', 'specialCase', 'feedType'

    If the validated document turned out to be HTML and autofind is on,
    attempt feed autodiscovery on it and re-validate the discovered feed
    (recursing once with autofind=0 to avoid loops).
    """
    # filter based on compatibility level
    from feedvalidator import compatibility
    filterFunc = compatibility.AA # hardcoded for now
    events = filterFunc(events)
    specialCase = None
    formattedOutput = Formatter(events, rawdata)
    if formattedOutput:
        # check for special cases
        specialCase = compatibility.analyze(events, rawdata)
        if (specialCase == 'html') and autofind:
            # Deliberately best-effort: any failure in autodiscovery or
            # re-validation falls through to reporting the HTML result.
            try:
                try:
                    import feedfinder
                    rssurls = feedfinder.getLinks(rawdata,url)
                except:
                    rssurls = [url]
                if rssurls:
                    url = rssurls[0]
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    # Recurse once on the discovered feed.
                    return postvalidate(url, events, rawdata, feedType, autofind=0)
            except:
                pass
    return {"url":url, "events":events, "rawdata":rawdata, "output":formattedOutput, "specialCase":specialCase, "feedType":feedType}
def checker_app(environ, start_response):
    """WSGI application for the feed validator.

    Reads 'url', 'rawdata', 'manual' and 'output' from the query string
    or an urlencoded POST body. output=soap12 produces a SOAP response;
    otherwise an HTML results page (or the blank entry form) is yielded.
    """
    method = environ['REQUEST_METHOD'].lower()
    contentType = environ.get('CONTENT_TYPE', None)
    output_option = ''
    if (method == 'get') or (contentType and cgi.parse_header(contentType)[0].lower() == 'application/x-www-form-urlencoded'):
        fs = cgi.FieldStorage(fp=environ.get('wsgi.input',None), environ=environ)
        url = fs.getvalue("url") or ''
        # Best-effort IDNA-encode international hostnames.
        try:
            if url: url = url.decode('utf-8').encode('idna')
        except:
            pass
        manual = fs.getvalue("manual") or 0
        rawdata = fs.getvalue("rawdata") or ''
        output_option = fs.getvalue("output") or ''
        # XXX Should use 'charset'
        # Decode submitted data as UTF-8, falling back to windows-1252.
        try:
            rawdata = decUTF8(rawdata)[0]
        except UnicodeError:
            rawdata = decW1252(rawdata)[0]
        # Cap size and normalize line endings.
        rawdata = rawdata[:feedvalidator.MAXDATALENGTH].replace('\r\n', '\n').replace('\r', '\n')
    else:
        # Non-form POST (e.g. a raw SOAP/XML body): read from stdin later.
        url = None
        manual = None
        rawdata = None

    if (output_option == "soap12"):
        # SOAP
        try:
            if ((method == 'post') and (not rawdata)):
                params = feedvalidator.validateStream(sys.stdin, contentType=contentType)
            elif rawdata :
                params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
            elif url:
                url = sanitizeURL(url)
                params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
            events = params['loggedEvents']
            feedType = params['feedType']

            # filter based on compatibility level
            from feedvalidator import compatibility
            filterFunc = compatibility.AA # hardcoded for now
            events = filterFunc(events)

            # Bucket events by severity for the SOAP template.
            events_error = list()
            events_warn = list()
            events_info = list()

            # format as xml
            from feedvalidator.formatter.text_xml import Formatter as xmlformat
            output = xmlformat(events)
            for event in events:
                if isinstance(event,Error): events_error.append(output.format(event))
                if isinstance(event,Warning): events_warn.append(output.format(event))
                if isinstance(event,Info): events_info.append(output.format(event))
            if len(events_error) > 0:
                validation_bool = "false"
            else:
                validation_bool = "true"
            from datetime import datetime
            right_now = datetime.now()
            validationtime = str( right_now.isoformat())
            body = applyTemplate('soap.tmpl', {
                'errorlist':"\n".join( events_error), 'errorcount': str(len(events_error)),
                'warninglist':"\n".join( events_warn), 'warningcount': str(len(events_warn)),
                'infolist':"\n".join( events_info), 'infocount': str(len(events_info)),
                'home_url': HOMEURL, 'url': url, 'date_time': validationtime, 'validation_bool': validation_bool
                })
            start_response('200 OK', [('Content-type', 'application/soap+xml; charset=' + ENCODING)])
            yield body
        except:
            # Any failure becomes a SOAP fault with the traceback embedded.
            import traceback
            tb = ''.join(apply(traceback.format_exception, sys.exc_info()))
            from feedvalidator.formatter.text_xml import xmlEncode
            start_response('500 Internal Error', [('Content-type', 'text/xml; charset=' + ENCODING)])
            yield applyTemplate('fault.tmpl', {'code':sys.exc_info()[0],
                'string':sys.exc_info()[1], 'traceback':xmlEncode(tb)})
    else:
        # HTML output path.
        start_response('200 OK', [('Content-type', 'text/html; charset=' + ENCODING)])
        if url or rawdata:
            # validate
            goon = 0
            if rawdata:
                # validate raw data (from text form)
                try:
                    params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
                    events = params['loggedEvents']
                    feedType = params['feedType']
                    goon = 1
                except ValidationFailure, vfv:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    # NOTE(review): the form's submitted rawdata is not
                    # echoed back here -- the escaped (empty) url is passed
                    # as 'rawdata'; confirm against manual.tmpl.
                    yield applyTemplate('manual.tmpl', {'rawdata':escapeURL(url)})
                    output = Formatter([vfv.event], None)
                    for item in yieldEventList(output):
                        yield item
                    yield applyTemplate('error.tmpl')
                except:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('manual.tmpl', {'rawdata':escapeURL(url)})
                    yield applyTemplate('error.tmpl')
            else:
                # validate by URL
                url = sanitizeURL(url)
                try:
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    goon = 1
                except ValidationFailure, vfv:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                    output = Formatter([vfv.event], None)
                    for item in yieldEventList(output):
                        yield item
                    yield applyTemplate('error.tmpl')
                except:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                    yield applyTemplate('error.tmpl')
            if goon:
                # post-validate (will do RSS autodiscovery if needed)
                validationData = postvalidate(url, events, rawdata, feedType)

                # write output header
                url = validationData['url']
                feedType = validationData['feedType']
                rawdata = validationData['rawdata']
                htmlUrl = escapeURL(urllib.quote(url))
                try:
                    htmlUrl = htmlUrl.encode('idna')
                except:
                    pass
                # Human-readable name of the kind of document validated.
                docType = 'feed'
                if feedType == TYPE_ATOM_ENTRY: docType = 'entry'
                if feedType == TYPE_XRD: docType = 'document'
                if feedType == TYPE_APP_CATEGORIES: docType = 'Document'
                if feedType == TYPE_APP_SERVICE: docType = 'Document'
                if feedType == TYPE_OPENSEARCH: docType = 'description document'
                yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                if manual:
                    yield applyTemplate('manual.tmpl', {'rawdata':cgi.escape(rawdata)})
                else:
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                output = validationData.get('output', None)

                # print special case, if any
                specialCase = validationData.get('specialCase', None)
                if specialCase:
                    yield applyTemplate('%s.tmpl' % specialCase)
                msc = output.mostSeriousClass()

                # Explain the overall verdict
                if msc == Error:
                    from feedvalidator.logging import ObsoleteNamespace
                    if len(output.getErrors())==1 and \
                       isinstance(output.data[0],ObsoleteNamespace):
                        yield applyTemplate('notsupported.tmpl')
                    elif specialCase != 'html':
                        yield applyTemplate('invalid.tmpl')
                else:
                    yield applyTemplate('congrats.tmpl', {"feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType], "docType":docType})
                if msc == Warning:
                    yield applyTemplate('warning.tmpl')
                elif msc == Info:
                    yield applyTemplate('info.tmpl')

                # Print any issues, whether or not the overall feed is valid
                if output:
                    if specialCase != 'html':
                        for item in yieldEventList(output):
                            yield item
                    # print code listing
                    yield buildCodeListing(validationData['events'], validationData['rawdata'], url)

                # As long as there were no errors, show that the feed is valid
                if msc != Error:
                    # valid
                    yield applyTemplate('valid.tmpl', {"url":htmlUrl, "srcUrl":htmlUrl, "feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType], "HOMEURL":HOMEURL, "docType":docType})
        else:
            # nothing to validate, just write basic form
            yield applyTemplate('header.tmpl', {'title':'Feed Validator for Atom and RSS'})
            if manual:
                yield applyTemplate('manual.tmpl', {'rawdata':''})
            else:
                yield applyTemplate('index.tmpl', {'value':'http://'})
            yield applyTemplate('special.tmpl', {})
        yield applyTemplate('navbar.tmpl')
        yield applyTemplate('footer.tmpl')
if __name__ == "__main__":
    # No numeric argument: run once as a plain CGI script.
    if len(sys.argv)==1 or not sys.argv[1].isdigit():
        # Minimal WSGI start_response that prints CGI-style headers.
        def start_response(status, headers):
            print 'Status: %s\r\n' % status,
            for header,value in headers:
                print '%s: %s\r\n' % (header, value),
            print
        for output in checker_app(os.environ, start_response):
            print output.decode('utf-8')
    else:
        # Numeric argument: run as an external FastCGI server on that port.
        # export HTTP_HOST=http://feedvalidator.org/
        # export SCRIPT_NAME=check.cgi
        # export SCRIPT_FILENAME=/home/rubys/svn/feedvalidator/check.cgi
        import fcgi
        port=int(sys.argv[1])
        fcgi.WSGIServer(checker_app, bindAddress=("127.0.0.1", port)).run()
| Python |
"""feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('delong.typepad.com/sdj/')
['http://delong.typepad.com/sdj/atom.xml',
'http://delong.typepad.com/sdj/index.rdf',
'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.36"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib, urllib, urlparse, re, sys, robotparser
import threading
class TimeoutError(Exception): pass
def timelimit(timeout):
    """Decorator factory: run the wrapped function in a daemon thread
    and raise TimeoutError if it has not finished within `timeout`
    seconds.

    NOTE: the worker thread is not killed on timeout; it keeps running
    in the background until its blocking call completes.
    """
    def internal(function):
        def internal2(*args, **kw):
            """
            from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878
            """
            class Calculator(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        # Capture exc_info so it can be re-raised in the
                        # calling thread.
                        self.error = sys.exc_info()

            c = Calculator()
            c.setDaemon(True) # don't hold up exiting
            c.start()
            c.join(timeout)
            if c.isAlive():
                raise TimeoutError
            if c.error:
                # Python 2 two-argument raise; the worker's original
                # traceback is discarded.
                raise c.error[0], c.error[1]
            return c.result
        return internal2
    return internal
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
    import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
    xmlrpclib = None

# Fallback dict() constructor for ancient Pythons.
# NOTE(review): `not dict` is always False whenever the builtin exists
# (a type object is truthy), so this shim is effectively dead code on
# any Python that has the dict builtin.
if not dict:
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
def _debuglog(message):
    # Emit trace output only when the module-level _debug flag is set.
    if _debug: print message
class URLGatekeeper:
    """a class to track robots.txt rules across multiple servers"""
    def __init__(self):
        self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
        self.urlopener = urllib.FancyURLopener()
        self.urlopener.version = "feedfinder/" + __version__ + " " + self.urlopener.version + " +http://www.aaronsw.com/2002/feedfinder/"
        _debuglog(self.urlopener.version)
        self.urlopener.addheaders = [('User-agent', self.urlopener.version)]
        # make robotparser's own robots.txt fetches advertise the same agent
        robotparser.URLopener.version = self.urlopener.version
        robotparser.URLopener.addheaders = self.urlopener.addheaders

    def _getrp(self, url):
        # return the robots.txt parser for url's domain, fetching and
        # caching it on first use
        protocol, domain = urlparse.urlparse(url)[:2]
        if self.rpcache.has_key(domain):
            return self.rpcache[domain]
        baseurl = '%s://%s' % (protocol, domain)
        robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
        _debuglog('fetching %s' % robotsurl)
        rp = robotparser.RobotFileParser(robotsurl)
        try:
            rp.read()
        except:
            # unreachable/bad robots.txt: keep the empty parser, which
            # allows everything
            pass
        self.rpcache[domain] = rp
        return rp

    def can_fetch(self, url):
        # True if robots.txt permits us to fetch url
        rp = self._getrp(url)
        allow = rp.can_fetch(self.urlopener.version, url)
        _debuglog("gatekeeper of %s says %s" % (url, allow))
        return allow

    @timelimit(10)
    def get(self, url):
        # fetch url (honouring robots.txt); returns '' on refusal or error
        if not self.can_fetch(url): return ''
        try:
            return self.urlopener.open(url).read()
        except:
            return ''
# module-wide singleton: every fetch in this module goes through the gatekeeper
_gatekeeper = URLGatekeeper()
class BaseParser(sgmllib.SGMLParser):
    """Shared machinery for the HTML link parsers: collects candidate links
    and tracks the document base URI (honouring <base href>)."""
    def __init__(self, baseuri):
        sgmllib.SGMLParser.__init__(self)
        self.links = []
        self.baseuri = baseuri

    def normalize_attrs(self, attrs):
        """Lower-case attribute names (and rel/type values) and decode
        character/entity references in attribute values."""
        def cleanattr(v):
            # decode numeric character references first...
            v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
            v = v.strip()
            # ...then the predefined named entities.  (The previous code
            # had the already-decoded characters on both sides of each
            # replace(), making every call a no-op.)
            v = v.replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'").replace('&quot;', '"').replace('&amp;', '&')
            return v
        attrs = [(k.lower(), cleanattr(v)) for k, v in attrs]
        # rel/type values compare case-insensitively
        attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs]
        return attrs

    def do_base(self, attrs):
        # <base href=...> changes the URI against which links are resolved
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.baseuri = attrsD['href']

    def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
    """Extracts feed URIs advertised via <link rel="alternate"> elements."""
    FEED_TYPES = ('application/rss+xml',
                  'text/xml',
                  'application/atom+xml',
                  'application/x.atom+xml',
                  'application/x-atom+xml')
    def do_link(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        rel = attrsD.get('rel')
        if rel is None:
            return
        if 'alternate' not in rel.split():
            return
        if attrsD.get('type') not in self.FEED_TYPES:
            return
        href = attrsD.get('href')
        if href is None:
            return
        self.links.append(urlparse.urljoin(self.baseuri, href))
class ALinkParser(BaseParser):
    """Collects the target of every <a href> on the page."""
    def start_a(self, attrs):
        href = dict(self.normalize_attrs(attrs)).get('href')
        if href is not None:
            self.links.append(urlparse.urljoin(self.baseuri, href))
def makeFullURI(uri):
    """Coerce *uri* to an absolute http/https URI; feed:// becomes http://."""
    if uri.startswith('feed://'):
        uri = 'http://' + uri.split('feed://', 1).pop()
    for scheme in ('http', 'https'):
        if uri.startswith(scheme + '://'):
            return uri
    return 'http://%s' % uri
def getLinks(data, baseuri):
    """Return feed URIs advertised in LINK tags of the HTML in *data*."""
    parser = LinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getALinks(data, baseuri):
    """Return the targets of all <a href> links in the HTML in *data*."""
    parser = ALinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getLocalLinks(links, baseuri):
    """Return the subset of *links* on the same server as *baseuri*
    (case-insensitive prefix match).  The unused local ``urilen`` was
    removed."""
    baseuri = baseuri.lower()
    return [l for l in links if l.lower().startswith(baseuri)]
def isFeedLink(link):
    """Heuristic: does *link* look like a direct feed URI?

    True for FeedBurner/Blogger feed URLs and for links ending in a known
    feed suffix.  The previous code compared the whole suffix tuple against
    ``link[-4:]`` (a 4-character slice), so the 5-character suffixes
    '.atom', 'atom/', '/atom' and '/feed' could never match -- contradicting
    the module docstring, which promises '.atom' detection.  ``endswith``
    with a tuple handles all lengths.
    """
    if link.startswith('http://feeds.feedburner.com/'):
        return True
    if link.endswith('/feeds/posts/default'):
        return True
    return link.lower().endswith(
        ('.rss', '.rdf', '.xml', '.atom', 'atom/', '/atom', '/feed'))
def isXMLRelatedLink(link):
    """Weaker heuristic: total count of feed-ish substrings in *link*."""
    link = link.lower()
    return sum(link.count(token) for token in ('rss', 'rdf', 'xml', 'atom', 'feed'))
def couldBeFeedData(data):
    """Return a truthy count of feed root elements in *data*, or 0 for HTML.

    The old ``except TimeoutError: return []`` wrapper was unreachable --
    ``str.lower()`` cannot raise TimeoutError -- and would have returned the
    wrong type ([] instead of an int); it has been removed.
    """
    data = data.lower()
    if data.count('<html'):
        return 0
    return data.count('<rss') + data.count('<rdf') + data.count('<feed')
def isFeed(uri):
    """Fetch *uri* (robots.txt permitting) and score it as feed data."""
    _debuglog('seeing if %s is a feed' % uri)
    scheme = urlparse.urlparse(uri)[0]
    if scheme not in ('http', 'https'):
        return 0
    return couldBeFeedData(_gatekeeper.get(uri))
def sortFeeds(feed1Info, feed2Info):
    """cmp()-style comparator: orders feed info dicts by headlines_rank,
    descending.  Rewritten without the Python-2-only ``cmp`` builtin; for
    the numeric ranks ``(a > b) - (a < b)`` returns the identical -1/0/1.
    """
    a = feed2Info['headlines_rank']
    b = feed1Info['headlines_rank']
    return (a > b) - (a < b)
def getFeedsFromSyndic8(uri):
    """Ask the Syndic8 XML-RPC service for feeds matching *uri*.

    Returns data URLs of syndicated feeds, best-ranked first; returns []
    on any failure (network error, or xmlrpclib unavailable -- the bare
    except deliberately makes this best-effort).
    """
    feeds = []
    try:
        server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
        feedids = server.syndic8.FindFeeds(uri)
        infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank','status','dataurl'])
        infolist.sort(sortFeeds)
        feeds = [f['dataurl'] for f in infolist if f['status']=='Syndicated']
        _debuglog('found %s feeds through Syndic8' % len(feeds))
    except:
        pass
    return feeds
def feeds(uri, all=False, querySyndic8=False):
    """Find feed URIs for *uri*, trying progressively weaker strategies.

    With all=True every strategy runs and results accumulate; with
    querySyndic8=True the last-ditch Syndic8 search is enabled.
    (NOTE(review): the parameter name ``all`` shadows the builtin.)
    """
    fulluri = makeFullURI(uri)
    try:
        data = _gatekeeper.get(fulluri)
    except:
        return []
    # is this already a feed?
    if couldBeFeedData(data):
        return [fulluri]
    # nope, it's a page, try LINK tags first
    _debuglog('looking for LINK tags')
    try:
        feeds = getLinks(data, fulluri)
    except:
        feeds = []
    _debuglog('found %s feeds through LINK tags' % len(feeds))
    feeds = filter(isFeed, feeds)
    if all or not feeds:
        # no LINK tags, look for regular <A> links that point to feeds
        _debuglog('no LINK tags, looking at A tags')
        try:
            links = getALinks(data, fulluri)
        except:
            links = []
        locallinks = getLocalLinks(links, fulluri)
        # look for obvious feed links on the same server
        feeds.extend(filter(isFeed, filter(isFeedLink, locallinks)))
        if all or not feeds:
            # look harder for feed links on the same server
            feeds.extend(filter(isFeed, filter(isXMLRelatedLink, locallinks)))
        if all or not feeds:
            # look for obvious feed links on another server
            feeds.extend(filter(isFeed, filter(isFeedLink, links)))
        if all or not feeds:
            # look harder for feed links on another server
            feeds.extend(filter(isFeed, filter(isXMLRelatedLink, links)))
    if all or not feeds:
        # try common locations relative to the page
        _debuglog('no A tags, guessing')
        suffixes = [ # filenames used by popular software:
            'atom.xml', # blogger, TypePad
            'index.atom', # MT, apparently
            'index.rdf', # MT
            'rss.xml', # Dave Winer/Manila
            'index.xml', # MT
            'index.rss' # Slash
        ]
        feeds.extend(filter(isFeed, [urlparse.urljoin(fulluri, x) for x in suffixes]))
    if (all or not feeds) and querySyndic8:
        # still no luck, search Syndic8 for feeds (requires xmlrpclib)
        _debuglog('still no luck, searching Syndic8')
        feeds.extend(getFeedsFromSyndic8(uri))
    # de-duplicate when set() exists (__builtins__ may be a module or a dict)
    if hasattr(__builtins__, 'set') or __builtins__.has_key('set'):
        feeds = list(set(feeds))
    return feeds

getFeeds = feeds # backwards-compatibility
def feed(uri):
    """Return the single best feed URI found for *uri*, or None."""
    #todo: give preference to certain feed formats
    found = feeds(uri)
    if not found:
        return None
    return found[0]
##### test harness ######

def test():
    """Crawl the online autodiscovery test suite, following rel="next"
    links from page to page, and report which pages fail."""
    uri = 'http://diveintomark.org/tests/client/autodiscovery/html4-001.html'
    failed = []
    count = 0
    while 1:
        data = _gatekeeper.get(uri)
        # stop when the fetched page is no longer part of the test suite
        if data.find('Atom autodiscovery test') == -1: break
        sys.stdout.write('.')
        sys.stdout.flush()
        count += 1
        links = getLinks(data, uri)
        if not links:
            print '\n*** FAILED ***', uri, 'could not find link'
            failed.append(uri)
        elif len(links) > 1:
            print '\n*** FAILED ***', uri, 'found too many links'
            failed.append(uri)
        else:
            atomdata = urllib.urlopen(links[0]).read()
            if atomdata.find('<link rel="alternate"') == -1:
                print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
                failed.append(uri)
            else:
                # the feed should link back to the page we started from
                backlink = atomdata.split('href="').pop().split('"')[0]
                if backlink != uri:
                    print '\n*** FAILED ***', uri, 'retrieved wrong feed'
                    failed.append(uri)
        if data.find('<link rel="next" href="') == -1: break
        uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
    print
    print count, 'tests executed,', len(failed), 'failed'
if __name__ == '__main__':
    # usage: feedfinder.py [--debug] [uri | test]
    args = sys.argv[1:]
    if args and args[0] == '--debug':
        _debug = 1
        args.pop(0)
    if args:
        uri = args[0]
    else:
        uri = 'http://diveintomark.org/'
    if uri == 'test':
        test()
    else:
        print "\n".join(getFeeds(uri))
| Python |
from setuptools import setup, find_packages
import sys, os
version = ''
setup(name='feedvalidator',
version=version,
description="Validator for feeds",
long_description= \
"""Feedvalidator validates feeds in a variety of syndication formats.
""",
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='syndication atom rdf rss feeds',
author='Sam Ruby',
url='http://feedvalidator.org/',
license='MIT',
packages=['feedvalidator', 'feedvalidator.i18n', 'feedvalidator.formatter'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'setuptools',
],
entry_points="""
# -*- Entry points: -*-
""",
)
| Python |
"""$Id: uri.py 988 2008-03-12 18:22:48Z sa3ruby $"""
"""
Code to test URI references for validity, and give their normalized
form, according to RFC 3986.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004, 2007 Joseph Walton"
from urlparse import urljoin
from urllib import quote, quote_plus, unquote, unquote_plus
from unicodedata import normalize
from codecs import lookup
import re
# UTF-8 encode/decode functions from the codec registry
(enc, dec) = lookup('UTF-8')[:2]

# Character classes from RFC 3986
SUBDELIMS='!$&\'()*+,;='
PCHAR='-._~' + SUBDELIMS + ':@'
GENDELIMS=':/?#[]@'
RESERVED=GENDELIMS + SUBDELIMS

# Schemes whose well-known default port is dropped during normalization
default_port = {
    'ftp': 21,
    'telnet': 23,
    'http': 80,
    'gopher': 70,
    'news': 119,
    'nntp': 119,
    'prospero': 191,
    'https': 443,
    'snews': 563,
    'snntp': 563,
}
class BadUri(Exception):
    """Raised when a URI reference is syntactically invalid."""
def _n(s):
    # Unicode-normalize a UTF-8 byte string: decode, NFC-normalize, re-encode
    return enc(normalize('NFC', dec(s)[0]))[0]
octetRe = re.compile('([^%]|%[a-fA-F0-9]{2})')

def asOctets(s):
    """Yield (original-text, decoded-char) pairs for each octet of *s*.

    Percent escapes come back upper-cased in the original-text slot; a
    malformed escape raises BadUri.
    """
    while s:
        m = octetRe.match(s)
        if m is None:
            raise BadUri()
        tok = m.group(1)
        if tok.startswith('%'):
            yield (tok.upper(), chr(int(tok[1:], 16)))
        else:
            yield (tok, tok)
        s = s[m.end(1):]
def _qnu(s,safe=''):
    """Normalize percent-quoting in *s*: unquote runs of octets, NFC
    normalize them, and re-quote, while preserving (upper-cased) escapes
    of reserved characters listed in *safe*.  Returns None for None."""
    if s == None:
        return None
    # unquote{,_plus} leave high-bit octets unconverted in Unicode strings
    # This conversion will, correctly, cause UnicodeEncodeError if there are
    # non-ASCII characters present in the string
    s = str(s)
    res = ''
    b = ''
    for (c,x) in asOctets(s):
        if x in RESERVED and x in safe:
            # reserved char that must stay in its original (quoted) form:
            # flush the pending run, then emit the original token
            res += quote(_n(unquote(b)), safe)
            b = ''
            res += c
        else:
            b += x
    res += quote(_n(unquote(b)), safe)
    return res
# Match an optional port specification
portRe = re.compile(':(\d*)$')
def _normPort(netloc,defPort):
nl = netloc.lower()
p = defPort
m = portRe.search(nl)
if m:
if m.group(1) != '':
p = int(m.group(1))
nl = nl[:m.start(1) - 1]
if nl and nl[-1] == '.' and nl.rfind('.', 0, -2) >= 0:
nl = nl[:-1]
# Square brackets are allowed, and only allowed, delimiting IPv6 addresses
if nl.startswith('[') != nl.endswith(']'):
raise BadUri()
if p != defPort:
nl = nl + ':' + str(p)
return nl
def _normAuth(auth, port):
    """Normalize an authority component (userinfo@host:port); an empty
    ':' userinfo is dropped entirely."""
    at = auth.rfind('@')
    if at >= 0:
        cred = auth[:at]
        if cred == ':':
            cred = ''
        host = auth[at + 1:]
    else:
        cred = None
        host = auth
    if cred:
        return cred + '@' + _normPort(host, port)
    return _normPort(host, port)
def _normPath(p):
    """Apply dot-segment removal ('.'/'..') to path *p* and percent-
    normalize each remaining segment.

    A path whose first segment is non-empty (i.e. not starting with '/')
    skips the dot-removal loop entirely -- see the i = len(l) shortcut.
    """
    l = p.split(u'/')
    i = 0
    if l and l[0]:
        # relative path: leave segments untouched, only re-quote below
        i = len(l)
    while i < len(l):
        c = l[i]
        if (c == '.'):
            if i < len(l) - 1:
                del l[i]
            else:
                # trailing '.' becomes an empty final segment
                l[i] = ''
        elif (c == '..'):
            if i < len(l) - 1:
                del l[i]
            else:
                l[i] = ''
            # also remove the preceding segment, when one can be removed
            if i > 1 or (i > 0 and l[0]):
                i -= 1
                del l[i]
        else:
            i += 1
    if l == ['']:
        # the root path is '/', i.e. two empty segments
        l = ['', '']
    return u'/'.join([_qnu(c, PCHAR) for c in l])
# From RFC 2396bis, with added end-of-string marker
uriRe = re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$')

def _canonical(s):
    """Return the canonical form of URI reference *s*.

    Returns None for relative references that cannot be normalized;
    raises BadUri for malformed input."""
    m = uriRe.match(s)
    if not(m):
        raise BadUri()
    # Check for a relative URI
    if m.group(2) is None:
        scheme = None
    else:
        scheme = m.group(2).lower()
    if m.group(4) is None:
        # no authority component
        authority = None
        p = m.group(5)
        # Don't try to normalise URI references with relative paths
        if scheme is None and not p.startswith('/'):
            return None
        if scheme == 'mailto':
            # XXX From RFC 2368, mailto equivalence needs to be subtler than this
            i = p.find('@')
            if i > 0:
                # NOTE(review): p cannot contain '?' (the regex routes the
                # query into group 7), so j is always len(p) and p[j:] is ''
                j = p.find('?')
                if j < 0:
                    j = len(p)
                p = _qnu(p[:i]) + '@' + _qnu(p[i + 1:].lower()) + _qnu(p[j:])
            path = p
        else:
            if scheme is None or p.startswith('/'):
                path = _normPath(p)
            else:
                # opaque (non-hierarchical) path: only normalize quoting
                path = _qnu(p, PCHAR + '/')
    else:
        a = m.group(4)
        p = m.group(5)
        # strip the scheme's default port, if we know it
        if scheme in default_port:
            a = _normAuth(a, default_port[scheme])
        else:
            a = _normAuth(a, None)
        authority = a
        path = _normPath(p)
    query = _qnu(m.group(7), PCHAR + "/?")
    fragment = _qnu(m.group(9), PCHAR + "/?")
    # reassemble the normalized components
    s = u''
    if scheme != None:
        s += scheme + ':'
    if authority != None:
        s += '//' + authority
    s += path
    if query != None:
        s += '?' + query
    if fragment != None:
        s += '#' + fragment
    return s
class Uri:
    """A Uri wraps a string and performs equality testing according to the
    rules for URI equivalence. """
    def __init__(self,s):
        self.s = s              # the original string
        self.n = _canonical(s)  # its canonical form (None if not normalizable)
    def __str__(self):
        return self.s
    def __repr__(self):
        return repr(self.s)
    def __eq__(self, a):
        # equal iff canonical forms match; assumes *a* is also a Uri
        return self.n == a.n
def canonicalForm(u):
    """Give the canonical form for a URI, so char-by-char comparisons
    become valid tests for equivalence; None if it cannot be computed."""
    try:
        return _canonical(u)
    except (BadUri, UnicodeError):
        return None
| Python |
"""$Id: compatibility.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from logging import *
def _must(event):
    # MUST-level problems are validator errors
    return isinstance(event, Error)

def _should(event):
    # SHOULD-level problems are validator warnings
    return isinstance(event, Warning)

def _may(event):
    # MAY-level notes are informational events
    return isinstance(event, Info)

def A(events):
    """Compatibility level A: errors only."""
    return [event for event in events if _must(event)]

def AA(events):
    """Compatibility level AA: errors and warnings."""
    return [event for event in events if _must(event) or _should(event)]

def AAA(events):
    """Compatibility level AAA: errors, warnings and informational notes."""
    return [event for event in events if _must(event) or _should(event) or _may(event)]

def AAAA(events):
    """Compatibility level AAAA: everything, unfiltered."""
    return events
def analyze(events, rawdata):
    """Guess whether *rawdata* is an HTML page rather than a feed.

    Returns 'html' when the document opens with an HTML root/doctype, or
    when the validator reported an html root element; otherwise None.
    """
    head = rawdata[0:512].strip().upper()
    if head.startswith(('<HTML', '<!DOCTYPE HTML')):
        return 'html'
    for event in events:
        if not isinstance(event, UndefinedElement):
            continue
        if event.params['parent'] != 'root':
            continue
        if event.params['element'].lower() in ['html', 'xhtml:html']:
            return "html"
    return None
| Python |
"""$Id: root.py 1049 2009-05-06 02:00:03Z rothfuss $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1049 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
# Namespace URIs recognized at the document root
rss11_namespace='http://purl.org/net/rss1.1#'
purl1_namespace='http://purl.org/rss/1.0/'
soap_namespace='http://feeds.archive.org/validator/'
pie_namespace='http://purl.org/atom/ns#'  # obsolete pre-1.0 Atom ("Pie")
atom_namespace='http://www.w3.org/2005/Atom'
opensearch_namespace='http://a9.com/-/spec/opensearch/1.1/'
xrds_namespace='xri://$xrds'
kml20_namespace='http://earth.google.com/kml/2.0'
kml21_namespace='http://earth.google.com/kml/2.1'
kml22_namespace='http://www.opengis.net/kml/2.2'
#
# Main document.
# Supports rss, rdf, pie, kml, and ffkar
#
class root(validatorBase):
    """Validator for the document root.

    Dispatches on the root element name (rss, feed/entry, rdf:RDF,
    Channel, kml, opml, OpenSearchDescription, XRDS, SOAP envelopes, ...)
    and checks that each one carries the namespace it should."""
    def __init__(self, parent, base):
        validatorBase.__init__(self)
        self.parent = parent
        self.dispatcher = parent
        self.name = "root"
        self.xmlBase = base
        self.xmlLang = None

    def startElementNS(self, name, qname, attrs):
        # RSS 2.x must live in no namespace at all
        if name=='rss':
            if qname:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
        # Atom feed or standalone Atom entry
        if name=='feed' or name=='entry':
            # discourage prefixing the default namespaces
            if self.namespace.has_key('atom'):
                from logging import AvoidNamespacePrefix
                self.log(AvoidNamespacePrefix({'prefix':'atom'}))
            if self.namespace.has_key('xhtml'):
                from logging import AvoidNamespacePrefix
                self.log(AvoidNamespacePrefix({'prefix':'xhtml'}))
            if qname==pie_namespace:
                # obsolete pre-1.0 Atom namespace
                from logging import ObsoleteNamespace
                self.log(ObsoleteNamespace({"element":"feed"}))
                self.dispatcher.defaultNamespaces.append(pie_namespace)
                from logging import TYPE_ATOM
                self.setFeedType(TYPE_ATOM)
            elif not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
            else:
                if name=='feed':
                    from logging import TYPE_ATOM
                    self.setFeedType(TYPE_ATOM)
                else:
                    from logging import TYPE_ATOM_ENTRY
                    self.setFeedType(TYPE_ATOM_ENTRY)
                self.dispatcher.defaultNamespaces.append(atom_namespace)
                if qname<>atom_namespace:
                    from logging import InvalidNamespace
                    self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
                    self.dispatcher.defaultNamespaces.append(qname)
        # RSS 1.1 root
        if name=='Channel':
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
            elif qname != rss11_namespace :
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
            else:
                self.dispatcher.defaultNamespaces.append(qname)
            from logging import TYPE_RSS1
            self.setFeedType(TYPE_RSS1)
        # KML: pick the feed type matching the declared KML version
        if name=='kml':
            from logging import TYPE_KML20, TYPE_KML21, TYPE_KML22
            self.dispatcher.defaultNamespaces.append(qname)
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = kml20_namespace
                feedType = TYPE_KML20
            elif qname == kml20_namespace:
                feedType = TYPE_KML20
            elif qname == kml21_namespace:
                feedType = TYPE_KML21
            elif qname == kml22_namespace:
                feedType = TYPE_KML22
            elif qname != kml20_namespace and qname != kml21_namespace and qname != kml22_namespace:
                # unknown namespace: warn and assume the newest KML
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                qname = kml22_namespace
                feedType = TYPE_KML22
            self.setFeedType(feedType)
        if name=='OpenSearchDescription':
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = opensearch_namespace
            elif qname != opensearch_namespace:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
                qname = opensearch_namespace
        if name=='XRDS':
            from logging import TYPE_XRD
            self.setFeedType(TYPE_XRD)
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = xrds_namespace
            elif qname != xrds_namespace:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
                qname = xrds_namespace
        validatorBase.startElementNS(self, name, qname, attrs)

    def unknown_starttag(self, name, qname, attrs):
        # unrecognized root element: classify the problem, then eat it
        from logging import ObsoleteNamespace,InvalidNamespace,UndefinedElement
        if qname in ['http://example.com/newformat#','http://purl.org/atom/ns#']:
            self.log(ObsoleteNamespace({"element":name, "namespace":qname}))
        elif name=='feed':
            self.log(InvalidNamespace({"element":name, "namespace":qname}))
        else:
            self.log(UndefinedElement({"parent":"root", "element":name}))
        from validators import any
        return any(self, name, qname, attrs)

    def do_rss(self):
        from rss import rss
        return rss()

    def do_feed(self):
        from feed import feed
        # obsolete Atom 0.3 feeds are consumed without further validation
        if pie_namespace in self.dispatcher.defaultNamespaces:
            from validators import eater
            return eater()
        return feed()

    def do_entry(self):
        from entry import entry
        return entry()

    def do_app_categories(self):
        from logging import TYPE_APP_CATEGORIES
        self.setFeedType(TYPE_APP_CATEGORIES)
        from categories import categories
        return categories()

    def do_app_service(self):
        from logging import TYPE_APP_SERVICE
        self.setFeedType(TYPE_APP_SERVICE)
        from service import service
        return service()

    def do_kml(self):
        from kml import kml
        return kml()

    def do_opml(self):
        from opml import opml
        return opml()

    def do_outlineDocument(self):
        # pre-OPML name for the same structure
        from logging import ObsoleteVersion
        self.log(ObsoleteVersion({"element":"outlineDocument"}))
        from opml import opml
        return opml()

    def do_opensearch_OpenSearchDescription(self):
        import opensearch
        self.dispatcher.defaultNamespaces.append(opensearch_namespace)
        from logging import TYPE_OPENSEARCH
        self.setFeedType(TYPE_OPENSEARCH)
        return opensearch.OpenSearchDescription()

    def do_xrds_XRDS(self):
        from xrd import xrds
        return xrds()

    def do_rdf_RDF(self):
        from rdf import rdf
        self.dispatcher.defaultNamespaces.append(purl1_namespace)
        return rdf()

    def do_Channel(self):
        from channel import rss10Channel
        return rss10Channel()

    # SOAP wrappers just recurse back into a fresh root validator
    def do_soap_Envelope(self):
        return root(self, self.xmlBase)

    def do_soap_Body(self):
        self.dispatcher.defaultNamespaces.append(soap_namespace)
        return root(self, self.xmlBase)

    def do_request(self):
        return root(self, self.xmlBase)

    def do_xhtml_html(self):
        # an HTML page is not a feed
        from logging import UndefinedElement
        self.log(UndefinedElement({"parent":"root", "element":"xhtml:html"}))
        from validators import eater
        return eater()
| Python |
"""$Id: item.py 1019 2008-06-03 05:13:16Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1019 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_item
from extension import *
#
# item element.
#
class item(validatorBase, extension_item, itunes_item):
    """Common <item> validation, shared by the RSS 2.0 and RSS 1.0 items."""
    def validate(self):
        # an item needs at least a title or a description
        if (not "title" in self.children) and (not "description" in self.children):
            self.log(ItemMustContainTitleOrDescription({}))
        if not "guid" in self.children:
            if self.getFeedType() == TYPE_RSS2:
                # walk up to the enclosing <rss> to inspect its version
                rss = self.parent.parent
                while rss and rss.name!='rss': rss=rss.parent
                if rss.version.startswith("2."):
                    self.log(MissingGuid({"parent":self.name, "element":"guid"}))
        if "slash_comments" in self.children:
            if "lastBuildDate" not in self.parent.children and self.getFeedType()==TYPE_RSS2:
                self.log(SlashDate({}))
        if self.itunes: itunes_item.validate(self)
    def do_link(self):
        return rfc2396_full(), noduplicates()
    def do_title(self):
        return nonhtml(), nonblank(), noduplicates()
    def do_description(self):
        # RSS 0.91 descriptions must be plain text; later versions allow HTML
        if self.getFeedType() == TYPE_RSS2:
            rss = self.parent.parent
            while rss and rss.name!='rss': rss=rss.parent
            if rss.version == "0.91":
                return nonhtml(), noduplicates()
        return safeHtml(), noduplicates()
    def do_content_encoded(self):
        if self.getFeedType() == TYPE_RSS2:
            if not 'description' in self.children:
                self.log(NeedDescriptionBeforeContent({}))
        return safeHtml(), noduplicates()
    def do_content_items(self):
        return ContentItems(), noduplicates()
    def do_xhtml_body(self):
        if self.getFeedType() == TYPE_RSS2:
            self.log(DuplicateDescriptionSemantics({"element":"xhtml:body"}))
        return htmlEater().setElement('xhtml:body',{},self)
    def do_atom_id(self):
        if "guid" in self.children:
            self.log(DuplicateItemSemantics({"core":"guid", "ext":"atom:id"}))
        return rfc2396_full(), noduplicates(), unique('atom_id',self.parent)
    def do_atom_link(self):
        from link import link
        return link()
    def do_atom_title(self):
        from content import content
        return content(), noduplicates()
    def do_atom_summary(self):
        from content import textConstruct
        return textConstruct(), noduplicates()
    def do_atom_author(self):
        from author import author
        return author(), noduplicates()
    def do_atom_contributor(self):
        from author import author
        return author()
    def do_atom_content(self):
        from content import content
        return content()
    def do_atom_published(self):
        if "published" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"atom:published"}))
        return rfc3339(), noduplicates()
    def do_atom_updated(self):
        return rfc3339(), noduplicates()
    def do_dc_creator(self):
        # NOTE(review): the '.'-free check presumably targets only the bare
        # element name (not dotted variants) -- confirm against base.py
        if self.child.find('.')<0 and "author" in self.children:
            self.log(DuplicateItemSemantics({"core":"author", "ext":"dc:creator"}))
        return text() # duplicates allowed
    def do_dc_subject(self):
        if self.child.find('.')<0 and "category" in self.children:
            self.log(DuplicateItemSemantics({"core":"category", "ext":"dc:subject"}))
        return text() # duplicates allowed
    def do_dc_date(self):
        if self.child.find('.')<0 and "pubDate" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"dc:date"}))
        return w3cdtf()
    def do_cc_license(self):
        if "creativeCommons_license" in self.children:
            self.log(DuplicateItemSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
        return eater()
    def do_creativeCommons_license(self):
        if "cc_license" in self.children:
            self.log(DuplicateItemSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
        return rfc2396_full()
class rss20Item(item, extension_rss20_item):
    """RSS 2.0 specific <item> children; flags core/extension redundancy."""
    def do_comments(self):
        return rfc2396_full(), noduplicates()
    def do_enclosure(self):
        return enclosure(), noduplicates(DuplicateEnclosure)
    def do_pubDate(self):
        if "dc_date" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"dc:date"}))
        if "atom_published" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"atom:published"}))
        return rfc822(), noduplicates()
    def do_author(self):
        if "dc_creator" in self.children:
            self.log(DuplicateItemSemantics({"core":"author", "ext":"dc:creator"}))
        return email_with_name(), noduplicates()
    def do_category(self):
        if "dc_subject" in self.children:
            self.log(DuplicateItemSemantics({"core":"category", "ext":"dc:subject"}))
        return category(), nonblank()
    def do_guid(self):
        if "atom_id" in self.children:
            self.log(DuplicateItemSemantics({"core":"guid", "ext":"atom:id"}))
        return guid(), noduplicates(), unique('guid',self.parent)
    def do_source(self):
        if "dc_source" in self.children:
            self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
        return source(), noduplicates()
class rss10Item(item, extension_rss10_item):
    """RSS 1.0 <item>: requires title and link, and a document-unique
    rdf:about attribute."""
    def validate(self):
        # NOTE(review): overrides item.validate() without calling it, so
        # the shared title-or-description / guid checks do not run here
        if not "link" in self.children:
            self.log(MissingElement({"parent":self.name, "element":"link"}))
        if not "title" in self.children:
            self.log(MissingElement({"parent":self.name, "element":"title"}))
    def getExpectedAttrNames(self):
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
    def do_rdfs_label(self):
        return text()
    def do_rdfs_comment(self):
        return text()
    def prevalidate(self):
        # track rdf:about values on the dispatcher to enforce uniqueness
        # across the whole document
        if self.attrs.has_key((rdfNS,"about")):
            about = self.attrs[(rdfNS,"about")]
            if not "abouts" in self.dispatcher.__dict__:
                self.dispatcher.__dict__["abouts"] = []
            if about in self.dispatcher.__dict__["abouts"]:
                self.log(DuplicateValue({"parent":self.name, "element":"rdf:about", "value":about}))
            else:
                self.dispatcher.__dict__["abouts"].append(about)
#
# items element.
#
class items(validatorBase):
    """The <items> container (RSS 1.0 rdf:Seq form, or RSS 1.1 bare items)."""
    from root import rss11_namespace as rss11_ns
    def getExpectedAttrNames(self):
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
    def do_item(self):
        # bare <item> children are only valid under RSS 1.1
        if self.rss11_ns not in self.dispatcher.defaultNamespaces:
            self.log(UndefinedElement({"element":"item","parent":"items"}))
        return rss10Item()
    def do_rdf_Seq(self):
        # rdf:Seq is the RSS 1.0 form; invalid under RSS 1.1
        if self.rss11_ns in self.dispatcher.defaultNamespaces:
            self.log(UndefinedElement({"element":"rdf:Seq","parent":"items"}))
        return rdfSeq()
class rdfSeq(validatorBase):
    """<rdf:Seq> inside <items>; holds rdf:li entries."""
    def do_rdf_li(self):
        return rdfLi()

class rdfLi(validatorBase):
    """<rdf:li>; accepts a resource attribute, unqualified or RDF-qualified."""
    def getExpectedAttrNames(self):
        return [(None,u'resource'),
                (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
class category(nonhtml):
    """<category>: non-HTML text with an optional domain attribute."""
    def getExpectedAttrNames(self):
        return [(None, u'domain')]

class source(nonhtml):
    """<source>: non-HTML text; the url attribute is required and must be
    a full URI."""
    def getExpectedAttrNames(self):
        return [(None, u'url')]
    def prevalidate(self):
        self.validate_required_attribute((None,'url'), rfc2396_full)
        return text.prevalidate(self)
class enclosure(validatorBase):
    """<enclosure>: validates the url, length and type attributes."""
    from validators import mime_re
    def getExpectedAttrNames(self):
        return [(None, u'url'), (None, u'length'), (None, u'type')]
    def prevalidate(self):
        # length: required non-negative integer; 0 means "size unknown",
        # and -1 gets its own hint to use 0 instead
        try:
            if int(self.attrs.getValue((None, 'length'))) < 0:
                if int(self.attrs.getValue((None, 'length'))) == -1:
                    self.log(UseZeroForUnknown({"parent":self.name, "element":'length'}))
                else:
                    self.log(InvalidNonNegativeInteger({"parent":self.name, "element":'length'}))
            else:
                self.log(ValidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        except KeyError:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        except ValueError:
            self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        # type: required, must look like a MIME type
        try:
            if not self.mime_re.match(self.attrs.getValue((None, 'type'))):
                self.log(InvalidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
            else:
                self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
        except KeyError:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
        # url: required http-style URL; also exposed to the parent item
        self.validate_required_attribute((None,'url'), httpURL)
        if self.attrs.has_key((None,u"url")):
            if hasattr(self.parent,'setEnclosure'):
                self.parent.setEnclosure(self.attrs.getValue((None, 'url')))
        return validatorBase.prevalidate(self)
class guid(rfc2396_full, noduplicates):
    """<guid>: treated as a permalink URI by default; isPermaLink='false'
    relaxes that to any sufficiently unique string."""
    def getExpectedAttrNames(self):
        return [(None, u'isPermaLink')]
    def validate(self):
        isPermalink = 1
        try:
            isPermalinkStr = self.attrs.getValue((None, 'isPermaLink'))
            if isPermalinkStr not in ('true', 'false'):
                self.log(InvalidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
            else:
                self.log(ValidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
            isPermalink = (isPermalinkStr == 'true')
        except KeyError:
            # attribute absent: defaults to permalink semantics
            pass
        if isPermalink:
            if not(rfc2396.validate(self, InvalidHttpGUID, ValidHttpGUID)):
                return 0
            else:
                lu = self.value.lower()
                # tag: and urn:uuid: are valid URIs but not dereferenceable,
                # so they cannot be permalinks
                if lu.startswith("tag:") or lu.startswith("urn:uuid:"):
                    self.log(InvalidPermalink({"parent":self.parent.name, "element":self.name}))
                    return 0
                else:
                    return 1
        elif len(self.value)<9 and self.value.isdigit():
            # short all-digit guids collide too easily
            self.log(NotSufficientlyUnique({"parent":self.parent.name, "element":self.name, "value":self.value}))
            return noduplicates.validate(self)
        else:
            self.log(ValidHttpGUID({"parent":self.parent.name, "element":self.name}))
            return noduplicates.validate(self)
class ContentItems(validatorBase):
    """content:items -- an RDF bag of inline content payloads."""
    def do_rdf_Bag(self):
        return ContentBag(), noduplicates()

class ContentBag(validatorBase):
    """The rdf:Bag inside content:items."""
    def do_rdf_li(self):
        return ContentLi()

class ContentLi(validatorBase):
    """An rdf:li wrapping a single content:item."""
    def do_content_item(self):
        return ContentItem()

class ContentItem(validatorBase):
    """content:item -- format/encoding resource URIs plus the rdf:value."""
    def do_content_format(self):
        return rdfResourceURI(), noduplicates()
    def do_content_encoding(self):
        return rdfResourceURI(), noduplicates()
    def do_rdf_value(self):
        return text(), noduplicates()
| Python |
from base import validatorBase
from category import category
from validators import yesno
from logging import ConflictingCatAttr, ConflictingCatChildren
class categories(validatorBase):
    """APP app:categories: either out-of-line (href only, no children)
    or inline atom:category children with optional scheme/fixed."""
    def getExpectedAttrNames(self):
        return [(None,u'scheme'),(None,u'fixed'),(None,u'href')]
    def prevalidate(self):
        self.validate_optional_attribute((None,'fixed'), yesno)
        # href is mutually exclusive with fixed and scheme
        if self.attrs.has_key((None,'href')):
            if self.attrs.has_key((None,'fixed')):
                self.log(ConflictingCatAttr({'attr':'fixed'}))
            if self.attrs.has_key((None,'scheme')):
                self.log(ConflictingCatAttr({'attr':'scheme'}))
    def validate(self):
        # an out-of-line reference must not also carry inline categories
        if self.attrs.has_key((None,'href')) and self.children:
            self.log(ConflictingCatChildren({}))
    def do_atom_category(self):
        return category()
| Python |
from base import validatorBase
from validators import *
class xrds(validatorBase):
    """XRDS document root: contains XRD elements."""
    def do_xrd_XRD(self):
        return xrd()

class xrd(validatorBase):
    """A single XRD: contains Service elements."""
    def do_xrd_Service(self):
        return service()

class service(validatorBase):
    """xrd:Service: optional non-negative priority attribute, plus Type,
    URI and OpenID Delegate children."""
    def getExpectedAttrNames(self):
        return [(None,'priority')]
    def prevalidate(self):
        self.validate_optional_attribute((None,'priority'), nonNegativeInteger)
    def do_xrd_Type(self):
        return xrdtype()
    def do_xrd_URI(self):
        return xrdtype()
    def do_openid_Delegate(self):
        return delegate()

# the child element values are all validated as IRIs
xrdtype = rfc3987
URI = rfc3987
delegate = rfc3987
| Python |
"""$Id: rdf.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import rdfAbout, noduplicates, text, eater
from root import rss11_namespace as rss11_ns
from extension import extension_everywhere
# RDF syntax namespace, used for attribute lookups like (rdfNS, "about")
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# rdf:RDF element. The valid children include "channel", "item", "textinput", "image"
#
class rdf(validatorBase,object):
    """rdf:RDF root element.  Valid children: channel, item, textinput and
    image -- RSS 1.0 children require rdf:about, RSS 0.90 ones do not."""
    def do_rss090_channel(self):
        from channel import channel
        self.dispatcher.defaultNamespaces.append("http://my.netscape.com/rdf/simple/0.9/")
        return channel(), noduplicates()
    def do_channel(self):
        from channel import rss10Channel
        return rdfAbout(), rss10Channel(), noduplicates()
    def _is_090(self):
        # True once an RSS 0.90 channel registered its namespace
        return "http://my.netscape.com/rdf/simple/0.9/" in self.dispatcher.defaultNamespaces
    def _withAbout(self,v):
        # RSS 1.0 children additionally need an rdf:about attribute
        if self._is_090():
            return v
        else:
            return v, rdfAbout()
    def do_item(self):
        from item import rss10Item
        return self._withAbout(rss10Item())
    def do_textinput(self):
        from textInput import textInput
        return self._withAbout(textInput())
    def do_image(self):
        return self._withAbout(rss10Image())
    def do_cc_License(self):
        return eater()
    def do_taxo_topic(self):
        return eater()
    def do_rdf_Description(self):
        return eater()
    def prevalidate(self):
        self.setFeedType(TYPE_RSS1)
    def validate(self):
        # some form of channel is mandatory
        if not "channel" in self.children and not "rss090_channel" in self.children:
            self.log(MissingElement({"parent":self.name.replace('_',':'), "element":"channel"}))
from validators import rfc2396_full
class rss10Image(validatorBase, extension_everywhere):
  """Validates a top-level RSS 1.0 image resource."""
  def validate(self):
    # title, link and url are all required children; log a specific
    # event for each one that is missing, in this order.
    for tag, event in (("title", MissingTitle),
                       ("link", MissingLink),
                       ("url", MissingElement)):
      if tag not in self.children:
        self.log(event({"parent": self.name, "element": tag}))
  def do_title(self):
    from image import title
    return title(), noduplicates()
  def do_link(self):
    return rfc2396_full(), noduplicates()
  def do_url(self):
    return rfc2396_full(), noduplicates()
  def do_dc_creator(self):
    return text()
  def do_dc_subject(self):
    return text() # duplicates allowed
  def do_dc_date(self):
    from validators import w3cdtf
    return w3cdtf(), noduplicates()
  def do_cc_license(self):
    return eater()
#
# This class performs RSS 1.x specific validations on extensions.
#
class rdfExtension(validatorBase):
  """Performs RSS 1.x specific validation on extension elements.

  Recursively eats arbitrary RDF content while checking namespace
  well-formedness, rdf:about uniqueness and mixed-content rules.
  """
  def __init__(self, qname, literal=False):
    validatorBase.__init__(self)
    # qname: namespace URI of this element; literal: inside rdf:parseType="Literal"
    self.qname=qname
    self.literal=literal
  def textOK(self):
    # character data is always acceptable inside extension content
    pass
  def setElement(self, name, attrs, parent):
    validatorBase.setElement(self, name, attrs, parent)
    # parseType="Literal" switches off all structural RDF checks below
    if attrs.has_key((rdfNS,"parseType")):
      if attrs[(rdfNS,"parseType")] == "Literal": self.literal=True
    if not self.literal:
      # ensure no rss11 children
      if self.qname==rss11_ns:
        from logging import UndefinedElement
        self.log(UndefinedElement({"parent":parent.name, "element":name}))
      # no duplicate rdf:abouts; the seen set is kept on the dispatcher so
      # it is shared across the whole document
      if attrs.has_key((rdfNS,"about")):
        about = attrs[(rdfNS,"about")]
        if not "abouts" in self.dispatcher.__dict__:
          self.dispatcher.__dict__["abouts"] = []
        if about in self.dispatcher.__dict__["abouts"]:
          self.log(DuplicateValue(
            {"parent":parent.name, "element":"rdf:about", "value":about}))
        else:
          self.dispatcher.__dict__["abouts"].append(about)
  def getExpectedAttrNames(self):
    # no rss11 attributes (unless inside a literal, where anything goes)
    if self.literal or not self.attrs: return self.attrs.keys()
    return [(ns,n) for ns,n in self.attrs.keys() if ns!=rss11_ns]
  def validate(self):
    # rdflib 2.0.5 does not catch mixed content errors
    if self.value.strip() and self.children and not self.literal:
      self.log(InvalidRDF({"message":"mixed content"}))
  def startElementNS(self, name, qname, attrs):
    # ensure element is "namespace well formed"
    if name.find(':') != -1:
      from logging import MissingNamespace
      self.log(MissingNamespace({"parent":self.name, "element":name}))
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    # eat children: record them and recurse with the same literal flag
    self.children.append((qname,name))
    self.push(rdfExtension(qname, self.literal), name, attrs)
  def characters(self, string):
    # inside a literal the text is opaque; otherwise accumulate normally
    if not self.literal: validatorBase.characters(self, string)
| Python |
"""$Id: iso639codes.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
# Mapping of ISO 639 language codes (both two-letter 639-1 and three-letter
# 639-2 forms) to English language names.
#
# NOTE: several codes deliberately appear more than once (e.g. 'ady', 'ast',
# 'byn', 'nya', 'nds', 'nso', 'fry') because the source tables list multiple
# names per code; Python keeps the value of the LAST occurrence.
# Fix: the final 'fry' entry previously read 'Wester Frisian' (typo),
# shadowing the earlier correct spelling; it now reads 'Western Frisian'.
isoLang = \
  {'aa': 'Afar',
   'ab': 'Abkhazian',
   'ae': 'Avestan',
   'af': 'Afrikaans',
   'ak': 'Akan',
   'am': 'Amharic',
   'an': 'Aragonese',
   'ar': 'Arabic',
   'as': 'Assamese',
   'av': 'Avaric',
   'ay': 'Aymara',
   'az': 'Azerbaijani',
   'ba': 'Bashkir',
   'be': 'Byelorussian',
   'bg': 'Bulgarian',
   'bh': 'Bihari',
   'bi': 'Bislama',
   'bm': 'Bambara',
   'bn': 'Bengali;Bangla',
   'bo': 'Tibetan',
   'br': 'Breton',
   'bs': 'Bosnian',
   'ca': 'Catalan',
   'ce': 'Chechen',
   'ch': 'Chamorro',
   'co': 'Corsican',
   'cr': 'Cree',
   'cs': 'Czech',
   'cu': 'Church Slavic',
   'cv': 'Chuvash',
   'cy': 'Welsh',
   'da': 'Danish',
   'de': 'German',
   'dv': 'Divehi',
   'dz': 'Dzongkha',
   'ee': 'Ewe',
   'el': 'Greek',
   'en': 'English',
   'eo': 'Esperanto',
   'es': 'Spanish',
   'et': 'Estonian',
   'eu': 'Basque',
   'fa': 'Persian (Farsi)',
   'ff': 'Fulah',
   'fi': 'Finnish',
   'fj': 'Fiji',
   'fo': 'Faroese',
   'fr': 'French',
   'fy': 'Frisian, Western',
   'ga': 'Irish',
   'gd': 'Scots Gaelic',
   'gl': 'Galician',
   'gn': 'Guarani',
   'gu': 'Gujarati',
   'gv': 'Manx',
   'ha': 'Hausa',
   'he': 'Hebrew',
   'hi': 'Hindi',
   'ho': 'Hiri Motu',
   'hr': 'Croatian',
   'ht': 'Haitian',
   'hu': 'Hungarian',
   'hy': 'Armenian',
   'hz': 'Herero',
   'ia': 'Interlingua',
   'id': 'Indonesian',
   'ie': 'Interlingue',
   'ig': 'Igbo',
   'ii': 'Sichuan Yi',
   'ik': 'Inupiak',
   'io': 'Ido',
   'is': 'Icelandic',
   'it': 'Italian',
   'iu': 'Inuktitut',
   'ja': 'Japanese',
   'jv': 'Javanese',
   'ka': 'Georgian',
   'kg': 'Kongo',
   'ki': 'Kikuyu; Gikuyu',
   'kj': 'Kuanyama; Kwanyama',
   'kk': 'Kazakh',
   'kl': 'Greenlandic',
   'km': 'Cambodian',
   'kn': 'Kannada',
   'ko': 'Korean',
   'kr': 'Kanuri',
   'ks': 'Kashmiri',
   'ku': 'Kurdish',
   'kv': 'Komi',
   'kw': 'Cornish',
   'ky': 'Kirghiz',
   'la': 'Latin',
   'lb': 'Letzeburgesch; Luxembourgish',
   'lg': 'Ganda',
   'li': 'Limburgan; Limburger, Limburgish',
   'ln': 'Lingala',
   'lo': 'Lao',
   'lt': 'Lithuanian',
   'lu': 'Luba-Katanga',
   'lv': 'Latvian',
   'mg': 'Malagasy',
   'mh': 'Marshallese',
   'mi': 'Maori',
   'mk': 'Macedonian',
   'ml': 'Malayalam',
   'mn': 'Mongolian',
   'mo': 'Moldavian',
   'mr': 'Marathi',
   'ms': 'Malay',
   'mt': 'Maltese',
   'my': 'Burmese',
   'na': 'Nauru',
   'nb': 'Norwegian Bokmal',
   'nd': 'Ndebele, North',
   'ne': 'Nepali',
   'ng': 'Ndonga',
   'nl': 'Dutch',
   'nn': 'Norwegian Nynorsk',
   'no': 'Norwegian',
   'nr': 'Ndebele, South',
   'nv': 'Navaho; Navajo',
   'ny': 'Chewa; Chichewa; Nyanha',
   'oc': 'Occitan',
   'oj': 'Ojibwa',
   'om': 'Afan (Oromo)',
   'or': 'Oriya',
   'os': 'Ossetian; Ossetic',
   'pa': 'Punjabi',
   'pi': 'Pali',
   'pl': 'Polish',
   'ps': 'Pushto',
   'pt': 'Portuguese',
   'qu': 'Quechua',
   'rm': 'Rhaeto-Romance',
   'rn': 'Kurundi',
   'ro': 'Romanian',
   'ru': 'Russian',
   'rw': 'Kinyarwanda',
   'sa': 'Sanskrit',
   'sc': 'Sardinian',
   'sd': 'Sindhi',
   'se': 'Northern Sami',
   'sg': 'Sangho',
   'sh': 'Serbo-Croatian',
   'si': 'Singhalese',
   'sk': 'Slovak',
   'sl': 'Slovenian',
   'sm': 'Samoan',
   'sn': 'Shona',
   'so': 'Somali',
   'sq': 'Albanian',
   'sr': 'Serbian',
   'ss': 'Swati',
   'st': 'Sotho, Southern',
   'su': 'Sundanese',
   'sv': 'Swedish',
   'sw': 'Swahili',
   'ta': 'Tamil',
   'te': 'Telugu',
   'tg': 'Tajik',
   'th': 'Thai',
   'ti': 'Tigrinya',
   'tk': 'Turkmen',
   'tl': 'Tagalog',
   'tn': 'Tswana',
   'to': 'Tonga',
   'tr': 'Turkish',
   'ts': 'Tsonga',
   'tt': 'Tatar',
   'tw': 'Twi',
   'ty': 'Tahitian',
   'ug': 'Uigur',
   'uk': 'Ukrainian',
   'ur': 'Urdu',
   'uz': 'Uzbek',
   've': 'Venda',
   'vi': 'Vietnamese',
   'vo': 'Volapuk',
   'wa': 'Walloon',
   'wo': 'Wolof',
   'xh': 'Xhosa',
   'yi': 'Yiddish',
   'yo': 'Yoruba',
   'za': 'Zhuang',
   'zh': 'Chinese',
   'zu': 'Zulu',
   'x' : 'a user-defined language',
   'xx': 'a user-defined language',
   'abk': 'Abkhazian',
   'ace': 'Achinese',
   'ach': 'Acoli',
   'ada': 'Adangme',
   'ady': 'Adygei',
   'ady': 'Adyghe',
   'aar': 'Afar',
   'afh': 'Afrihili',
   'afr': 'Afrikaans',
   'afa': 'Afro-Asiatic (Other)',
   'ain': 'Ainu',
   'aka': 'Akan',
   'akk': 'Akkadian',
   'alb': 'Albanian',
   'sqi': 'Albanian',
   'gws': 'Alemanic',
   'ale': 'Aleut',
   'alg': 'Algonquian languages',
   'tut': 'Altaic (Other)',
   'amh': 'Amharic',
   'anp': 'Angika',
   'apa': 'Apache languages',
   'ara': 'Arabic',
   'arg': 'Aragonese',
   'arc': 'Aramaic',
   'arp': 'Arapaho',
   'arn': 'Araucanian',
   'arw': 'Arawak',
   'arm': 'Armenian',
   'hye': 'Armenian',
   'rup': 'Aromanian',
   'art': 'Artificial (Other)',
   'asm': 'Assamese',
   'ast': 'Asturian',
   'ath': 'Athapascan languages',
   'aus': 'Australian languages',
   'map': 'Austronesian (Other)',
   'ava': 'Avaric',
   'ave': 'Avestan',
   'awa': 'Awadhi',
   'aym': 'Aymara',
   'aze': 'Azerbaijani',
   'ast': 'Bable',
   'ban': 'Balinese',
   'bat': 'Baltic (Other)',
   'bal': 'Baluchi',
   'bam': 'Bambara',
   'bai': 'Bamileke languages',
   'bad': 'Banda',
   'bnt': 'Bantu (Other)',
   'bas': 'Basa',
   'bak': 'Bashkir',
   'baq': 'Basque',
   'eus': 'Basque',
   'btk': 'Batak (Indonesia)',
   'bej': 'Beja',
   'bel': 'Belarusian',
   'bem': 'Bemba',
   'ben': 'Bengali',
   'ber': 'Berber (Other)',
   'bho': 'Bhojpuri',
   'bih': 'Bihari',
   'bik': 'Bikol',
   'byn': 'Bilin',
   'bin': 'Bini',
   'bis': 'Bislama',
   'byn': 'Blin',
   'nob': 'Bokmal, Norwegian',
   'bos': 'Bosnian',
   'bra': 'Braj',
   'bre': 'Breton',
   'bug': 'Buginese',
   'bul': 'Bulgarian',
   'bua': 'Buriat',
   'bur': 'Burmese',
   'mya': 'Burmese',
   'cad': 'Caddo',
   'car': 'Carib',
   'spa': 'Castilian',
   'cat': 'Catalan',
   'cau': 'Caucasian (Other)',
   'ceb': 'Cebuano',
   'cel': 'Celtic (Other)',
   'cai': 'Central American Indian (Other)',
   'chg': 'Chagatai',
   'cmc': 'Chamic languages',
   'cha': 'Chamorro',
   'che': 'Chechen',
   'chr': 'Cherokee',
   'nya': 'Chewa',
   'chy': 'Cheyenne',
   'chb': 'Chibcha',
   'nya': 'Chichewa',
   'chi': 'Chinese',
   'zho': 'Chinese',
   'chn': 'Chinook jargon',
   'chp': 'Chipewyan',
   'cho': 'Choctaw',
   'zha': 'Chuang',
   'chu': 'Church Slavic; Church Slavonic; Old Church Slavonic; Old Church Slavic; Old Bulgarian',
   'chk': 'Chuukese',
   'chv': 'Chuvash',
   'nwc': 'Classical Nepal Bhasa; Classical Newari; Old Newari',
   'cop': 'Coptic',
   'cor': 'Cornish',
   'cos': 'Corsican',
   'cre': 'Cree',
   'mus': 'Creek',
   'crp': 'Creoles and pidgins(Other)',
   'cpe': 'Creoles and pidgins, English-based (Other)',
   'cpf': 'Creoles and pidgins, French-based (Other)',
   'cpp': 'Creoles and pidgins, Portuguese-based (Other)',
   'crh': 'Crimean Tatar; Crimean Turkish',
   'scr': 'Croatian',
   'hrv': 'Croatian',
   'cus': 'Cushitic (Other)',
   'cze': 'Czech',
   'ces': 'Czech',
   'dak': 'Dakota',
   'dan': 'Danish',
   'dar': 'Dargwa',
   'day': 'Dayak',
   'del': 'Delaware',
   'din': 'Dinka',
   'div': 'Divehi',
   'doi': 'Dogri',
   'dgr': 'Dogrib',
   'dra': 'Dravidian (Other)',
   'dua': 'Duala',
   'dut': 'Dutch',
   'nld': 'Dutch',
   'dum': 'Dutch, Middle (ca. 1050-1350)',
   'dyu': 'Dyula',
   'dzo': 'Dzongkha',
   'efi': 'Efik',
   'egy': 'Egyptian (Ancient)',
   'eka': 'Ekajuk',
   'elx': 'Elamite',
   'eng': 'English',
   'enm': 'English, Middle (1100-1500)',
   'ang': 'English, Old (ca.450-1100)',
   'myv': 'Erzya',
   'epo': 'Esperanto',
   'est': 'Estonian',
   'ewe': 'Ewe',
   'ewo': 'Ewondo',
   'fan': 'Fang',
   'fat': 'Fanti',
   'fao': 'Faroese',
   'fij': 'Fijian',
   'fil': 'Filipino; Pilipino',
   'fin': 'Finnish',
   'fiu': 'Finno-Ugrian (Other)',
   'fon': 'Fon',
   'fre': 'French',
   'fra': 'French',
   'frm': 'French, Middle (ca.1400-1600)',
   'fro': 'French, Old (842-ca.1400)',
   'frs': 'Frisian, Eastern',
   'fry': 'Frisian, Western',
   'fur': 'Friulian',
   'ful': 'Fulah',
   'gaa': 'Ga',
   'gla': 'Gaelic',
   'glg': 'Gallegan',
   'lug': 'Ganda',
   'gay': 'Gayo',
   'gba': 'Gbaya',
   'gez': 'Geez',
   'geo': 'Georgian',
   'kat': 'Georgian',
   'ger': 'German',
   'deu': 'German',
   'nds': 'German, Low',
   'gmh': 'German, Middle High (ca.1050-1500)',
   'goh': 'German, Old High (ca.750-1050)',
   'gem': 'Germanic (Other)',
   'kik': 'Gikuyu',
   'gil': 'Gilbertese',
   'gon': 'Gondi',
   'gor': 'Gorontalo',
   'got': 'Gothic',
   'grb': 'Grebo',
   'grc': 'Greek, Ancient (to 1453)',
   'gre': 'Greek, Modern (1453-)',
   'ell': 'Greek, Modern (1453-)',
   'kal': 'Greenlandic; Kalaallisut',
   'grn': 'Guarani',
   'guj': 'Gujarati',
   'gwi': 'Gwich\'in',
   'hai': 'Haida',
   'hat': 'Haitian',
   'hau': 'Hausa',
   'haw': 'Hawaiian',
   'heb': 'Hebrew',
   'her': 'Herero',
   'hil': 'Hiligaynon',
   'him': 'Himachali',
   'hin': 'Hindi',
   'hmo': 'Hiri Motu',
   'hit': 'Hittite',
   'hmn': 'Hmong',
   'hun': 'Hungarian',
   'hup': 'Hupa',
   'iba': 'Iban',
   'ice': 'Icelandic',
   'isl': 'Icelandic',
   'ido': 'Ido',
   'ibo': 'Igbo',
   'ijo': 'Ijo',
   'ilo': 'Iloko',
   'smn': 'Inari Sami',
   'inc': 'Indic (Other)',
   'ine': 'Indo-European (Other)',
   'ind': 'Indonesian',
   'inh': 'Ingush',
   'ina': 'Interlingua (International Auxiliary Language Association)',
   'ile': 'Interlingue',
   'iku': 'Inuktitut',
   'ipk': 'Inupiaq',
   'ira': 'Iranian (Other)',
   'gle': 'Irish',
   'mga': 'Irish, Middle (900-1200)',
   'sga': 'Irish, Old (to 900)',
   'iro': 'Iroquoian languages',
   'ita': 'Italian',
   'jpn': 'Japanese',
   'jav': 'Javanese',
   'jrb': 'Judeo-Arabic',
   'jpr': 'Judeo-Persian',
   'kbd': 'Kabardian',
   'kab': 'Kabyle',
   'kac': 'Kachin',
   'kal': 'Kalaallisut',
   'xal': 'Kalmyk',
   'kam': 'Kamba',
   'kan': 'Kannada',
   'kau': 'Kanuri',
   'krc': 'Karachay-Balkar',
   'kaa': 'Kara-Kalpak',
   'krl': 'Karelian',
   'kar': 'Karen',
   'kas': 'Kashmiri',
   'csb': 'Kashubian',
   'kaw': 'Kawi',
   'kaz': 'Kazakh',
   'kha': 'Khasi',
   'khm': 'Khmer',
   'khi': 'Khoisan (Other)',
   'kho': 'Khotanese',
   'kik': 'Kikuyu',
   'kmb': 'Kimbundu',
   'kin': 'Kinyarwanda',
   'kir': 'Kirghiz',
   'tlh': 'Klingon; tlhIngan-Hol',
   'kom': 'Komi',
   'kon': 'Kongo',
   'kok': 'Konkani',
   'kor': 'Korean',
   'kos': 'Kosraean',
   'kpe': 'Kpelle',
   'kro': 'Kru',
   'kua': 'Kuanyama',
   'kum': 'Kumyk',
   'kur': 'Kurdish',
   'kru': 'Kurukh',
   'kut': 'Kutenai',
   'kua': 'Kwanyama',
   'lad': 'Ladino',
   'lah': 'Lahnda',
   'lam': 'Lamba',
   'lao': 'Lao',
   'lat': 'Latin',
   'lav': 'Latvian',
   'ltz': 'Letzeburgesch',
   'lez': 'Lezghian',
   'lim': 'Limburgan',
   'lin': 'Lingala',
   'lit': 'Lithuanian',
   'jbo': 'Lojban',
   'nds': 'Low German',
   'dsb': 'Lower Sorbian',
   'loz': 'Lozi',
   'lub': 'Luba-Katanga',
   'lua': 'Luba-Lulua',
   'lui': 'Luiseno',
   'smj': 'Lule Sami',
   'lun': 'Lunda',
   'luo': 'Luo (Kenya and Tanzania)',
   'lus': 'Lushai',
   'ltz': 'Luxembourgish',
   'mac': 'Macedonian',
   'mkd': 'Macedonian',
   'mad': 'Madurese',
   'mag': 'Magahi',
   'mai': 'Maithili',
   'mak': 'Makasar',
   'mlg': 'Malagasy',
   'may': 'Malay',
   'msa': 'Malay',
   'mal': 'Malayalam',
   'mlt': 'Maltese',
   'mnc': 'Manchu',
   'mdr': 'Mandar',
   'man': 'Mandingo',
   'mni': 'Manipuri',
   'mno': 'Manobo languages',
   'glv': 'Manx',
   'mao': 'Maori',
   'mri': 'Maori',
   'mar': 'Marathi',
   'chm': 'Mari',
   'mah': 'Marshallese',
   'mwr': 'Marwari',
   'mas': 'Masai',
   'myn': 'Mayan languages',
   'men': 'Mende',
   'mic': 'Micmac',
   'min': 'Minangkabau',
   'mwl': 'Mirandese',
   'mis': 'Miscellaneous languages',
   'moh': 'Mohawk',
   'mdf': 'Moksha',
   'mol': 'Moldavian',
   'mkh': 'Mon-Khmer (Other)',
   'lol': 'Mongo',
   'mon': 'Mongolian',
   'mos': 'Mossi',
   'mul': 'Multiple languages',
   'mun': 'Munda languages',
   'nah': 'Nahuatl',
   'nau': 'Nauru',
   'nav': 'Navaho; Navajo',
   'nde': 'Ndebele, North',
   'nbl': 'Ndebele, South',
   'ndo': 'Ndonga',
   'nap': 'Neapolitan',
   'nep': 'Nepali',
   'new': 'Newari',
   'nia': 'Nias',
   'nic': 'Niger-Kordofanian (Other)',
   'ssa': 'Nilo-Saharan (Other)',
   'niu': 'Niuean',
   'nog': 'Nogai',
   'non': 'Norse, Old',
   'nai': 'North American Indian (Other)',
   'frr': 'Northern Frisian',
   'sme': 'Northern Sami',
   'nso': 'Northern Sotho; Pedi; Sepedi',
   'nde': 'North Ndebele',
   'nor': 'Norwegian',
   'nob': 'Norwegian Bokmal',
   'nno': 'Norwegian Nynorsk',
   'nub': 'Nubian languages',
   'nym': 'Nyamwezi',
   'nya': 'Nyanja',
   'nyn': 'Nyankole',
   'nno': 'Nynorsk, Norwegian',
   'nyo': 'Nyoro',
   'nzi': 'Nzima',
   'oci': 'Occitan (post 1500)',
   'oji': 'Ojibwa',
   'ori': 'Oriya',
   'orm': 'Oromo',
   'osa': 'Osage',
   'oss': 'Ossetian; Ossetic',
   'oto': 'Otomian languages',
   'pal': 'Pahlavi',
   'pau': 'Palauan',
   'pli': 'Pali',
   'pam': 'Pampanga',
   'pag': 'Pangasinan',
   'pan': 'Panjabi',
   'pap': 'Papiamento',
   'paa': 'Papuan (Other)',
   'per': 'Persian',
   'fas': 'Persian',
   'peo': 'Persian, Old (ca.600-400)',
   'phi': 'Philippine (Other)',
   'phn': 'Phoenician',
   'pon': 'Pohnpeian',
   'pol': 'Polish',
   'por': 'Portuguese',
   'pra': 'Prakrit languages',
   'oci': 'Provencal',
   'pro': 'Provencal, Old (to 1500)',
   'pan': 'Punjabi',
   'pus': 'Pushto',
   'que': 'Quechua',
   'roh': 'Raeto-Romance',
   'raj': 'Rajasthani',
   'rap': 'Rapanui',
   'rar': 'Rarotongan',
   'qaa': 'Reserved for local use',
   'qtz': 'Reserved for local use',
   'roa': 'Romance (Other)',
   'rum': 'Romanian',
   'ron': 'Romanian',
   'rom': 'Romany',
   'run': 'Rundi',
   'rus': 'Russian',
   'sal': 'Salishan languages',
   'sam': 'Samaritan Aramaic',
   'smi': 'Sami languages (Other)',
   'smo': 'Samoan',
   'sad': 'Sandawe',
   'sag': 'Sango',
   'san': 'Sanskrit',
   'sat': 'Santali',
   'srd': 'Sardinian',
   'sas': 'Sasak',
   'nds': 'Saxon, Low',
   'sco': 'Scots',
   'gla': 'Scottish Gaelic',
   'sel': 'Selkup',
   'sem': 'Semitic (Other)',
   'nso': 'Sepedi; Northern Sotho; Pedi',
   'scc': 'Serbian',
   'srp': 'Serbian',
   'srr': 'Serer',
   'shn': 'Shan',
   'sna': 'Shona',
   'iii': 'Sichuan Yi',
   'scn': 'Sicilian',
   'sid': 'Sidamo',
   'sgn': 'Sign languages',
   'bla': 'Siksika',
   'snd': 'Sindhi',
   'sin': 'Sinhalese',
   'sit': 'Sino-Tibetan (Other)',
   'sio': 'Siouan languages',
   'sms': 'Skolt Sami',
   'den': 'Slave (Athapascan)',
   'sla': 'Slavic (Other)',
   'slo': 'Slovak',
   'slk': 'Slovak',
   'slv': 'Slovenian',
   'sog': 'Sogdian',
   'som': 'Somali',
   'son': 'Songhai',
   'snk': 'Soninke',
   'wen': 'Sorbian languages',
   'nso': 'Sotho, Northern',
   'sot': 'Sotho, Southern',
   'sai': 'South American Indian (Other)',
   'alt': 'Southern Altai',
   'sma': 'Southern Sami',
   'nbl': 'South Ndebele',
   'spa': 'Spanish',
   'srn': 'Sranan Tongo',
   'suk': 'Sukuma',
   'sux': 'Sumerian',
   'sun': 'Sundanese',
   'sus': 'Susu',
   'swa': 'Swahili',
   'ssw': 'Swati',
   'swe': 'Swedish',
   'gsw': 'Swiss German; Alemanic',
   'syr': 'Syriac',
   'tgl': 'Tagalog',
   'tah': 'Tahitian',
   'tai': 'Tai (Other)',
   'tgk': 'Tajik',
   'tmh': 'Tamashek',
   'tam': 'Tamil',
   'tat': 'Tatar',
   'tel': 'Telugu',
   'ter': 'Tereno',
   'tet': 'Tetum',
   'tha': 'Thai',
   'tib': 'Tibetan',
   'bod': 'Tibetan',
   'tig': 'Tigre',
   'tir': 'Tigrinya',
   'tem': 'Timne',
   'tiv': 'Tiv',
   'tlh': 'tlhIngan-Hol; Klingon',
   'tli': 'Tlingit',
   'tpi': 'Tok Pisin',
   'tkl': 'Tokelau',
   'tog': 'Tonga (Nyasa)',
   'ton': 'Tonga (Tonga Islands)',
   'tsi': 'Tsimshian',
   'tso': 'Tsonga',
   'tsn': 'Tswana',
   'tum': 'Tumbuka',
   'tup': 'Tupi languages',
   'tur': 'Turkish',
   'ota': 'Turkish, Ottoman (1500-1928)',
   'tuk': 'Turkmen',
   'tvl': 'Tuvalu',
   'tyv': 'Tuvinian',
   'twi': 'Twi',
   'udm': 'Udmurt',
   'uga': 'Ugaritic',
   'uig': 'Uighur',
   'ukr': 'Ukrainian',
   'umb': 'Umbundu',
   'und': 'Undetermined',
   'hsb': 'Upper Sorbian',
   'urd': 'Urdu',
   'uzb': 'Uzbek',
   'vai': 'Vai',
   'cat': 'Valencian',
   'ven': 'Venda',
   'vie': 'Vietnamese',
   'vol': 'Volapuk',
   'vot': 'Votic',
   'wak': 'Wakashan languages',
   'wal': 'Walamo',
   'wln': 'Walloon',
   'war': 'Waray',
   'was': 'Washo',
   'wel': 'Welsh',
   'cym': 'Welsh',
   'fry': 'Western Frisian',
   'wol': 'Wolof',
   'xho': 'Xhosa',
   'sah': 'Yakut',
   'yao': 'Yao',
   'yap': 'Yapese',
   'yid': 'Yiddish',
   'yor': 'Yoruba',
   'ypk': 'Yupik languages',
   'znd': 'Zande',
   'zap': 'Zapotec',
   'zen': 'Zenaga',
   'zha': 'Zhuang',
   'zul': 'Zulu',
   'zun': 'Zuni' }
| Python |
from base import validatorBase
from validators import *
from logging import InvalidSseType, InvalidNSS, MissingElement, MissingByAndWhenAttrs
import re
class Sharing(validatorBase):
  """Validator for the sx:sharing element (Simple Sharing Extensions)."""
  def getExpectedAttrNames(self):
    return [ (None, u'expires'), (None, u'since'), (None, u'until') ]
  def prevalidate(self):
    has_since = self.attrs.has_key((None,'since'))
    has_until = self.attrs.has_key((None,'until'))
    # 'since' and 'until' are each required whenever the other is present.
    if has_until:
      self.validate_required_attribute((None,'since'), rfc3339)
    else:
      self.validate_optional_attribute((None,'since'), rfc3339)
    if has_since:
      self.validate_required_attribute((None,'until'), rfc3339)
    else:
      self.validate_optional_attribute((None,'until'), rfc3339)
    self.validate_optional_attribute((None,'expires'), rfc3339)
    # The window must not be inverted; timestamps are compared as strings.
    if has_since and has_until:
      if self.attrs[(None,'since')] > self.attrs[(None,'until')]:
        self.log(SinceAfterUntil({}))
  def do_sx_related(self):
    return Related()
class Sync(validatorBase):
  # sx:sync element: identifies an item for synchronization; the id must be
  # unique across the whole feed and updates is a required counter.
  def getExpectedAttrNames(self):
    return [ (None, u'deleted'), (None, u'noconflicts'),
             (None, u'id'), (None, u'updates') ]
  def prevalidate(self):
    self.validate_optional_attribute((None,'deleted'), truefalsestrict)
    self.validate_optional_attribute((None,'noconflicts'), truefalsestrict)
    # id is checked twice: once for feed-wide uniqueness, once for NSS syntax
    self.validate_required_attribute((None,'id'), unique('id',self.parent.parent))
    self.validate_optional_attribute((None,'id'), rfc2141_nss)
    self.validate_required_attribute((None,'updates'), UINT31)
  def validate(self):
    # every sx:sync needs at least one sx:history child
    if not 'sx_history' in self.children:
      self.log(MissingElement({'parent':self.name, 'element':'sx:history'}))
  def do_sx_history(self):
    return History()
  def do_sx_conflicts(self):
    return Conflicts()
class Related(validatorBase):
  # sx:related element: points at a complete or aggregated companion feed.
  def getExpectedAttrNames(self):
    return [ (None, u'link'), (None, u'title'), (None, u'type') ]
  def prevalidate(self):
    self.validate_required_attribute((None,'link'), rfc2396_full)
    self.validate_optional_attribute((None,'title'), nonhtml)
    self.validate_optional_attribute((None,'title'), nonblank)
    self.validate_required_attribute((None,'type'), FeedType)
class History(validatorBase):
  """Validator for sx:history: requires a sequence; by/when belong together."""
  def getExpectedAttrNames(self):
    return [ (None, u'by'), (None, u'sequence'), (None, u'when') ]
  def prevalidate(self):
    self.validate_optional_attribute((None,'by'), nonhtml)
    self.validate_optional_attribute((None,'by'), nonblank)
    self.validate_optional_attribute((None,'by'), rfc2141_nss)
    self.validate_required_attribute((None,'sequence'), UINT31)
    self.validate_optional_attribute((None,'when'), rfc3339)
    has_by = self.attrs.has_key((None,'by'))
    has_when = self.attrs.has_key((None,'when'))
    # Warn when only one of by/when is supplied; error when both are absent.
    if has_when and not has_by:
      self.log(MissingRecommendedAttribute({"attr":"by"}))
    elif has_by and not has_when:
      self.log(MissingRecommendedAttribute({"attr":"when"}))
    elif not has_by and not has_when:
      self.log(MissingByAndWhenAttrs({}))
class FeedType(enumeration):
  # sx:related 'type' attribute: either a complete feed or an aggregated one
  error = InvalidSseType
  valuelist = ['complete', 'aggregated']
class rfc2141_nss(text):
  """Flags text that is not a valid RFC 2141 namespace specific string."""
  def validate(self):
    nss_pattern = "^([0-9a-zA-Z()+,\\-\\.:=@;$_!*'/?#]|%[0-9a-fA-F][0-9a-fA-F])+$"
    if re.match(nss_pattern, self.value) is None:
      self.log(InvalidNSS({"element":self.name,"parent":self.parent.name}))
class Conflicts(validatorBase):
  # sx:conflicts container: holds complete Atom entries or RSS items.
  def do_entry(self):
    from entry import entry
    return entry()
  def do_item(self):
    from item import item
    return item()
| Python |
"""$Id: logging.py 1059 2009-11-12 22:08:04Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1059 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
# feed types
TYPE_UNKNOWN = 0
TYPE_RSS1 = 1
TYPE_RSS2 = 2
TYPE_ATOM = 3
TYPE_ATOM_ENTRY = 4
TYPE_APP_CATEGORIES = 5
TYPE_APP_SERVICE = 6
TYPE_XRD = 7
TYPE_OPENSEARCH = 8
TYPE_OPML = 9
TYPE_KML20 = 10
TYPE_KML21 = 11
TYPE_KML22 = 12

# Human-readable name for each feed type, keyed by the TYPE_* constants.
FEEDTYPEDISPLAY = {
  TYPE_UNKNOWN:        "(unknown type)",
  TYPE_RSS1:           "RSS",
  TYPE_RSS2:           "RSS",
  TYPE_ATOM:           "Atom 1.0",
  TYPE_ATOM_ENTRY:     "Atom 1.0",
  TYPE_APP_CATEGORIES: "Atom Publishing Protocol Category",
  TYPE_APP_SERVICE:    "Atom Publishing Protocol Service",
  TYPE_XRD:            "XRD",
  TYPE_OPENSEARCH:     "OpenSearch",
  TYPE_OPML:           "OPML",
  TYPE_KML20:          "KML 2.0",
  TYPE_KML21:          "KML 2.1",
  TYPE_KML22:          "KML 2.2",
}

# "valid feed" badge graphic file for each feed type.
VALIDFEEDGRAPHIC = {
  TYPE_UNKNOWN:        "",
  TYPE_RSS1:           "valid-rss.png",
  TYPE_RSS2:           "valid-rss-rogers.png",
  TYPE_ATOM:           "valid-atom.png",
  TYPE_ATOM_ENTRY:     "valid-atom.png",
  TYPE_APP_CATEGORIES: "valid-atom.png",
  TYPE_APP_SERVICE:    "valid-atom.png",
  TYPE_XRD:            "valid-xrd.png",
  TYPE_OPENSEARCH:     "valid-opensearch.png",
  TYPE_OPML:           "valid-opml.gif",
  TYPE_KML20:          "valid-kml.png",
  TYPE_KML21:          "valid-kml.png",
  TYPE_KML22:          "valid-kml.png",
}
#
# logging support
#
class LoggedEvent:
  # Root of the event hierarchy: everything the validator can report.
  # params is a dict of values (element, parent, value, ...) used to
  # render the message for this event.
  def __init__(self, params):
    self.params = params
class Info(LoggedEvent): pass
class Message(LoggedEvent): pass
class Warning(Message): pass
class Error(Message): pass
class ValidationFailure(Error):
  # Signals that validation cannot proceed; wraps the triggering event.
  def __init__(self, event):
    LoggedEvent.__init__(self, {})
    self.event = event
###################### error ######################
# Each class below is a distinct error event type; the class identity (not
# any behavior) selects the message template shown to the user.
# NOTE: UnicodeError and IOError intentionally shadow the builtins of the
# same name within this module; they are event types, not exceptions.
class SAXError(Error): pass
class WPBlankLine(SAXError): pass
class UnicodeError(Error): pass
class MissingNamespace(SAXError): pass
class NotInANamespace(MissingNamespace): pass
class UseOfExtensionAttr(Warning): pass
class UndefinedNamedEntity(SAXError): pass
class InvalidRSSVersion(Error): pass
class UndefinedElement(Error): pass
class NoBlink(UndefinedElement): pass
class NoThrWhen(UndefinedElement): pass
class MissingAttribute(Error): pass
class UnexpectedAttribute(Error): pass
class DuplicateElement(Error): pass
class NotEnoughHoursInTheDay(Error): pass
class EightDaysAWeek(Error): pass
class InvalidValue(Error): pass
class InvalidContact(InvalidValue): pass
class UnknownHost(Warning): pass
class InvalidAddrSpec(InvalidContact): pass
class InvalidLink(InvalidValue): pass
class UriNotIri(InvalidLink): pass
class InvalidIRI(InvalidLink): pass
class InvalidFullLink(InvalidLink): pass
class InvalidUriChar(InvalidLink): pass
class InvalidISO8601Date(InvalidValue): pass
class InvalidISO8601DateTime(InvalidValue): pass
class InvalidW3CDTFDate(InvalidISO8601Date): pass
class InvalidRFC2822Date(InvalidValue): pass
class IncorrectDOW(InvalidRFC2822Date): pass
class InvalidRFC3339Date(InvalidValue): pass
class InvalidURIAttribute(InvalidLink): pass
class InvalidURLAttribute(InvalidURIAttribute): pass
class InvalidIntegerAttribute(InvalidValue): pass
class InvalidBooleanAttribute(InvalidValue): pass
class InvalidMIMEAttribute(InvalidValue): pass
class InvalidInteger(InvalidValue): pass
class InvalidPercentage(InvalidValue): pass
class InvalidNonNegativeInteger(InvalidInteger): pass
class InvalidPositiveInteger(InvalidInteger): pass
class InvalidAlphanum(Error): pass
class InvalidWidth(InvalidValue): pass
class InvalidHeight(InvalidValue): pass
class InvalidHour(InvalidValue): pass
class InvalidDay(InvalidValue): pass
class InvalidHttpGUID(InvalidValue): pass
class InvalidLanguage(InvalidValue): pass
class InvalidUpdatePeriod(InvalidValue): pass
class InvalidItunesCategory(InvalidValue): pass
class ObsoleteItunesCategory(Warning): pass
class InvalidYesNo(InvalidValue): pass
class InvalidYesNoClean(InvalidValue): pass
class InvalidDuration(InvalidValue): pass
class TooLong(InvalidValue): pass
class InvalidKeywords(Warning): pass
class InvalidTextType(InvalidValue): pass
class InvalidCommaSeparatedIntegers(InvalidValue): pass
class UndeterminableVocabulary(Warning): pass
class InvalidFormComponentName(InvalidValue): pass
class InvalidAccessRestrictionRel(InvalidValue): pass
class NotURLEncoded(InvalidValue): pass
class InvalidLocalRole(InvalidValue): pass
class InvalidEncoding(InvalidValue): pass
class InvalidSyndicationRight(InvalidValue): pass
class InvalidLocalParameter(InvalidValue): pass
class MissingElement(Error): pass
class MissingDescription(MissingElement): pass
class MissingLink(MissingElement): pass
class MissingTitle(MissingElement): pass
class ItemMustContainTitleOrDescription(MissingElement): pass
class MissingXhtmlDiv(MissingElement): pass
class MissingContentOrAlternate(MissingElement): pass
class FatalSecurityRisk(Error): pass
class ContainsSystemEntity(Info): pass
class DuplicateValue(InvalidValue): pass
class InvalidDoctype(Error): pass
class BadXmlVersion(Error): pass
class DuplicateAtomLink(Error): pass
class MissingHref(MissingAttribute): pass
class AtomLinkNotEmpty(Warning): pass
class UnregisteredAtomLinkRel(Warning): pass
class HttpError(Error): pass
class IOError(Error): pass
class UnknownEncoding(Error): pass
class UnexpectedText(Error): pass
class UnexpectedWhitespace(Error): pass
class ValidatorLimit(Error): pass
class HttpProtocolError(Error): pass
class InvalidRDF(Error): pass
class InvalidLatitude(Error): pass
class InvalidLongitude(Error): pass
class MisplacedMetadata(Error): pass
class InvalidPermalink(Error): pass
class InvalidCreditRole(Error): pass
class InvalidMediaTextType(Error): pass
class InvalidMediaHash(Error): pass
class InvalidMediaRating(Error): pass
class InvalidNPTTime(Error): pass
class InvalidMediaRestriction(Error): pass
class InvalidMediaRestrictionRel(Error): pass
class InvalidMediaRestrictionType(Error): pass
class InvalidMediaMedium(Error): pass
class InvalidMediaExpression(Error): pass
class DeprecatedMediaAdult(Warning): pass
class MediaGroupWithoutAlternatives(Error): pass
class InvalidSseType(Error): pass
class InvalidNSS(Error): pass
class IntegerOverflow(Error): pass
class SinceAfterUntil(Error): pass
class MissingByAndWhenAttrs(Error): pass
###################### warning ######################
# Warning event types; class identity selects the message template.
# Fix: removed a duplicate redefinition of InvalidSyndicationRight here —
# it is already defined (identically) in the error section above, and the
# redefinition was a no-op.
class DuplicateSemantics(Warning): pass
class DuplicateItemSemantics(DuplicateSemantics): pass
class DuplicateDescriptionSemantics(DuplicateSemantics): pass
class ImageLinkDoesntMatch(Warning): pass
class ImageUrlFormat(Warning): pass
class ContainsRelRef(Warning): pass
class ReservedPrefix(Warning): pass
class MediaRssNamespace(Error): pass
class NotSufficientlyUnique(Warning): pass
class ImplausibleDate(Warning): pass
class ProblematicalRFC822Date(Warning): pass
class SecurityRisk(Warning): pass
class SecurityRiskAttr(SecurityRisk): pass
class DangerousStyleAttr(SecurityRiskAttr): pass
class BadCharacters(Warning): pass
class ObscureEncoding(Warning): pass
class UnexpectedContentType(Warning): pass
class EncodingMismatch(Warning): pass
class NonSpecificMediaType(Warning): pass
class NonCanonicalURI(Warning): pass
class SameDocumentReference(Warning): pass
class ContainsEmail(Warning): pass
class ContainsHTML(Warning): pass
class ContainsUndeclaredHTML(ContainsHTML): pass
class MissingSelf(Warning): pass
class SelfDoesntMatchLocation(Warning): pass
class RelativeSelf(Warning): pass
class MissingSourceElement(Warning): pass
class MissingTypeAttr(Warning): pass
class DuplicateIds(Error): pass
class DuplicateEntries(Warning): pass
class DuplicateUpdated(Warning): pass
class NotBlank(Warning): pass
class AttrNotBlank(Warning): pass
class MissingSummary(Error): pass
class MissingTextualContent(Warning): pass
class NotUTF8(Warning): pass
class MissingItunesElement(Warning): pass
class MissingItunesEmail(Warning): pass
class UnsupportedItunesFormat(Warning): pass
class SelfNotAtom(Warning): pass
class DuplicateEnclosure(Warning): pass
class MissingGuid(Warning): pass
class ObsoleteWikiNamespace(Warning): pass
class CommentRSS(Warning): pass
class ShouldIncludeExample(Warning): pass
class InvalidAdultContent(Warning): pass
class UndeclaredPrefix(InvalidValue): pass
class MisplacedXHTMLContent(Warning): pass
class SchemeNotIANARegistered(Warning): pass
class AvoidNamespacePrefix(Warning): pass
class UnknownNamespace(Warning): pass
class MissingRecommendedAttribute(Warning): pass
class QuestionableUsage(Warning): pass
###################### info ######################
# Informational / best-practice event types.
class BestPractices(Info): pass
class MissingRecommendedElement(BestPractices): pass
class MissingDCLanguage(MissingRecommendedElement): pass
class NonstdPrefix(BestPractices): pass
class NonstdEncoding(BestPractices): pass
class MissingEncoding(BestPractices): pass
class TempRedirect(Info): pass
class TextXml(Info): pass
class Uncompressed(Info): pass
## Atom-specific errors
class ObsoleteVersion(Warning): pass
class ObsoleteNamespace(Error): pass
class ConflictingCatAttr(Error): pass
class ConflictingCatChildren(Error): pass
class InvalidMediaRange(Error): pass
class UndefinedParam(Warning): pass
class InvalidURI(InvalidValue) : pass
class InvalidURN(InvalidValue): pass
class InvalidUUID(InvalidValue): pass
class InvalidTAG(InvalidValue): pass
class InvalidContentMode(InvalidValue) : pass
class InvalidMIMEType(InvalidMediaRange) : pass
class InvalidNamespace(Error): pass
class NotEscaped(InvalidValue): pass
class NotBase64(InvalidValue): pass
class NotInline(Warning): pass # this one can never be sure...
class NotHtml(Warning): pass
class HtmlFragment(Warning): pass
class FeedHistoryRelInEntry(Warning): pass
class FeedRelInCompleteFeed(Error): pass
class CurrentNotSelfInCompleteFeed(Error): pass
class LinkPastEnd(Error): pass
class MissingCurrentInArchive(Warning): pass
class ArchiveIncomplete(Warning): pass
############## non-errors (logging successes) ###################
# Success event types, logged when a value validates cleanly.
# Fix: removed an exact duplicate definition of ValidTitle that appeared a
# few lines below the first one; the redefinition was a no-op.
class Success(LoggedEvent): pass
class ValidValue(Success): pass
class ValidCloud(Success): pass
class ValidURI(ValidValue): pass
class ValidHttpGUID(ValidURI): pass
class ValidURLAttribute(ValidURI): pass
class ValidURN(ValidValue): pass
class ValidTAG(ValidValue): pass
class ValidTitle(ValidValue): pass
class ValidDate(ValidValue): pass
class ValidW3CDTFDate(ValidDate): pass
class ValidRFC2822Date(ValidDate): pass
class ValidAttributeValue(ValidValue): pass
class ValidBooleanAttribute(ValidAttributeValue): pass
class ValidLanguage(ValidValue): pass
class ValidHeight(ValidValue): pass
class ValidWidth(ValidValue): pass
class ValidContact(ValidValue): pass
class ValidIntegerAttribute(ValidValue): pass
class ValidMIMEAttribute(ValidValue): pass
class ValidDay(ValidValue): pass
class ValidHour(ValidValue): pass
class ValidInteger(ValidValue): pass
class ValidPercentage(ValidValue): pass
class ValidUpdatePeriod(ValidValue): pass
class ValidContentMode(ValidValue): pass
class ValidElement(ValidValue): pass
class ValidCopyright(ValidValue): pass
class ValidGeneratorName(ValidValue): pass
class OptionalValueMissing(ValidValue): pass
class ValidDoctype(ValidValue): pass
class DeprecatedDTD(Error): pass
class ValidHtml(ValidValue): pass
class ValidAtomLinkRel(ValidValue): pass
class ValidLatitude(ValidValue): pass
class ValidLongitude(ValidValue): pass
class ValidNPTTime(ValidValue): pass
###################### opml ######################
class InvalidOPMLVersion(Error): pass
class MissingXmlURL(Warning): pass
class InvalidOutlineVersion(Warning): pass
class InvalidOutlineType(Warning): pass
class InvalidExpansionState(Error): pass
class InvalidTrueFalse(InvalidValue): pass
class MissingOutlineType(Warning): pass
class MissingTitleAttr(Warning): pass
class MissingUrlAttr(Warning): pass
###################### gbase ######################
class InvalidCountryCode(InvalidValue): pass
class InvalidCurrencyUnit(InvalidValue): pass
class InvalidFloat(InvalidValue): pass
class InvalidFloatUnit(InvalidValue): pass
class InvalidFullLocation(InvalidValue): pass
class InvalidGender(InvalidValue): pass
class InvalidIntUnit(InvalidValue): pass
class InvalidLabel(InvalidValue): pass
class InvalidLocation(InvalidValue): pass
class InvalidMaritalStatus(InvalidValue): pass
class InvalidPaymentMethod(InvalidValue): pass
class InvalidPriceType(InvalidValue): pass
class InvalidRatingType(InvalidValue): pass
class InvalidReviewerType(InvalidValue): pass
class InvalidSalaryType(InvalidValue): pass
class InvalidServiceType(InvalidValue): pass
class InvalidYear(InvalidValue): pass
class TooMany(DuplicateElement): pass
###################### georss ######################
class InvalidCoord(InvalidValue): pass
class InvalidCoordList(InvalidValue): pass
class CoordComma(Warning): pass
###################### meta ######################
class InvalidMetaName(InvalidValue): pass
class InvalidMetaContent(InvalidValue): pass
###################### kml ######################
class Deprecated(Warning): pass
class DeprecatedRootHref(Warning): pass
class InvalidAltitudeMode(InvalidValue): pass
class InvalidAngle(InvalidValue): pass
class InvalidColor(InvalidValue): pass
class InvalidColorMode(InvalidValue): pass
class InvalidItemIconState(InvalidValue): pass
class InvalidListItemType(InvalidValue): pass
class InvalidKmlCoordList(InvalidValue): pass
class InvalidKmlLatitude(InvalidValue): pass
class InvalidKmlLongitude(InvalidValue): pass
class InvalidKmlMediaType(Warning): pass
class InvalidKmlUnits(InvalidValue): pass
class InvalidRefreshMode(InvalidValue): pass
class InvalidSchemaFieldType(InvalidValue): pass
class InvalidStyleState(InvalidValue): pass
class InvalidViewRefreshMode(InvalidValue): pass
class InvalidZeroOne(InvalidValue): pass
class MissingId(Warning): pass
class ValidAngle(ValidValue): pass
###################### RSS 2.0 Profile ######################
class RSS20Profile(Warning): pass
class CharacterData(ContainsHTML): pass
class EmailFormat(RSS20Profile): pass
class MissingRealName(EmailFormat): pass
class MisplacedItem(RSS20Profile): pass
class ImageTitleDoesntMatch(RSS20Profile): pass
class AvoidTextInput(RSS20Profile): pass
class NeedDescriptionBeforeContent(RSS20Profile): pass
class SlashDate(RSS20Profile): pass
class UseZeroForMidnight(RSS20Profile): pass
class MissingAtomSelfLink(MissingSelf): pass
class UseZeroForUnknown(InvalidNonNegativeInteger): pass
| Python |
"""$Id: itunes.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from validators import *
class itunes:
  # Mixin shared by itunes_channel and itunes_item: each do_* handler
  # returns the validator(s) to apply to the matching itunes:* child
  # (length limits per Apple's podcast spec, no duplicates allowed).
  def do_itunes_author(self):
    return lengthLimitedText(255), noduplicates()
  def do_itunes_block(self):
    return yesnoclean(), noduplicates()
  def do_itunes_explicit(self):
    return yesnoclean(), noduplicates()
  def do_itunes_keywords(self):
    return lengthLimitedText(255), keywords(), noduplicates()
  def do_itunes_subtitle(self):
    return lengthLimitedText(255), noduplicates()
  def do_itunes_summary(self):
    # summaries get a larger budget than the other text fields
    return lengthLimitedText(4000), noduplicates()
  def do_itunes_image(self):
    return image(), noduplicates()
class itunes_channel(itunes):
  # Channel-level iTunes handling.  Import lives in the class body so the
  # name is available to validate() without polluting the module namespace.
  from logging import MissingItunesElement
  def validate(self):
    # Elements Apple requires (or strongly recommends) at channel level.
    if not 'language' in self.children and not self.xmlLang:
      self.log(MissingItunesElement({"parent":self.name, "element":'language'}))
    if not 'itunes_category' in self.children:
      self.log(MissingItunesElement({"parent":self.name, "element":'itunes:category'}))
    if not 'itunes_explicit' in self.children:
      self.log(MissingItunesElement({"parent":self.name, "element":'itunes:explicit'}))
    if not 'itunes_owner' in self.children:
      # NOTE(review): a missing itunes:owner is reported as a missing
      # itunes:email event — confirm this asymmetry is intentional
      self.log(MissingItunesEmail({"parent":self.name, "element":'itunes:email'}))
  def setItunes(self, value):
    # Called when an itunes:* element is seen; work below runs only on the
    # first transition into "itunes mode".
    if value and not self.itunes:
      if self.dispatcher.encoding.lower() not in ['utf-8','utf8']:
        from logging import NotUTF8
        self.log(NotUTF8({"parent":self.parent.name, "element":self.name}))
      if self.getFeedType() == TYPE_ATOM and 'entry' in self.children:
        # Atom feeds validate eagerly once an entry has already been seen
        self.validate()
    # accumulate truthiness: once itunes, always itunes
    self.itunes |= value
  def do_itunes_owner(self):
    return owner(), noduplicates()
  def do_itunes_category(self):
    return category()
  def do_itunes_pubDate(self):
    return rfc822(), noduplicates()
  def do_itunes_new_feed_url(self):
    # handler names use '_' for both ':' and '-', so verify the element was
    # really spelled "itunes:new-feed-url" (not, e.g., with underscores)
    if self.child != 'itunes_new-feed-url':
      self.log(UndefinedElement({"parent":self.name.replace("_",":"), "element":self.child}))
    return rfc2396_full(), noduplicates()
class itunes_item(itunes):
  # Item/entry-level iTunes handling.
  # File extensions iTunes is documented to support for enclosures.
  supported_formats = ['m4a', 'mp3', 'mov', 'mp4', 'm4v', 'pdf']
  def validate(self):
    # no required item-level itunes children
    pass
  def setItunes(self, value):
    # On the first itunes:* element, flag the parent channel and re-check
    # any enclosures that were recorded before itunes mode switched on.
    if value and not self.itunes:
      self.parent.setItunes(True)
      self.itunes = value
      if hasattr(self, 'enclosures'):
        save, self.enclosures = self.enclosures, []
        for enclosure in save:
          self.setEnclosure(enclosure)
  def setEnclosure(self, url):
    if self.itunes:
      # http://www.apple.com/itunes/podcasts/techspecs.html#_Toc526931678
      ext = url.split('.')[-1]
      if ext not in itunes_item.supported_formats:
        from logging import UnsupportedItunesFormat
        self.log(UnsupportedItunesFormat({"parent":self.parent.name, "element":self.name, "extension":ext}))
    # always record the URL so it can be replayed by setItunes later
    if not hasattr(self, 'enclosures'): self.enclosures = []
    self.enclosures.append(url)
  def do_itunes_duration(self):
    return duration(), noduplicates()
class owner(validatorBase):
  """itunes:owner container: an itunes:email child is mandatory,
  itunes:name is optional."""
  def validate(self):
    if "itunes_email" not in self.children:
      self.log(MissingElement({"parent": self.name.replace("_", ":"),
                               "element": "itunes:email"}))
  def do_itunes_email(self):
    return email(), noduplicates()
  def do_itunes_name(self):
    return lengthLimitedText(255), noduplicates()
class subcategory(validatorBase):
  # Validates one itunes:category level against a list of currently-valid
  # names and a list of obsolete (pre-4.9) names.
  def __init__(self, newlist, oldlist):
    validatorBase.__init__(self)
    self.newlist = newlist    # names valid in the current taxonomy
    self.oldlist = oldlist    # names from the superseded taxonomy
    self.text = None          # value of the text attribute, once seen
  def getExpectedAttrNames(self):
    return [(None, u'text')]
  def prevalidate(self):
    # attrs.getValue raises KeyError when the attribute is absent,
    # which is how the MissingAttribute branch is reached.
    try:
      self.text=self.attrs.getValue((None, "text"))
      if not self.text in self.newlist:
        if self.text in self.oldlist:
          self.log(ObsoleteItunesCategory({"parent":self.parent.name.replace("_",":"),
                        "element":self.name.replace("_",":"),
                        "text":self.text}))
        else:
          self.log(InvalidItunesCategory({"parent":self.parent.name.replace("_",":"),
                        "element":self.name.replace("_",":"),
                        "text":self.text}))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name.replace("_",":"),
                    "element":self.name.replace("_",":"),
                    "attr":"text"}))
class image(validatorBase):
  # itunes:image — carries everything in its href attribute, which must
  # be present and an http URL.
  def getExpectedAttrNames(self):
    return [(None, u'href')]
  def prevalidate(self):
    self.validate_required_attribute((None,'href'), httpURL)
class category(subcategory):
  # Top-level itunes:category: checks against the full taxonomy, then
  # validates nested itunes:category elements against the subcategory
  # lists belonging to this category's text.
  def __init__(self):
    subcategory.__init__(self, valid_itunes_categories.keys(),
      old_itunes_categories.keys())
  def do_itunes_category(self):
    # if the text attribute was missing, silently consume the children
    if not self.text: return eater()
    return subcategory(valid_itunes_categories.get(self.text,[]),
      old_itunes_categories.get(self.text,[]))
# Apple's current (at $Revision$) category taxonomy:
# category name -> list of its valid subcategory names.
valid_itunes_categories = {
  "Arts": [
    "Design",
    "Fashion & Beauty",
    "Food",
    "Literature",
    "Performing Arts",
    "Visual Arts"],
  "Business": [
    "Business News",
    "Careers",
    "Investing",
    "Management & Marketing",
    "Shopping"],
  "Comedy": [],
  "Education": [
    "Education Technology",
    "Higher Education",
    "K-12",
    "Language Courses",
    "Training"],
  "Games & Hobbies": [
    "Automotive",
    "Aviation",
    "Hobbies",
    "Other Games",
    "Video Games"],
  "Government & Organizations": [
    "Local",
    "National",
    "Non-Profit",
    "Regional"],
  "Health": [
    "Alternative Health",
    "Fitness & Nutrition",
    "Self-Help",
    "Sexuality"],
  "Kids & Family": [],
  "Music": [],
  "News & Politics": [],
  "Religion & Spirituality": [
    "Buddhism",
    "Christianity",
    "Hinduism",
    "Islam",
    "Judaism",
    "Other",
    "Spirituality"],
  "Science & Medicine": [
    "Medicine",
    "Natural Sciences",
    "Social Sciences"],
  "Society & Culture": [
    "History",
    "Personal Journals",
    "Philosophy",
    "Places & Travel"],
  "Sports & Recreation": [
    "Amateur",
    "College & High School",
    "Outdoor",
    "Professional"],
  "Technology": [
    "Gadgets",
    "Tech News",
    "Podcasting",
    "Software How-To"],
  "TV & Film": [],
  }
# Superseded taxonomy: names found here (but not above) are logged as
# ObsoleteItunesCategory rather than InvalidItunesCategory.
old_itunes_categories = {
  "Arts & Entertainment": [
    "Architecture",
    "Books",
    "Design",
    "Entertainment",
    "Games",
    "Performing Arts",
    "Photography",
    "Poetry",
    "Science Fiction"],
  "Audio Blogs": [],
  "Business": [
    "Careers",
    "Finance",
    "Investing",
    "Management",
    "Marketing"],
  "Comedy": [],
  "Education": [
    "Higher Education",
    "K-12"],
  "Family": [],
  "Food": [],
  "Health": [
    "Diet & Nutrition",
    "Fitness",
    "Relationships",
    "Self-Help",
    "Sexuality"],
  "International": [
    "Australian",
    "Belgian",
    "Brazilian",
    "Canadian",
    "Chinese",
    "Dutch",
    "French",
    "German",
    "Hebrew",
    "Italian",
    "Japanese",
    "Norwegian",
    "Polish",
    "Portuguese",
    "Spanish",
    "Swedish"],
  "Movies & Television": [],
  "Music": [],
  "News": [],
  "Politics": [],
  "Public Radio": [],
  "Religion & Spirituality": [
    "Buddhism",
    "Christianity",
    "Islam",
    "Judaism",
    "New Age",
    "Philosophy",
    "Spirituality"],
  "Science": [],
  "Sports": [],
  "Talk Radio": [],
  "Technology": [
    "Computers",
    "Developers",
    "Gadgets",
    "Information Technology",
    "News",
    "Operating Systems",
    "Podcasting",
    "Smart Phones",
    "Text/Speech"],
  "Transportation": [
    "Automotive",
    "Aviation",
    "Bicycles",
    "Commuting"],
  "Travel": []
  }
class yesnoclean(text):
  """Text element whose value must be 'yes', 'no', or 'clean'
  (case-insensitive)."""
  def normalizeWhitespace(self):
    # keep the raw value; the comparison below lowercases a copy
    pass
  def validate(self):
    if self.value.lower() not in ('yes', 'no', 'clean'):
      self.log(InvalidYesNoClean({"parent":self.parent.name, "element":self.name,"value":self.value}))
| Python |
# http://msdn.microsoft.com/XML/rss/sle/default.aspx
from base import validatorBase
from validators import eater, text
class sort(validatorBase):
  # cf:sort — Simple List Extensions; all information is in attributes.
  def getExpectedAttrNames(self):
    return [(None,u'data-type'),(None,u'default'),(None,u'element'),(None, u'label'),(None,u'ns')]
class group(validatorBase):
  # cf:group — Simple List Extensions; all information is in attributes.
  def getExpectedAttrNames(self):
    return [(None,u'element'),(None, u'label'),(None,u'ns')]
class listinfo(validatorBase):
  # cf:listinfo container: accepts cf:sort and cf:group children.
  def do_cf_sort(self):
    return sort()
  def do_cf_group(self):
    return group()
class treatAs(text): pass  # cf:treatAs — plain text content, no extra checks
| Python |
"""$Id: rss.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import noduplicates
#
# Rss element. The only valid child element is "channel"
#
class rss(validatorBase):
  # Root <rss> element; the only required child is <channel>.
  def do_channel(self):
    from channel import rss20Channel
    return rss20Channel(), noduplicates()
  def do_access_restriction(self):
    from extension import access_restriction
    return access_restriction(), noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'version')]
  def prevalidate(self):
    self.setFeedType(TYPE_RSS2) # could be anything in the 0.9x family, don't really care
    # default assumed version when the attribute is missing
    self.version = "2.0"
    if (None,'version') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    elif [e for e in self.dispatcher.loggedEvents if e.__class__==ValidDoctype]:
      # a ValidDoctype event means an RSS 0.91 DOCTYPE was seen earlier,
      # so the version attribute must agree ('<>' is Python 2 for '!=')
      self.version = self.attrs[(None,'version')]
      if self.attrs[(None,'version')]<>'0.91':
        self.log(InvalidDoctype({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    else:
      self.version = self.attrs[(None,'version')]
      if self.version not in ['0.91', '0.92', '2.0']:
        self.log(InvalidRSSVersion({"parent":self.parent.name, "element":self.name, "value":self.version}))
  def validate(self):
    if not "channel" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"channel"}))
| Python |
"""$Id: textInput.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from validators import *
from extension import extension_everywhere
#
# textInput element.
#
class textInput(validatorBase, extension_everywhere):
  """RSS textInput element: title, description, name and link are all
  required children; Dublin Core extensions are accepted."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def validate(self):
    # each missing required child logs its own event class
    for child, event in (("title", MissingTitle),
                         ("link", MissingLink),
                         ("description", MissingDescription),
                         ("name", MissingElement)):
      if child not in self.children:
        self.log(event({"parent": self.name, "element": child}))
  def do_title(self):
    return nonhtml(), noduplicates()
  def do_description(self):
    return text(), noduplicates()
  def do_name(self):
    return formname(), noduplicates()
  def do_link(self):
    return rfc2396_full(), noduplicates()
  def do_dc_creator(self):
    return text() # duplicates allowed
  def do_dc_subject(self):
    return text() # duplicates allowed
  def do_dc_date(self):
    return w3cdtf(), noduplicates()
| Python |
"""$Id: link.py 1060 2010-01-23 01:29:14Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1060 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# Atom link element
#
# Bug fix: the original base list named `nonblank` twice
# (first and last position), which raises "TypeError: duplicate base
# class" the moment the class statement executes.  One mention suffices.
class link(nonblank,xmlbase,iso639,nonhtml,nonNegativeInteger,rfc3339):
  """atom:link: all information is carried in attributes; each attribute
  is validated by temporarily pointing self.value/self.name at it and
  delegating to the matching mixin's validate()."""
  validRelations = [
    # http://www.iana.org/assignments/link-relations.html
    'alternate', # RFC4287
    'current', # RFC5005
    'describedby', # http://www.w3.org/TR/powder-dr/#assoc-linking
    'edit', # RFC-ietf-atompub-protocol-17.txt
    'edit-media', # RFC-ietf-atompub-protocol-17.txt
    'enclosure', # RFC4287
    'first', # RFC5005
    'hub', # http://pubsubhubbub.googlecode.com/
    'last', # RFC5005
    'license', # RFC4946
    'next', # RFC5005
    'next-archive', # RFC5005
    'payment', # Kinberg
    'prev-archive', # RFC5005
    'previous', # RFC5005
    'related', # RFC4287
    'replies', # RFC4685
    'self', # RFC4287
    'service', # Snell
    'up', # Slater
    'via' # RFC4287
  ]
  # feed-paging relations that only make sense at feed level (RFC 5005)
  rfc5005 = [
    'current', # RFC5005
    'first', # RFC5005
    'last', # RFC5005
    'next', # RFC5005
    'next-archive', # RFC5005
    'prev-archive', # RFC5005
    'previous', # RFC5005
  ]
  def getExpectedAttrNames(self):
    return [(None, u'type'), (None, u'title'), (None, u'rel'),
            (None, u'href'), (None, u'length'), (None, u'hreflang'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'type'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
            (u'http://purl.org/syndication/thread/1.0', u'count'),
            (u'http://purl.org/syndication/thread/1.0', u'when'),
            (u'http://purl.org/syndication/thread/1.0', u'updated')]
  def validate(self):
    self.type = ""
    self.rel = "alternate"   # RFC 4287 default when @rel is absent
    self.href = ""
    self.hreflang = ""
    self.title = ""
    if self.attrs.has_key((None, "rel")):
      self.value = self.rel = self.attrs.getValue((None, "rel"))
      # IANA-URI-prefixed relations are equivalent to their short names
      if self.rel.startswith('http://www.iana.org/assignments/relation/'):
        self.rel=self.rel[len('http://www.iana.org/assignments/relation/'):]
      if self.rel in self.validRelations:
        self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      elif rfc2396_full.rfc2396_re.match(self.rel.encode('idna')):
        # any absolute URI is also a legal (extension) relation
        self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      else:
        self.log(UnregisteredAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "rel"})
      if self.rel in self.rfc5005 and self.parent.name == 'entry':
        self.log(FeedHistoryRelInEntry({"rel":self.rel}))
    if self.attrs.has_key((None, "type")):
      self.value = self.type = self.attrs.getValue((None, "type"))
      if not mime_re.match(self.type):
        self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
      elif self.rel == "self" and self.type not in ["application/atom+xml", "application/rss+xml", "application/rdf+xml"]:
        self.log(SelfNotAtom({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
      else:
        self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    if self.attrs.has_key((None, "title")):
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name, "attr":"title"}))
      self.value = self.title = self.attrs.getValue((None, "title"))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "title"})
      nonhtml.validate(self)
    if self.attrs.has_key((None, "length")):
      self.name = 'length'
      self.value = self.attrs.getValue((None, "length"))
      nonNegativeInteger.validate(self)
      nonblank.validate(self)
    if self.attrs.has_key((None, "hreflang")):
      self.name = 'hreflang'
      self.value = self.hreflang = self.attrs.getValue((None, "hreflang"))
      iso639.validate(self)
    if self.attrs.has_key((None, "href")):
      self.name = 'href'
      self.value = self.href = self.attrs.getValue((None, "href"))
      xmlbase.validate(self, extraParams={"attr": "href"})
      if self.rel == "self" and self.parent.name in ["feed","channel"]:
        # detect relative self values
        from urlparse import urlparse
        from xml.dom import XML_NAMESPACE
        absolute = urlparse(self.href)[1]
        element = self
        # walk up the tree looking for an absolute xml:base
        while not absolute and element and hasattr(element,'attrs'):
          pattrs = element.attrs
          if pattrs and pattrs.has_key((XML_NAMESPACE, u'base')):
            absolute=urlparse(pattrs.getValue((XML_NAMESPACE, u'base')))[1]
          element = element.parent
        if not absolute:
          self.log(RelativeSelf({"value":self.href}))
        from urlparse import urljoin
        if urljoin(self.xmlBase,self.value) not in self.dispatcher.selfURIs:
          if urljoin(self.xmlBase,self.value).split('#')[0] != self.xmlBase.split('#')[0]:
            from uri import Uri
            if self.value.startswith('http://feeds.feedburner.com/'):
              if self.value.endswith('?format=xml'):
                self.value = self.value.split('?')[0]
            value = Uri(self.value)
            for docbase in self.dispatcher.selfURIs:
              if value == Uri(docbase): break
              # don't complain when validating feedburner's xml view
              if docbase.startswith('http://feeds.feedburner.com/'):
                if docbase.endswith('?format=xml'):
                  if value == Uri(docbase.split('?')[0]): break
            else:
              self.log(SelfDoesntMatchLocation({"parent":self.parent.name, "element":self.name}))
          self.dispatcher.selfURIs.append(urljoin(self.xmlBase,self.value))
    else:
      self.log(MissingHref({"parent":self.parent.name, "element":self.name, "attr":"href"}))
    # in-reply-to threading extension attributes (RFC 4685)
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'count')):
      if self.rel != "replies":
        self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:count"}))
      self.value = self.attrs.getValue((u'http://purl.org/syndication/thread/1.0', u'count'))
      self.name="thr:count"
      nonNegativeInteger.validate(self)
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'when')):
      self.log(NoThrWhen({"parent":self.parent.name, "element":self.name, "attribute":"thr:when"}))
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'updated')):
      if self.rel != "replies":
        self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:updated"}))
      self.value = self.attrs.getValue((u'http://purl.org/syndication/thread/1.0', u'updated'))
      self.name="thr:updated"
      rfc3339.validate(self)
  def startElementNS(self, name, qname, attrs):
    # swallow any child elements; atom:link must be empty
    self.push(eater(), name, attrs)
  def characters(self, text):
    if text.strip():
      self.log(AtomLinkNotEmpty({"parent":self.parent.name, "element":self.name}))
| Python |
"""$Id: channel.py 1033 2008-11-18 11:35:34Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1033 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import *
from itunes import itunes_channel
from extension import *
#
# channel element.
#
class channel(validatorBase, rfc2396, extension_channel, itunes_channel):
  # Common channel validation shared by RSS 1.0 and RSS 2.0 subclasses.
  def getExpectedAttrNames(self):
    return [(u'urn:atom-extension:indexing', u'index')]
  def prevalidate(self):
    self.validate_optional_attribute((u'urn:atom-extension:indexing', u'index'), yesno)
  def __init__(self):
    # populated by child-element validators (link/title/docs classes below)
    self.link=None
    self.docs=''
    self.links = []
    self.title=None
    validatorBase.__init__(self)
  def validate(self):
    # required children
    if not "description" in self.children:
      self.log(MissingDescription({"parent":self.name,"element":"description"}))
    if not "link" in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
    if not "title" in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    if not "dc_language" in self.children and not "language" in self.children:
      if not self.xmlLang:
        self.log(MissingDCLanguage({"parent":self.name, "element":"language"}))
    # children that may appear at most once
    if self.children.count("image") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"image"}))
    if self.children.count("textInput") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
    if self.children.count("skipHours") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
    if self.children.count("skipDays") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
    # RSS 1.0: rdf:about must be a URI and <items> is required
    if self.attrs.has_key((rdfNS,"about")):
      self.value = self.attrs.getValue((rdfNS, "about"))
      rfc2396.validate(self, extraParams={"attr": "rdf:about"})
      if not "items" in self.children:
        self.log(MissingElement({"parent":self.name, "element":"items"}))
    # RSS 2.0 profile: recommend an atom:link rel="self"
    if self.parent.name == 'rss' and self.parent.version == '2.0':
      for link in self.links:
        if link.rel=='self': break
      else:
        self.log(MissingAtomSelfLink({}))
    if self.itunes: itunes_channel.validate(self)
    # don't warn about use of extension attributes for rss-board compliant feeds
    if self.docs == 'http://www.rssboard.org/rss-specification':
      self.dispatcher.loggedEvents = [event for
        event in self.dispatcher.loggedEvents
        if not isinstance(event,UseOfExtensionAttr)]
  def metadata(self):
    # hook overridden by rss20Channel to flag items preceding metadata
    pass
  def do_image(self):
    self.metadata()
    from image import image
    return image(), noduplicates()
  def do_textInput(self):
    self.metadata()
    from textInput import textInput
    return textInput(), noduplicates()
  def do_textinput(self):
    self.metadata()
    if not self.attrs.has_key((rdfNS,"about")):
      # optimize for RSS 2.0.  If it is not valid RDF, assume that it is
      # a simple misspelling (in other words, the error message will be
      # less than helpful on RSS 1.0 feeds.
      self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
    return eater(), noduplicates()
  def do_link(self):
    self.metadata()
    return link(), noduplicates()
  def do_title(self):
    self.metadata()
    return title(), noduplicates(), nonblank()
  def do_description(self):
    self.metadata()
    return nonhtml(), noduplicates()
  def do_blink(self):
    return blink(), noduplicates()
  def do_atom_author(self):
    from author import author
    return author()
  def do_atom_category(self):
    from category import category
    return category()
  def do_atom_contributor(self):
    from author import author
    return author()
  def do_atom_generator(self):
    from generator import generator
    return generator(), nonblank(), noduplicates()
  def do_atom_id(self):
    return rfc2396_full(), noduplicates()
  def do_atom_icon(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_link(self):
    self.metadata()
    from link import link
    # keep a reference so validate() can look for rel="self"
    self.links.append(link())
    return self.links[-1]
  def do_atom_logo(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_subtitle(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_updated(self):
    return rfc3339(), noduplicates()
  def do_dc_creator(self):
    if "managingEditor" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return text() # duplicates allowed
  def do_dc_subject(self):
    if "category" in self.children:
      self.log(DuplicateSemantics({"core":"category", "ext":"dc:subject"}))
    return text() # duplicates allowed
  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return w3cdtf(), noduplicates()
  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()
  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396_full()
class rss20Channel(channel):
  # RSS 2.0 channel: additionally tracks where <item> elements appeared so
  # that metadata elements following an item can be flagged (RSS 2.0
  # profile prefers all metadata before the items).
  def __init__(self):
    self.itemlocs=[]   # (line, col) of each <item> seen so far
    channel.__init__(self)
  def metadata(self):
    # called by every metadata-element handler: any previously seen <item>
    # is reported as misplaced, located via an offset from the current
    # parser position
    locator=self.dispatcher.locator
    for line,col in self.itemlocs:
      offset=(line - locator.getLineNumber(), col - locator.getColumnNumber())
      self.log(MisplacedItem({"parent":self.name, "element":"item"}), offset)
    self.itemlocs = []
  def do_textInput(self):
    self.log(AvoidTextInput({}))
    return channel.do_textInput(self)
  def do_item(self):
    locator=self.dispatcher.locator
    self.itemlocs.append((locator.getLineNumber(), locator.getColumnNumber()))
    from item import rss20Item
    return rss20Item()
  def do_category(self):
    self.metadata()
    return category()
  def do_cloud(self):
    self.metadata()
    return cloud(), noduplicates()
  do_rating = validatorBase.leaf # TODO test cases?!?
  def do_ttl(self):
    self.metadata()
    return positiveInteger(), nonblank(), noduplicates()
  def do_docs(self):
    self.metadata()
    return docs(), noduplicates()
  def do_generator(self):
    self.metadata()
    if "admin_generatorAgent" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return text(), noduplicates()
  def do_pubDate(self):
    self.metadata()
    if "dc_date" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return rfc822(), noduplicates()
  def do_managingEditor(self):
    self.metadata()
    if "dc_creator" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return email_with_name(), noduplicates()
  def do_webMaster(self):
    self.metadata()
    if "dc_publisher" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return email_with_name(), noduplicates()
  def do_language(self):
    self.metadata()
    if "dc_language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_copyright(self):
    self.metadata()
    if "dc_rights" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return nonhtml(), noduplicates()
  def do_lastBuildDate(self):
    self.metadata()
    if "dcterms_modified" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return rfc822(), noduplicates()
  def do_skipHours(self):
    self.metadata()
    from skipHours import skipHours
    return skipHours()
  def do_skipDays(self):
    self.metadata()
    from skipDays import skipDays
    return skipDays()
class rss10Channel(channel):
  """RSS 1.0 channel: records rdf:about values for cross-document
  uniqueness checks and requires rdf:about on the channel."""
  def getExpectedAttrNames(self):
    # fix: the rdf:about tuple was listed twice; once is sufficient
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def prevalidate(self):
    # collect every rdf:about seen, on the shared dispatcher
    if self.attrs.has_key((rdfNS,"about")):
      if not "abouts" in self.dispatcher.__dict__:
        self.dispatcher.__dict__["abouts"] = []
      self.dispatcher.__dict__["abouts"].append(self.attrs[(rdfNS,"about")])
  def do_items(self): # this actually should be from the rss1.0 ns
    if not self.attrs.has_key((rdfNS,"about")):
      self.log(MissingAttribute({"parent":self.name, "element":self.name, "attr":"rdf:about"}))
    from item import items
    return items(), noduplicates()
  def do_rdfs_label(self):
    return text()
  def do_rdfs_comment(self):
    return text()
class link(rfc2396_full):
  # rss <link>: remembers the value on the parent channel before
  # validating it as an absolute URI.
  def validate(self):
    self.parent.link = self.value
    rfc2396_full.validate(self)
class title(nonhtml):
  # rss <title>: remembers the value on the parent channel for later
  # cross-checks, then validates it as non-HTML text.
  def validate(self):
    self.parent.title = self.value
    nonhtml.validate(self)
class docs(rfc2396_full):
  # rss <docs>: remembered on the parent so channel.validate() can relax
  # extension-attribute warnings for rssboard.org-compliant feeds.
  def validate(self):
    self.parent.docs = self.value
    rfc2396_full.validate(self)
class blink(text):
  # <blink> is never valid; always log NoBlink.
  def validate(self):
    self.log(NoBlink({}))
class category(nonhtml):
  # rss <category>: optional domain attribute; content must be non-HTML.
  def getExpectedAttrNames(self):
    return [(None, u'domain')]
class cloud(validatorBase):
  """rss <cloud/>: every attribute (domain, port, path, registerProcedure,
  protocol) is required; port must additionally be a positive integer."""
  def getExpectedAttrNames(self):
    return [(None, u'domain'), (None, u'path'), (None, u'registerProcedure'),
            (None, u'protocol'), (None, u'port')]
  def prevalidate(self):
    names = self.attrs.getNames()
    def check_present(attr):
      # log MissingAttribute or ValidCloud for a simple required attribute
      if (None, attr) not in names:
        self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":attr}))
    check_present('domain')
    # port: getValue raises KeyError when absent; int() raises ValueError
    # when non-numeric; must be strictly positive
    try:
      if int(self.attrs.getValue((None, 'port'))) <= 0:
        self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except ValueError:
      self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    for attr in ('path', 'registerProcedure', 'protocol'):
      check_present(attr)
    ## TODO - is there a list of accepted protocols for this thing?
    return validatorBase.prevalidate(self)
| Python |
"""
$Id: mediaTypes.py 988 2008-03-12 18:22:48Z sa3ruby $
This module deals with valid internet media types for feeds.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
from cgi import parse_header
from logging import *
# Internet media types under which feeds are acceptably served; anything
# else causes checkValid() below to log an UnexpectedContentType.
FEED_TYPES = [
 'text/xml', 'application/xml', 'application/rss+xml', 'application/rdf+xml',
 'application/atom+xml', 'text/x-opml', 'application/xrds+xml',
 'application/opensearchdescription+xml', 'application/vnd.google-earth.kml+xml', 'application/vnd.google-earth.kmz',
 'application/atomsvc+xml', 'application/atomcat+xml',
]
# Is the Content-Type correct?
def checkValid(contentType, loggedEvents):
  """Parse a Content-Type header, warn when the media type is not a
  recognized feed type, and return (mediaType, charset-or-None)."""
  mediaType, params = parse_header(contentType)
  if mediaType.lower() not in FEED_TYPES:
    loggedEvents.append(UnexpectedContentType({"type": "Feeds", "contentType": mediaType}))
  # charset parameter is optional; absent means None
  return (mediaType, params.get('charset'))
# Warn about mismatches between media type and feed version
def checkAgainstFeedType(mediaType, feedType, loggedEvents):
  """Log an UnexpectedContentType when the declared media type disagrees
  with the detected feed type; unknown media types pass silently."""
  mt = mediaType.lower()
  def mismatch(label):
    loggedEvents.append(UnexpectedContentType({"type": label, "contentType": mediaType}))
  if mt in ('application/x.atom+xml', 'application/atom+xml'):
    if feedType not in [TYPE_ATOM, TYPE_ATOM_ENTRY]:
      mismatch('Non-Atom 1.0 feeds')
  elif mt == 'application/atomcat+xml':
    if feedType != TYPE_APP_CATEGORIES:
      mismatch('Non-AtomPub Category document')
  elif mt == 'application/atomsvc+xml':
    if feedType != TYPE_APP_SERVICE:
      mismatch('Non-AtomPub Service document')
  elif mt == 'application/rdf+xml':
    if feedType != TYPE_RSS1:
      mismatch('Non-RSS 1.0 feeds')
  elif mt == 'application/rss+xml':
    if feedType not in [TYPE_RSS1, TYPE_RSS2]:
      mismatch('Non-RSS feeds')
  elif mt == 'text/x-opml':
    if feedType not in [TYPE_OPML]:
      mismatch('Non-OPML feeds')
  elif mt == 'application/opensearchdescription+xml':
    if feedType not in [TYPE_OPENSEARCH]:
      mismatch('Non-OpenSearchDescription documents')
  elif mt == 'application/xrds+xml':
    if feedType not in [TYPE_XRD]:
      mismatch('Non-Extensible Resource Descriptor documents')
  elif mt == 'application/vnd.google-earth.kml+xml':
    if feedType not in [TYPE_KML20, TYPE_KML21, TYPE_KML22]:
      mismatch('Non-KML documents')
  elif mt == 'application/earthviewer':
    # Google Earth's legacy type is always flagged, regardless of feedType
    loggedEvents.append(InvalidKmlMediaType({"type": 'Non-KML documents', "contentType": mediaType}))
# warn if a non-specific media type is used without a 'marker'
def contentSniffing(mediaType, rawdata, loggedEvents):
    """Log NonSpecificMediaType when a generic feed media type is used and
    the first 512 bytes of the document contain no recognisable feed marker.

    Specific media types (Atom, RSS, OPML, ...) already declare the format
    and are exempt.
    """
    if mediaType not in FEED_TYPES: return
    # These types are format-specific; no sniffing needed.
    if mediaType in ('application/atom+xml', 'application/atomcat+xml',
                     'application/atomsvc+xml', 'application/rss+xml',
                     'text/x-opml', 'application/opensearchdescription+xml',
                     'application/xrds+xml',
                     'application/vnd.google-earth.kml+xml'):
        return
    block = rawdata[:512]
    for marker in ('<rss', '<feed', '<opml', '<kml', '<OpenSearchDescription'):
        if block.find(marker) >= 0: return
    # RSS 1.0 is recognised by an rdf:RDF element plus BOTH identifying
    # namespaces.  BUG FIX: str.find returns -1 when absent, which is truthy;
    # the original omitted '>= 0' on the purl.org test, so a document merely
    # *lacking* the RSS 1.0 namespace was treated as RSS 1.0.
    if (block.find('<rdf:RDF') >= 0 and
        block.find('http://www.w3.org/1999/02/22-rdf-syntax-ns#') >= 0 and
        block.find('http://purl.org/rss/1.0/') >= 0): return
    from logging import NonSpecificMediaType
    loggedEvents.append(NonSpecificMediaType({"contentType": mediaType}))
| Python |
"""$Id: category.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# category element.
#
class category(validatorBase):
    """Validates an Atom category element: 'term' is required, while
    'scheme' and 'label' are optional."""
    def getExpectedAttrNames(self):
        return [(None, attr) for attr in (u'term', u'scheme', u'label')]
    def prevalidate(self):
        # category carries no element content; force "mixed content" warnings.
        self.children.append(True)
        self.validate_required_attribute((None, 'term'), nonblank)
        self.validate_optional_attribute((None, 'scheme'), rfc3987_full)
        self.validate_optional_attribute((None, 'label'), nonhtml)
| Python |
from validators import *
from logging import *
import re
class OpenSearchDescription(validatorBase):
    """Validator for the root element of an OpenSearch description document."""
    def __init__(self):
        # Flipped to 1 by a child Query element with role="example".
        self.exampleFound = 0
        validatorBase.__init__(self)
    def validate(self):
        parent = self.name.replace("opensearch_", '')
        # ShortName, Description and Url are the required children.
        for required in ("ShortName", "Description", "Url"):
            if required not in self.children:
                self.log(MissingElement({"parent": parent, "element": required}))
        if not self.exampleFound:
            self.log(ShouldIncludeExample({}))
    def do_ShortName(self):
        return lengthLimitedText(16), noduplicates()
    def do_Description(self):
        return lengthLimitedText(1024), noduplicates()
    def do_Url(self):
        return Url()
    def do_Contact(self):
        return addr_spec(), noduplicates()
    def do_Tags(self):
        return lengthLimitedText(256), noduplicates()
    def do_LongName(self):
        return lengthLimitedText(48), noduplicates()
    def do_Image(self):
        return Image()
    def do_Query(self):
        return Query()
    def do_Developer(self):
        return lengthLimitedText(64), noduplicates()
    def do_Attribution(self):
        return lengthLimitedText(256), noduplicates()
    def do_SyndicationRight(self):
        return SyndicationRight(), noduplicates()
    def do_AdultContent(self):
        return AdultContent(), noduplicates()
    def do_Language(self):
        return Language()
    def do_InputEncoding(self):
        return Charset()
    def do_OutputEncoding(self):
        return Charset()
class Url(validatorBase):
    """Validates an OpenSearch Url element: template and type are required;
    the two offsets are optional integers."""
    def getExpectedAttrNames(self):
        return [(None, 'template'), (None, 'type'),
                (None, 'indexOffset'), (None, 'pageOffset')]
    def prevalidate(self):
        self.validate_required_attribute((None, 'template'), Template())
        self.validate_required_attribute((None, 'type'), MimeType)
        self.validate_optional_attribute((None, 'indexOffset'), Integer)
        self.validate_optional_attribute((None, 'pageOffset'), Integer)
class Template(rfc2396_full):
    """Validates an OpenSearch URL template: checks each {param} substitution,
    then validates the remainder as a URI."""
    tparam = re.compile("{((?:[-a-zA-Z0-9._~]|%[a-fA-F0-9]{2})+:?(?:[-a-zA-Z0-9._~]|%[a-fA-F0-9]{2})*)\??}")
    valuelist = ['searchTerms', 'count', 'startIndex', 'startPage', 'language',
                 'inputEncoding', 'outputEncoding']
    def validate(self):
        for pname in self.tparam.findall(self.value):
            if ':' in pname:
                # Namespaced parameter: the prefix must be declared in scope.
                prefix = pname.split(':', 1)[0]
                if not self.parent.namespaceFor(prefix):
                    self.log(UndeclaredPrefix({'value': prefix}))
            elif pname not in self.valuelist:
                self.log(InvalidLocalParameter({'value': pname}))
        # Strip the template braces, leaving bare names, and URI-check the rest.
        self.value = self.tparam.sub(r'\1', self.value)
        rfc2396_full.validate(self)
class Image(rfc2396_full):
    """Validates an OpenSearch Image element; height, width and type are
    all required, and the element content is URI-checked by the base class."""
    def getExpectedAttrNames(self):
        return [(None, 'height'), (None, 'width'), (None, 'type')]
    def prevalidate(self):
        for attr, rule in (('height', nonNegativeInteger),
                           ('width', nonNegativeInteger),
                           ('type', MimeType)):
            self.validate_required_attribute((None, attr), rule)
class Query(validatorBase):
    """Validates an OpenSearch Query element; notifies the parent when an
    example query is present."""
    def getExpectedAttrNames(self):
        return [(None, attr) for attr in ['role', 'title', 'totalResults',
            'searchTerms', 'count', 'startIndex', 'startPage', 'language',
            'inputEncoding', 'outputEncoding', 'parameter']]
    def prevalidate(self):
        self.validate_required_attribute((None, 'role'), QueryRole)
        # 'title' is checked twice: once for length, once for markup.
        for attr, rule in (('title', lengthLimitedText(256)),
                           ('title', nonhtml),
                           ('totalResults', nonNegativeInteger),
                           ('searchTerms', UrlEncoded),
                           ('count', nonNegativeInteger),
                           ('startIndex', Integer),
                           ('startPage', Integer),
                           ('language', iso639),
                           ('inputEncoding', Charset),
                           ('outputEncoding', Charset)):
            self.validate_optional_attribute((None, attr), rule)
        if self.attrs.has_key((None, "role")) and \
           self.attrs.getValue((None, "role")) == "example":
            self.parent.exampleFound = 1
class QueryRole(enumeration):
    """Validates the Query 'role' attribute: either one of the local roles,
    or a namespaced role with a declared prefix."""
    error = InvalidLocalRole
    valuelist = ['request', 'example', 'related', 'correction', 'subset',
                 'superset']
    def validate(self):
        if ':' in self.value:
            # Namespaced role: only the prefix declaration is checked.
            prefix = self.value.split(':', 1)[0]
            if not self.parent.namespaceFor(prefix):
                self.log(UndeclaredPrefix({'value': prefix}))
        else:
            enumeration.validate(self)
class UrlEncoded(validatorBase):
    """Checks that each whitespace-separated token in the value is already
    URL-encoded (i.e. survives an unquote/quote round trip unchanged)."""
    def validate(self):
        from urllib import quote, unquote
        import re
        for token in self.value.split():
            if type(token) == unicode: token = token.encode('utf-8')
            # Upper-case the percent-escape hex digits before comparing,
            # since quote() emits upper case.
            token = re.sub('%\w\w', lambda m: m.group(0).upper(), token)
            if token != quote(unquote(token)):
                self.log(NotURLEncoded({}))
                break
class SyndicationRight(enumeration):
    # Allowed values for SyndicationRight; the comparison is made
    # case-insensitive by lower-casing the value first.
    error = InvalidSyndicationRight
    valuelist = ['open','limited','private','closed']
    def validate(self):
        # Normalise case, then delegate the membership check.
        self.value = self.value.lower()
        enumeration.validate(self)
class AdultContent(enumeration):
    # AdultContent accepts these boolean spellings verbatim (note: only the
    # exact casings listed here pass; e.g. 'False' would be rejected).
    error = InvalidAdultContent
    valuelist = ['false', 'FALSE', '0', 'no', 'NO',
                 'true', 'TRUE', '1', 'yes', 'YES']
class Language(iso639):
    """Validates a Language element; '*' (the OpenSearch wildcard) is
    always acceptable, anything else must be a valid ISO 639 code."""
    def validate(self):
        if self.value == '*':
            return
        iso639.validate(self)
| Python |
#!/usr/bin/python
"""
$Id$
This module deals with detecting XML encodings, using both BOMs and
explicit declarations.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import codecs
import re
from logging import ObscureEncoding, NonstdEncoding
import logging
class FailingCodec:
    """Stand-in for an unavailable codec: remembers the codec name and
    raises when its decoder is actually used."""
    def __init__(self, name):
        self.name = name
    def fail(self, txt, errors='strict'):
        raise UnicodeError(
            'No codec available for %s in this installation of FeedValidator'
            % self.name)
# Don't die if the codec can't be found, but return
# a decoder that will fail on use
def getdecoder(codec):
    """Return the decoder function for *codec*; if the codec is unknown in
    this installation, return a stand-in that raises UnicodeError on use."""
    try:
        return codecs.getdecoder(codec)
    except LookupError:
        # codecs.getdecoder raises LookupError for unknown encodings; the
        # bare 'except:' this replaces would also have masked unrelated bugs.
        return FailingCodec(codec).fail
# These are generic decoders that are only used
# to decode the XML declaration, from which we can read
# the real encoding
# (any of these may be a FailingCodec stand-in if the codec is missing
# from this installation; see getdecoder above)
_decUTF32BE = getdecoder('UTF-32BE')
_decUTF32LE = getdecoder('UTF-32LE')
_decUTF16BE = getdecoder('UTF-16BE')
_decUTF16LE = getdecoder('UTF-16LE')
_decEBCDIC = getdecoder('IBM037') # EBCDIC
_decACE = getdecoder('ISO-8859-1') # An ASCII-compatible encoding
# Given a character index into a string, calculate its 1-based row and column
def _position(txt, idx):
row = txt.count('\n', 0, idx) + 1
ln = txt.rfind('\n', 0, idx) + 1
column = 0
for c in txt[ln:idx]:
if c == '\t':
column = (column // 8 + 1) * 8
else:
column += 1
column += 1
return (row, column)
def _normaliseNewlines(txt):
return txt.replace('\r\n', '\n').replace('\r', '\n')
def _logEvent(loggedEvents, e, pos=None):
if pos:
e.params['line'], e.params['column'] = pos
loggedEvents.append(e)
# Return the encoding from the declaration, or 'None'
# Return None if the 'permitted' list is passed in and the encoding
# isn't found in it. This is so that, e.g., a 4-byte-character XML file
# that claims to be US-ASCII will fail now.
def _decodeDeclaration(sig, dec, permitted, loggedEvents):
    # Decode just enough of the document with the generic decoder *dec* to
    # be able to read the XML declaration.
    sig = _normaliseNewlines(dec(sig)[0])
    eo = _encodingFromDecl(sig)
    if not(eo):
        # For these byte patterns an explicit encoding declaration is
        # required; its absence is an error at (1, 1).
        # NOTE: 'logging' here is the validator's own logging module
        # (it defines UnicodeError-style event classes), not the stdlib.
        _logEvent(loggedEvents,
            logging.UnicodeError({'exception': 'This XML file (apparently ' + permitted[0] + ') requires an encoding declaration'}), (1, 1))
    elif permitted and not(eo[0].upper() in permitted):
        if _hasCodec(eo[0]):
            # see if the codec is an alias of one of the permitted encodings
            codec=codecs.lookup(eo[0])
            for encoding in permitted:
                if _hasCodec(encoding) and codecs.lookup(encoding)[-1]==codec[-1]: break
            else:
                # for/else: no permitted encoding matched, so the declared
                # encoding contradicts the byte pattern we detected.
                _logEvent(loggedEvents,
                    logging.UnicodeError({'exception': 'This XML file claims an encoding of ' + eo[0] + ', but looks more like ' + permitted[0]}), eo[1])
    return eo
# Return the encoding from the declaration, or 'fallback' if none is
# present. Return None if the 'permitted' list is passed in and
# the encoding isn't found in it
def _decodePostBOMDeclaration(sig, dec, permitted, loggedEvents, fallback=None):
    """Read the encoding declared after a BOM.

    Returns (encoding, position), or (fallback, None) when no declaration
    is present, or None (after logging) when the declaration contradicts
    the BOM's *permitted* encodings.
    """
    decl = _normaliseNewlines(dec(sig)[0])
    eo = _encodingFromDecl(decl)
    if eo and eo[0].upper() not in permitted:
        _logEvent(loggedEvents,
            logging.UnicodeError({'exception': 'Document starts with ' + permitted[0] + ' BOM marker but has incompatible declaration of ' + eo[0]}), eo[1])
        return None
    return eo or (fallback, None)
def isStandard(x):
    """Is this encoding required by the XML 1.0 Specification, 4.3.3?"""
    return x.upper() in ('UTF-8', 'UTF-16')

def isCommon(x):
    """Is this encoding commonly used, according to
    <http://www.syndic8.com/stats.php?Section=feeds#XMLEncodings>
    (as of 2004-03-27)?"""
    common = [
        'US-ASCII', 'ISO-8859-1', 'EUC-JP', 'ISO-8859-2', 'ISO-8859-15',
        'ISO-8859-7', 'KOI8-R', 'SHIFT_JIS', 'WINDOWS-1250', 'WINDOWS-1251',
        'WINDOWS-1252', 'WINDOWS-1254', 'WINDOWS-1255', 'WINDOWS-1256',
        # Rarely seen, but it is the Chinese government's mandatory standard.
        'GB18030',
    ]
    return isStandard(x) or x.upper() in common
# Inspired by xmlproc's autodetect_encoding, but rewritten
def _detect(doc_start, loggedEvents=[], fallback='UTF-8'):
"""This is the logic from appendix F.1 of the XML 1.0 specification.
Pass in the start of a document (>= 256 octets), and receive the encoding to
use, or None if there is a problem with the document."""
sig = doc_start[:4]
# With a BOM. We also check for a declaration, and make sure
# it doesn't contradict (for 4-byte encodings, it's required)
if sig == '\x00\x00\xFE\xFF': # UTF-32 BE
eo = _decodeDeclaration(doc_start[4:], _decUTF32BE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\xFF\xFE\x00\x00': # UTF-32 LE
eo = _decodeDeclaration(doc_start[4:], _decUTF32LE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x00\xFF\xFE' or sig == '\xFE\xFF\x00\x00':
raise UnicodeError('Unable to process UCS-4 with unusual octet ordering')
elif sig[:2] == '\xFE\xFF': # UTF-16 BE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16BE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:2] == '\xFF\xFE': # UTF-16 LE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16LE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:3] == '\xEF\xBB\xBF':
eo = _decodePostBOMDeclaration(doc_start[3:], _decACE, ['UTF-8'], loggedEvents, fallback='UTF-8')
# Without a BOM; we must read the declaration
elif sig == '\x00\x00\x00\x3C':
eo = _decodeDeclaration(doc_start, _decUTF32BE, ['UTF-32BE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x3C\x00\x00\x00':
eo = _decodeDeclaration(doc_start, _decUTF32LE, ['UTF-32LE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x3C\x00\x3F':
eo = _decodeDeclaration(doc_start, _decUTF16BE, ['UTF-16BE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x00\x3F\x00':
eo = _decodeDeclaration(doc_start, _decUTF16LE, ['UTF-16LE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x3F\x78\x6D':
eo = _encodingFromDecl(_normaliseNewlines(_decACE(doc_start)[0])) or ('UTF-8', None)
elif sig == '\x4C\x6F\xA7\x94':
eo = _decodeDeclaration(doc_start, _decEBCDIC, ['IBM037', 'CP037', 'IBM038', 'EBCDIC-INT'], loggedEvents)
# There's no BOM, and no declaration. It's UTF-8, or mislabelled.
else:
eo = (fallback, None)
return eo
def detect(doc_start, loggedEvents=None, fallback='UTF-8'):
    """Return the detected encoding name for a document prefix, or None."""
    # BUG FIX: the original used a mutable default ([]) for loggedEvents,
    # shared across calls; create a fresh list per call instead.
    if loggedEvents is None:
        loggedEvents = []
    eo = _detect(doc_start, loggedEvents, fallback)
    if eo:
        return eo[0]
    else:
        return None
_encRe = re.compile(r'<\?xml\s+version\s*=\s*(?:"[-a-zA-Z0-9_.:]+"|\'[-a-zA-Z0-9_.:]+\')\s+(encoding\s*=\s*(?:"([-A-Za-z0-9._]+)"|\'([-A-Za-z0-9._]+)\'))')
def _encodingFromDecl(x):
m = _encRe.match(x)
if m:
if m.group(2):
return m.group(2), _position(x, m.start(2))
else:
return m.group(3), _position(x, m.start(3))
else:
return None
def removeDeclaration(x):
"""Replace an XML document string's encoding declaration with the
same number of spaces. Some XML parsers don't allow the
encoding to be overridden, and this is a workaround."""
m = _encRe.match(x)
if m:
s = m.start(1)
e = m.end(1)
res = x[:s] + ' ' * (e - s) + x[e:]
else:
res = x
return res
def _hasCodec(enc):
try:
return codecs.lookup(enc) is not None
except:
return False
def decode(mediaType, charset, bs, loggedEvents, fallback=None):
    """Decode the byte string *bs* to text.

    Reconciles the HTTP charset parameter with the encoding detected from
    the document's BOM/declaration, logging mismatch, obscure-encoding and
    unknown-encoding events.  Returns (encoding, text); text is None when
    no usable encoding could be determined, and may be a best-effort
    'replace'-decoded string when decoding partially fails.
    """
    # fallback=None here on purpose: a guessed default should not be treated
    # as a detected encoding at this stage.
    eo = _detect(bs, loggedEvents, fallback=None)
    # Check declared encodings
    if eo and eo[1] and _hasCodec(eo[0]):
        # eo[1] is the declaration's (row, column); only explicitly declared
        # encodings are judged for obscurity.
        if not(isCommon(eo[0])):
            _logEvent(loggedEvents, ObscureEncoding({"encoding": eo[0]}), eo[1])
        elif not(isStandard(eo[0])):
            _logEvent(loggedEvents, NonstdEncoding({"encoding": eo[0]}), eo[1])
    if eo:
        encoding = eo[0]
    else:
        encoding = None
    if charset and encoding and charset.lower() != encoding.lower():
        # RFC 3023 requires us to use 'charset', but a number of aggregators
        # ignore this recommendation, so we should warn.
        loggedEvents.append(logging.EncodingMismatch({"charset": charset, "encoding": encoding}))
    if mediaType and mediaType.startswith("text/") and charset is None:
        loggedEvents.append(logging.TextXml({}))
        # RFC 3023 requires text/* to default to US-ASCII. Issue a warning
        # if this occurs, but continue validation using the detected encoding
        try:
            bs.decode("US-ASCII")
        except:
            if not encoding:
                # No detected encoding either; try the caller's fallback so
                # validation can continue.
                try:
                    bs.decode(fallback)
                    encoding=fallback
                except:
                    pass
            if encoding and encoding.lower() != 'us-ascii':
                loggedEvents.append(logging.EncodingMismatch({"charset": "US-ASCII", "encoding": encoding}))
    # The HTTP charset wins over the in-document encoding (RFC 3023).
    enc = charset or encoding
    if enc is None:
        loggedEvents.append(logging.MissingEncoding({}))
        enc = fallback
    elif not(_hasCodec(enc)):
        if eo:
            _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}), eo[1])
        else:
            _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}))
        enc = fallback
    if enc is None:
        return enc, None
    dec = getdecoder(enc)
    try:
        return enc, dec(bs)[0]
    except UnicodeError, ue:
        # Decode again with replacement characters so validation can proceed
        # on the salvaged text, and report where decoding first failed.
        salvage = dec(bs, 'replace')[0]
        if 'start' in ue.__dict__:
            # XXX 'start' is in bytes, not characters. This is wrong for multibyte
            # encodings
            pos = _position(salvage, ue.start)
        else:
            pos = None
        _logEvent(loggedEvents, logging.UnicodeError({"exception":ue}), pos)
        return enc, salvage
# Module-level UTF-8 encoder, looked up once.
_encUTF8 = codecs.getencoder('UTF-8')

def asUTF8(x):
    """Accept a Unicode string and return a UTF-8 encoded string, with
    its encoding declaration removed, suitable for parsing."""
    # NOTE: uses the Python 2 'unicode' built-in; this module is Python 2 only.
    x = removeDeclaration(unicode(x))
    return _encUTF8(x)[0]
if __name__ == '__main__':
    # Simple command-line harness: for each file argument, print the detected
    # encoding, or the logged events when detection fails.
    from sys import argv
    from os.path import isfile
    for x in argv[1:]:
        if isfile(x):
            f = open(x, 'r')
            # 1024 bytes is more than the >= 256 octets _detect asks for.
            l = f.read(1024)
            log = []
            eo = detect(l, log)
            if eo:
                print x,eo
            else:
                print repr(log)
| Python |
"""$Id: generator.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# Atom generator element
#
class generator(nonhtml,rfc2396):
    """Validates an Atom 'generator' element.

    The element's text content is checked as non-HTML text (via nonhtml);
    the 'uri' attribute value is checked as a URI reference.
    """
    def getExpectedAttrNames(self):
        # Only 'uri' and 'version' are expected attributes.
        return [(None, u'uri'), (None, u'version')]
    def prevalidate(self):
        # 'url' is not in the expected-attribute list above, but its value is
        # still URI-checked here when present.  NOTE(review): presumably this
        # handles the pre-1.0 Atom draft attribute name — confirm intent.
        if self.attrs.has_key((None, "url")):
            self.value = self.attrs.getValue((None, "url"))
            rfc2396.validate(self, extraParams={"attr": "url"})
        if self.attrs.has_key((None, "uri")):
            self.value = self.attrs.getValue((None, "uri"))
            rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "uri"})
        # Reset so the attribute URI check doesn't leak into the element's
        # own content validation.
        self.value=''
| Python |
"""$Id: skipHours.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipHours element
#
class skipHours(validatorBase):
    # Validates an RSS skipHours element: a container of hour children.
    def __init__(self):
        # Hour values seen so far; child hour() validators append here so
        # duplicates can be detected.
        self.hours = []
        validatorBase.__init__(self)
    def validate(self):
        if "hour" not in self.children:
            self.log(MissingElement({"parent":self.name, "element":"hour"}))
        # More than 24 children cannot all be distinct valid hours.
        if len(self.children) > 24:
            self.log(NotEnoughHoursInTheDay({}))
    def do_hour(self):
        return hour()
class hour(text):
    # Validates one skipHours/hour value: an integer 0-23 (0 = midnight).
    def validate(self):
        try:
            h = int(self.value)
            # 0 and 24 both denote midnight, so either form duplicates the other.
            if h in self.parent.hours or (h in [0,24] and 24-h in self.parent.hours):
                self.log(DuplicateValue({"parent":self.parent.name, "element":self.name, "value":self.value}))
            if (h < 0) or (h > 23):
                # Out of range: funnel into the ValueError handler below.
                raise ValueError
            else:
                self.parent.hours.append(h)
                self.log(ValidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
        except ValueError:
            # int('24') parses but is rejected above; give it a specific hint.
            if self.value == '24':
                self.log(UseZeroForMidnight({"parent":self.parent.name, "element":self.name, "value":self.value}))
            else:
                self.log(InvalidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.