code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
"""$Id: opml.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from extension import extension_everywhere
import re
#
# Outline Processor Markup Language element.
#
class opml(validatorBase, extension_everywhere):
  """Validator for the OPML root element (<opml> or legacy <outlineDocument>)."""
  # versions of the OPML specification this validator accepts
  versionList = ['1.0', '1.1', '2.0']
  def getExpectedAttrNames(self):
    return [(None, u'version')]
  def validate(self):
    """Check the version attribute and require head/body children."""
    self.setFeedType(TYPE_OPML)
    attrNames = self.attrs.getNames()
    if (None, 'version') in attrNames:
      declared = self.attrs[(None, 'version')]
      if declared not in opml.versionList:
        self.log(InvalidOPMLVersion({"parent":self.parent.name, "element":self.name, "value":declared}))
    elif self.name != 'outlineDocument':
      # legacy outlineDocument roots predate the version attribute
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    for required in ('head', 'body'):
      if required not in self.children:
        self.log(MissingElement({"parent":self.name, "element":required}))
  def do_head(self):
    return opmlHead()
  def do_body(self):
    return opmlBody()
class opmlHead(validatorBase, extension_everywhere):
  """Validator for the OPML <head> element.

  Each do_* method returns the validator chain for the corresponding
  child element; noduplicates() flags repeated children.
  """
  def do_title(self):
    return safeHtml(), noduplicates()
  def do_dateCreated(self):
    # RFC 822 date format, per the OPML specification
    return rfc822(), noduplicates()
  def do_dateModified(self):
    return rfc822(), noduplicates()
  def do_ownerName(self):
    return safeHtml(), noduplicates()
  def do_ownerEmail(self):
    return email(), noduplicates()
  def do_ownerId(self):
    # OPML 2.0: http address of a web page about the owner
    return httpURL(), noduplicates()
  def do_expansionState(self):
    # comma-separated list of outline line numbers that are expanded
    return commaSeparatedLines(), noduplicates()
  def do_vertScrollState(self):
    return positiveInteger(), nonblank(), noduplicates()
  # window* elements: pixel positions of the outliner window edges
  def do_windowTop(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowLeft(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowBottom(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowRight(self):
    return positiveInteger(), nonblank(), noduplicates()
class commaSeparatedLines(text):
  """Validates expansionState content: a comma-separated list of
  line numbers (the list may be empty)."""
  # fix: use a raw string so \d is a regex escape, not a (deprecated)
  # string-literal escape
  linenumbers_re=re.compile(r'^(\d+(,\s*\d+)*)?$')
  def validate(self):
    if not self.linenumbers_re.match(self.value):
      self.log(InvalidExpansionState({"parent":self.parent.name, "element":self.name, "value":self.value}))
class opmlBody(validatorBase, extension_everywhere):
  """Validator for the OPML <body>, which must contain at least one outline."""
  def do_outline(self):
    return opmlOutline()
  def validate(self):
    if 'outline' not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"outline"}))
class opmlOutline(validatorBase, extension_everywhere):
  """Validator for a single OPML <outline> element (outlines may nest)."""
  # legal values for the version attribute of rss-type outlines
  versionList = ['RSS', 'RSS1', 'RSS2', 'scriptingNews']
  def getExpectedAttrNames(self):
    return [
      (None, u'category'),
      (None, u'created'),
      (None, u'description'),
      (None, u'htmlUrl'),
      (None, u'isBreakpoint'),
      (None, u'isComment'),
      (None, u'language'),
      (None, u'text'),
      (None, u'title'),
      (None, u'type'),
      (None, u'url'),
      (None, u'version'),
      (None, u'xmlUrl'),
    ]
  def validate(self):
    # every outline should carry a text attribute
    if not (None,'text') in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"text"}))
    if (None,'type') in self.attrs.getNames():
      if self.attrs[(None,'type')].lower() == 'rss':
        # rss outlines require xmlUrl and conventionally carry a title
        if not (None,'xmlUrl') in self.attrs.getNames():
          self.log(MissingXmlURL({"parent":self.parent.name, "element":self.name}))
        if not (None,'title') in self.attrs.getNames():
          self.log(MissingTitleAttr({"parent":self.parent.name, "element":self.name}))
      elif self.attrs[(None,'type')].lower() == 'link':
        if not (None,'url') in self.attrs.getNames():
          self.log(MissingUrlAttr({"parent":self.parent.name, "element":self.name}))
      else:
        # only 'rss' and 'link' outline types are recognized here
        self.log(InvalidOutlineType({"parent":self.parent.name, "element":self.name, "value":self.attrs[(None,'type')]}))
    if (None,'version') in self.attrs.getNames():
      if self.attrs[(None,'version')] not in opmlOutline.versionList:
        self.log(InvalidOutlineVersion({"parent":self.parent.name, "element":self.name, "value":self.attrs[(None,'version')]}))
    # these attributes only make sense on typed outlines; flag the first one
    # present when no type attribute was given
    if len(self.attrs)>1 and not (None,u'type') in self.attrs.getNames():
      for name in u'description htmlUrl language title version xmlUrl'.split():
        if (None, name) in self.attrs.getNames():
          self.log(MissingOutlineType({"parent":self.parent.name, "element":self.name}))
          break
    self.validate_optional_attribute((None,'created'), rfc822)
    self.validate_optional_attribute((None,'description'), safeHtml)
    self.validate_optional_attribute((None,'htmlUrl'), rfc2396_full)
    self.validate_optional_attribute((None,'isBreakpoint'), truefalse)
    self.validate_optional_attribute((None,'isComment'), truefalse)
    self.validate_optional_attribute((None,'language'), iso639)
    self.validate_optional_attribute((None,'title'), safeHtml)
    self.validate_optional_attribute((None,'text'), safeHtml)
    self.validate_optional_attribute((None,'url'), rfc2396_full)
  def characters(self, string):
    # outline elements carry data in attributes; text content is unexpected.
    # Remember the first offending text so we only log once per element.
    if not self.value:
      if string.strip():
        self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))
        self.value = string
  def do_outline(self):
    # outlines nest arbitrarily deep
    return opmlOutline()
| Python |
"""$Id: skipDays.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipDays element
#
class skipDays(validatorBase):
  """Validator for the RSS skipDays element and its <day> children."""
  def __init__(self):
    # weekdays seen so far; <day> children register here to detect duplicates
    self.days = []
    validatorBase.__init__(self)
  def do_day(self):
    return day()
  def validate(self):
    children = self.children
    if "day" not in children:
      self.log(MissingElement({"parent":self.name, "element":"day"}))
    # more than seven <day> elements cannot all be distinct weekdays
    if len(children) > 7:
      self.log(EightDaysAWeek({}))
class day(text):
  """Validator for one <day> child: a correctly spelled, unique weekday name."""
  WEEKDAYS = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
              'Friday', 'Saturday', 'Sunday')
  def validate(self):
    info = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value not in self.WEEKDAYS:
      self.log(InvalidDay(info))
    elif self.value in self.parent.days:
      self.log(DuplicateValue(info))
    else:
      # record the day on the parent so later duplicates are caught
      self.parent.days.append(self.value)
      self.log(ValidDay(info))
| Python |
"""$Id: text_plain.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
class Formatter(BaseFormatter):
  """Render each logged event as a single plain-text line."""
  def format(self, event):
    # "<line>, <column>: <message><count>"
    parts = (self.getLineAndColumn(event),
             self.getMessage(event),
             self.getCount(event))
    return '%s %s%s' % parts
| Python |
"""$Id: text_html.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for HTML text output"""
from base import BaseFormatter
import feedvalidator
from xml.sax.saxutils import escape
from feedvalidator.logging import Message, Info, Warning, Error
from config import DOCSURL
def escapeAndMark(x):
  """HTML-escape x, highlight illegal octets, and make runs of spaces
  visible by turning the second space of each pair into &nbsp;."""
  html = escape(x)
  # Double-escape, and highlight, illegal characters.
  # Iterate backwards so earlier indices stay valid as the string grows.
  for i in range(len(html)-1,-1,-1):
    c = ord(html[i])
    if 0x80 <= c <= 0x9F or c == 0xfffd:
      if c == 0xfffd:
        e = '?'  # U+FFFD replacement character
      else:
        e = '\\x%02x' % (c)
      html = '%s<span class="badOctet">%s</span>%s' % (html[:i], e, html[i+1:])
  # fix: this replace was a no-op (a space replaced by a space; the &nbsp;
  # entity was lost in transit).  Replacing every single space would also
  # corrupt the <span class="badOctet"> markup inserted above, so only
  # double spaces are rewritten.
  return html.replace("  "," &nbsp;")
class Formatter(BaseFormatter):
  """Render logged events as HTML <li> items, each with a help link and a
  highlighted excerpt of the offending source line."""
  # maximum length of the displayed source-line excerpt
  FRAGMENTLEN = 80
  def __init__(self, events, rawdata):
    BaseFormatter.__init__(self, events)
    # full document text, used to excerpt the line an event points at
    self.rawdata = rawdata
  def getRootClass(self, aClass):
    # walk up the first-base chain to the class directly under Message
    # (or LoggedEvent); used to pick the documentation directory name
    base = aClass.__bases__[0]
    if base == Message: return aClass
    if base.__name__.split('.')[-1] == 'LoggedEvent':
      return aClass
    else:
      return self.getRootClass(base)
  def getHelpURL(self, event):
    """Build the per-message documentation URL under DOCSURL."""
    rootClass = self.getRootClass(event.__class__).__name__
    rootClass = rootClass.split('.')[-1]
    rootClass = rootClass.lower()
#    messageClass = self.getMessageClass(event).__name__.split('.')[-1]
    messageClass = event.__class__.__name__.split('.')[-1]
    return DOCSURL + '/' + rootClass + '/' + messageClass
  def mostSeriousClass(self):
    # returns the severity *class* (Info/Warning/Error) or None if no events
    ms=0
    for event in self.data:
      level = -1
      if isinstance(event,Info): level = 1
      if isinstance(event,Warning): level = 2
      if isinstance(event,Error): level = 3
      ms = max(ms, level)
    return [None, Info, Warning, Error][ms]
  def header(self):
    return '<ul>'
  def footer(self):
    return '</ul>'
  def format(self, event):
    """Return one <li> for the event, with link, message, help URL, and a
    <pre> excerpt carrying a ^ marker under the reported column."""
    if event.params.has_key('line'):
      line = event.params['line']
      if line >= len(self.rawdata.split('\n')):
        # For some odd reason, UnicodeErrors tend to trigger a bug
        # in the SAX parser that misrepresents the current line number.
        # We try to capture the last known good line number/column as
        # we go along, and now it's time to fall back to that.
        line = event.params['line'] = event.params.get('backupline',0)
        column = event.params['column'] = event.params.get('backupcolumn',0)
      column = event.params['column']
      codeFragment = self.rawdata.split('\n')[line-1]
      markerColumn = column
      # keep both the excerpt and the marker within FRAGMENTLEN characters
      if column > self.FRAGMENTLEN:
        codeFragment = '... ' + codeFragment[column-(self.FRAGMENTLEN/2):]
        markerColumn = 5 + (self.FRAGMENTLEN/2)
      if len(codeFragment) > self.FRAGMENTLEN:
        codeFragment = codeFragment[:(self.FRAGMENTLEN-4)] + ' ...'
    else:
      # no position info: emit the message without an excerpt
      codeFragment = ''
      line = None
      markerColumn = None
    html = escapeAndMark(codeFragment)
    rc = u'<li><p>'
    if line:
      rc += u'''<a href="#l%s">''' % line
      rc += u'''%s</a>, ''' % self.getLine(event)
      rc += u'''%s: ''' % self.getColumn(event)
    if 'value' in event.params:
      rc += u'''<span class="message">%s: <code>%s</code></span>''' % (escape(self.getMessage(event)), escape(unicode(event.params['value'])))
    else:
      rc += u'''<span class="message">%s</span>''' % escape(self.getMessage(event))
    rc += u'''%s ''' % self.getCount(event)
    rc += u'''[<a title="more information about this error" href="%s.html">help</a>]</p>''' % self.getHelpURL(event)
    rc += u'''<blockquote><pre>''' + html + '''<br />'''
    if markerColumn:
      # NOTE(review): upstream may have used '&nbsp;' entities here; plain
      # spaces also align 1:1 inside <pre> — confirm against original source
      rc += u' ' * markerColumn
      rc += u'''<span class="marker">^</span>'''
    rc += u'</pre></blockquote></li>'
    return rc
| Python |
"""$Id: application_test.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for testing that all output messages are defined properly"""
from base import BaseFormatter
import feedvalidator
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class Formatter(BaseFormatter):
  """Formatter used by the test harness: resolves each event's template,
  returning None (instead of a placeholder) when no message is defined."""
  def getMessage(self, event):
    # breadth-first search of the event's class hierarchy for a template
    queue = [event.__class__]
    while queue:
      cls = queue.pop(0)
      if lang.messages.has_key(cls):
        return lang.messages[cls] % event.params
      queue.extend(cls.__bases__)
    return None
  def format(self, event):
    """returns the formatted representation of a single event"""
    return self.getMessage(event)
| Python |
"""$Id: text_xml.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for xml output"""
from base import BaseFormatter
from feedvalidator.logging import *
import feedvalidator
def xmlEncode(value):
  """Escape the five standard XML special characters in value.

  Fix: the replacement strings had been de-entitized (e.g. replacing '&'
  with '&'), making every call a no-op; restore the character entities.
  '&' must be escaped first so it doesn't re-escape later substitutions.
  """
  value = value.replace('&', '&amp;')
  value = value.replace('<', '&lt;')
  value = value.replace('>', '&gt;')
  value = value.replace('"', '&quot;')
  value = value.replace("'", '&apos;')
  return value
class Formatter(BaseFormatter):
  """Render each logged event as an XML fragment named after its severity."""
  def format(self, event):
    params = event.params
    params['type'] = event.__class__.__name__
    params['text'] = self.getMessage(event)
    # determine the level of severity
    level = 'unknown'
    if isinstance(event,Info): level = 'info'
    if isinstance(event,Warning): level = 'warning'
    if isinstance(event,Error): level = 'error'
    params['level'] = level
    # organize fixed elements into a known order (last inserted ends first)
    order = params.keys()
    order.sort()
    for key in ['msgcount', 'text', 'column', 'line', 'type', 'level']:
      if key in order:
        order.remove(key)
        order.insert(0,key)
    # output the elements
    result = "<%s>\n" % level
    for key in order:
      value = xmlEncode(str(params[key]))
      # publish backup line/column info under the standard element names.
      # fix: the original assigned a misspelled 'pubkey' variable and then
      # emitted the raw key anyway, so backupline/backupcolumn never got
      # renamed in the output
      pub_key = key
      if key == "backupcolumn":
        pub_key = "column"
      elif key == "backupline":
        pub_key = "line"
      result = result + (" <%s>%s</%s>\n" % (pub_key, value, pub_key))
    result = result + "</%s>\n" % level
    return result
| Python |
"""$Id: base.py 1055 2009-05-19 15:12:42Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1055 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Base class for output classes"""
from UserList import UserList
import os
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
from feedvalidator.logging import Info, Warning, Error
class BaseFormatter(UserList):
  """Base class for output formatters: a list of events whose items are
  formatted lazily via format()."""
  def __getitem__(self, i):
    return self.format(self.data[i])
  def getErrors(self):
    return [self.format(msg) for msg in self.data if isinstance(msg,Error)]
  def getWarnings(self):
    return [self.format(msg) for msg in self.data if isinstance(msg,Warning)]
  def getLine(self, event):
    # empty string when the event carries no position information
    if not event.params.has_key('line'): return ''
    return lang.line % event.params
  def getColumn(self, event):
    if not event.params.has_key('column'): return ''
    return lang.column % event.params
  def getLineAndColumn(self, event):
    line = self.getLine(event)
    if not line: return ''
    column = self.getColumn(event)
    return '%s, %s:' % (line, column)
  def getCount(self, event):
    # only mention the count when an event occurred more than once
    if not event.params.has_key('msgcount'): return ''
    count = int(event.params['msgcount'])
    if count <= 1: return ''
    return lang.occurances % event.params
  def getMessageClass(self, event):
    # breadth-first search of the event's class hierarchy for a class that
    # has a message template defined in the current language
    classes = [event.__class__]
    while len(classes):
      if lang.messages.has_key(classes[0]):
        return classes[0]
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def getMessage(self, event):
    """Return the event's localized message, with params interpolated."""
    classes = [event.__class__]
    while len(classes):
      if lang.messages.has_key(classes[0]):
        try:
          return lang.messages[classes[0]] % event.params
        except Exception:
          # fix: narrowed from a bare except.  A malformed template or
          # missing parameter falls back to showing template + params,
          # but KeyboardInterrupt/SystemExit now propagate.
          return lang.messages[classes[0]] + ' % ' + repr(event.params)
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def format(self, event):
    """returns the formatted representation of a single event"""
    # fix: repr() instead of the deprecated backtick syntax
    return repr(event)
| Python |
"""$Id: __init__.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__all__ = ['base', 'text_plain', 'text_html']
| Python |
#!/usr/bin/python
"""
$Id: xmlEncoding.py 988 2008-03-12 18:22:48Z sa3ruby $
This module deals with detecting XML encodings, using both BOMs and
explicit declarations.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import codecs
import re
from logging import ObscureEncoding, NonstdEncoding
import logging
class FailingCodec:
  """Stand-in decoder for a codec this installation doesn't have.

  Lookup succeeds; the failure is deferred until someone actually
  tries to decode with it.
  """
  def __init__(self, name):
    self.name = name
  def fail(self, txt, errors='strict'):
    message = ('No codec available for ' + self.name +
               ' in this installation of FeedValidator')
    raise UnicodeError(message)
# Don't die if the codec can't be found, but return
# a decoder that will fail on use
def getdecoder(codec):
  """Return a decoder function for codec, or a deferred-failure stand-in
  when the codec is unavailable in this installation."""
  try:
    return codecs.getdecoder(codec)
  except LookupError:
    # fix: narrowed from a bare except — only a missing codec should be
    # converted into a FailingCodec; other exceptions now propagate
    return FailingCodec(codec).fail
# These are generic decoders that are only used
# to decode the XML declaration, from which we can read
# the real encoding
# Each of these may be a FailingCodec stand-in if this Python installation
# lacks the codec; the failure is deferred until the decoder is first used.
_decUTF32BE = getdecoder('UTF-32BE')
_decUTF32LE = getdecoder('UTF-32LE')
_decUTF16BE = getdecoder('UTF-16BE')
_decUTF16LE = getdecoder('UTF-16LE')
_decEBCDIC = getdecoder('IBM037') # EBCDIC
_decACE = getdecoder('ISO-8859-1') # An ASCII-compatible encoding
# Given a character index into a string, calculate its 1-based row and column
def _position(txt, idx):
row = txt.count('\n', 0, idx) + 1
ln = txt.rfind('\n', 0, idx) + 1
column = 0
for c in txt[ln:idx]:
if c == '\t':
column = (column // 8 + 1) * 8
else:
column += 1
column += 1
return (row, column)
def _normaliseNewlines(txt):
return txt.replace('\r\n', '\n').replace('\r', '\n')
def _logEvent(loggedEvents, e, pos=None):
if pos:
e.params['line'], e.params['column'] = pos
loggedEvents.append(e)
# Return the encoding from the declaration, or 'None'
# Return None if the 'permitted' list is passed in and the encoding
# isn't found in it. This is so that, e.g., a 4-byte-character XML file
# that claims to be US-ASCII will fail now.
def _decodeDeclaration(sig, dec, permitted, loggedEvents):
  """Decode sig with dec and extract the declared encoding.

  Logs an error when no declaration is present (it is required for these
  encodings), or when the declared encoding is neither in 'permitted' nor
  an alias of one.  Returns the (encoding, position) pair, or None when
  no declaration was found.
  """
  sig = _normaliseNewlines(dec(sig)[0])
  eo = _encodingFromDecl(sig)
  if not(eo):
    _logEvent(loggedEvents,
      logging.UnicodeError({'exception': 'This XML file (apparently ' + permitted[0] + ') requires an encoding declaration'}), (1, 1))
  elif permitted and not(eo[0].upper() in permitted):
    if _hasCodec(eo[0]):
      # see if the codec is an alias of one of the permitted encodings
      codec=codecs.lookup(eo[0])
      for encoding in permitted:
        if _hasCodec(encoding) and codecs.lookup(encoding)[-1]==codec[-1]: break
      else:
        # no alias matched: the declaration contradicts the detected encoding
        _logEvent(loggedEvents,
          logging.UnicodeError({'exception': 'This XML file claims an encoding of ' + eo[0] + ', but looks more like ' + permitted[0]}), eo[1])
  return eo
# Return the encoding from the declaration, or 'fallback' if none is
# present. Return None if the 'permitted' list is passed in and
# the encoding isn't found in it
def _decodePostBOMDeclaration(sig, dec, permitted, loggedEvents, fallback=None):
  """Extract the encoding declared after a BOM.

  Returns (fallback, None) when no declaration is present, the declared
  (encoding, position) when it is compatible with the BOM, or None (after
  logging) when declaration and BOM contradict each other.
  """
  decl = _normaliseNewlines(dec(sig)[0])
  eo = _encodingFromDecl(decl)
  if not eo:
    return (fallback, None)
  if eo[0].upper() in permitted:
    return eo
  _logEvent(loggedEvents,
    logging.UnicodeError({'exception': 'Document starts with ' + permitted[0] + ' BOM marker but has incompatible declaration of ' + eo[0]}), eo[1])
  return None
def isStandard(x):
  """ Is this encoding required by the XML 1.0 Specification, 4.3.3? """
  return x.upper() in ('UTF-8', 'UTF-16')
def isCommon(x):
  """Is this encoding commonly used, according to
  <http://www.syndic8.com/stats.php?Section=feeds#XMLEncodings>
  (as of 2004-03-27)?"""
  common = (
    'US-ASCII', 'ISO-8859-1',
    'EUC-JP', 'ISO-8859-2', 'ISO-8859-15', 'ISO-8859-7',
    'KOI8-R', 'SHIFT_JIS', 'WINDOWS-1250', 'WINDOWS-1251',
    'WINDOWS-1252', 'WINDOWS-1254', 'WINDOWS-1255', 'WINDOWS-1256',
    # This doesn't seem to be popular, but is the Chinese
    # government's mandatory standard
    'GB18030',
  )
  return isStandard(x) or x.upper() in common
# Inspired by xmlproc's autodetect_encoding, but rewritten
def _detect(doc_start, loggedEvents=[], fallback='UTF-8'):
"""This is the logic from appendix F.1 of the XML 1.0 specification.
Pass in the start of a document (>= 256 octets), and receive the encoding to
use, or None if there is a problem with the document."""
sig = doc_start[:4]
# With a BOM. We also check for a declaration, and make sure
# it doesn't contradict (for 4-byte encodings, it's required)
if sig == '\x00\x00\xFE\xFF': # UTF-32 BE
eo = _decodeDeclaration(doc_start[4:], _decUTF32BE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\xFF\xFE\x00\x00': # UTF-32 LE
eo = _decodeDeclaration(doc_start[4:], _decUTF32LE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x00\xFF\xFE' or sig == '\xFE\xFF\x00\x00':
raise UnicodeError('Unable to process UCS-4 with unusual octet ordering')
elif sig[:2] == '\xFE\xFF': # UTF-16 BE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16BE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:2] == '\xFF\xFE': # UTF-16 LE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16LE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:3] == '\xEF\xBB\xBF':
eo = _decodePostBOMDeclaration(doc_start[3:], _decACE, ['UTF-8'], loggedEvents, fallback='UTF-8')
# Without a BOM; we must read the declaration
elif sig == '\x00\x00\x00\x3C':
eo = _decodeDeclaration(doc_start, _decUTF32BE, ['UTF-32BE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x3C\x00\x00\x00':
eo = _decodeDeclaration(doc_start, _decUTF32LE, ['UTF-32LE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x3C\x00\x3F':
eo = _decodeDeclaration(doc_start, _decUTF16BE, ['UTF-16BE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x00\x3F\x00':
eo = _decodeDeclaration(doc_start, _decUTF16LE, ['UTF-16LE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x3F\x78\x6D':
eo = _encodingFromDecl(_normaliseNewlines(_decACE(doc_start)[0])) or ('UTF-8', None)
elif sig == '\x4C\x6F\xA7\x94':
eo = _decodeDeclaration(doc_start, _decEBCDIC, ['IBM037', 'CP037', 'IBM038', 'EBCDIC-INT'], loggedEvents)
# There's no BOM, and no declaration. It's UTF-8, or mislabelled.
else:
eo = (fallback, None)
return eo
def detect(doc_start, loggedEvents=None, fallback='UTF-8'):
  """Return the detected encoding name for doc_start, or None on failure.

  Thin wrapper over _detect that drops the declaration position.
  """
  # fix: avoid a shared mutable default list for loggedEvents
  if loggedEvents is None:
    loggedEvents = []
  eo = _detect(doc_start, loggedEvents, fallback)
  if eo:
    return eo[0]
  else:
    return None
# Matches an XML declaration with an encoding pseudo-attribute; group 1 is
# the whole 'encoding=...' clause, groups 2/3 the double-/single-quoted name.
_encRe = re.compile(r'<\?xml\s+version\s*=\s*(?:"[-a-zA-Z0-9_.:]+"|\'[-a-zA-Z0-9_.:]+\')\s+(encoding\s*=\s*(?:"([-A-Za-z0-9._]+)"|\'([-A-Za-z0-9._]+)\'))')
def _encodingFromDecl(x):
  """Return (encoding, (row, column)) from x's XML declaration, or None
  when no encoding declaration is present."""
  m = _encRe.match(x)
  if not m:
    return None
  group = 2 if m.group(2) else 3
  return m.group(group), _position(x, m.start(group))
def removeDeclaration(x):
  """Replace an XML document string's encoding declaration with the
  same number of spaces. Some XML parsers don't allow the
  encoding to be overridden, and this is a workaround."""
  m = _encRe.match(x)
  if not m:
    return x
  start, end = m.span(1)
  # blank out the whole 'encoding=...' clause, preserving offsets
  return x[:start] + ' ' * (end - start) + x[end:]
def _hasCodec(enc):
try:
return codecs.lookup(enc) is not None
except:
return False
def decode(mediaType, charset, bs, loggedEvents, fallback=None):
  """Decode document bytes bs, reconciling the transport charset with the
  detected/declared XML encoding.

  Returns (encoding, text); text may be a 'replace'-salvaged string when
  strict decoding fails, or None when no usable encoding was found.
  Logs warnings about obscure, non-standard, or mismatched encodings.
  """
  eo = _detect(bs, loggedEvents, fallback=None)

  # Check declared encodings
  if eo and eo[1] and _hasCodec(eo[0]):
    if not(isCommon(eo[0])):
      _logEvent(loggedEvents, ObscureEncoding({"encoding": eo[0]}), eo[1])
    elif not(isStandard(eo[0])):
      _logEvent(loggedEvents, NonstdEncoding({"encoding": eo[0]}), eo[1])

  if eo:
    encoding = eo[0]
  else:
    encoding = None

  if charset and encoding and charset.lower() != encoding.lower():
    # RFC 3023 requires us to use 'charset', but a number of aggregators
    # ignore this recommendation, so we should warn.
    loggedEvents.append(logging.EncodingMismatch({"charset": charset, "encoding": encoding}))

  if mediaType and mediaType.startswith("text/") and charset is None:
    loggedEvents.append(logging.TextXml({}))
    # RFC 3023 requires text/* to default to US-ASCII.  Issue a warning
    # if this occurs, but continue validation using the detected encoding
    try:
      bs.decode("US-ASCII")
    except:
      # NOTE(review): bare except, kept as-is — treated as "not ASCII"
      if not encoding:
        try:
          bs.decode(fallback)
          encoding=fallback
        except:
          pass
      if encoding and encoding.lower() != 'us-ascii':
        loggedEvents.append(logging.EncodingMismatch({"charset": "US-ASCII", "encoding": encoding}))

  # RFC 3023: transport-level charset wins over the in-document declaration
  enc = charset or encoding

  if enc is None:
    loggedEvents.append(logging.MissingEncoding({}))
    enc = fallback
  elif not(_hasCodec(enc)):
    if eo:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}), eo[1])
    else:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}))
    enc = fallback

  if enc is None:
    return enc, None

  dec = getdecoder(enc)
  try:
    return enc, dec(bs)[0]
  except UnicodeError, ue:
    # decoding failed: salvage what we can with replacement characters
    # and report the position of the failure
    salvage = dec(bs, 'replace')[0]
    if 'start' in ue.__dict__:
      # XXX 'start' is in bytes, not characters. This is wrong for multibyte
      # encodings
      pos = _position(salvage, ue.start)
    else:
      pos = None
    _logEvent(loggedEvents, logging.UnicodeError({"exception":ue}), pos)
    return enc, salvage
# Shared encoder used by asUTF8
_encUTF8 = codecs.getencoder('UTF-8')
def asUTF8(x):
  """Accept a Unicode string and return a UTF-8 encoded string, with
  its encoding declaration removed, suitable for parsing."""
  # NOTE: relies on the Python 2 unicode() builtin
  x = removeDeclaration(unicode(x))
  return _encUTF8(x)[0]
if __name__ == '__main__':
  # Simple CLI driver: print the detected encoding of each file argument,
  # or the logged events when detection fails.  (Python 2 print statements.)
  from sys import argv
  from os.path import isfile
  for x in argv[1:]:
    if isfile(x):
      f = open(x, 'r')
      # the first 1KB is more than enough for a BOM plus XML declaration
      l = f.read(1024)
      log = []
      eo = detect(l, log)
      if eo:
        print x,eo
      else:
        print repr(log)
| Python |
"""$Id: entry.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_item
from extension import extension_entry
#
# pie/echo entry element.
#
class entry(validatorBase, extension_entry, itunes_item):
  """Validator for an Atom <entry> element."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
  def prevalidate(self):
    # collected by do_link / do_content for cross-checks in validate()
    self.links=[]
    self.content=None
  def validate(self):
    if not 'title' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    # an author is required on the entry or inherited from the feed
    if not 'author' in self.children and not 'author' in self.parent.children:
      self.log(MissingElement({"parent":self.name, "element":"author"}))
    if not 'id' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"id"}))
    if not 'updated' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"updated"}))
    if self.content:
      # content present: a summary is still required when the content is
      # out-of-line (src attribute) or not readily displayable text/XML
      if not 'summary' in self.children:
        if self.content.attrs.has_key((None,"src")):
          self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
        ctype = self.content.type
        if ctype.find('/') > -1 and not (
           ctype.endswith('+xml') or ctype.endswith('/xml') or
           ctype.startswith('text/')):
          self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
    else:
      # no content: need a summary and an alternate link
      if not 'summary' in self.children:
        self.log(MissingTextualContent({"parent":self.parent.name, "element":self.name}))
      for link in self.links:
        if link.rel == 'alternate': break
      else:
        self.log(MissingContentOrAlternate({"parent":self.parent.name, "element":self.name}))
    # can only have one alternate per type
    types={}
    for link in self.links:
      if not link.rel=='alternate': continue
      if not link.type in types: types[link.type]=[]
      if link.hreflang in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
      else:
        types[link.type] += [link.hreflang]
    if self.itunes: itunes_item.validate(self)
  def do_author(self):
    from author import author
    return author()
  def do_category(self):
    from category import category
    return category()
  def do_content(self):
    # remember the content validator so validate() can inspect it
    from content import content
    self.content=content()
    return self.content, noduplicates()
  def do_contributor(self):
    from author import author
    return author()
  def do_id(self):
    # entry ids must be unique within the feed
    return canonicaluri(), nows(), noduplicates(), unique('id',self.parent,DuplicateEntries)
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_published(self):
    return rfc3339(), nows(), noduplicates()
  def do_source(self):
    return source(), noduplicates()
  def do_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_summary(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_updated(self):
    return rfc3339(), nows(), noduplicates(), unique('updated',self.parent,DuplicateUpdated)
  # Atom Publishing Protocol extensions
  def do_app_edited(self):
    return rfc3339(), nows(), noduplicates()
  def do_app_control(self):
    return app_control(), noduplicates()
class app_control(validatorBase):
  """Validator for the Atom Publishing Protocol app:control element."""
  def do_app_draft(self):
    # at most one app:draft, containing "yes" or "no"
    return yesno(), noduplicates()
from feed import feed
class source(feed):
  """Validator for an entry's <source>: feed metadata, but no entries."""
  def missingElement(self, params):
    # source elements get the softer "missing source element" message
    self.log(MissingSourceElement(params))
  def validate(self):
    self.validate_metadata()
  def do_author(self):
    # an author here satisfies the author requirement of the parent entry
    parent_children = self.parent.children
    if 'author' not in parent_children:
      parent_children.append('author')
    return feed.do_author(self)
  def do_entry(self):
    # a source must not contain entries; swallow the subtree
    self.log(UndefinedElement({"parent":self.name, "element":"entry"}))
    return eater()
| Python |
"""$Id: feed.py 1040 2009-02-15 20:01:32Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1040 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_channel
from extension import extension_feed
#
# Atom root element
#
class feed(validatorBase, extension_feed, itunes_channel):
def getExpectedAttrNames(self):
return [(u'urn:atom-extension:indexing', u'index')]
def prevalidate(self):
self.links = []
self.validate_optional_attribute((u'urn:atom-extension:indexing', u'index'), yesno)
def missingElement(self, params):
offset = [self.line - self.dispatcher.locator.getLineNumber(),
self.col - self.dispatcher.locator.getColumnNumber()]
self.log(MissingElement(params), offset)
def validate_metadata(self):
if not 'title' in self.children:
self.missingElement({"parent":self.name, "element":"title"})
if not 'id' in self.children:
self.missingElement({"parent":self.name, "element":"id"})
if not 'updated' in self.children:
self.missingElement({"parent":self.name, "element":"updated"})
# complete feeds can only have current=self and no other links
if 'fh_complete' in self.children:
for link in self.links:
if link.rel in link.rfc5005:
if link.rel == "current":
if link.href not in self.dispatcher.selfURIs:
self.log(CurrentNotSelfInCompleteFeed({"rel":link.rel}))
else:
self.log(FeedRelInCompleteFeed({"rel":link.rel}))
# ensure that there is a link rel="self"
if self.name != 'source':
for link in self.links:
if link.rel=='self': break
else:
offset = [self.line - self.dispatcher.locator.getLineNumber(),
self.col - self.dispatcher.locator.getColumnNumber()]
self.log(MissingSelf({"parent":self.parent.name, "element":self.name}), offset)
types={}
archive=False
current=False
for link in self.links:
if link.rel == 'current': current = True
if link.rel in ['prev-archive', 'next-archive']: archive = True
# attempts to link past the end of the list
if link.rel == 'first' and link.href in self.dispatcher.selfURIs:
for link2 in self.links:
if link2.rel == 'previous':
self.log(LinkPastEnd({"self":link.rel, "rel":link2.rel}))
if link.rel == 'last' and link.href in self.dispatcher.selfURIs:
for link2 in self.links:
if link2.rel == 'next':
self.log(LinkPastEnd({"self":link.rel, "rel":link2.rel}))
# can only have one alternate per type
if not link.rel=='alternate': continue
if not link.type in types: types[link.type]={}
if link.rel in types[link.type]:
if link.hreflang in types[link.type][link.rel]:
self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
else:
types[link.type][link.rel] += [link.hreflang]
else:
types[link.type][link.rel] = [link.hreflang]
if 'fh_archive' in self.children:
# archives should either have links or be marked complete
if not archive and 'fh_complete' not in self.children:
self.log(ArchiveIncomplete({}))
# archives should have current links
if not current and ('fh_complete' not in self.children):
self.log(MissingCurrentInArchive({}))
if self.itunes: itunes_channel.validate(self)
def metadata(self):
if 'entry' in self.children:
self.log(MisplacedMetadata({"parent":self.name, "element":self.child}))
def validate(self):
entries = self.children.count('entry')
dups = 0
for event in self.dispatcher.loggedEvents:
if isinstance(event,DuplicateEntries):
dups += event.params.get('msgcount',1)
if entries > 9 and entries == dups + 1:
self.log(DuplicateIds({}))
self.dispatcher.loggedEvents = [event
for event in self.dispatcher.loggedEvents
if not isinstance(event,DuplicateEntries)]
if not 'entry' in self.children:
self.validate_metadata()
  def do_author(self):
    # Feed-level metadata element: atom:author.
    self.metadata()
    from author import author
    return author()
  def do_category(self):
    # Feed-level metadata element: atom:category.
    self.metadata()
    from category import category
    return category()
  def do_contributor(self):
    # Feed-level metadata element: atom:contributor (same shape as author).
    self.metadata()
    from author import author
    return author()
  def do_generator(self):
    # Feed-level metadata element: atom:generator (non-blank, at most one).
    self.metadata()
    from generator import generator
    return generator(), nonblank(), noduplicates()
  def do_id(self):
    # Feed-level metadata element: atom:id (canonical URI, no whitespace).
    self.metadata()
    return canonicaluri(), nows(), noduplicates()
  def do_icon(self):
    # Feed-level metadata element: atom:icon (non-blank RFC 2396 URI).
    self.metadata()
    return nonblank(), nows(), rfc2396(), noduplicates()
  def do_link(self):
    # Feed-level metadata element: atom:link. The instance is retained in
    # self.links so validate() can cross-check rel/type/hreflang later.
    self.metadata()
    from link import link
    self.links.append(link())
    return self.links[-1]
  def do_logo(self):
    # Feed-level metadata element: atom:logo (non-blank RFC 2396 URI).
    self.metadata()
    return nonblank(), nows(), rfc2396(), noduplicates()
  def do_title(self):
    # Feed-level metadata element: atom:title (text construct).
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_subtitle(self):
    # Feed-level metadata element: atom:subtitle (text construct).
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_rights(self):
    # Feed-level metadata element: atom:rights (text construct).
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_updated(self):
    # Feed-level metadata element: atom:updated (RFC 3339 timestamp).
    self.metadata()
    return rfc3339(), nows(), noduplicates()
  def do_entry(self):
    # The first entry closes the metadata section, so validate the
    # accumulated feed metadata exactly once.
    if not 'entry' in self.children:
      self.validate_metadata()
    from entry import entry
    return entry()
  def do_app_collection(self):
    # Atom Publishing Protocol app:collection extension element.
    from service import collection
    return collection(), noduplicates()
| Python |
"""$Id: kml.py 1057 2009-07-21 21:54:14Z sa3ruby $"""
__author__ = "Gregor J. Rothfuss <http://greg.abstrakt.ch/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1057 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
import re
# This code tries to mimic the structure of the canonical KML XSD as much as possible.
# The KML XSD is at http://code.google.com/apis/kml/schema/kml21.xsd
# FeatureType from the XSD schema
#
class FeatureType(validatorBase):
  """Children shared by all KML Feature elements (name, visibility,
  snippet, styles, time primitives, region, atom metadata...)."""
  def do_name(self):
    return text(),noduplicates()
  def do_visibility(self):
    return zeroone(),noduplicates()
  def do_open(self):
    return zeroone(),noduplicates()
  def do_address(self):
    return nonhtml(),noduplicates()
  def do_phoneNumber(self):
    return text(),noduplicates() # todo: implement full check from http://www.koders.com/perl/fid426DF448FE99166A1AD0162538E583A0FA956EEA.aspx
  def do_Snippet(self):
    return Snippet(), noduplicates()
  def do_description(self):
    return text(), noduplicates()
  def do_LookAt(self):
    return LookAt(),noduplicates()
  # TimePrimitive
  def do_TimeStamp(self):
    return TimeStamp(),noduplicates()
  def do_TimeSpan(self):
    return TimeSpan(),noduplicates()
  # /TimePrimitive
  def do_styleUrl(self):
    return text(), noduplicates()
  # StyleSelector
  def do_Style(self):
    return Style()
  def do_StyleMap(self):
    return StyleMap()
  # /StyleSelector
  # 2.0 only
  def do_View(self):
    return View(),noduplicates()
  # /2.0 only
  def do_Region(self):
    return Region(), noduplicates()
  def do_Metadata(self):
    return Metadata()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_author(self):
    from author import author
    return author()
# OverlayType from the XSD schema
#
class OverlayType(validatorBase):
  """Children shared by overlay elements (color, draw order, icon)."""
  def do_color(self):
    return color(),noduplicates()
  def do_drawOrder(self):
    return Integer(),noduplicates()
  def do_Icon(self):
    return Icon(), noduplicates()
# ColorStyleType from the XSD schema
#
class ColorStyleType(validatorBase):
  """Children shared by color-carrying style elements."""
  def do_color(self):
    return color(),noduplicates()
  def do_colorMode(self):
    return colorMode(),noduplicates()
#
# Container from the XSD schema
#
class Container(validatorBase):
  """Elements that may directly contain Documents and Folders."""
  def do_Document(self):
    return Document()
  def do_Folder(self):
    return Folder()
#
# Feature from the XSD schema
#
class Feature(validatorBase):
  """Elements that may directly contain Placemarks."""
  def do_Placemark(self):
    return Placemark()
#
# Geometry from the XSD schema
#
class Geometry(Feature):
  """Elements that may contain any of the KML geometry types."""
  # TODO these should all be noduplicates(), but because they can appear
  # inside MultiGeometry, they are not.
  def do_Model(self):
    return Model()
  def do_LineString(self):
    return LineString()
  def do_LinearRing(self):
    return LinearRing()
  def do_Point(self):
    return Point()
  def do_Polygon(self):
    return Polygon()
  def do_MultiGeometry(self):
    return MultiGeometry()
#
# GeometryElements from the XSD schema
#
class GeometryElements(Geometry):
  """Common child elements of concrete geometries (extrude/tessellate/
  altitudeMode)."""
  def do_extrude(self):
    return zeroone(),noduplicates()
  def do_tessellate(self):
    return zeroone(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
#
# LinkType from the XSD schema
#
class LinkType(validatorBase):
  """Children shared by Link/Url elements (href plus refresh/view hints)."""
  def do_href(self):
    return text(),noduplicates()
  def do_refreshMode(self):
    return refreshMode(),noduplicates()
  def do_viewRefreshMode(self):
    return viewRefreshMode(),noduplicates()
  def do_viewRefreshTime(self):
    return Float(), noduplicates()
  def do_viewBoundScale(self):
    return Float(), noduplicates()
  def do_refreshVisibility(self):
    return refreshVisibility(),noduplicates()
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_viewFormat(self):
    return text(),noduplicates()
  def do_httpQuery(self):
    return text(),noduplicates()
#
# LookAtType from the XSD schema
#
class LookAtType(Feature):
  """Children shared by LookAt/View camera elements."""
  def do_longitude(self):
    return longitude(),noduplicates()
  def do_latitude(self):
    return latitude(),noduplicates()
  def do_altitude(self):
    return FloatWithNegative(),noduplicates()
  def do_range(self):
    return Float(),noduplicates()
  def do_tilt(self):
    # NOTE(review): validated with latitude() i.e. [-90,90]; confirm this
    # matches the KML tilt range before tightening.
    return latitude(),noduplicates()
  def do_heading(self):
    return angle360(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
#
# KML element.
#
class kml(validatorBase, Container, Feature):
  """Root <kml> element; dispatches top-level features, overlays and
  network-link control."""
  # Imported inside the class body so the feed-type constants become
  # class attributes.
  from logging import TYPE_KML20, TYPE_KML21, TYPE_KML22
  def do_NetworkLink(self):
    return NetworkLink()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_ScreenOverlay(self):
    return ScreenOverlay()
  def do_NetworkLinkControl(self):
    return NetworkLinkControl()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_author(self):
    from author import author
    return author()
class NetworkLinkControl(validatorBase):
  """<NetworkLinkControl>: server-side control data for network links."""
  def do_minRefreshPeriod(self):
    return Float(),noduplicates()
  def do_linkName(self):
    return text(),noduplicates()
  def do_linkDescription(self):
    return text(),noduplicates()
  def do_cookie(self):
    return text(),noduplicates()
  def do_message(self):
    return text(), noduplicates()
  def do_linkSnippet(self):
    return Snippet(), noduplicates()
  def do_expires(self):
    return w3cdtf(),noduplicates()
  def do_Update(self):
    return Update(),noduplicates()
  def do_LookAt(self):
    return LookAt(),noduplicates()
  def do_View(self):
    return View(),noduplicates()
class Update(validatorBase):
  """<Update>: requires a targetHref plus change operations."""
  def validate(self):
    if not "targetHref" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"targetHref"}))
  def do_targetHref(self):
    return text(),noduplicates()
  # todo: child validation
  def do_Change(self):
    return noduplicates()
  # todo: child validation
  # NOTE(review): the KML spec's Update children are Create/Change/Delete;
  # this handler for a nested <Update> looks like it may have been meant
  # to be do_Create -- confirm against the schema before renaming.
  def do_Update(self):
    return noduplicates()
  # todo: child validation
  def do_Delete(self):
    return noduplicates()
class NetworkLink(validatorBase, FeatureType, Feature):
  """<NetworkLink>: must reference remote content via Link (or the
  deprecated Url)."""
  def validate(self):
    if not "Link" in self.children and not "Url" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Link"}))
  # NOTE(review): returning an Update() validator for <targetHref> looks
  # like a copy/paste slip (cf. Update.do_targetHref returning text());
  # confirm intent before changing.
  def do_targetHref(self):
    return Update(),noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_flyToView(self):
    return zeroone(),noduplicates()
  def do_Link(self):
    return Link(),noduplicates()
  def do_Url(self):
    return Url(),noduplicates()
class Document(validatorBase, FeatureType, Container, Feature):
  """<Document>: a feature container that may also declare a Schema."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_ScreenOverlay(self):
    return ScreenOverlay()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_NetworkLink(self):
    return NetworkLink()
  def do_Schema(self):
    return Schema(), noduplicates()
class Schema(validatorBase):
  """<Schema>: custom-data schema declaration with typed fields."""
  def getExpectedAttrNames(self):
    return [(None, u'name'), (None, u'parent')]
  def do_SimpleField(self):
    return SchemaField()
  def do_SimpleArrayField(self):
    return SchemaField()
  def do_ObjField(self):
    return SchemaField()
  def do_ObjArrayField(self):
    return SchemaField()
class SchemaField(validatorBase):
  """A Schema field: requires name and a type from SchemaFieldType."""
  def getExpectedAttrNames(self):
    return [
      (None, u'name'),
      (None, u'type'),
    ]
  def validate(self):
    self.validate_required_attribute((None,'name'), text)
    self.validate_required_attribute((None,'type'), SchemaFieldType)
class Placemark(validatorBase, FeatureType, Geometry):
  """<Placemark>: a feature with geometry; id is expected and must be
  unique within the parent."""
  def prevalidate(self):
    # attrs.has_key is Python 2 SAX API; flags a missing (but expected) id.
    if not self.attrs.has_key((None,"id")):
      self.log(MissingId({"parent":self.name, "element":"id"}))
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_GeometryCollection(self):
    return GeometryCollection()
class MultiGeometry(Geometry):
  """<MultiGeometry>: container for multiple geometries."""
  # TODO: check for either geometry or multigeometry in feature, but not both?
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class ScreenOverlay(validatorBase, FeatureType, OverlayType):
  """<ScreenOverlay>: image pinned to screen coordinates."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_geomColor(self):
    return geomColor(),noduplicates()
  def do_overlayXY(self):
    return overlayxy(), noduplicates()
  def do_screenXY(self):
    return overlayxy(), noduplicates()
  def do_rotationXY(self):
    return overlayxy(), noduplicates()
  def do_size(self):
    return overlayxy(), noduplicates()
class GroundOverlay(validatorBase, FeatureType, OverlayType):
  """<GroundOverlay>: image draped over terrain; LatLonBox is required."""
  def validate(self):
    if not "LatLonBox" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"LatLonBox"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_altitude(self):
    return FloatWithNegative(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
  def do_geomColor(self):
    return geomColor(),noduplicates()
  def do_LatLonBox(self):
    return LatLonBox(), noduplicates()
class overlayxy(validatorBase):
  """Screen/overlay coordinate pair: x/y values with unit attributes."""
  def getExpectedAttrNames(self):
    return [
      (None, u'x'),
      (None, u'y'),
      (None, u'xunits'),
      (None, u'yunits'),
    ]
  def validate(self):
    self.validate_required_attribute((None,'x'), FloatWithNegative)
    self.validate_required_attribute((None,'y'), FloatWithNegative)
    self.validate_required_attribute((None,'xunits'), kmlunits)
    self.validate_required_attribute((None,'yunits'), kmlunits)
class Region(validatorBase):
  """<Region>: requires a LatLonAltBox; may carry a level-of-detail."""
  def validate(self):
    required = "LatLonAltBox"
    if required not in self.children:
      self.log(MissingElement({"parent":self.name, "element":required}))
  def do_LatLonAltBox(self):
    return LatLonAltBox(), noduplicates()
  def do_Lod(self):
    return Lod(), noduplicates()
class LatLonBox(validatorBase):
  """<LatLonBox>: geographic bounding box; all four edges are required."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def validate(self):
    # Report each missing edge, in a fixed order.
    for edge in ("north", "south", "east", "west"):
      if edge not in self.children:
        self.log(MissingElement({"parent":self.name, "element":edge}))
  def do_north(self):
    return latitude(),noduplicates()
  def do_south(self):
    return latitude(),noduplicates()
  def do_east(self):
    return longitude(),noduplicates()
  def do_west(self):
    return longitude(),noduplicates()
  def do_rotation(self):
    return longitude(),noduplicates()
class LatLonAltBox(validatorBase, LatLonBox):
  """<LatLonAltBox>: LatLonBox extended with an altitude range."""
  def do_minAltitude(self):
    return Float(),noduplicates()
  def do_maxAltitude(self):
    return Float(), noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
class Lod(validatorBase):
  """<Lod>: level-of-detail pixel and fade extents."""
  def do_minLodPixels(self):
    return Float(),noduplicates()
  def do_maxLodPixels(self):
    return Float(),noduplicates()
  def do_minFadeExtent(self):
    return Float(),noduplicates()
  def do_maxFadeExtent(self):
    return Float(),noduplicates()
class Metadata(validatorBase):
  """<Metadata>: arbitrary user data; accepted without inspection."""
  # TODO do smarter validation here
  def validate(self):
    # NOTE(review): the return value of validate() appears unused by the
    # framework (cf. other validate() methods that return nothing).
    return noduplicates()
class Snippet(text):
  """<Snippet>: short plain-text description with optional maxLines."""
  def validate(self):
    # NOTE(review): returned validators from validate() appear unused --
    # confirm against base.validatorBase before relying on this check.
    return nonhtml(),noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'maxLines')]
class Folder(validatorBase, FeatureType, Container, Feature):
  """<Folder>: general-purpose feature container."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_NetworkLink(self):
    return NetworkLink()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_ScreenOverlay(self):
    return ScreenOverlay()
class LookAt(validatorBase, LookAtType):
  """<LookAt>: camera position; children come from LookAtType."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class StyleMap(validatorBase):
  """<StyleMap>: maps style states to styles; at least one Pair required."""
  def validate(self):
    if not "Pair" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Pair"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_Pair(self):
    return Pair()
class Style(validatorBase):
  """<Style>: bundle of sub-styles; id must be unique within the parent."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_LineStyle(self):
    return LineStyle(), noduplicates()
  def do_PolyStyle(self):
    return PolyStyle(), noduplicates()
  def do_IconStyle(self):
    return IconStyle(), noduplicates()
  def do_ListStyle(self):
    return ListStyle(), noduplicates()
  def do_LabelStyle(self):
    return LabelStyle(), noduplicates()
  def do_BalloonStyle(self):
    return BalloonStyle(), noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
  def do_labelColor(self):
    return labelColor(),noduplicates()
class IconStyle(validatorBase, ColorStyleType):
  """<IconStyle>: icon appearance (heading, scale, hot spot)."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_heading(self):
    return angle360(),noduplicates()
  def do_Icon(self):
    return Icon(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
  def do_hotSpot(self):
    return overlayxy(), noduplicates()
class Icon(validatorBase):
  """<Icon>: image reference; href is required."""
  def validate(self):
    if not 'href' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"href"}))
  def do_href(self):
#    if not self.getFeedType() == TYPE_KML20 and self.startswith('root://'):
#      self.log(DeprecatedRootHref())
    return text(),noduplicates() # would be url, but has these weird root://
  # x/y/w/h sub-image offsets are only valid in KML 2.0 (see noiconoffset).
  def do_x(self):
    return noiconoffset()
  def do_y(self):
    return noiconoffset()
  def do_w(self):
    return noiconoffset()
  def do_h(self):
    return noiconoffset()
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_refreshMode(self):
    return refreshMode(), noduplicates()
  def do_viewRefreshMode(self):
    return viewRefreshMode(), noduplicates()
  def do_viewRefreshTime(self):
    return Float(), noduplicates()
  def do_viewBoundScale(self):
    return Float(), noduplicates()
class BalloonStyle(validatorBase):
  """<BalloonStyle>: description balloon colors and template text."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_textColor(self):
    return color(),noduplicates()
  def do_bgColor(self):
    return color(),noduplicates()
  def do_color(self):
    return color(),noduplicates()
  def do_text(self):
    return text(),noduplicates()
class ListStyle(validatorBase):
  """<ListStyle>: how a feature is shown in the list view."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_bgColor(self):
    return color(),noduplicates()
  def do_ItemIcon(self):
    return ItemIcon()
  def do_listItemType(self):
    return listItemType(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
class ItemIcon(validatorBase):
  """<ItemIcon>: list-view icon; href is required, state is an enum."""
  def validate(self):
    if 'href' not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"href"}))
  def do_href(self):
    return text(),noduplicates()
  def do_state(self):
    return itemIconState(),noduplicates()
class LabelStyle(validatorBase, ColorStyleType):
  """<LabelStyle>: label color and scale."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_labelColor(self):
    return labelColor(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
class LineStyle(validatorBase, ColorStyleType):
  """<LineStyle>: line color/width."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_width(self):
    return Float(),noduplicates()
class PolyStyle(validatorBase, ColorStyleType):
  """<PolyStyle>: polygon fill/outline flags."""
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_fill(self):
    return zeroone(), noduplicates()
  def do_outline(self):
    return zeroone(), noduplicates()
class Link(validatorBase, LinkType):
  """<Link>: remote resource reference; children come from LinkType."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class Pair(validatorBase):
  """StyleMap <Pair>: both key and styleUrl children are required."""
  def validate(self):
    for required in ("key", "styleUrl"):
      if required not in self.children:
        self.log(MissingElement({"parent":self.name, "element":required}))
  def do_key(self):
    return styleState(),noduplicates()
  def do_styleUrl(self):
    return text(),noduplicates()
class Point(validatorBase, GeometryElements):
  """<Point>: a single location; coordinates child is required."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def validate(self):
    if "coordinates" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def do_coordinates(self):
    return coordinates()
class Model(validatorBase):
  """<Model>: 3D model reference; Link is required."""
  # TODO seems to me that Location and Orientation ought to be required?
  def validate(self):
    if not "Link" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Link"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_altitudeMode(self):
    return altitudeMode(), noduplicates()
  def do_Location(self):
    return Location(), noduplicates()
  def do_Orientation(self):
    return Orientation(), noduplicates()
  def do_Scale(self):
    return Scale(), noduplicates()
  def do_Link(self):
    return Link(), noduplicates()
  def do_ResourceMap(self):
    return ResourceMap(), noduplicates()
class ResourceMap(validatorBase):
  """<ResourceMap>: texture alias list for a Model."""
  def do_Alias(self):
    return Alias()
class Alias(validatorBase):
  """<Alias>: maps a model-internal texture href to a fetchable one."""
  def do_targetHref(self):
    return text(),noduplicates()
  def do_sourceHref(self):
    return text(),noduplicates()
class Location(validatorBase):
  """Model <Location>: longitude, latitude and altitude all required."""
  # TODO they are loosely defined in the schema, but 0,0,0 makes no sense.
  def validate(self):
    for coord in ("longitude", "latitude", "altitude"):
      if coord not in self.children:
        self.log(MissingElement({"parent":self.name, "element":coord}))
  def do_longitude(self):
    return longitude(), noduplicates()
  def do_latitude(self):
    return latitude(), noduplicates()
  def do_altitude(self):
    return FloatWithNegative(), noduplicates()
class Scale(validatorBase):
  """Model <Scale>: per-axis scale factors."""
  def do_x(self):
    return Float(), noduplicates()
  def do_y(self):
    return Float(), noduplicates()
  def do_z(self):
    return Float(), noduplicates()
class Orientation(validatorBase):
  """Model <Orientation>: heading/tilt/roll angles."""
  def do_heading(self):
    return angle360(), noduplicates()
  def do_tilt(self):
    return angle360(), noduplicates()
  def do_roll(self):
    return angle360(), noduplicates()
class Polygon(validatorBase, GeometryElements):
  """<Polygon>: requires an outer boundary; inner boundaries may repeat."""
  def validate(self):
    if not "outerBoundaryIs" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"outerBoundaryIs"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_outerBoundaryIs(self):
    return boundary(), noduplicates()
  def do_innerBoundaryIs(self):
    return boundary()
class boundary(validatorBase):
  """Polygon boundary wrapper: must contain a LinearRing."""
  def validate(self):
    if "LinearRing" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"LinearRing"}))
  def do_LinearRing(self):
    return LinearRing()
class LineString(validatorBase, GeometryElements):
  """<LineString>: connected line segments; coordinates required."""
  def validate(self):
    if not "coordinates" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_coordinates(self):
    return coordinates(), noduplicates()
class LinearRing(validatorBase, GeometryElements):
  """<LinearRing>: closed line; coordinates required."""
  def validate(self):
    if not "coordinates" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_coordinates(self):
    return coordinates(), noduplicates()
class TimeSpan(validatorBase):
  """<TimeSpan>: optional begin/end W3C datetime pair."""
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_begin(self):
    return w3cdtf(),noduplicates()
  def do_end(self):
    return w3cdtf(),noduplicates()
class TimeStamp(validatorBase):
  """<TimeStamp>: a single moment; when is required."""
  def validate(self):
    if not "when" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"when"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_when(self):
    return unbounded_w3cdtf(),noduplicates()
class kmlunits(enumeration):
  # Units for overlayXY/screenXY coordinate attributes.
  error = InvalidKmlUnits
  valuelist = [
    "fraction", "pixels", "insetPixels"
  ]
class colorMode(enumeration):
  # How a style's color is applied.
  error = InvalidColorMode
  valuelist = [
    "normal", "random"
  ]
class refreshMode(enumeration):
  # When a linked resource is refetched.
  error = InvalidRefreshMode
  valuelist = [
    "onChange", "onInterval", "onExpire"
  ]
class viewRefreshMode(enumeration):
  # When the view triggers a refetch of a linked resource.
  error = InvalidViewRefreshMode
  valuelist = [
    "never", "onRequest", "onStop", "onRegion"
  ]
class styleState(enumeration):
  # Valid values of a StyleMap Pair's <key>.
  error = InvalidStyleState
  valuelist = [
    "normal", "highlight"
  ]
class listItemType(enumeration):
  # How a feature is rendered in the list view.
  error = InvalidListItemType
  valuelist = [
    "radioFolder", "check", "checkHideChildren", "checkOffOnly"
  ]
class itemIconState(enumeration):
  # Valid ItemIcon <state> values, including space-separated combinations.
  error = InvalidItemIconState
  valuelist = [
    "open", "closed", "error", "fetching0", "fetching1", "fetching2",
    "open error", "closed error", "fetching0 error", "fetching1 error",
    "fetching2 error"
  ]
class altitudeMode(enumeration):
  # How altitude values are interpreted.
  error = InvalidAltitudeMode
  valuelist = [
    "clampToGround", "relativeToGround", "absolute"
  ]
class SchemaFieldType(enumeration):
  # Permitted data types for Schema fields.
  error = InvalidSchemaFieldType
  valuelist = [
    "string", "int", "uint", "short", "ushort", "float", "double","bool"
  ]
#
# Deprecated in 2.0
#
class antialias(validatorBase):
  """Deprecated in KML 2.0: <antialias> has no replacement."""
  def prevalidate(self):
    self.log(Deprecated({"element":self.name, "replacement":"none"}))
  def validate(self):
    # NOTE(review): returned validators from validate() appear unused.
    return zeroone(),noduplicates()
class View(validatorBase, LookAtType):
  """Deprecated in KML 2.0: <View>, replaced by <LookAt>."""
  def prevalidate(self):
    self.log(Deprecated({"element":self.name, "replacement":"LookAt"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
#
# Deprecated in 2.1
#
class labelColor(text):
  """Deprecated in KML 2.1: <labelColor>, replaced by <LabelStyle>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"LabelStyle"}))
  def validate(self):
    # Bug fix: anchor the end of the pattern like the `color` validator
    # does; without the trailing `$`, 8 hex digits followed by garbage
    # (e.g. "ff0000ffXY") passed. re.match already anchors the start.
    if not re.match("([a-f]|[A-F]|[0-9]){8}$",self.value):
      return self.log(InvalidColor({'value':self.value}))
class geomColor(text):
  """Deprecated in KML 2.1: <geomColor>, replaced by <color>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"color"}))
  def validate(self):
    # Bug fix: anchor the end of the pattern like the `color` validator
    # does; without the trailing `$`, trailing garbage after 8 hex digits
    # passed validation. re.match already anchors the start.
    if not re.match("([a-f]|[A-F]|[0-9]){8}$",self.value):
      return self.log(InvalidColor({'value':self.value}))
class geomScale(text):
  """Deprecated in KML 2.1: <geomScale>, replaced by <scale>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"scale"}))
  def validate(self):
    # NOTE(review): returning a Float() instance from validate() does not
    # actually validate self.value -- confirm framework semantics.
    return Float()
class GeometryCollection(validatorBase, Geometry):
  """Deprecated in KML 2.1: replaced by <MultiGeometry>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"MultiGeometry"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class Url(validatorBase, LinkType):
  """Deprecated in KML 2.1: <Url>, replaced by <Link>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Link"}))
class refreshVisibility(validatorBase):
  """Deprecated in KML 2.1: <refreshVisibility>, replaced by <Update>."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Update"}))
  def validate(self):
    # Bug fix: `zeroone` was returned as a class rather than an instance,
    # so the 0/1 value check could never run; instantiate it like the
    # sibling `antialias` validator does.
    return zeroone(), noduplicates()
# In theory, the spec also supports things like .2 if unit is fractions. ugh.
class noiconoffset(text):
  """Deprecated in KML 2.1: x/y/w/h icon offsets folded into <Icon>."""
  def validate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Icon"}))
    # NOTE(review): returned validators from validate() appear unused.
    return Integer(), noduplicates()
#
# Validators
#
class zeroone(text):
  """Boolean element whose text must be literally "0" or "1"."""
  def normalizeWhitespace(self):
    # Whitespace matters here; skip the base class normalization.
    pass
  def validate(self):
    flag = self.value.lower()
    if flag != '0' and flag != '1':
      self.log(InvalidZeroOne({"parent":self.parent.name, "element":self.name,"value":self.value}))
class color(text):
  """Validates a KML color value: exactly eight hexadecimal digits."""
  _HEX8 = re.compile(r"^[0-9A-Fa-f]{8}$")
  def validate(self):
    if self._HEX8.match(self.value) is None:
      return self.log(InvalidColor({'value':self.value}))
class coordinates(text):
  """Validates a KML <coordinates> value: whitespace-separated tuples of
  "longitude,latitude[,altitude]"."""
  def validate(self):
    values = self.value.strip().split()
    for value in values:
      # ensure that commas are only used to separate lat and long (and alt)
      if not re.match('^[-+.0-9]+,[-+.0-9]+(,[-+.0-9]+)?$',
          value.strip()):
        return self.log(InvalidKmlCoordList({'value':self.value}))
      # Now validate individual coordinates
      point = value.split(',')
      # First coordinate is longitude
      try:
        lon = float(point[0].strip())
        if lon > 180 or lon < -180:
          raise ValueError
        else:
          self.log(ValidLongitude({"parent":self.parent.name, "element":self.name, "value":lon}))
      except ValueError:
        # Bug fix: `lon` is unbound when float() itself raises (the regex
        # above still admits strings like "1.2.3"), which turned the log
        # call into a NameError; report the raw text instead.
        self.log(InvalidKmlLongitude({"parent":self.parent.name, "element":self.name, "value":point[0].strip()}))
      # Second coordinate is latitude
      try:
        lat = float(point[1].strip())
        if lat > 90 or lat < -90:
          raise ValueError
        else:
          self.log(ValidLatitude({"parent":self.parent.name, "element":self.name, "value":lat}))
      except ValueError:
        # Bug fix: same unbound-name hazard as longitude above.
        self.log(InvalidKmlLatitude({"parent":self.parent.name, "element":self.name, "value":point[1].strip()}))
      # Third coordinate value (altitude) has to be float, if present
      if len(point) == 3:
        # Bug fix: permit a leading minus sign -- altitudes below sea
        # level are valid, and <altitude> elsewhere in this module is
        # validated with FloatWithNegative.
        if not re.match(r'-?\d+\.?\d*$', point[2]):
          self.log(InvalidFloat({"attr":self.name, "value":point[2]}))
class angle360(text):
  """Validates an angle within the closed range [-360, 360] degrees."""
  def validate(self):
    try:
      ok = -360 <= float(self.value) <= 360
    except ValueError:
      ok = False
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if ok:
      self.log(ValidAngle(params))
    else:
      self.log(InvalidAngle(params))
class FloatWithNegative(text):
  """Validates a decimal number that may carry a leading minus sign."""
  _PATTERN = re.compile(r'-?\d+\.?\d*$')
  def validate(self, name=None):
    if self._PATTERN.match(self.value) is None:
      self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
| Python |
"""$Id: validators.py 1058 2009-08-22 09:35:18Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1058 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
import re, time, datetime
from uri import canonicalForm, urljoin
from rfc822 import AddressList, parsedate, parsedate_tz, mktime_tz
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
def implausible_822(value):
  """Heuristic: True when an RFC 822 date string is before 1990 or more
  than a few minutes in the future."""
  # NOTE(review): value is a string here, so value[0] is a character;
  # comparing it to 1990 is a constant False in Python 2 (numbers sort
  # before strings) and a TypeError in Python 3 -- looks vestigial.
  if value[0] < 1990: return True
  try:
    from rfc822 import parsedate_tz, mktime_tz
  except:
    # no time zone functions available, granularity is a day
    # NOTE(review): this compares a string to a struct_time (Python 2
    # cross-type comparison) -- the result is unlikely to be meaningful.
    pvalue=parsedate(value)
    return value > time.gmtime(time.time()+86400) or pvalue[0]<1990
  try:
    pvalue=parsedate_tz(value)
    zvalue=mktime_tz(pvalue)
  except:
    # outside of range of what parsedate supports: definitely problematic
    return True
  # when time zone functions are available, granularity is ten minutes
  return zvalue > time.time()+600 or pvalue[0]<1990
def implausible_8601(value):
  """Heuristic: True when an ISO 8601 date string is before 1990 or more
  than a few minutes in the future."""
  # Lexicographic comparison is valid: ISO 8601 dates sort naturally.
  if value < '1990-01-01': return True
  try:
    import xml.utils.iso8601
  except:
    # no time zone functions available, granularity is a day
    tomorrow=time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime(time.time()+86400))
    return (value > tomorrow)
  try:
    zvalue = xml.utils.iso8601.parse(value)
  except:
    # outside of range of what parse supports: definitely problematic
    return True
  # when time zone functions are available, granularity is ten minutes
  return zvalue > time.time() + 600
#
# Valid mime type
#
# type/subtype with optional ;param=value pairs (RFC 2045 token syntax).
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+(\s*;\s*[^\s()<>,;:\\"/[\]?=]+=("(\\"|[^"])*"|[^\s()<>,;:\\"/[\]?=]+))*$')
#
# Extensibility hook: logic varies based on type of feed
#
def any(self, name, qname, attrs):
  # NOTE: intentionally shadows the builtin any() within this module.
  # RSS 1.0 routes unknown markup to the RDF extension handler; every
  # other feed type silently consumes it.
  if self.getFeedType() == TYPE_RSS1:
    from rdf import rdfExtension
    return rdfExtension(qname)
  return eater()
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
  """Consumes an element and all of its descendants, checking only
  character sanity and namespace well-formedness."""
  def getExpectedAttrNames(self):
    # Accept whatever attributes are present.
    return self.attrs.getNames()
  def characters(self, string):
    # C1 control characters and U+FFFD are symptoms of encoding damage.
    for c in string:
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
  def startElementNS(self, name, qname, attrs):
    # RSS 2.0 arbitrary restriction on extensions
    feedtype=self.getFeedType()
    if (not qname) and feedtype and (feedtype==TYPE_RSS2) and self.name.find('_')>=0:
      from logging import NotInANamespace
      self.log(NotInANamespace({"parent":self.name, "element":name, "namespace":'""'}))
    # ensure element is "namespace well formed"
    if name.find(':') != -1:
      from logging import MissingNamespace
      self.log(MissingNamespace({"parent":self.name, "element":name}))
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
      for c in attrs.get((namespace,attr)):
        if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
          from validators import BadCharacters
          self.log(BadCharacters({"parent":name, "element":attr}))
    # eat children (recurse with another eater of the same class)
    self.push(self.__class__(), name, attrs)
from HTMLParser import HTMLParser, HTMLParseError
class HTMLValidator(HTMLParser):
htmltags = [
"a", "abbr", "acronym", "address", "applet", "area", "article", "aside",
"audio", "b", "base", "basefont", "bdo", "big", "blockquote", "body",
"br", "button", "canvas", "caption", "center", "cite", "code", "col",
"colgroup", "command", "datagrid", "datalist", "dd", "del", "details",
"dialog", "dir", "div", "dfn", "dl", "dt", "em", "event-source",
"fieldset", "figure", "font", "footer", "form", "frame", "frameset",
"h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hr", "html", "i",
"iframe", "img", "input", "ins", "isindex", "kbd", "label", "legend",
"li", "link", "m", "map", "menu", "meta", "meter", "nav", "noframes",
"noscript", "object", "ol", "output", "optgroup", "option", "p", "param",
"pre", "progress", "q", "s", "samp", "script", "section", "select",
"small", "source", "span", "strike", "strong", "style", "sub", "sup",
"table", "tbody", "td", "textarea", "tfoot", "th", "thead", "time",
"title", "tr", "tt", "u", "ul", "var", "xmp", "plaintext", "embed",
"comment", "listing", "video", "wbr"]
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer',
'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map',
'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup',
'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub',
'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript', 'wbr']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autoplay', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'coords', 'data', 'datafld',
'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir',
'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang', 'xmlns']
acceptable_css_properties = ['azimuth', 'background', 'background-color',
'border', 'border-bottom', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-collapse', 'border-color', 'border-left',
'border-left-color', 'border-left-style', 'border-left-width',
'border-right', 'border-right-color', 'border-right-style',
'border-right-width', 'border-spacing', 'border-style', 'border-top',
'border-top-color', 'border-top-style', 'border-top-width', 'border-width',
'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float',
'font', 'font-family', 'font-size', 'font-style', 'font-variant',
'font-weight', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'margin', 'margin-bottom', 'margin-left',
'margin-right', 'margin-top', 'overflow', 'padding', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'pause', 'pause-after',
'pause-before', 'pitch', 'pitch-range', 'richness', 'speak',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate',
'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi',
'vertical-align', 'voice-family', 'volume', 'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['aqua', 'auto', 'black', 'block', 'blue', 'bold',
'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted',
'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime',
'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d?\.?\d?\d(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop - image
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'style', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href',
'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base',
'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2',
'zoomAndPan']
def log(self,msg):
offset = [self.element.line + self.getpos()[0] - 1 -
self.element.dispatcher.locator.getLineNumber(),
-self.element.dispatcher.locator.getColumnNumber()]
self.element.log(msg, offset)
def __init__(self,value,element):
self.element=element
self.stack = []
self.valid = True
HTMLParser.__init__(self)
if value.lower().find('<?import ') >= 0:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":"?import"}))
try:
self.feed(value)
self.close()
if self.valid:
self.log(ValidHtml({"parent":self.element.parent.name, "element":self.element.name}))
except HTMLParseError, msg:
element = self.element
offset = [element.line - element.dispatcher.locator.getLineNumber(),
- element.dispatcher.locator.getColumnNumber()]
match = re.search(', at line (\d+), column (\d+)',str(msg))
if match: offset[0] += int(match.group(1))-1
element.log(NotHtml({"parent":element.parent.name, "element":element.name, "message":"Invalid HTML", "value": str(msg)}),offset)
def handle_starttag(self, tag, attributes):
if tag.lower() not in self.htmltags:
self.log(NotHtml({"parent":self.element.parent.name, "element":self.element.name,"value":tag, "message": "Non-html tag"}))
self.valid = False
elif tag.lower() not in HTMLValidator.acceptable_elements:
if not 'embed' in self.stack and not 'object' in self.stack:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":tag}))
else:
for (name,value) in attributes:
if name.lower() == 'style':
for evil in checkStyle(value):
self.log(DangerousStyleAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":"style", "value":evil}))
elif name.lower() not in self.acceptable_attributes:
self.log(SecurityRiskAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":name}))
self.stack.append(tag)
def handle_endtag(self, tag):
if tag in self.stack:
while self.stack[-1] != tag: self.stack.pop()
self.stack.pop()
def handle_charref(self, name):
if name.startswith('x'):
value = int(name[1:],16)
else:
value = int(name)
if 0x80 <= value <= 0x9F or value == 0xfffd:
self.log(BadCharacters({"parent":self.element.parent.name,
"element":self.element.name, "value":"&#" + name + ";"}))
#
# Scrub CSS properties for potentially evil intent
#
def checkStyle(style):
  """Return a list of suspicious property/keyword strings found in a style
  attribute (empty when the style looks safe).  A style that fails the
  basic lexical checks is returned whole as a one-element list."""
  # character-level sanity check, then a coarse "prop: value;" grammar check
  if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
    return [style]
  if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style):
    return [style]
  flagged = []
  for name, val in re.findall("([-\w]+)\s*:\s*([^:;]*)", style.lower()):
    if name not in HTMLValidator.acceptable_css_properties:
      if name not in flagged:
        flagged.append(name)
    elif name.split('-')[0] in ['background','border','margin','padding']:
      # shorthand properties: vet each keyword/value individually
      for keyword in val.split():
        if keyword not in HTMLValidator.acceptable_css_keywords and \
           not HTMLValidator.valid_css_values.match(keyword):
          if keyword not in flagged:
            flagged.append(keyword)
  return flagged
#
# This class simply eats HTML events, while identifying unsafe elements
# and attributes
#
class htmlEater(validatorBase):
  """Consume HTML events without further validation, but flag unsafe
  elements, attributes and styles (same allow-lists as HTMLValidator)."""
  def getExpectedAttrNames(self):
    # accept whatever attributes are present (returns None when there are
    # none, matching the original behavior)
    if self.attrs and len(self.attrs):
      return self.attrs.getNames()
  def textOK(self): pass
  def startElementNS(self, name, qname, attrs):
    # only un-namespaced attributes are checked ('is None', not '==')
    for attr in attrs.getNames():
      if attr[0] is None:
        if attr[1].lower() == 'style':
          for value in checkStyle(attrs.get(attr)):
            self.log(DangerousStyleAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1], "value":value}))
        elif attr[1].lower() not in HTMLValidator.acceptable_attributes:
          self.log(SecurityRiskAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1]}))
    # eat children too
    self.push(htmlEater(), self.name, attrs)
    if name.lower() not in HTMLValidator.acceptable_elements:
      self.log(SecurityRisk({"parent":self.parent.name, "element":self.name, "tag":name}))
  def endElementNS(self,name,qname):
    pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
class text(validatorBase):
  """Element expected to contain only character data.

  For RSS 1.0 feeds, rdf:parseType/datatype/resource attributes are
  accepted and RDF (or inline XHTML div) children are dispatched to the
  appropriate handlers; in all other feed types a child element is
  reported and swallowed."""
  def textOK(self): pass
  def getExpectedAttrNames(self):
    if self.getFeedType() == TYPE_RSS1:
      return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType'),
        (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'datatype'),
        (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
    else:
      return []
  def startElementNS(self, name, qname, attrs):
    if self.getFeedType() == TYPE_RSS1:
      # mixed content is only legal under rdf:parseType="Literal"
      if self.value.strip() or self.children:
        if self.attrs.get((u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')) != 'Literal':
          self.log(InvalidRDF({"message":"mixed content"}))
      if name=="div" and qname=="http://www.w3.org/1999/xhtml":
        from content import diveater
        self.push(diveater(), name, attrs)
      else:
        from rdf import rdfExtension
        self.push(rdfExtension(qname), name, attrs)
    else:
      # child elements are not allowed here; report and swallow
      if name.find(':') != -1:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":name}))
      else:
        self.log(UndefinedElement({"parent":self.name, "element":name}))
      self.push(eater(), name, attrs)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
  """Ignores all content, but logs *message* (default DuplicateElement)
  when a sibling element of the same name has already been seen."""
  def __init__(self, message=DuplicateElement):
    self.message=message
    validatorBase.__init__(self)
  def startElementNS(self, name, qname, attrs):
    pass
  def characters(self, string):
    pass
  def prevalidate(self):
    # parent.children lists sibling names already encountered
    if self.name in self.parent.children:
      self.log(self.message({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addr-spec
#
class addr_spec(text):
  """A bare RFC 2822 addr-spec (no real name, no angle brackets).

  A value must match either the full pattern (host ends in a known TLD or
  is a dotted quad) or, failing that, a simpler pattern whose host is then
  checked by an actual DNS lookup."""
  # known top-level domains, used to anchor domain_re below
  domains = """
  AC AD AE AERO AF AG AI AL AM AN AO AQ AR ARPA AS ASIA AT AU AW AX AZ BA BB
  BD BE BF BG BH BI BIZ BJ BM BN BO BR BS BT BV BW BY BZ CA CAT CC CD CF CG
  CH CI CK CL CM CN CO COM COOP CR CU CV CX CY CZ DE DJ DK DM DO DZ EC EDU
  EE EG ER ES ET EU FI FJ FK FM FO FR GA GB GD GE GF GG GH GI GL GM GN GOV
  GP GQ GR GS GT GU GW GY HK HM HN HR HT HU ID IE IL IM IN INFO INT IO IQ IR
  IS IT JE JM JO JOBS JP KE KG KH KI KM KN KP KR KW KY KZ LA LB LC LI LK LR
  LS LT LU LV LY MA MC MD ME MG MH MIL MK ML MM MN MO MOBI MP MQ MR MS MT MU
  MUSEUM MV MW MX MY MZ NA NAME NC NE NET NF NG NI NL NO NP NR NU NZ OM ORG
  PA PE PF PG PH PK PL PM PN PR PRO PS PT PW PY QA RE RO RS RU RW SA SB SC
  SD SE SG SH SI SJ SK SL SM SN SO SR ST SU SV SY SZ TC TD TEL TF TG TH TJ
  TK TL TM TN TO TP TR TRAVEL TT TV TW TZ UA UG UK UM US UY UZ VA VC VE VG
  VI VN VU WF WS XN--0ZWM56D XN--11B5BS3A9AJ6G XN--80AKHBYKNJ4F
  XN--9T4B11YI5A XN--DEBA0AD XN--G6W251D XN--HGBK6AJ7F53BBA
  XN--HLCJ6AYA9ESC7A XN--JXALPDLP XN--KGBECHTV XN--ZCKZAH YE YT YU ZA ZM ZW
  """ # http://data.iana.org/TLD/tlds-alpha-by-domain.txt
  # host part: dotted quad, or dot-separated labels ending in a known TLD
  domain_re = '''(([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([A-Z0-9\-]+\.)+))(%s|[0-9]{1,3})''' % '|'.join(domains.strip().split())
  email_re = re.compile("([A-Z0-9_\-\+\.\']+)@" + domain_re + "$", re.I)
  simple_email_re = re.compile('^[\w._%+-]+@[A-Za-z][\w.-]+$')
  message = InvalidAddrSpec
  def validate(self, value=None):
    # value overrides self.value (the email subclass passes the parsed
    # addr-spec portion of a full address here)
    if not value: value=self.value
    if not self.email_re.match(value):
      if not self.simple_email_re.match(value):
        self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        # plausible-looking address: at least verify the host resolves
        # (best-effort; any failure is reported as UnknownHost)
        try:
          import socket
          socket.gethostbyname(value.split('@')[-1])
        except:
          self.log(UnknownHost({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(ValidContact({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso639 language code
#
def iso639_validate(log,value,element,parent):
  """Log ValidLanguage or InvalidLanguage depending on whether the primary
  subtag of *value* (the part before any '-') is a known ISO 639 code.

  log: the logging callable of the element being validated;
  element/parent: names used for message context."""
  import iso639codes
  if '-' in value:
    lang, sublang = value.split('-', 1)
  else:
    lang = value
  # membership test instead of the deprecated dict.has_key()
  if unicode(lang).lower() not in iso639codes.isoLang:
    log(InvalidLanguage({"parent":parent, "element":element, "value":value}))
  else:
    log(ValidLanguage({"parent":parent, "element":element}))
class iso639(text):
  """An element whose text content is an ISO 639 language code."""
  def validate(self):
    iso639_validate(self.log, self.value, self.name, self.parent.name)
#
# Encoding charset
#
class Charset(text):
  """A character-encoding name recognized by Python's codec registry.

  Logs InvalidEncoding when codecs.lookup rejects the value; a valid
  encoding produces no log entry."""
  def validate(self):
    import codecs
    try:
      codecs.lookup(self.value)
    except Exception:
      # codecs.lookup raises LookupError for unknown encodings (and may
      # raise TypeError on malformed input); either way the value is bad.
      # Exception (rather than a bare except) avoids trapping
      # KeyboardInterrupt/SystemExit.
      self.log(InvalidEncoding({'value': self.value}))
#
# Mime type
#
class MimeType(text):
  """A single MIME media type, checked against the module-level mime_re."""
  def validate(self):
    if not mime_re.match(self.value):
      self.log(InvalidMIMEType({'attr':'type'}))
class MediaRange(MimeType):
  """A comma-separated list of MIME media ranges (e.g. an HTTP Accept
  style value); each range is validated individually via MimeType."""
  def validate(self):
    if not self.value.strip(): return
    full_value = self.value
    for part in full_value.split(','):
      # MimeType.validate reads self.value, so swap each range in
      self.value = part.strip()
      if part.find(';q=')>=0:
        self.log(UndefinedParam({'param':'q'}))
      MimeType.validate(self)
#
# iso8601 dateTime
#
class unbounded_iso8601(text):
  """An ISO 8601 date/time; truncated forms (YYYY, YYYY-MM, ...) allowed.

  Returns 1 (after logging ValidW3CDTFDate) on success; returns None after
  logging *message* on failure -- subclasses rely on this truthiness."""
  iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
    "(Z|([+-]\d\d:\d\d))?)?)?)?$")
  message = InvalidISO8601DateTime
  def validate(self):
    if not self.iso8601_re.match(self.value):
      self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    # calendar validity of the date portion (datetime.date raises on
    # impossible day/month combinations)
    work=self.value.split('T')
    date=work[0].split('-')
    year=int(date[0])
    if len(date)>1:
      month=int(date[1])
      try:
        if len(date)>2: datetime.date(year,month,int(date[2]))
      except ValueError, e:
        return self.log(self.message({"parent":self.parent.name, "element":self.name, "value":str(e)}))
    # range checks on the time portion (timezone suffix stripped first)
    if len(work) > 1:
      time=work[1].split('Z')[0].split('+')[0].split('-')[0]
      time=time.split(':')
      if int(time[0])>23:
        self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      # NOTE(review): allows a minute value of exactly 60 -- confirm whether
      # this leniency is intentional
      if len(time)>1 and int(time[1])>60:
        self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
      # seconds up to 60.0 pass, presumably to allow leap seconds
      if len(time)>2 and float(time[2])>60.0:
        self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
        return
    self.log(ValidW3CDTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
    return 1
class iso8601(unbounded_iso8601):
  """ISO 8601 date/time with an additional plausibility check."""
  # when set, implausible (e.g. far-future) values are also flagged
  bounded = 1
  def validate(self):
    if self.bounded and unbounded_iso8601.validate(self):
      if implausible_8601(self.value):
        self.log(ImplausibleDate({"parent":self.parent.name,
          "element":self.name, "value":self.value}))
    # NOTE(review): returns 1 even when validation failed -- subclasses
    # such as iso8601_date appear to rely on this; confirm
    return 1
class w3cdtf(iso8601):
  """W3C Datetime profile of ISO 8601."""
  # The same as in iso8601, except a timezone is not optional when
  # a time is present
  iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
    "(Z|([+-]\d\d:\d\d)))?)?)?$")
  message = InvalidW3CDTFDate
class unbounded_w3cdtf(w3cdtf):
  # skip the plausibility (far-future) check
  bounded = 0
class rfc3339(iso8601):
  """RFC 3339 timestamp: full date and time required."""
  # The same as in iso8601, except that the only thing that is optional
  # is the seconds
  iso8601_re = re.compile("^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d*)?" +
    "(Z|([+-]\d\d:\d\d))$")
  message = InvalidRFC3339Date
class iso8601_date(iso8601):
  """An ISO 8601 value that must be a complete date (YYYY-MM-DD only)."""
  date_re = re.compile("^\d\d\d\d-\d\d-\d\d$")
  def validate(self):
    if iso8601.validate(self):
      if not self.date_re.search(self.value):
        self.log(InvalidISO8601Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
iana_schemes = [ # http://www.iana.org/assignments/uri-schemes.html
  # snapshot of the IANA URI scheme registry; schemes missing from this
  # list trigger SchemeNotIANARegistered in rfc2396.validate
  "ftp", "http", "gopher", "mailto", "news", "nntp", "telnet", "wais",
  "file", "prospero", "z39.50s", "z39.50r", "cid", "mid", "vemmi",
  "service", "imap", "nfs", "acap", "rtsp", "tip", "pop", "data", "dav",
  "opaquelocktoken", "sip", "sips", "tel", "fax", "modem", "ldap",
  "https", "soap.beep", "soap.beeps", "xmlrpc.beep", "xmlrpc.beeps",
  "urn", "go", "h323", "ipp", "tftp", "mupdate", "pres", "im", "mtqp",
  "iris.beep", "dict", "snmp", "crid", "tag", "dns", "info",
  "aaa", "aaas", "cap", "iax", "icap", "iris", "iris.xpc", "iris.xpcs",
  "iris.lwz", "msrp", "msrps", "shttp", "thismessage", "tv", "xmpp"
  ]
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
  """A fully qualified (non-relative) RFC 2396 URI.

  tag: and urn: URIs get dedicated syntax checks; http/ftp URIs must have
  a non-empty authority; other schemes must be IANA-registered."""
  rfc2396_re = re.compile("([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
    "(\\[[0-9A-Fa-f:]+\\])?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]*$")
  urn_re = re.compile(r"^[Uu][Rr][Nn]:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
  uuid_re = re.compile(r"^[Uu][Rr][Nn]:[Uu][Uu][Ii][Dd]:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
  tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*(#[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*)?$")
  urichars_re=re.compile("[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]")
  def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
    """Validate self.value as an absolute URI; return 1 on success.

    errorClass/successClass/extraParams let subclasses and attribute
    validators customize what gets logged."""
    # every message logged below shares the same parameters: build once
    logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
    logparams.update(extraParams)
    success = 0
    scheme=self.value.split(':')[0].lower()
    if scheme=='tag':
      if self.tag_re.match(self.value):
        success = 1
        self.log(ValidTAG(logparams))
      else:
        self.log(InvalidTAG(logparams))
    elif scheme=="urn":
      if self.value.lower().startswith('urn:uuid:') and not \
        self.uuid_re.match(self.value):
        self.log(InvalidUUID(logparams))
      elif self.urn_re.match(self.value):
        success = 1
        self.log(ValidURN(logparams))
      else:
        self.log(InvalidURN(logparams))
    elif not self.rfc2396_re.match(self.value):
      # pinpoint the first offending ASCII character, if any
      for c in self.value:
        if ord(c)<128 and not rfc2396.urichars_re.match(c):
          logparams['char'] = repr(str(c))
          logparams['value'] = self.value
          self.log(InvalidUriChar(logparams))
          break
      else:
        # if the IDNA-encoded form would be a valid URI, the real
        # problem is that an IRI was used where a URI is required
        try:
          if self.rfc2396_re.match(self.value.encode('idna')):
            errorClass=UriNotIri
        except:
          pass
        self.log(errorClass(logparams))
    elif scheme in ['http','ftp']:
      # http/ftp additionally require a non-empty authority component
      if not re.match('^\w+://[^/].*',self.value):
        self.log(errorClass(logparams))
      else:
        success = 1
    elif self.value.find(':')>=0 and scheme.isalpha() and scheme not in iana_schemes:
      self.log(SchemeNotIANARegistered({"parent":self.parent.name, "element":self.name, "value":scheme}))
    else:
      success = 1
    if success:
      self.log(successClass(logparams))
    return success
#
# rfc3987 iri
#
class rfc3987(rfc2396):
  """An IRI: IDNA-encode the value first, then validate as RFC 2396."""
  def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
    try:
      if self.value: self.value = self.value.encode('idna')
    except:
      pass # apparently '.' produces label too long
    return rfc2396.validate(self, errorClass, successClass, extraParams)
class rfc2396_full(rfc2396):
  """Like rfc2396, but the scheme and a non-empty remainder are required."""
  rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
  def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
    return rfc2396.validate(self, errorClass, successClass, extraParams)
#
# URI reference resolvable relative to xml:base
#
class xmlbase(rfc3987):
  """An IRI that should not be a same-document reference once resolved
  against the element's xml:base (unless the document base matches)."""
  def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
    if rfc3987.validate(self, errorClass, successClass, extraParams):
      if self.dispatcher.xmlBase != self.xmlBase:
        # compare canonical, fragment-free forms of the bases and the
        # resolved value
        docbase=canonicalForm(self.dispatcher.xmlBase).split('#')[0]
        elembase=canonicalForm(self.xmlBase).split('#')[0]
        value=canonicalForm(urljoin(elembase,self.value)).split('#')[0]
        if (value==elembase) and (elembase.encode('idna')!=docbase):
          self.log(SameDocumentReference({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
  """An RFC 822/2822 date-time.

  Strict RFC 2822 matches get a day-of-week consistency check and a
  plausibility check; values that only match the lenient RFC 822 pattern
  (comments, escapes and obsolete zones allowed) are at best flagged as
  'problematical'."""
  rfc822_re = re.compile("(((mon)|(tue)|(wed)|(thu)|(fri)|(sat)|(sun))\s*,\s*)?" +
    "\d\d?\s+((jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|" +
    "(nov)|(dec))\s+\d\d(\d\d)?\s+\d\d:\d\d(:\d\d)?\s+(([+-]\d\d\d\d)|" +
    "(ut)|(gmt)|(est)|(edt)|(cst)|(cdt)|(mst)|(mdt)|(pst)|(pdt)|[a-ik-z])?$",
    re.UNICODE)
  rfc2822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), )?" +
    "\d\d? ((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
    "(Nov)|(Dec)) \d\d\d\d \d\d:\d\d(:\d\d)? (([+-]?\d\d[03]0)|" +
    "(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|Z)$")
  def validate(self):
    if self.rfc2822_re.match(self.value):
      # NOTE(review): 'calendar' appears unused here -- candidate removal
      import calendar
      value = parsedate(self.value)
      try:
        if value[0] > 1900:
          # cross-check any supplied day-of-week against the actual date
          dow = datetime.date(*value[:3]).strftime("%a")
          if self.value.find(',')>0 and dow.lower() != self.value[:3].lower():
            self.log(IncorrectDOW({"parent":self.parent.name, "element":self.name, "value":self.value[:3]}))
            return
      except ValueError, e:
        self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":str(e)}))
        return
      if implausible_822(self.value):
        self.log(ImplausibleDate({"parent":self.parent.name,
          "element":self.name, "value":self.value}))
      else:
        self.log(ValidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      # strip backslash escapes and (possibly nested) comments, then try
      # the lenient, case-insensitive RFC 822 pattern
      value1,value2 = '', self.value
      value2 = re.sub(r'[\\](.)','',value2)
      while value1!=value2: value1,value2=value2,re.sub('\([^(]*?\)',' ',value2)
      if not self.rfc822_re.match(value2.strip().lower()):
        self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(ProblematicalRFC822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# Decode html entityrefs
#
from htmlentitydefs import name2codepoint
def decodehtml(data):
  """Expand numeric character references and named HTML entities in *data*,
  leaving unrecognized entities untouched.  Returns a unicode string."""
  chunks=re.split('&#?(\w+);',data)
  # odd-indexed chunks are the entity/charref names captured by the split
  for i in range(1,len(chunks),2):
    ref = chunks[i]
    if ref.isdigit():
      chunks[i]=unichr(int(ref))
    elif ref in name2codepoint:
      chunks[i]=unichr(name2codepoint[ref])
    else:
      # not a known entity: put the original text back
      chunks[i]='&' + ref + ';'
  return u"".join(map(unicode,chunks))
#
# Scan HTML for relative URLs
#
class absUrlMixin:
  """Mixin: flag relative URLs (and bad URI characters) found in <a href>
  and <img src> attributes of an HTML value."""
  anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
  img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
  absref_re = re.compile("\w+:")
  def validateAbsUrl(self,value):
    # NOTE(review): the 'value' parameter is ignored; self.value is
    # scanned instead -- confirm this is intentional
    # each findall tuple has exactly one non-empty group; collapse it
    refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
    for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
      ref = decodehtml(ref).strip()
      if not self.absref_re.match(ref):
        for c in ref:
          if ord(c)<128 and not rfc2396.urichars_re.match(c):
            # print "Invalid character:", ref
            # self.log(InvalidUriChar({'value':repr(str(c))}))
            self.log(InvalidUriChar({'value':ref, 'char':repr(str(c))}))
            break
        else:
          # no bad characters: it is simply a relative reference
          self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name, "value": ref}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
  """Mixin: run HTMLValidator over a value, logging any security issues."""
  def validateSafe(self,value):
    HTMLValidator(value, self)
class safeHtml(text, safeHtmlMixin, absUrlMixin):
  """Text that may contain HTML; checked for safety and relative URLs."""
  def prevalidate(self):
    self.children.append(True) # force warnings about "mixed" content
  def validate(self):
    self.validateSafe(self.value)
    self.validateAbsUrl(self.value)
#
# Elements for which email addresses are discouraged
#
class nonemail(text):
  """Flags values containing an angle-bracketed e-mail address."""
  # reuse addr_spec's pattern, wrapped in <...> (its trailing '$' stripped)
  email_re = re.compile("<" + addr_spec.email_re.pattern[:-1] + ">", re.I)
  def validate(self):
    if self.email_re.search(self.value):
      self.log(ContainsEmail({"parent":self.parent.name, "element":self.name}))
#
# Elements for which html is discouraged, also checks for relative URLs
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
  """Character data that should not contain HTML markup or entities."""
  htmlEndTag_re = re.compile("</(\w+)>")
  htmlEntity_re = re.compile("&(#?\w+)")
  def start(self):
    # record where this element's text starts; also stashed on the class
    # itself (NOTE(review): the class-level assignment looks like a
    # fallback for instances whose prevalidate was never called -- confirm)
    nonhtml.startline = self.__dict__['startline'] = self.line
  def prevalidate(self):
    self.start()
    self.children.append(True) # force warnings about "mixed" content
  def validate(self, message=ContainsHTML):
    # closing tags of known HTML elements are a strong signal of markup
    tags = [t for t in self.htmlEndTag_re.findall(self.value) if t.lower() in HTMLValidator.htmltags]
    if tags:
      self.log(message({"parent":self.parent.name, "element":self.name, "value":tags[0]}))
    # experimental RSS-Profile support
    elif self.htmlEntity_re.search(self.value):
      for value in self.htmlEntity_re.findall(self.value):
        from htmlentitydefs import name2codepoint
        if value in name2codepoint or value == 'apos' or not value.isalpha():
          if not hasattr(self,'startline'): self.startline=self.line
          # only complain when none of the text arrived as CDATA
          lines = self.dispatcher.rssCharData[self.startline-1:self.line]
          if not [chardata for chardata in lines if chardata]:
            self.log(message({"parent":self.parent.name, "element":self.name, "value":'&'+value+';'}))
    # experimental RSS-Profile support: raw markup-like character data
    # (numeric charrefs, entities, or tag-like text) in RSS 2.x
    elif self.getFeedType() == TYPE_RSS2:
      if re.search('&#[x0-9]|<[/a-zA-Z]', self.value):
        lines = self.dispatcher.rssCharData[self.startline-1:self.line]
        if not [chardata for chardata in lines if chardata]:
          # walk up to the rss element to check the declared version
          rss = self.parent.parent
          while rss and rss.name!='rss': rss=rss.parent
          if rss.version.startswith("2."):
            self.log(CharacterData({}))
#
# valid e-mail addresses
#
class email(addr_spec,nonhtml):
  """A single e-mail address: no HTML, and a valid addr-spec."""
  message = InvalidContact
  def validate(self):
    value=self.value
    # if the value parses as exactly one "Real Name <addr>" pair, validate
    # just the addr portion (local renamed from 'list': don't shadow the
    # builtin)
    addrs = AddressList(value)
    if len(addrs)==1: value=addrs[0][1]
    nonhtml.validate(self)
    addr_spec.validate(self, value)
class email_with_name(email):
  """An e-mail address, preferably in the "addr (Real Name)" form."""
  def validate(self):
    if self.value.startswith('mailto:'):
      # strip the mailto: scheme and percent-decode before validating
      from urllib import unquote
      self.value = unquote(self.value.split(':',1)[1])
    if self.value.find('@')>0 and not self.value.endswith(")"):
      # looks like an address but lacks a trailing "(Real Name)" comment
      if self.value.find(' ')>0:
        self.log(EmailFormat({}))
      else:
        self.log(MissingRealName({}))
    else:
      email.validate(self)
class nonNegativeInteger(text):
  """An integer greater than or equal to zero."""
  def validate(self):
    try:
      if int(self.value) < 0:
        raise ValueError
    except ValueError:
      self.log(InvalidNonNegativeInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class positiveInteger(text):
  """An integer strictly greater than zero, optionally bounded above by
  the class attribute 'max' (0 means unbounded)."""
  max = 0
  def validate(self):
    try:
      number = int(self.value)
      if number <= 0:
        raise ValueError
    except ValueError:
      self.log(InvalidPositiveInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      if self.max and number > self.max:
        self.log(IntegerOverflow({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class UINT31(positiveInteger):
  # positive integer that must also fit in a signed 32-bit value
  max = 2147483647
class Integer(text):
  """Any integer (positive, negative, or zero); an empty value is ignored."""
  def validate(self):
    if self.value == '': return
    try:
      int(self.value)  # discard the result; only parseability matters
    except ValueError:
      self.log(InvalidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class Float(text):
  """A non-negative decimal number.

  NOTE(review): the pattern rejects negative values and leading-dot forms
  like '.5' -- presumably intentional for the attributes using it; confirm."""
  def validate(self, name=None):
    if not re.match('\d+\.?\d*$', self.value):
      self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
class alphanumeric(text):
  """ASCII letters and digits only (surrounding whitespace tolerated)."""
  def validate(self):
    if not re.match('^\s*[A-Za-z0-9]+\s*$', self.value):
      self.log(InvalidAlphanum({"attr":self.name, "value":self.value}))
class percentType(text):
  """A percentage: a float between 0 and 100 inclusive."""
  def validate(self):
    try:
      percent = float(self.value)
    except ValueError:
      self.log(InvalidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      if 0.0 <= percent <= 100.0:
        self.log(ValidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(InvalidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
class latitude(text):
  """A latitude in decimal degrees, between -90 and 90 inclusive."""
  def validate(self):
    try:
      degrees = float(self.value)
    except ValueError:
      self.log(InvalidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      if -90 <= degrees <= 90:
        self.log(ValidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(InvalidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
class longitude(text):
  """A longitude in decimal degrees, between -180 and 180 inclusive."""
  def validate(self):
    try:
      degrees = float(self.value)
    except ValueError:
      self.log(InvalidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      if -180 <= degrees <= 180:
        self.log(ValidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(InvalidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
class httpURL(text):
  """An http:// URL whose host is a known-TLD domain or a dotted quad."""
  http_re = re.compile("http://" + addr_spec.domain_re + '(?::\d+)?' + '(/|$)', re.IGNORECASE)
  def validate(self):
    # both the host-specific pattern and the general full-URI pattern must
    # match; either failure yields the same message, so test them together
    if self.http_re.match(self.value) and rfc2396_full.rfc2396_re.match(self.value):
      self.log(ValidURLAttribute({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "value":self.value}))
class rdfResourceURI(rfc2396):
 def getExpectedAttrNames(self):
  # rdf:resource plus an optional dc:title are the only expected attributes
  return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
    (u'http://purl.org/dc/elements/1.1/', u'title')]
 def validate(self):
  # Validate rdf:resource as a URI when present; for RSS 1.0 feeds the
  # attribute is mandatory, so its absence is reported.
  if (rdfNS, 'resource') in self.attrs.getNames():
   self.value=self.attrs.getValue((rdfNS, 'resource'))
   rfc2396.validate(self)
  elif self.getFeedType() == TYPE_RSS1:
   self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
 def getExpectedAttrNames(self):
  return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
 def startElementNS(self, name, qname, attrs):
  # child elements are deliberately ignored; only rdf:about matters here
  pass
 def validate(self):
  # Require the rdf:about attribute and delegate its URI validation to a
  # throwaway rfc2396 instance rooted at this element.
  if (rdfNS, 'about') not in self.attrs.getNames():
   self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
  else:
   test=rfc2396().setElement(self.name, self.attrs, self)
   test.value=self.attrs.getValue((rdfNS, 'about'))
   test.validate()
class nonblank(text):
 """Logs an error (NotBlank by default, merged with extraParams) when the
 element's value is empty."""
 def validate(self, errorClass=NotBlank, extraParams={}):
  if self.value:
   return
  params = {"parent":self.parent.name, "element":self.name}
  params.update(extraParams)
  self.log(errorClass(params))
class nows(text):
 # Flags leading/trailing whitespace in the accumulated character data.
 def __init__(self):
  # 'ok' latches to 0 after the first report so the warning fires at most
  # once per element, even as characters arrive in multiple SAX events
  self.ok = 1
  text.__init__(self)
 def characters(self, string):
  text.characters(self, string)
  if self.ok and (self.value != self.value.strip()):
   self.log(UnexpectedWhitespace({"parent":self.parent.name, "element":self.name}))
   self.ok = 0
class unique(nonblank):
 """Ensures a non-blank value occurs at most once within a scope.

 Values are remembered in a list stored on the scope object under the
 attribute name ``<name>s``; a repeated value is logged via ``message``
 (DuplicateValue by default).  Fix over the original: the history list no
 longer shadows the builtin ``list``, and membership tests use the
 idiomatic ``not in`` form.
 """
 def __init__(self, name, scope, message=DuplicateValue):
  self.scope_name=name
  self.scope=scope
  self.message=message
  nonblank.__init__(self)
  # create the shared history list on the scope the first time through
  if name+'s' not in self.scope.__dict__:
   self.scope.__dict__[name+'s']=[]
 def validate(self):
  nonblank.validate(self)
  seen=self.scope.__dict__[self.scope_name+'s']
  if self.value in seen:
   self.log(self.message({"parent":self.parent.name, "element":self.name,"value":self.value}))
  elif self.value:
   seen.append(self.value)
class rfc3987_full(xmlbase):
 # Reuses the RFC 2396 'full URI' pattern; IRI-specific handling is
 # presumably provided by the xmlbase/rfc2396 machinery — confirm there.
 rfc2396_re = rfc2396_full.rfc2396_re
 def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
  # returns rfc2396.validate's result so subclasses can branch on success
  return rfc2396.validate(self, errorClass, successClass, extraParams)
class canonicaluri(rfc3987_full):
 def validate(self):
  # Compare the value exactly as received (pre-strip) against its
  # canonical form; any difference — including surrounding whitespace —
  # is reported as a non-canonical URI.
  prestrip = self.value
  self.value = self.value.strip()
  if rfc3987_full.validate(self):
   c = canonicalForm(self.value)
   if c is None or c != prestrip:
    self.log(NonCanonicalURI({"parent":self.parent.name,"element":self.name,"uri":prestrip, "curi":c or 'N/A'}))
class yesno(text):
 """Validates a literal, case-sensitive 'yes' or 'no'."""
 def normalizeWhitespace(self):
  # deliberately disabled: surrounding whitespace must count as invalid
  pass
 def validate(self):
  if self.value not in ('yes', 'no'):
   self.log(InvalidYesNo({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalse(text):
 """Validates a case-insensitive 'true' or 'false'."""
 def normalizeWhitespace(self):
  # deliberately disabled: surrounding whitespace must count as invalid
  pass
 def validate(self):
  if self.value.lower() not in ('true', 'false'):
   self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalsestrict(text):
 """Validates a literal lowercase 'true' or 'false' (case-sensitive)."""
 def normalizeWhitespace(self):
  # deliberately disabled: surrounding whitespace must count as invalid
  pass
 def validate(self):
  if self.value not in ('true', 'false'):
   self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class duration(text):
 """Validates a duration of the form H, H:MM or H:MM:SS, with minutes and
 seconds restricted to 00-59."""
 duration_re = re.compile(r"\d+(:[0-5][0-9](:[0-5][0-9])?)?$")
 def validate(self):
  if self.duration_re.match(self.value):
   return
  self.log(InvalidDuration({"parent":self.parent.name, "element":self.name, "value":self.value}))
class lengthLimitedText(nonhtml):
 """Validates that the value is no longer than ``max`` characters, then
 applies the usual nonhtml checks."""
 def __init__(self, max):
  # maximum permitted length, in characters
  self.max = max
  # NOTE(review): initializes via text.__init__ rather than
  # nonhtml.__init__ — presumably nonhtml adds no __init__ of its own;
  # confirm before changing.
  text.__init__(self)
 def validate(self):
  if len(self.value)>self.max:
   self.log(TooLong({"parent":self.parent.name, "element":self.name,
    "len": len(self.value), "max": self.max}))
  nonhtml.validate(self)
class keywords(text):
 """Flags keyword lists that look space-separated rather than
 comma-separated (a space present but no comma anywhere)."""
 def validate(self):
  has_space = ' ' in self.value
  has_comma = ',' in self.value
  if has_space and not has_comma:
   self.log(InvalidKeywords({"parent":self.parent.name, "element":self.name}))
class commaSeparatedIntegers(text):
 """Validates a comma-separated list of unsigned integers, allowing
 whitespace after each comma."""
 def validate(self):
  if re.match(r"^\d+(,\s*\d+)*$", self.value) is None:
   self.log(InvalidCommaSeparatedIntegers({"parent":self.parent.name,
    "element":self.name}))
class formname(text):
 """Validates an (X)HTML form control name: an ASCII letter followed by
 letters, digits, ':', '.' or '_'.

 Fix over the original: the pattern used [a-zA-z], whose A-z span
 accidentally also matched the punctuation characters [ \\ ] ^ _ `
 (ASCII 91-96), so names like '^foo' passed validation.  The pattern is
 still unanchored at the end, preserving the original's behavior of only
 checking the start of the value.
 """
 def validate(self):
  if not re.match(r"^[a-zA-Z][a-zA-Z0-9:._]*", self.value):
   self.log(InvalidFormComponentName({"parent":self.parent.name,
    "element":self.name, "value":self.value}))
class enumeration(text):
 # Base for closed-set validators.  Subclasses supply 'valuelist' (the
 # allowed values) and 'error' (the event class logged on a miss).
 def validate(self):
  if self.value not in self.valuelist:
   # element names use '_' to join prefix and localname; report as ':'
   self.log(self.error({"parent":self.parent.name, "element":self.name,
    "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class caseinsensitive_enumeration(enumeration):
 # Enumeration check that lowercases the value first, so 'valuelist'
 # entries only need to appear in lowercase.
 def validate(self):
  self.value=self.value.lower()
  enumeration.validate(self)
class iso3166(enumeration):
 """Validates an ISO 3166-1 alpha-2 country code.

 Fix over the original list: adds officially-assigned codes that were
 missing — AL (Albania), AX (Åland), BL (Saint Barthélemy), GG
 (Guernsey), IM (Isle of Man), JE (Jersey), ME (Montenegro), MF (Saint
 Martin), RS (Serbia) and TL (Timor-Leste) — all assigned before 2008.
 Deprecated codes present in the original (e.g. AN) are retained for
 backward compatibility.
 """
 error = InvalidCountryCode
 valuelist = [
  "AD", "AE", "AF", "AG", "AI", "AL", "AM", "AN", "AO", "AQ", "AR", "AS",
  "AT", "AU", "AW", "AX", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH",
  "BI", "BJ", "BL", "BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY",
  "BZ", "CA", "CC", "CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN",
  "CO", "CR", "CU", "CV", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO",
  "DZ", "EC", "EE", "EG", "EH", "ER", "ES", "ET", "FI", "FJ", "FK", "FM",
  "FO", "FR", "GA", "GB", "GD", "GE", "GF", "GG", "GH", "GI", "GL", "GM",
  "GN", "GP", "GQ", "GR", "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN",
  "HR", "HT", "HU", "ID", "IE", "IL", "IM", "IN", "IO", "IQ", "IR", "IS",
  "IT", "JE", "JM", "JO", "JP", "KE", "KG", "KH", "KI", "KM", "KN", "KP",
  "KR", "KW", "KY", "KZ", "LA", "LB", "LC", "LI", "LK", "LR", "LS", "LT",
  "LU", "LV", "LY", "MA", "MC", "MD", "ME", "MF", "MG", "MH", "MK", "ML",
  "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT", "MU", "MV", "MW", "MX",
  "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI", "NL", "NO", "NP", "NR",
  "NU", "NZ", "OM", "PA", "PE", "PF", "PG", "PH", "PK", "PL", "PM", "PN",
  "PR", "PS", "PT", "PW", "PY", "QA", "RE", "RO", "RS", "RU", "RW", "SA",
  "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK", "SL", "SM", "SN",
  "SO", "SR", "ST", "SV", "SY", "SZ", "TC", "TD", "TF", "TG", "TH", "TJ",
  "TK", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ", "UA", "UG",
  "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN", "VU", "WF",
  "WS", "YE", "YT", "ZA", "ZM", "ZW"]
class iso4217(enumeration):
 # ISO 4217 currency codes.  NOTE(review): this snapshot dates from the
 # mid-2000s — e.g. CSD (Serbia and Montenegro dinar) is present but its
 # 2006 successor RSD is not; update against the current ISO 4217 table
 # with care, since removals would reject previously-valid feeds.
 error = InvalidCurrencyUnit
 valuelist = [
  "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZM",
  "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV",
  "BRL", "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF",
  "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CSD", "CUP", "CVE",
  "CYP", "CZK", "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ETB",
  "EUR", "FJD", "FKP", "GBP", "GEL", "GHC", "GIP", "GMD", "GNF", "GTQ",
  "GWP", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR",
  "IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF",
  "KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL",
  "LTL", "LVL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP",
  "MRO", "MTL", "MUR", "MWK", "MXN", "MXV", "MYR", "MZM", "NAD", "NGN",
  "NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR",
  "PLN", "PYG", "QAR", "ROL", "RON", "RUB", "RWF", "SAR", "SBD", "SCR",
  "SDD", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "STD",
  "SVC", "SYP", "SZL", "THB", "TJS", "TMM", "TND", "TOP", "TRL", "TRY",
  "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "USS", "UYU", "UZS",
  "VEB", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC",
  "XBD", "XCD", "XDR", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XTS",
  "XXX", "YER", "ZAR", "ZMK", "ZWD"]
| Python |
#$Id
####
# Copyright 2000,2001 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
"""Timeout Socket
This module enables a timeout mechanism on all TCP connections. It
does this by inserting a shim into the socket module. After this module
has been imported, all socket creation goes through this shim. As a
result, every TCP connection will support a timeout.
The beauty of this method is that it immediately and transparently
enables the entire python library to support timeouts on TCP sockets.
As an example, if you wanted SMTP connections to have a 20 second
timeout:
import timeoutsocket
import smtplib
timeoutsocket.setDefaultSocketTimeout(20)
The timeout applies to the socket functions that normally block on
execution: read, write, connect, and accept. If any of these
operations exceeds the specified timeout, the exception Timeout
will be raised.
The default timeout value is set to None. As a result, importing
this module does not change the default behavior of a socket. The
timeout mechanism only activates when the timeout has been set to
a numeric value. (This behavior mimics the behavior of the
select.select() function.)
This module implements two classes: TimeoutSocket and TimeoutFile.
The TimeoutSocket class defines a socket-like object that attempts to
avoid the condition where a socket may block indefinitely. The
TimeoutSocket class raises a Timeout exception whenever the
current operation delays too long.
The TimeoutFile class defines a file-like object that uses the TimeoutSocket
class. When the makefile() method of TimeoutSocket is called, it returns
an instance of a TimeoutFile.
Each of these objects adds two methods to manage the timeout value:
get_timeout() --> returns the timeout of the socket or file
set_timeout() --> sets the timeout of the socket or file
As an example, one might use the timeout feature to create httplib
connections that will timeout after 30 seconds:
import timeoutsocket
import httplib
H = httplib.HTTP("www.python.org")
H.sock.set_timeout(30)
Note: When used in this manner, the connect() routine may still
block because it happens before the timeout is set. To avoid
this, use the 'timeoutsocket.setDefaultSocketTimeout()' function.
Good Luck!
"""
__version__ = "$Revision: 511 $"
__author__ = "Timothy O'Malley <timo@alum.mit.edu>"
#
# Imports
#
import select, string
import socket
# Capture the real socket implementation exactly once.  If this module was
# already imported (the marker attribute _no_timeoutsocket exists on the
# socket module), reuse the saved original rather than the installed shim.
if not hasattr(socket, "_no_timeoutsocket"):
 _socket = socket.socket
else:
 _socket = socket._no_timeoutsocket
#
# Set up constants to test for Connected and Blocking operations.
# We delete 'os' and 'errno' to keep our namespace clean(er).
# Thanks to Alex Martelli and G. Li for the Windows error codes.
#
import os
if os.name == "nt":
 # Winsock numeric error codes (per the header comment, contributed by
 # Alex Martelli and G. Li); 10035 is WSAEWOULDBLOCK — verify the 10022/
 # 10056 'already connected' pair against the Winsock docs before changing.
 _IsConnected = ( 10022, 10056 )
 _ConnectBusy = ( 10035, )
 _AcceptBusy = ( 10035, )
else:
 # POSIX equivalents taken from errno
 import errno
 _IsConnected = ( errno.EISCONN, )
 _ConnectBusy = ( errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK )
 _AcceptBusy = ( errno.EAGAIN, errno.EWOULDBLOCK )
 del errno
del os
#
# Default timeout value for ALL TimeoutSockets
#
# Module-wide default applied to every newly created TimeoutSocket;
# None means 'never time out' (plain blocking behavior).
_DefaultTimeout = None
def setDefaultSocketTimeout(timeout):
 """Set the timeout (seconds, or None) used for sockets created after
 this call; existing sockets are unaffected."""
 global _DefaultTimeout
 _DefaultTimeout = timeout
def getDefaultSocketTimeout():
 """Return the current module-wide default socket timeout."""
 return _DefaultTimeout
#
# Exceptions for socket errors and timeouts
#
# Errors from the wrapped socket propagate unchanged as socket.error;
# Timeout is raised only by this module when an operation's deadline passes.
Error = socket.error
class Timeout(Exception):
 pass
#
# Factory function
#
from socket import AF_INET, SOCK_STREAM
def timeoutsocket(family=AF_INET, type=SOCK_STREAM, proto=None):
 """Factory installed in place of socket.socket().

 Only AF_INET stream sockets are wrapped with the timeout machinery;
 every other family/type combination is created with the real socket
 implementation and returned unwrapped.
 """
 if family != AF_INET or type != SOCK_STREAM:
  if proto:
   return _socket(family, type, proto)
  else:
   return _socket(family, type)
 return TimeoutSocket( _socket(family, type), _DefaultTimeout )
# end timeoutsocket
#
# The TimeoutSocket class definition
#
class TimeoutSocket:
 """TimeoutSocket object
 Implements a socket-like object that raises Timeout whenever
 an operation takes too long.
 The definition of 'too long' can be changed using the
 set_timeout() method.
 """
 # count of outstanding makefile() handles; close() only really closes
 # the underlying socket once all of them are gone
 _copies = 0
 # mirrors the wrapped socket's blocking flag (1 = blocking)
 _blocking = 1
 def __init__(self, sock, timeout):
  # sock: the real socket being wrapped; timeout: seconds (None = never)
  self._sock = sock
  self._timeout = timeout
 # end __init__
 def __getattr__(self, key):
  # delegate every attribute we don't override to the wrapped socket
  return getattr(self._sock, key)
 # end __getattr__
 def get_timeout(self):
  return self._timeout
 # end get_timeout
 def set_timeout(self, timeout=None):
  self._timeout = timeout
 # end set_timeout
 def setblocking(self, blocking):
  # remember the mode locally so connect/accept can restore it
  self._blocking = blocking
  return self._sock.setblocking(blocking)
 # end setblocking
 def connect_ex(self, addr):
  # socket.connect_ex contract: return an errno instead of raising
  errcode = 0
  try:
   self.connect(addr)
  except Error, why:
   errcode = why[0]
  return errcode
 # end connect_ex
 def connect(self, addr, port=None, dumbhack=None):
  """Connect with a deadline; 'dumbhack' marks the post-select retry,
  on which an 'already connected' error counts as success."""
  # In case we were called as connect(host, port)
  if port != None: addr = (addr, port)
  # Shortcuts
  sock = self._sock
  timeout = self._timeout
  blocking = self._blocking
  # First, make a non-blocking call to connect
  try:
   sock.setblocking(0)
   sock.connect(addr)
   sock.setblocking(blocking)
   return
  except Error, why:
   # Set the socket's blocking mode back
   sock.setblocking(blocking)
   # If we are not blocking, re-raise
   if not blocking:
    raise
   # If we are already connected, then return success.
   # If we got a genuine error, re-raise it.
   errcode = why[0]
   if dumbhack and errcode in _IsConnected:
    return
   elif errcode not in _ConnectBusy:
    raise
  # Now, wait for the connect to happen
  # ONLY if dumbhack indicates this is pass number one.
  # If select raises an error, we pass it on.
  # Is this the right behavior?
  if not dumbhack:
   r,w,e = select.select([], [sock], [], timeout)
   if w:
    return self.connect(addr, dumbhack=1)
  # If we get here, then we should raise Timeout
  raise Timeout("Attempted connect to %s timed out." % str(addr) )
 # end connect
 def accept(self, dumbhack=None):
  """Accept with a deadline; returns (TimeoutSocket, address)."""
  # Shortcuts
  sock = self._sock
  timeout = self._timeout
  blocking = self._blocking
  # First, make a non-blocking call to accept
  #  If we get a valid result, then convert the
  #  accept'ed socket into a TimeoutSocket.
  # Be careful about the blocking mode of ourselves.
  try:
   sock.setblocking(0)
   newsock, addr = sock.accept()
   sock.setblocking(blocking)
   # the accepted socket inherits our timeout and blocking mode
   timeoutnewsock = self.__class__(newsock, timeout)
   timeoutnewsock.setblocking(blocking)
   return (timeoutnewsock, addr)
  except Error, why:
   # Set the socket's blocking mode back
   sock.setblocking(blocking)
   # If we are not supposed to block, then re-raise
   if not blocking:
    raise
   # If we got a genuine error, re-raise it.
   errcode = why[0]
   if errcode not in _AcceptBusy:
    raise
  # Now, wait for the accept to happen
  # ONLY if dumbhack indicates this is pass number one.
  # If select raises an error, we pass it on.
  # Is this the right behavior?
  if not dumbhack:
   r,w,e = select.select([sock], [], [], timeout)
   if r:
    return self.accept(dumbhack=1)
  # If we get here, then we should raise Timeout
  raise Timeout("Attempted accept timed out.")
 # end accept
 def send(self, data, flags=0):
  # wait (up to the timeout) for writability before delegating
  sock = self._sock
  if self._blocking:
   r,w,e = select.select([],[sock],[], self._timeout)
   if not w:
    raise Timeout("Send timed out")
  return sock.send(data, flags)
 # end send
 def recv(self, bufsize, flags=0):
  # wait (up to the timeout) for readability before delegating
  sock = self._sock
  if self._blocking:
   r,w,e = select.select([sock], [], [], self._timeout)
   if not r:
    raise Timeout("Recv timed out")
  return sock.recv(bufsize, flags)
 # end recv
 def makefile(self, flags="r", bufsize=-1):
  # each file handle counts as a copy; see close() below
  self._copies = self._copies +1
  return TimeoutFile(self, flags, bufsize)
 # end makefile
 def close(self):
  # only close the real socket once every makefile() copy is closed
  if self._copies <= 0:
   self._sock.close()
  else:
   self._copies = self._copies -1
 # end close
# end TimeoutSocket
class TimeoutFile:
 """TimeoutFile object
 Implements a file-like object on top of TimeoutSocket.
 """
 def __init__(self, sock, mode="r", bufsize=4096):
  self._sock = sock
  # default chunk size for recv(); overridden by a positive bufsize
  self._bufsize = 4096
  if bufsize > 0: self._bufsize = bufsize
  # read-ahead buffer is stored ON THE SOCKET, so all makefile() copies
  # of the same socket share it — appears deliberate; confirm before
  # moving it onto the file object
  if not hasattr(sock, "_inqueue"): self._sock._inqueue = ""
 # end __init__
 def __getattr__(self, key):
  # delegate everything else (send, recv, timeouts, ...) to the socket
  return getattr(self._sock, key)
 # end __getattr__
 def close(self):
  self._sock.close()
  self._sock = None
 # end close
 def write(self, data):
  self.send(data)
 # end write
 def read(self, size=-1):
  """Read up to 'size' bytes (all available until EOF when size < 0),
  buffering any excess in the shared _inqueue."""
  _sock = self._sock
  _bufsize = self._bufsize
  while 1:
   datalen = len(_sock._inqueue)
   if datalen >= size >= 0:
    break
   bufsize = _bufsize
   if size > 0:
    bufsize = min(bufsize, size - datalen )
   buf = self.recv(bufsize)
   if not buf:
    break
   _sock._inqueue = _sock._inqueue + buf
  data = _sock._inqueue
  _sock._inqueue = ""
  if size > 0 and datalen > size:
   # keep anything beyond the requested size for the next read
   _sock._inqueue = data[size:]
   data = data[:size]
  return data
 # end read
 def readline(self, size=-1):
  """Read one line (newline included), or at most 'size' bytes; excess
  past the newline stays in the shared _inqueue."""
  _sock = self._sock
  _bufsize = self._bufsize
  while 1:
   idx = string.find(_sock._inqueue, "\n")
   if idx >= 0:
    break
   datalen = len(_sock._inqueue)
   if datalen >= size >= 0:
    break
   bufsize = _bufsize
   if size > 0:
    bufsize = min(bufsize, size - datalen )
   buf = self.recv(bufsize)
   if not buf:
    break
   _sock._inqueue = _sock._inqueue + buf
  data = _sock._inqueue
  _sock._inqueue = ""
  if idx >= 0:
   # split just after the newline
   idx = idx + 1
   _sock._inqueue = data[idx:]
   data = data[:idx]
  elif size > 0 and datalen > size:
   _sock._inqueue = data[size:]
   data = data[:size]
  return data
 # end readline
 def readlines(self, sizehint=-1):
  # note: sizehint is accepted for file-API compatibility but ignored
  result = []
  data = self.read()
  while data:
   idx = string.find(data, "\n")
   if idx >= 0:
    idx = idx + 1
    result.append( data[:idx] )
    data = data[idx:]
   else:
    result.append( data )
    data = ""
  return result
 # end readlines
 def flush(self):  pass
# end TimeoutFile
#
# Silently replace the socket() builtin function with
# our timeoutsocket() definition.
#
# Install the shim exactly once: remember the real socket.socket under
# socket._no_timeoutsocket, replace it with the timeoutsocket factory, and
# rebind this module's own 'socket' name to the factory as well.
if not hasattr(socket, "_no_timeoutsocket"):
 socket._no_timeoutsocket = socket.socket
 socket.socket = timeoutsocket
del socket
socket = timeoutsocket
# Finis
| Python |
"""$Id: base.py 1049 2009-05-06 02:00:03Z rothfuss $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1049 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
from logging import NonCanonicalURI, NotUTF8
import re
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
# Maps namespace URI -> preferred (canonical) prefix.  Note that some
# prefixes are deliberately shared by multiple URIs (e.g. 'thr', 'wiki'),
# so the reverse mapping is not unique.
namespaces = {
 "http://www.bloglines.com/about/specs/fac-1.0": "access",
 "http://webns.net/mvcb/": "admin",
 "http://purl.org/rss/1.0/modules/aggregation/": "ag",
 "http://purl.org/rss/1.0/modules/annotate/": "annotate",
 "http://www.w3.org/2007/app": "app",
 "http://media.tangent.org/rss/1.0/": "audio",
 "http://backend.userland.com/blogChannelModule": "blogChannel",
 "http://web.resource.org/cc/": "cc",
 "http://www.microsoft.com/schemas/rss/core/2005": "cf",
 "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
 "http://purl.org/rss/1.0/modules/company": "company",
 "http://purl.org/rss/1.0/modules/content/": "content",
 "http://conversationsnetwork.org/rssNamespace-1.0/": "conversationsNetwork",
 "http://my.theinfo.org/changed/1.0/rss/": "cp",
 "http://purl.org/dc/elements/1.1/": "dc",
 "http://purl.org/dc/terms/": "dcterms",
 "http://purl.org/rss/1.0/modules/email/": "email",
 "http://purl.org/rss/1.0/modules/event/": "ev",
 "http://purl.org/syndication/history/1.0": "fh",
 "http://www.w3.org/2003/01/geo/wgs84_pos#": "geo",
 "http://geourl.org/rss/module/": "geourl",
 "http://www.georss.org/georss": "georss",
 "http://www.opengis.net/gml": "gml",
 "http://postneo.com/icbm": "icbm",
 "http://purl.org/rss/1.0/modules/image/": "image",
 "urn:atom-extension:indexing": "indexing",
 "http://www.itunes.com/dtds/podcast-1.0.dtd": "itunes",
 "http://rssnamespace.org/feedburner/ext/1.0": "feedburner",
 "http://xmlns.com/foaf/0.1/": "foaf",
 "http://purl.org/rss/1.0/modules/link/": "l",
 "http://search.yahoo.com/mrss/": "media",
 "http://www.w3.org/1998/Math/MathML": "mathml",
 "http://a9.com/-/spec/opensearchrss/1.0/": "opensearch10",
 "http://a9.com/-/spec/opensearch/1.1/": "opensearch",
 "http://www.opml.org/spec2": "opml",
 "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
 "http://www.w3.org/2000/01/rdf-schema#": "rdfs",
 "http://purl.org/rss/1.0/modules/reference/": "ref",
 "http://purl.org/rss/1.0/modules/richequiv/": "reqv",
 "http://purl.org/rss/1.0/modules/rss091#": "rss091",
 "http://purl.org/rss/1.0/modules/search/": "search",
 "http://purl.org/rss/1.0/modules/slash/": "slash",
 "http://purl.org/rss/1.0/modules/servicestatus/": "ss",
 "http://hacks.benhammersley.com/rss/streaming/": "str",
 "http://purl.org/rss/1.0/modules/subscription/": "sub",
 "http://feedsync.org/2007/feedsync": "sx",
 "http://www.w3.org/2000/svg": "svg",
 "http://purl.org/rss/1.0/modules/syndication/": "sy",
 "http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
 "http://purl.org/rss/1.0/modules/threading/": "thr",
 "http://purl.org/syndication/thread/1.0": "thr",
 "http://madskills.com/public/xml/rss/module/trackback/": "trackback",
 "http://wellformedweb.org/CommentAPI/": "wfw",
 "http://purl.org/rss/1.0/modules/wiki/": "wiki",
 "http://www.usemod.com/cgi-bin/mb.pl?ModWiki": "wiki",
 "http://schemas.xmlsoap.org/soap/envelope/": "soap",
 "http://www.w3.org/2005/Atom": "atom",
 "http://www.w3.org/1999/xhtml": "xhtml",
 "http://my.netscape.com/rdf/simple/0.9/": "rss090",
 "http://purl.org/rss/1.0/": "rss1",
 "http://purl.org/net/rss1.1#": "rss11",
 "http://base.google.com/ns/1.0": "g",
 "http://www.w3.org/XML/1998/namespace": "xml",
 "http://openid.net/xmlns/1.0": "openid",
 "http://earth.google.com/kml/2.0": "kml20",
 "http://earth.google.com/kml/2.1": "kml21",
 "http://www.opengis.net/kml/2.2": "kml22",
 "http://www.w3.org/1999/xlink": "xlink",
 "xri://$xrd*($v*2.0)": "xrd",
 "xri://$xrds": "xrds",
 }
def near_miss(ns):
 """Normalize a namespace URI for fuzzy comparison.

 Truncates the URI at its last word character (dropping trailing '/',
 '#', etc.) and lowercases it, so lookups tolerate common near-miss
 variants of a namespace.  Values with no word character at all, and
 non-string values such as None (callers do pass None qnames), are
 returned unchanged.

 Fix over the original: the bare ``except:`` is narrowed to the two
 failure modes this expression actually has — AttributeError when
 ``re.match`` finds nothing, TypeError when ``ns`` is not a string.
 """
 try:
  return re.match(r".*\w", ns).group().lower()
 except (TypeError, AttributeError):
  return ns
# URI -> prefix map keyed by the normalized ('near miss') form of each URI,
# so lookups tolerate trailing-slash/# and case differences.
nearly_namespaces = dict([(near_miss(u),p) for u,p in namespaces.items()])
# Attributes from the xml: namespace that are legal on any element.
stdattrs = [(u'http://www.w3.org/XML/1998/namespace', u'base'),
    (u'http://www.w3.org/XML/1998/namespace', u'id'),
    (u'http://www.w3.org/XML/1998/namespace', u'lang'),
    (u'http://www.w3.org/XML/1998/namespace', u'space')]
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
 # When set (via setFirstOccurrenceOnly), equivalent repeated events are
 # folded into the first occurrence's 'msgcount' instead of being re-logged.
 firstOccurrenceOnly = 0
 def __init__(self, base, selfURIs, encoding):
  from root import root
  ContentHandler.__init__(self)
  self.lastKnownLine = 1
  self.lastKnownColumn = 0
  self.loggedEvents = []
  self.feedType = 0
  try:
   # IDNA-encode the base URI when possible; fall back to it unchanged
   self.xmlBase = base.encode('idna')
  except:
   self.xmlBase = base
  self.selfURIs = selfURIs
  self.encoding = encoding
  # pushdown stack of handler lists; the root handler starts it off
  self.handler_stack=[[root(self, base)]]
  self.defaultNamespaces = []
  # experimental RSS-Profile support
  self.rssCharData = []
 def setDocumentLocator(self, locator):
  self.locator = locator
  ContentHandler.setDocumentLocator(self, self.locator)
 def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
  self.firstOccurrenceOnly = firstOccurrenceOnly
 def startPrefixMapping(self, prefix, uri):
  """Record the mapping on every active handler and police the prefix/URI
  combination (reserved prefixes, obsolete or unknown namespaces)."""
  for handler in iter(self.handler_stack[-1]):
   handler.namespace[prefix] = uri
  if uri and len(uri.split())>1:
   from xml.sax import SAXException
   self.error(SAXException('Invalid Namespace: %s' % uri))
  if prefix in namespaces.values():
   # a reserved prefix bound to a URI other than its canonical one
   if not namespaces.get(uri,'') == prefix and prefix:
    from logging import ReservedPrefix, MediaRssNamespace
    preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
    if uri == 'http://search.yahoo.com/mrss':
     self.log(MediaRssNamespace({'prefix':prefix, 'ns':preferredURI}))
    else:
     self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
   elif prefix=='wiki' and uri.find('usemod')>=0:
    from logging import ObsoleteWikiNamespace
    self.log(ObsoleteWikiNamespace({'preferred':namespaces[uri], 'ns':uri}))
   elif prefix in ['atom','xhtml']:
    from logging import TYPE_ATOM, AvoidNamespacePrefix
    if self.getFeedType() == TYPE_ATOM:
     self.log(AvoidNamespacePrefix({'prefix':prefix}))
  elif namespaces.has_key(uri):
   # known URI bound to a non-standard prefix
   if not namespaces[uri] == prefix and prefix:
    from logging import NonstdPrefix
    self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))
    if namespaces[uri] in ['atom', 'xhtml']:
     from logging import TYPE_UNKNOWN, TYPE_ATOM, AvoidNamespacePrefix
     if self.getFeedType() in [TYPE_ATOM,TYPE_UNKNOWN]:
      self.log(AvoidNamespacePrefix({'prefix':prefix}))
  elif uri == 'http://search.yahoo.com/mrss':
   # common misspelling of the Media RSS namespace (missing trailing /)
   from logging import MediaRssNamespace
   uri = 'http://search.yahoo.com/mrss/'
   self.log(MediaRssNamespace({'prefix':prefix, 'ns':uri}))
  else:
   # completely unknown namespace: report it only if the URI itself is
   # well-formed (or empty), to avoid double-reporting bad URIs
   from validators import rfc3987
   rule=rfc3987()
   rule.setElement('xmlns:'+str(prefix), {}, self.handler_stack[-1][0])
   rule.value=uri
   if not uri or rule.validate():
    from logging import UnknownNamespace
    self.log(UnknownNamespace({'namespace':uri}))
 def namespaceFor(self, prefix):
  # the dispatcher itself resolves no prefixes; handlers chain to parents
  return None
 def startElementNS(self, name, qname, attrs):
  self.lastKnownLine = self.locator.getLineNumber()
  self.lastKnownColumn = self.locator.getColumnNumber()
  # SAX delivers name as (namespaceURI, localname); unpack into locals
  qname, name = name
  for handler in iter(self.handler_stack[-1]):
   handler.startElementNS(name, qname, attrs)
  if len(attrs):
   # flag attributes that no active handler expects and that belong to
   # no recognized namespace
   present = attrs.getNames()
   unexpected = filter(lambda x: x not in stdattrs, present)
   for handler in iter(self.handler_stack[-1]):
    ean = handler.getExpectedAttrNames()
    if ean: unexpected = filter(lambda x: x not in ean, unexpected)
   for u in unexpected:
    if u[0] and near_miss(u[0]) not in nearly_namespaces:
     feedtype=self.getFeedType()
     # TYPE_RSS2 is imported at module level further down this file,
     # so it is resolvable by the time parsing runs
     if (not qname) and feedtype and (feedtype==TYPE_RSS2):
      from logging import UseOfExtensionAttr
      self.log(UseOfExtensionAttr({"attribute":u, "element":name}))
      continue
    from logging import UnexpectedAttribute
    if not u[0]: u=u[1]
    self.log(UnexpectedAttribute({"parent":name, "attribute":u, "element":name}))
 def resolveEntity(self, publicId, systemId):
  """DTD hook: tolerate the two known RSS 0.91 doctypes, flag anything
  else; always returns an empty stream so external entities are never
  actually fetched."""
  if not publicId and not systemId:
   import cStringIO
   return cStringIO.StringIO()
  try:
   def log(exception):
    from logging import SAXError
    self.log(SAXError({'exception':str(exception)}))
   if self.xmlvalidator:
    self.xmlvalidator(log)
   self.xmlvalidator=0
  except:
   pass
  if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
    systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
   from logging import ValidDoctype, DeprecatedDTD
   self.log(ValidDoctype({}))
   self.log(DeprecatedDTD({}))
  elif (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
    systemId=='http://www.rssboard.org/rss-0.91.dtd'):
   from logging import ValidDoctype
   self.log(ValidDoctype({}))
  else:
   from logging import ContainsSystemEntity
   self.lastKnownLine = self.locator.getLineNumber()
   self.lastKnownColumn = self.locator.getColumnNumber()
   self.log(ContainsSystemEntity({}))
  from StringIO import StringIO
  return StringIO()
 def skippedEntity(self, name):
  # only report undefined entities when a valid doctype was seen and the
  # name is not one of the standard HTML named entities
  from logging import ValidDoctype
  if [e for e in self.loggedEvents if e.__class__ == ValidDoctype]:
   from htmlentitydefs import name2codepoint
   if name in name2codepoint: return
  from logging import UndefinedNamedEntity
  self.log(UndefinedNamedEntity({'value':name}))
 def characters(self, string):
  self.lastKnownLine = self.locator.getLineNumber()
  self.lastKnownColumn = self.locator.getColumnNumber()
  for handler in iter(self.handler_stack[-1]):
   handler.characters(string)
 def endElementNS(self, name, qname):
  self.lastKnownLine = self.locator.getLineNumber()
  self.lastKnownColumn = self.locator.getColumnNumber()
  qname, name = name
  for handler in iter(self.handler_stack[-1]):
   handler.endElementNS(name, qname)
  # pop the handlers that were pushed for this element
  del self.handler_stack[-1]
 def push(self, handlers, name, attrs, parent):
  """Prepare handler(s) for a new element and push them on the stack;
  accepts either a single handler or an iterable of handlers."""
  if hasattr(handlers,'__iter__'):
   for handler in iter(handlers):
    handler.setElement(name, attrs, parent)
    handler.value=""
    handler.prevalidate()
  else:
   handlers.setElement(name, attrs, parent)
   handlers.value=""
   handlers.prevalidate()
   handlers = [handlers]
  self.handler_stack.append(handlers)
 def log(self, event, offset=(0,0)):
  """Record a validation event, annotating it with position information
  and (when firstOccurrenceOnly is set) collapsing duplicates."""
  def findDuplicate(self, event):
   # duplicate = same class and same params apart from 'value';
   # NonCanonicalURI collapses onto its first occurrence regardless
   duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
   if duplicates and (event.__class__ in [NonCanonicalURI]):
    return duplicates[0]
   for dup in duplicates:
    for k, v in event.params.items():
     if k != 'value':
      if not k in dup.params or dup.params[k] != v: break
    else:
     return dup
  if event.params.has_key('element') and event.params['element']:
   if not isinstance(event.params['element'],tuple):
    # element names join prefix and localname with '_'; report as ':'
    event.params['element']=':'.join(event.params['element'].split('_', 1))
   elif event.params['element'][0]==u'http://www.w3.org/XML/1998/namespace':
    event.params['element'] = 'xml:' + event.params['element'][-1]
  if self.firstOccurrenceOnly:
   dup = findDuplicate(self, event)
   if dup:
    dup.params['msgcount'] = dup.params['msgcount'] + 1
    return
   event.params['msgcount'] = 1
  try:
   line = self.locator.getLineNumber() + offset[0]
   backupline = self.lastKnownLine
   column = (self.locator.getColumnNumber() or 0) + offset[1]
   backupcolumn = self.lastKnownColumn
  except AttributeError:
   # no locator yet (event logged before parsing started)
   line = backupline = column = backupcolumn = 1
  event.params['line'] = line
  event.params['backupline'] = backupline
  event.params['column'] = column
  event.params['backupcolumn'] = backupcolumn
  self.loggedEvents.append(event)
 def error(self, exception):
  # record a SAXError event, then propagate — parsing stops here
  from logging import SAXError
  self.log(SAXError({'exception':str(exception)}))
  raise exception
 fatalError=error
 warning=error
 def getFeedType(self):
  return self.feedType
 def setFeedType(self, feedType):
  self.feedType = feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handling such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
from logging import TYPE_RSS2
class validatorBase(ContentHandler):
  """SAX handler for a single element of the document being validated.

  The dispatcher keeps a stack of these handlers, one per open element.
  Each handler accumulates its element's character data in self.value,
  records the names of child elements seen so far in self.children, and
  delegates logging and push events up through the parent/dispatcher.
  Subclasses declare do_<name> methods that return the handler for each
  supported child element, and may override the prevalidate()/validate()
  hooks.
  """
  def __init__(self):
    ContentHandler.__init__(self)
    self.value = ""
    self.attrs = None
    self.children = []
    self.isValid = 1
    self.name = None
    self.itunes = False
    self.namespace = {}
  def setElement(self, name, attrs, parent):
    """Wire this handler into the stack: record source position and
    inherit xml:lang / xml:base from the parent element."""
    self.name = name
    self.attrs = attrs
    self.parent = parent
    self.dispatcher = parent.dispatcher
    self.line = self.dispatcher.locator.getLineNumber()
    self.col = self.dispatcher.locator.getColumnNumber()
    self.xmlLang = parent.xmlLang
    # an explicit xml:base is itself validated, then resolved against the
    # parent's effective base
    if attrs and attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'base')):
      self.xmlBase=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'base'))
      from validators import rfc3987
      self.validate_attribute((u'http://www.w3.org/XML/1998/namespace',u'base'),
        rfc3987)
      from urlparse import urljoin
      self.xmlBase = urljoin(parent.xmlBase, self.xmlBase)
    else:
      self.xmlBase = parent.xmlBase
    return self
  def simplename(self, name):
    """Render an (ns-uri, localname) tuple as a readable prefix:name."""
    if not name[0]: return name[1]
    return namespaces.get(name[0], name[0]) + ":" + name[1]
  def namespaceFor(self, prefix):
    """Resolve a namespace prefix, walking up the element stack."""
    if self.namespace.has_key(prefix):
      return self.namespace[prefix]
    elif self.parent:
      return self.parent.namespaceFor(prefix)
    else:
      return None
  def validate_attribute(self, name, rule):
    """Validate one attribute value using the given validator class (or
    already-constructed validator instance)."""
    if not isinstance(rule,validatorBase): rule = rule()
    if isinstance(name,str): name = (None,name)
    rule.setElement(self.simplename(name), {}, self)
    rule.value=self.attrs.getValue(name)
    rule.validate()
  def validate_required_attribute(self, name, rule):
    """Validate an attribute, logging MissingAttribute when absent."""
    if self.attrs and self.attrs.has_key(name):
      self.validate_attribute(name, rule)
    else:
      from logging import MissingAttribute
      self.log(MissingAttribute({"attr": self.simplename(name)}))
  def validate_optional_attribute(self, name, rule):
    """Validate an attribute only when it is present."""
    if self.attrs and self.attrs.has_key(name):
      self.validate_attribute(name, rule)
  def getExpectedAttrNames(self):
    # subclasses return a list of (namespace, name) attribute tuples they
    # accept; the bare expression below means this default returns None
    None
  def unknown_starttag(self, name, qname, attrs):
    # default handling for an element in an unrecognized namespace
    from validators import any
    return any(self, name, qname, attrs)
  def startElementNS(self, name, qname, attrs):
    """Dispatch the start of a child element to the matching do_* method,
    or log the appropriate problem and swallow the element."""
    if attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'lang')):
      self.xmlLang=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'lang'))
    if self.xmlLang:
      from validators import iso639_validate
      iso639_validate(self.log, self.xmlLang, "xml:lang", name)
    from validators import eater
    feedtype=self.getFeedType()
    # an unprefixed element outside RSS 2.0 has no determinable vocabulary
    if (not qname) and feedtype and (feedtype!=TYPE_RSS2):
      from logging import UndeterminableVocabulary
      self.log(UndeterminableVocabulary({"parent":self.name, "element":name, "namespace":'""'}))
      qname="null"
    if qname in self.dispatcher.defaultNamespaces: qname=None
    # map "nearly right" namespace URIs onto their canonical prefix
    nm_qname = near_miss(qname)
    if nearly_namespaces.has_key(nm_qname):
      prefix = nearly_namespaces[nm_qname]
      qname, name = None, prefix + "_" + name
      if prefix == 'itunes' and not self.itunes and not self.parent.itunes:
        if hasattr(self, 'setItunes'): self.setItunes(True)
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    if qname=='http://purl.org/atom/ns#':
      from logging import ObsoleteNamespace
      self.log(ObsoleteNamespace({"element":"feed"}))
    # flag windows-1252 control characters (and U+FFFD) in attribute values
    for key, string in attrs.items():
      for c in string:
        if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
          from validators import BadCharacters
          self.log(BadCharacters({"parent":name, "element":key[-1]}))
    if qname:
      handler = self.unknown_starttag(name, qname, attrs)
      name="unknown_"+name
      self.child=name
    else:
      try:
        self.child=name
        if name.startswith('dc_'):
          # handle "Qualified" Dublin Core
          handler = getattr(self, "do_" + name.replace("-","_").split('.')[0])()
        else:
          handler = getattr(self, "do_" + name.replace("-","_"))()
      except AttributeError:
        if name.find(':') != -1:
          from logging import MissingNamespace
          self.log(MissingNamespace({"parent":self.name, "element":name}))
          handler = eater()
        elif name.startswith('xhtml_'):
          from logging import MisplacedXHTMLContent
          self.log(MisplacedXHTMLContent({"parent": ':'.join(self.name.split("_",1)), "element":name}))
          handler = eater()
        else:
          try:
            from extension import Questionable
            # requalify the name with the default namespace
            qname = name
            from logging import TYPE_APP_CATEGORIES, TYPE_APP_SERVICE
            if self.getFeedType() in [TYPE_APP_CATEGORIES, TYPE_APP_SERVICE]:
              if qname.startswith('app_'): qname=qname[4:]
            if name.find('_')<0 and self.name.find('_')>=0:
              if 'http://www.w3.org/2005/Atom' in self.dispatcher.defaultNamespaces:
                qname='atom_'+qname
            # is this element questionable?
            handler = getattr(Questionable(), "do_" + qname.replace("-","_"))()
            from logging import QuestionableUsage
            self.log(QuestionableUsage({"parent": ':'.join(self.name.split("_",1)), "element":qname}))
          except AttributeError:
            from logging import UndefinedElement
            self.log(UndefinedElement({"parent": ':'.join(self.name.split("_",1)), "element":name}))
            handler = eater()
    self.push(handler, name, attrs)
    # MAP - always append name, even if already exists (we need this to
    # check for too many hour elements in skipHours, and it doesn't
    # hurt anything else)
    self.children.append(self.child)
  def normalizeWhitespace(self):
    # default: trim accumulated character data before validation
    self.value = self.value.strip()
  def endElementNS(self, name, qname):
    """Finish the element: normalize text, run validate(), and record a
    ValidElement event if nothing was logged against it."""
    self.normalizeWhitespace()
    self.validate()
    if self.isValid and self.name:
      from validators import ValidElement
      self.log(ValidElement({"parent":self.parent.name, "element":name}))
  def textOK(self):
    # default: character data is unexpected; subclasses override to allow it
    from validators import UnexpectedText
    self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))
  def characters(self, string):
    """Accumulate character data, flagging double-encoded UTF-8 and
    windows-1252 control characters with line/column offsets."""
    if string.strip(): self.textOK()
    line=column=0
    pc=' '
    for c in string:
      # latin characters double encoded as utf-8
      if 0x80 <= ord(c) <= 0xBF:
        if 0xC2 <= ord(pc) <= 0xC3:
          try:
            # if this round-trip succeeds, the text decodes as UTF-8 and
            # was therefore most likely encoded twice
            string.encode('iso-8859-1').decode('utf-8')
            from validators import BadCharacters
            self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,max(1,column-1)))
          except:
            pass
      pc = c
      # win1252
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,column))
      column=column+1
      if ord(c) in (10,13):
        column=0
        line=line+1
    self.value = self.value + string
  def log(self, event, offset=(0,0)):
    """Forward a validation event to the dispatcher.  Note: ANY logged
    event marks this element invalid (isValid=0)."""
    if not event.params.has_key('element'):
      event.params['element'] = self.name
    self.dispatcher.log(event, offset)
    self.isValid = 0
  def setFeedType(self, feedType):
    self.dispatcher.setFeedType(feedType)
  def getFeedType(self):
    return self.dispatcher.getFeedType()
  def push(self, handler, name, value):
    # install a child handler on the dispatcher's stack
    self.dispatcher.push(handler, name, value, self)
  def leaf(self):
    # default handler for leaf elements: plain text
    from validators import text
    return text()
  def prevalidate(self):
    # hook: called before child elements are processed
    pass
  def validate(self):
    # hook: called once the element is complete
    pass
| Python |
"""$Id: content.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase, namespaces
from validators import *
from logging import *
#
# item element.
#
class textConstruct(validatorBase,rfc2396,nonhtml):
  """Validator for an Atom text construct (e.g. atom:summary, and the
  base of atom:content): handles the type/src attributes, base64 bodies,
  escaped-HTML detection, and inline XHTML.

  NOTE(review): this is Python 2 code (`<>` operator, has_key,
  base64.decodestring).
  """
  from validators import mime_re
  import re
  def getExpectedAttrNames(self):
    return [(None, u'type'),(None, u'src')]
  def normalizeWhitespace(self):
    # keep character data verbatim; whitespace may be significant here
    pass
  def maptype(self):
    # plain Atom text constructs only allow text/html/xhtml, not MIME types;
    # content (subclass) relaxes this
    if self.type.find('/') > -1:
      self.log(InvalidTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
  def prevalidate(self):
    """Check the type/src attributes before any content is seen."""
    nonhtml.start(self)
    # with @src the element should be empty, so default the type to ''
    if self.attrs.has_key((None,"src")):
      self.type=''
    else:
      self.type='text'
    if self.getFeedType() == TYPE_RSS2 and self.name != 'atom_summary':
      self.log(DuplicateDescriptionSemantics({"element":self.name}))
    if self.attrs.has_key((None,"type")):
      self.type=self.attrs.getValue((None,"type"))
      if not self.type:
        self.log(AttrNotBlank({"parent":self.parent.name, "element":self.name, "attr":"type"}))
      self.maptype()
    if self.attrs.has_key((None,"src")):
      self.children.append(True) # force warnings about "mixed" content
      # temporarily borrow self.value to validate @src as a URI reference
      self.value=self.attrs.getValue((None,"src"))
      rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "src"})
      self.value=""
      if not self.attrs.has_key((None,"type")):
        self.log(MissingTypeAttr({"parent":self.parent.name, "element":self.name, "attr":"type"}))
    # a bare text/html/xhtml keyword is fine; otherwise @type must be a
    # syntactically valid MIME type
    if self.type in ['text','html','xhtml'] and not self.attrs.has_key((None,"src")):
      pass
    elif self.type and not self.mime_re.match(self.type):
      self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    else:
      self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    if not self.xmlLang:
      self.log(MissingDCLanguage({"parent":self.name, "element":"xml:lang"}))
  def validate(self):
    """Validate the accumulated content against the declared type."""
    if self.type in ['text','xhtml']:
      if self.type=='xhtml':
        nonhtml.validate(self, NotInline)
      else:
        nonhtml.validate(self, ContainsUndeclaredHTML)
    else:
      # non-text, non-XML MIME types carry base64-encoded payloads
      if self.type.find('/') > -1 and not (
         self.type.endswith('+xml') or self.type.endswith('/xml') or
         self.type.startswith('text/')):
        import base64
        try:
          self.value=base64.decodestring(self.value)
          if self.type.endswith('/html'): self.type='html'
        except:
          self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
      if self.type=='html' or self.type.endswith("/html"):
        self.validateSafe(self.value)
        if self.type.endswith("/html"):
          # a full */html document is expected to contain an <html> element
          if self.value.find("<html")<0 and not self.attrs.has_key((None,"src")):
            self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
      else:
        nonhtml.validate(self, ContainsUndeclaredHTML)
    if not self.value and len(self.children)==0 and not self.attrs.has_key((None,"src")):
      self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
  def textOK(self):
    # text mixed with child elements is only a problem once children exist
    if self.children: validatorBase.textOK(self)
  def characters(self, string):
    # flag windows-1252 control characters, and xhtml content that does
    # not start with a div wrapper
    for c in string:
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
    if (self.type=='xhtml') and string.strip() and not self.value.strip():
      self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
    validatorBase.characters(self,string)
  def startElementNS(self, name, qname, attrs):
    """Child elements are only legal for xhtml and XML MIME types."""
    if (self.type<>'xhtml') and not (
       self.type.endswith('+xml') or self.type.endswith('/xml')):
      self.log(UndefinedElement({"parent":self.name, "element":name}))
    if self.type=="xhtml":
      # xhtml content must be wrapped in a single XHTML-namespaced div
      if name<>'div' and not self.value.strip():
        self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
      elif qname not in ["http://www.w3.org/1999/xhtml"]:
        self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace", "value": qname}))
    if self.type=="application/xhtml+xml":
      if name<>'html':
        self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
      elif qname not in ["http://www.w3.org/1999/xhtml"]:
        self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace", "value":qname}))
    if self.attrs.has_key((None,"mode")):
      if self.attrs.getValue((None,"mode")) == 'escaped':
        self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
    # xhtml:div content participates in escaped-html detection (diveater);
    # everything else is swallowed
    if name=="div" and qname=="http://www.w3.org/1999/xhtml":
      handler=diveater()
    else:
      handler=eater()
    self.children.append(handler)
    self.push(handler, name, attrs)
# treat xhtml:div as part of the content for purposes of detecting escaped html
class diveater(eater):
  """Consumes an xhtml:div inside a text construct, screening XHTML, SVG
  and MathML children against the sanitizer whitelists, and feeding plain
  character data back to the parent so escaped HTML can be detected."""
  def __init__(self):
    eater.__init__(self)
    # becomes True once any inline child element is seen; only pure-text
    # divs propagate their value to the parent (see validate)
    self.mixed = False
  def textOK(self):
    # character data is always acceptable inside a div
    pass
  def characters(self, string):
    validatorBase.characters(self, string)
  def startElementNS(self, name, qname, attrs):
    """Screen each child element by namespace against the whitelists."""
    if not qname:
      self.log(MissingNamespace({"parent":"xhtml:div", "element":name}))
    elif qname == 'http://www.w3.org/1999/xhtml':
      if name not in HTMLValidator.htmltags:
        self.log(NotHtml({'message':'Non-XHTML element', 'value':name}))
      elif name not in HTMLValidator.acceptable_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.acceptable_attributes:
          if attr == 'style':
            # style attributes get a finer-grained CSS safety check
            for value in checkStyle(attrs.get((ns,attr))):
              self.log(DangerousStyleAttr({"attr":attr, "value":value}))
          else:
            self.log(SecurityRiskAttr({'attr':attr}))
    elif qname == 'http://www.w3.org/2000/svg':
      if name not in HTMLValidator.svg_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.svg_attributes:
          self.log(SecurityRiskAttr({'attr':attr}))
    elif qname == 'http://www.w3.org/1998/Math/MathML':
      if name not in HTMLValidator.mathml_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.mathml_attributes:
          self.log(SecurityRiskAttr({'attr':attr}))
    elif namespaces.has_key(qname):
      # a known feed vocabulary inside a div is undefined (except inside
      # svg metadata); swallow it without marking the div as mixed
      if self.name != 'metadata':
        self.log(UndefinedElement({"parent": self.name, "element":namespaces[qname] + ":" + name}))
      self.push(eater(), name, attrs)
      return
    self.mixed = True
    eater.startElementNS(self, name, qname, attrs)
  def validate(self):
    # only a text-only div contributes its text to the parent construct
    if not self.mixed: self.parent.value += self.value
class content(textConstruct):
  """atom:content: a text construct whose type may be a full MIME type,
  with multipart/alternative explicitly disallowed."""
  def maptype(self):
    if self.type != 'multipart/alternative':
      return
    self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
| Python |
"""$Id: extension.py 1061 2010-01-25 17:50:01Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net>, Mark Pilgrim <http://diveintomark.org/> and Phil Ringnalda <http://philringnalda.com>"
__version__ = "$Revision: 1061 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby, Mark Pilgrim and Phil Ringnalda"
from validators import *
from logging import *
########################################################################
# Extensions that are valid everywhere #
########################################################################
class extension_everywhere:
  """Mixin declaring extension elements that may appear at any level of
  a feed: Dublin Core (dc), DCMI terms (dcterms), RDF/RDFS, geo/geourl/
  GeoRSS, ICBM and OPML head fields.

  Each do_* method returns the validator(s) for the corresponding
  element; where an extension element duplicates a core feed element, a
  Duplicate*Semantics warning is logged before validation.
  """
  # --- Dublin Core elements ---
  def do_dc_title(self):
    return text(), noduplicates()
  def do_dc_description(self):
    return text(), noduplicates()
  def do_dc_publisher(self):
    if "webMaster" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return text() # duplicates allowed
  def do_dc_contributor(self):
    return text() # duplicates allowed
  def do_dc_type(self):
    return text(), noduplicates()
  def do_dc_format(self):
    return text(), noduplicates()
  def do_dc_identifier(self):
    return text()
  def do_dc_source(self):
    if "source" in self.children:
      self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
    return text(), noduplicates()
  def do_dc_language(self):
    if "language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_dc_relation(self):
    return text(), # duplicates allowed
  def do_dc_coverage(self):
    return text(), # duplicates allowed
  def do_dc_rights(self):
    if "copyright" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return nonhtml(), noduplicates()
  # --- DCMI terms ---
  def do_dcterms_alternative(self):
    return text() #duplicates allowed
  def do_dcterms_abstract(self):
    return text(), noduplicates()
  def do_dcterms_tableOfContents(self):
    return rdfResourceURI(), noduplicates()
  def do_dcterms_created(self):
    return w3cdtf(), noduplicates()
  def do_dcterms_valid(self):
    return eater()
  def do_dcterms_available(self):
    return eater()
  def do_dcterms_issued(self):
    return w3cdtf(), noduplicates()
  def do_dcterms_modified(self):
    if "lastBuildDate" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return w3cdtf(), noduplicates()
  def do_dcterms_dateAccepted(self):
    return text(), noduplicates()
  def do_dcterms_dateCopyrighted(self):
    return text(), noduplicates()
  def do_dcterms_dateSubmitted(self):
    return text(), noduplicates()
  def do_dcterms_extent(self):
    return positiveInteger(), nonblank(), noduplicates()
#  def do_dcterms_medium(self):
#    spec defines it as something that should never be used
#    undefined element'll do for now
  def do_dcterms_isVersionOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasVersion(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isReplacedBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_replaces(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isRequiredBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_requires(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isPartOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasPart(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isReferencedBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_references(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isFormatOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasFormat(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_conformsTo(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_spatial(self):
    return eater()
  def do_dcterms_temporal(self):
    return eater()
  def do_dcterms_audience(self):
    return text()
  def do_dcterms_mediator(self):
    return text(), noduplicates()
  # added to DMCI, but no XML mapping has been defined
  def do_dcterms_accessRights(self):
    return eater()
  def do_dcterms_accrualMethod(self):
    return eater()
  def do_dcterms_accrualPeriodicity(self):
    return eater()
  def do_dcterms_accrualPolicy(self):
    return eater()
  def do_dcterms_bibliographicCitation(self):
    return eater()
  def do_dcterms_educationLevel(self):
    return eater()
  def do_dcterms_instructionalMethod(self):
    return eater()
  def do_dcterms_license(self):
    return eater()
  def do_dcterms_provenance(self):
    return eater()
  def do_dcterms_rightsHolder(self):
    return eater()
  # --- RDF / RDFS ---
  def do_rdf_RDF(self):
    return eater()
  def do_rdf_type(self):
    return eater()
  def do_rdf_Description(self):
    return eater()
  def do_rdfs_seeAlso(self):
    return rdfResourceURI() # duplicates allowed
  # --- geo / geourl / GeoRSS / ICBM location elements ---
  def do_geo_Point(self):
    return geo_point()
  def do_geo_lat(self):
    return latitude()
  def do_geo_long(self):
    return longitude()
  def do_geo_alt(self):
    return decimal()
  def do_geourl_latitude(self):
    return latitude()
  def do_geourl_longitude(self):
    return longitude()
  def do_georss_where(self):
    return georss_where()
  def do_georss_point(self):
    return gml_pos()
  def do_georss_line(self):
    return gml_posList()
  def do_georss_polygon(self):
    return gml_posList()
  def do_georss_featuretypetag(self):
    return text()
  def do_georss_relationshiptag(self):
    return text()
  def do_georss_featurename(self):
    return text()
  def do_georss_elev(self):
    return decimal()
  def do_georss_floor(self):
    return Integer()
  def do_georss_radius(self):
    return Float()
  def do_icbm_latitude(self):
    return latitude()
  def do_icbm_longitude(self):
    return longitude()
  # --- OPML head fields ---
  def do_opml_dateCreated(self):
    return rfc822(), noduplicates()
  def do_opml_dateModified(self):
    return rfc822(), noduplicates()
  def do_opml_ownerName(self):
    return safeHtml(), noduplicates()
  def do_opml_ownerEmail(self):
    return email(), noduplicates()
  def do_opml_ownerId(self):
    return httpURL(), noduplicates()
########################################################################
# Extensions that are valid at either the channel or item levels #
########################################################################
from media import media_elements, media_content, media_group
class extension_channel_item(extension_everywhere, media_elements):
  """Extension elements valid at both the channel and the item level."""
  def do_l_link(self):
    return l_link()
  def do_taxo_topics(self):
    return eater()
########################################################################
# Extensions that are valid only at the item level                     #
########################################################################
class extension_item(extension_channel_item):
  """Extension elements valid only at the item/entry level: annotate,
  aggregation (ag), event (ev), FeedBurner, FOAF, Slash, threading
  (thr), well-formed web (wfw), wiki, Google Base (g), Media RSS, SSE
  (sx) and Conversations Network elements.
  """
  def do_annotate_reference(self):
    return rdfResourceURI(), noduplicates()
  def do_ag_source(self):
    return text(), noduplicates()
  def do_ag_sourceURL(self):
    return rfc2396_full(), noduplicates()
  def do_ag_timestamp(self):
    return iso8601(), noduplicates()
  def do_ev_startdate(self):
    return unbounded_iso8601(), noduplicates()
  def do_ev_enddate(self):
    return unbounded_iso8601(), noduplicates()
  def do_ev_location(self):
    return eater()
  def do_ev_organizer(self):
    return eater()
  def do_ev_type(self):
    return text(), noduplicates()
  def do_feedburner_awareness(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_origEnclosureLink(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_origLink(self):
    return rfc2396_full(), noduplicates()
  def do_foaf_maker(self):
    return eater()
  def do_foaf_primaryTopic(self):
    return eater()
  def do_slash_comments(self):
    return nonNegativeInteger(), noduplicates()
  def do_slash_section(self):
    return text()
  def do_slash_department(self):
    return text()
  def do_slash_hit_parade(self):
    return commaSeparatedIntegers(), noduplicates()
  def do_thr_children(self):
    # thr:children is only defined for RSS 1.0 feeds
    if self.getFeedType() != TYPE_RSS1:
      self.log(UndefinedElement({'parent':self.name,"element":"thr:children"}))
    return eater()
  def do_thr_total(self):
    return nonNegativeInteger(), noduplicates()
  def do_thr_in_reply_to(self):
    return in_reply_to()
  def do_wfw_comment(self):
    return rfc2396_full(), noduplicates()
  def do_wfw_commentRss(self):
    return rfc2396_full(), noduplicates()
  def do_wfw_commentRSS(self):
    # the capital-RSS spelling is non-standard; warn but still validate
    self.log(CommentRSS({"parent":self.parent.name, "element":self.name}))
    return rfc2396_full(), noduplicates()
  def do_wiki_diff(self):
    return text()
  def do_wiki_history(self):
    return text()
  def do_wiki_importance(self):
    return text()
  def do_wiki_status(self):
    return text()
  def do_wiki_version(self):
    return text()
  # --- Google Base (g:) elements ---
  def do_g_actor(self):
    return nonhtml(), noduplicates()
  def do_g_age(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_agent(self):
    return nonhtml(), noduplicates()
  def do_g_area(self):
    return nonhtml(), noduplicates() # intUnit
  def do_g_apparel_type(self):
    return nonhtml(), noduplicates()
  def do_g_artist(self):
    return nonhtml(), noduplicates()
  def do_g_author(self):
    return nonhtml(), noduplicates()
  def do_g_bathrooms(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_bedrooms(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_brand(self):
    return nonhtml(), noduplicates()
  def do_g_calories(self):
    return g_float(), noduplicates()
  def do_g_cholesterol(self):
    return g_float(), noduplicates()
  def do_g_color(self):
    return nonhtml(), noduplicates()
  def do_g_cooking_time(self):
    return g_float(), noduplicates()
  def do_g_condition(self):
    return nonhtml(), noduplicates()
  def do_g_course(self):
    return nonhtml(), noduplicates()
  def do_g_course_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_course_number(self):
    return nonhtml(), noduplicates()
  def do_g_course_times(self):
    return nonhtml(), noduplicates()
  def do_g_cuisine(self):
    return nonhtml(), noduplicates()
  def do_g_currency(self):
    return iso4217(), noduplicates()
  def do_g_delivery_notes(self):
    return nonhtml(), noduplicates()
  def do_g_delivery_radius(self):
    return floatUnit(), noduplicates()
  def do_g_education(self):
    return nonhtml(), noduplicates()
  def do_g_employer(self):
    return nonhtml(), noduplicates()
  def do_g_ethnicity(self):
    return nonhtml(), noduplicates()
  def do_g_event_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_expiration_date(self):
    return iso8601_date(), noduplicates()
  def do_g_expiration_date_time(self):
    return iso8601(), noduplicates()
  def do_g_fiber(self):
    return g_float(), noduplicates()
  def do_g_from_location(self):
    return g_locationType(), noduplicates()
  def do_g_gender(self):
    return g_genderEnumeration(), noduplicates()
  def do_g_hoa_dues(self):
    return g_float(), noduplicates()
  def do_g_format(self):
    return nonhtml(), noduplicates()
  def do_g_id(self):
    return nonhtml(), noduplicates()
  def do_g_image_link(self):
    return rfc2396_full(), maxten()
  def do_g_immigration_status(self):
    return nonhtml(), noduplicates()
  def do_g_interested_in(self):
    return nonhtml(), noduplicates()
  def do_g_isbn(self):
    return nonhtml(), noduplicates()
  def do_g_job_function(self):
    return nonhtml(), noduplicates()
  def do_g_job_industry(self):
    return nonhtml(), noduplicates()
  def do_g_job_type(self):
    return nonhtml(), noduplicates()
  def do_g_label(self):
    return g_labelType(), maxten()
  def do_g_listing_type(self):
    return truefalse(), noduplicates()
  def do_g_location(self):
    return g_full_locationType(), noduplicates()
  def do_g_main_ingredient(self):
    return nonhtml(), noduplicates()
  def do_g_make(self):
    return nonhtml(), noduplicates()
  def do_g_manufacturer(self):
    return nonhtml(), noduplicates()
  def do_g_manufacturer_id(self):
    return nonhtml(), noduplicates()
  def do_g_marital_status(self):
    return g_maritalStatusEnumeration(), noduplicates()
  def do_g_meal_type(self):
    return nonhtml(), noduplicates()
  def do_g_megapixels(self):
    return floatUnit(), noduplicates()
  def do_g_memory(self):
    return floatUnit(), noduplicates()
  def do_g_mileage(self):
    return g_intUnit(), noduplicates()
  def do_g_model(self):
    return nonhtml(), noduplicates()
  def do_g_model_number(self):
    return nonhtml(), noduplicates()
  def do_g_name_of_item_being_reviewed(self):
    return nonhtml(), noduplicates()
  def do_g_news_source(self):
    return nonhtml(), noduplicates()
  def do_g_occupation(self):
    return nonhtml(), noduplicates()
  def do_g_payment_notes(self):
    return nonhtml(), noduplicates()
  def do_g_pages(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_g_payment_accepted(self):
    return g_paymentMethodEnumeration()
  def do_g_pickup(self):
    return truefalse(), noduplicates()
  def do_g_preparation_time(self):
    return floatUnit(), noduplicates()
  def do_g_price(self):
    return floatUnit(), noduplicates()
  def do_g_price_type(self):
    return g_priceTypeEnumeration(), noduplicates()
  def do_g_processor_speed(self):
    return floatUnit(), noduplicates()
  def do_g_product_type(self):
    return nonhtml(), noduplicates()
  def do_g_property_type(self):
    return nonhtml(), noduplicates()
  def do_g_protein(self):
    return floatUnit(), noduplicates()
  def do_g_publication_name(self):
    return nonhtml(), noduplicates()
  def do_g_publication_volume(self):
    return nonhtml(), noduplicates()
  def do_g_publish_date(self):
    return iso8601_date(), noduplicates()
  def do_g_quantity(self):
    return nonNegativeInteger(), nonblank(), noduplicates()
  def do_g_rating(self):
    return g_ratingTypeEnumeration(), noduplicates()
  def do_g_review_type(self):
    return nonhtml(), noduplicates()
  def do_g_reviewer_type(self):
    return g_reviewerTypeEnumeration(), noduplicates()
  def do_g_salary(self):
    return g_float(), noduplicates()
  def do_g_salary_type(self):
    return g_salaryTypeEnumeration(), noduplicates()
  def do_g_saturated_fat(self):
    return g_float(), noduplicates()
  def do_g_school_district(self):
    return nonhtml(), noduplicates()
  def do_g_service_type(self):
    return nonhtml(), noduplicates()
  def do_g_servings(self):
    return g_float(), noduplicates()
  def do_g_sexual_orientation(self):
    return nonhtml(), noduplicates()
  def do_g_size(self):
    return nonhtml(), noduplicates() # TODO: expressed in either two or three dimensions.
  def do_g_shipping(self):
    return g_shipping(), noduplicates()
  def do_g_sodium(self):
    return g_float(), noduplicates()
  def do_g_subject(self):
    return nonhtml(), noduplicates()
  def do_g_subject_area(self):
    return nonhtml(), noduplicates()
  def do_g_tax_percent(self):
    return percentType(), noduplicates()
  def do_g_tax_region(self):
    return nonhtml(), noduplicates()
  def do_g_to_location(self):
    return g_locationType(), noduplicates()
  def do_g_total_carbs(self):
    return g_float(), noduplicates()
  def do_g_total_fat(self):
    return g_float(), noduplicates()
  def do_g_travel_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_university(self):
    return nonhtml(), noduplicates()
  def do_g_upc(self):
    return nonhtml(), noduplicates()
  def do_g_url_of_item_being_reviewed(self):
    return rfc2396_full(), noduplicates()
  def do_g_vehicle_type(self):
    return nonhtml(), noduplicates()
  def do_g_vin(self):
    return nonhtml(), noduplicates()
  def do_g_weight(self):
    return floatUnit(), noduplicates()
  def do_g_year(self):
    return g_year(), noduplicates()
  # --- Media RSS / SSE / Conversations Network ---
  def do_media_group(self):
    return media_group()
  def do_media_content(self):
    return media_content()
  def do_sx_sync(self):
    import sse
    return sse.Sync()
  def do_conversationsNetwork_introMilliseconds(self):
    return nonNegativeInteger(), noduplicates()
  def do_conversationsNetwork_image(self):
    return httpURL(), noduplicates()
  def do_conversationsNetwork_ratingAverage(self):
    return Float(), noduplicates()
  def do_conversationsNetwork_ratingCount(self):
    return nonNegativeInteger(), noduplicates()
  def do_conversationsNetwork_ratingIndividual(self):
    return positiveInteger(), noduplicates()
  def do_conversationsNetwork_ratingTimestamp(self):
    return iso8601(), noduplicates()
class heisen_uri(rfc3987, rfc2396_full):
  """A URI whose strictness depends on the feed type: Atom feeds get
  full IRI (RFC 3987) validation; other feed types merely get a warning
  when the value is not an absolute RFC 2396 URI."""
  def validate(self):
    if self.getFeedType() == TYPE_ATOM:
      rfc3987.validate(self)
      return
    if rfc2396_full.rfc2396_re.match(self.value):
      return
    self.log(ContainsRelRef({'parent':self.parent.name}))
class feedFlare(nonhtml):
  """FeedBurner feedFlare element: non-HTML text content plus mandatory
  href and src attributes, each validated as a feed-type-aware URI."""
  def getExpectedAttrNames(self):
    return [(None, u'href'), (None, u'src')]
  def prevalidate(self):
    # both attributes are required; validation order (href, then src)
    # determines the order of any logged messages
    for attrName in (u'href', u'src'):
      self.validate_required_attribute((None, attrName), heisen_uri)
    return text.prevalidate(self)
class feedInfo(validatorBase):
  """FeedBurner feedInfo element; carries a required 'uri' attribute
  validated as an IRI."""
  def getExpectedAttrNames(self):
    return [(None, u'uri')]
  def prevalidate(self):
    self.validate_required_attribute((None, 'uri'), rfc3987)
class xmlView(validatorBase):
  """FeedBurner xmlView element; carries a required 'href' attribute
  validated as an absolute URI."""
  def getExpectedAttrNames(self):
    return [(None, u'href')]
  def prevalidate(self):
    self.validate_required_attribute((None, 'href'), rfc2396_full)
class georss_where(validatorBase):
  """georss:where container: dispatches to the supported GML geometry
  elements (Point, LineString, Polygon, Envelope)."""
  def do_gml_Envelope(self):
    return gml_envelope()
  def do_gml_LineString(self):
    return gml_line()
  def do_gml_Point(self):
    return gml_point()
  def do_gml_Polygon(self):
    return gml_polygon()
class geo_srsName(validatorBase):
  """Base class for GML geometry elements; allows an optional srsName
  (spatial reference system) attribute."""
  def getExpectedAttrNames(self):
    return [(None, u'srsName')]
class gml_point(geo_srsName):
  """gml:Point geometry; contains a single gml:pos coordinate pair."""
  def do_gml_pos(self):
    return gml_pos()
class geo_point(validatorBase):
  """geo:Point element: requires both a geo:lat and a geo:long child."""
  def do_geo_lat(self):
    return latitude()
  def do_geo_long(self):
    return longitude()
  def validate(self):
    # report each missing required child (lat first, then long)
    for childName, label in (("geo_lat", "geo:lat"), ("geo_long", "geo:long")):
      if childName not in self.children:
        self.log(MissingElement({"parent":self.name.replace('_',':'), "element":label}))
class gml_pos(text):
  """A single GML coordinate pair: two signed decimal numbers separated
  by a space (a comma separator is tolerated, with a warning)."""
  def validate(self):
    if not re.match(r'^[-+]?\d+\.?\d*[ ,][-+]?\d+\.?\d*$', self.value):
      return self.log(InvalidCoord({'value':self.value}))
    if ',' in self.value:
      self.log(CoordComma({'value':self.value}))
class gml_line(geo_srsName):
  """gml:LineString geometry; contains a gml:posList coordinate list."""
  def do_gml_posList(self):
    return gml_posList()
class gml_posList(text):
  """A GML coordinate list: an even number of whitespace-separated
  signed decimal values forming at least two lat/long pairs.

  Commas between lat and long are tolerated (logged as CoordComma) and
  normalized to spaces before the individual values are checked.
  """
  def validate(self):
    if self.value.find(',')>=0:
      # ensure that commas are only used to separate lat and long.
      # Fixed: the strip() target was a bare `value` (a NameError at
      # runtime), and the 2nd/4th coordinate patterns lacked a '+',
      # wrongly rejecting multi-digit values after a comma.
      if not re.match('^[-+.0-9]+[, ][-+.0-9]+( [-+.0-9]+[, ][-+.0-9]+)+$',
          self.value.strip()):
        return self.log(InvalidCoordList({'value':self.value}))
      self.log(CoordComma({'value':self.value}))
      self.value=self.value.replace(',',' ')
    values = self.value.strip().split()
    # at least two pairs, and an even count overall
    if len(values)<3 or len(values)%2 == 1:
      return self.log(InvalidCoordList({'value':self.value}))
    for value in values:
      if not re.match('^[-+]?\d+\.?\d*$', value):
        return self.log(InvalidCoordList({'value':value}))
class gml_polygon(geo_srsName):
  """gml:Polygon geometry; contains a gml:exterior boundary."""
  def do_gml_exterior(self):
    return gml_exterior()
class gml_exterior(validatorBase):
  """gml:exterior boundary of a polygon; contains a gml:LinearRing."""
  def do_gml_LinearRing(self):
    return gml_linearRing()
class gml_linearRing(geo_srsName):
  """gml:LinearRing; contains a gml:posList coordinate list."""
  def do_gml_posList(self):
    return gml_posList()
class gml_envelope(geo_srsName):
  """gml:Envelope bounding box; lower and upper corners are each a
  single coordinate pair."""
  def do_gml_lowerCorner(self):
    return gml_pos()
  def do_gml_upperCorner(self):
    return gml_pos()
class access_restriction(enumeration):
  """access:restriction element: its required 'relationship' attribute
  must be 'allow' or 'deny' (checked via the enumeration base class)."""
  error = InvalidAccessRestrictionRel
  valuelist = ["allow", "deny"]
  def getExpectedAttrNames(self):
    return [(None, u'relationship')]
  def prevalidate(self):
    self.children.append(True) # force warnings about "mixed" content
    if self.attrs.has_key((None,"relationship")):
      # the attribute value is what the enumeration check validates
      self.value=self.attrs.getValue((None,"relationship"))
    else:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"relationship"}))
########################################################################
# Extensions that are valid only at the RSS 2.0 item level             #
########################################################################
class extension_rss20_item(extension_item):
  """Extension elements valid only at the RSS 2.0 item level.

  Trackback URIs are literal element content here (RSS 1.0 uses
  rdf:resource attributes instead).  The dcterms_* methods below repeat
  the definitions already inherited from extension_everywhere verbatim.
  """
  def do_trackback_ping(self):
    return rfc2396_full(), noduplicates()
  def do_trackback_about(self):
    return rfc2396_full()
  def do_dcterms_accessRights(self):
    return eater()
  def do_dcterms_accrualMethod(self):
    return eater()
  def do_dcterms_accrualPeriodicity(self):
    return eater()
  def do_dcterms_accrualPolicy(self):
    return eater()
  def do_dcterms_bibliographicCitation(self):
    return eater()
  def do_dcterms_educationLevel(self):
    return eater()
  def do_dcterms_instructionalMethod(self):
    return eater()
  def do_dcterms_license(self):
    return eater()
  def do_dcterms_provenance(self):
    return eater()
  def do_dcterms_rightsHolder(self):
    return eater()
########################################################################
# Extensions that are valid only at the RSS 1.0 item level             #
########################################################################
class extension_rss10_item(extension_item):
  """Extension elements valid only at the RSS 1.0 item level.

  Trackback URIs are expressed as rdf:resource attributes here, unlike
  the literal-content form used for RSS 2.0 items."""
  def do_l_permalink(self):
    return l_permalink()
  def do_trackback_about(self):
    return rdfResourceURI()
  def do_trackback_ping(self):
    return rdfResourceURI(), noduplicates()
class l_permalink(rdfResourceURI, MimeType):
  """l:permalink element: an rdf:resource URI plus an optional l:type
  attribute holding a MIME type."""
  lNS = u'http://purl.org/rss/1.0/modules/link/'
  def getExpectedAttrNames(self):
    return rdfResourceURI.getExpectedAttrNames(self) + [(self.lNS, u'type')]
  def validate(self):
    typeAttr = (self.lNS, 'type')
    if typeAttr in self.attrs.getNames():
      # check the MIME type first, then fall through to the URI check
      self.value = self.attrs.getValue(typeAttr)
      MimeType.validate(self)
    return rdfResourceURI.validate(self)
class l_link(rdfResourceURI, MimeType):
  """Validator for l:link (RSS 1.0 link module).

  The element itself is an rdf:resource reference; the attributes in the
  l: namespace are validated individually in prevalidate().
  """
  lNS = u'http://purl.org/rss/1.0/modules/link/'
  def getExpectedAttrNames(self):
    return rdfResourceURI.getExpectedAttrNames(self) + [
      (self.lNS, u'lang'), (self.lNS, u'rel'),
      (self.lNS, u'type'), (self.lNS, u'title')
    ]
  def prevalidate(self):
    self.validate_optional_attribute((self.lNS,'lang'), iso639)
    self.validate_required_attribute((self.lNS,'rel'), rfc2396_full)
    self.validate_optional_attribute((self.lNS,'title'), nonhtml)
    if self.attrs.has_key((self.lNS, "type")):
      # l:type holds either a MIME type or, when it contains a colon,
      # a URI identifying the type
      if ':' not in self.attrs.getValue((self.lNS, "type")):
        self.validate_optional_attribute((self.lNS,'type'), MimeType)
      else:
        self.validate_optional_attribute((self.lNS,'type'), rfc2396_full)
########################################################################
# Extensions that are valid at only at the Atom entry level #
########################################################################
class extension_entry(extension_item):
  """Extension elements valid only at the Atom entry level; several
  mirror core Atom elements (noted inline)."""
  def do_dc_creator(self): # atom:creator
    return text() # duplicates allowed
  def do_dc_subject(self): # atom:category
    return text() # duplicates allowed
  def do_dc_date(self): # atom:published
    return w3cdtf(), noduplicates()
  def do_creativeCommons_license(self):
    return rfc2396_full()
  def do_trackback_ping(self):
    return rfc2396_full(), noduplicates()
  # XXX This should have duplicate semantics with link[@rel='related']
  def do_trackback_about(self):
    return rfc2396_full()
########################################################################
# Extensions that are valid at only at the channel level #
########################################################################
class extension_channel(extension_channel_item):
  """Extension elements valid only at the channel level (RSS);
  each do_* method returns the validator(s) for that child element."""
  def do_admin_generatorAgent(self):
    # warn when both the core <generator> and admin:generatorAgent appear
    if "generator" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return admin_generatorAgent(), noduplicates()
  def do_admin_errorReportsTo(self):
    return admin_errorReportsTo(), noduplicates()
  def do_blogChannel_blogRoll(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_mySubscriptions(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_blink(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_changes(self):
    return rfc2396_full(), noduplicates()
  def do_sy_updatePeriod(self):
    return sy_updatePeriod(), noduplicates()
  def do_sy_updateFrequency(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_sy_updateBase(self):
    return w3cdtf(), noduplicates()
  def do_foaf_maker(self):
    return eater()
  def do_cp_server(self):
    return rdfResourceURI()
  def do_wiki_interwiki(self):
    return text()
  def do_thr_in_reply_to(self):
    return in_reply_to()
  def do_cf_listinfo(self):
    from cf import listinfo
    return listinfo()
  def do_cf_treatAs(self):
    from cf import treatAs
    return treatAs()
  def do_feedburner_awareness(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_browserFriendly(self):
    return nonhtml(), noduplicates()
  def do_feedburner_emailServiceId(self):
    # NOTE(review): no content validator here, only a duplicate check --
    # verify this is intentional
    return noduplicates()
  def do_feedburner_feedFlare(self):
    return feedFlare()
  def do_feedburner_info(self):
    return feedInfo()
  def do_feedburner_feedburnerHostname(self):
    return rfc2396_full(), noduplicates()
  def do_opensearch_totalResults(self):
    return nonNegativeInteger(), noduplicates()
  # opensearch 1.0 aliases share the 1.1 handlers
  do_opensearch10_totalResults = do_opensearch_totalResults
  def do_opensearch_startIndex(self):
    return Integer(), noduplicates()
  do_opensearch10_startIndex = do_opensearch_startIndex
  def do_opensearch_itemsPerPage(self):
    return nonNegativeInteger(), noduplicates()
  do_opensearch10_itemsPerPage = do_opensearch_itemsPerPage
  def do_opensearch_Query(self):
    from opensearch import Query
    return Query()
  def do_xhtml_div(self):
    return eater()
  def do_xhtml_meta(self):
    return xhtml_meta()
  def do_sx_sharing(self):
    import sse
    return sse.Sharing()
  def do_fh_archive(self):
    return validatorBase()
  def do_fh_complete(self):
    return validatorBase()
class xhtml_meta(validatorBase):
  """Validator for xhtml:meta in a feed: only name="robots" with a
  recognized robots directive is accepted."""
  def getExpectedAttrNames(self):
    return [ (None, u'name'), (None, u'content') ]
  def prevalidate(self):
    self.validate_required_attribute((None,'name'), xhtmlMetaEnumeration)
    self.validate_required_attribute((None,'content'), robotsEnumeration)
class xhtmlMetaEnumeration(caseinsensitive_enumeration):
  # only the "robots" meta name is meaningful in feeds
  error = InvalidMetaName
  valuelist = ["robots"]
class robotsEnumeration(caseinsensitive_enumeration):
  # valid values for the robots meta content, in either order
  error = InvalidMetaContent
  valuelist = [
    "all", "none",
    "index", "index,follow", "index,nofollow",
    "noindex", "noindex,follow", "noindex,nofollow",
    "follow", "follow,index", "follow,noindex",
    "nofollow", "nofollow,index", "nofollow,noindex"]
########################################################################
# Extensions that are valid at only at the Atom feed level #
########################################################################
class extension_feed(extension_channel):
  """Extension elements valid only at the Atom feed level; inherits all
  channel-level extensions and maps dc elements to Atom equivalents."""
  def do_dc_creator(self): # atom:creator
    return text() # duplicates allowed
  def do_dc_subject(self): # atom:category
    return text() # duplicates allowed
  def do_dc_date(self): # atom:updated
    return w3cdtf(), noduplicates()
  def do_creativeCommons_license(self):
    return rfc2396_full()
  def do_access_restriction(self):
    return access_restriction()
########################################################################
# Validators #
########################################################################
# admin:generatorAgent and admin:errorReportsTo are plain rdf:resource URIs
class admin_generatorAgent(rdfResourceURI): pass
class admin_errorReportsTo(rdfResourceURI): pass
class sy_updatePeriod(text):
  """Validator for sy:updatePeriod; logs whether the value names one of
  the five recognized syndication periods."""
  def validate(self):
    event = {"parent":self.parent.name, "element":self.name, "value":self.value}
    if self.value in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
      self.log(ValidUpdatePeriod(event))
    else:
      self.log(InvalidUpdatePeriod(event))
class g_complex_type(validatorBase):
  """Base class for Google Base complex types; RSS 1.0 feeds may carry an
  rdf:parseType attribute on these elements."""
  def getExpectedAttrNames(self):
    if self.getFeedType() != TYPE_RSS1:
      return []
    rdfNS = u'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
    return [(rdfNS, u'parseType')]
class g_shipping(g_complex_type):
  """Validator for g:shipping: carrier service, destination country,
  and price (each at most once)."""
  def do_g_service(self):
    return g_serviceTypeEnumeration(), noduplicates()
  def do_g_country(self):
    return iso3166(), noduplicates()
  def do_g_price(self):
    return floatUnit(), noduplicates()
class g_dateTimeRange(g_complex_type):
  """Validator for a Google Base date/time range: ISO 8601 start and end."""
  def do_g_start(self):
    return iso8601(), noduplicates()
  def do_g_end(self):
    return iso8601(), noduplicates()
class g_labelType(text):
  """Validator for Google Base label values: a label must not contain a
  comma, since commas separate multiple labels."""
  def validate(self):
    if ',' in self.value:
      self.log(InvalidLabel({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_locationType(text):
  """Validator for Google Base locations: two or three comma-separated
  fields (e.g. city,state or city,state,country)."""
  def validate(self):
    fields = self.value.split(',')
    if len(fields) != 2 and len(fields) != 3:
      self.log(InvalidLocation({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_full_locationType(text):
  """Validator for full Google Base locations: exactly five
  comma-separated fields, none of them blank."""
  def validate(self):
    fields = self.value.split(',')
    blanks = [f for f in fields if not f.strip()]
    if len(fields) != 5 or blanks:
      self.log(InvalidFullLocation({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
# Closed-vocabulary enumerations for Google Base attributes: each pairs
# the logging event raised on a bad value with the list of accepted values.
class g_genderEnumeration(enumeration):
  error = InvalidGender
  valuelist = ["Male", "M", "Female", "F"]
class g_maritalStatusEnumeration(enumeration):
  error = InvalidMaritalStatus
  valuelist = ["single", "divorced", "separated", "widowed", "married", "in relationship"]
class g_paymentMethodEnumeration(enumeration):
  error = InvalidPaymentMethod
  valuelist = ["Cash", "Check", "Visa", "MasterCard",
    "AmericanExpress", "Discover", "WireTransfer"]
class g_priceTypeEnumeration(enumeration):
  error = InvalidPriceType
  valuelist = ["negotiable", "starting"]
class g_ratingTypeEnumeration(enumeration):
  error = InvalidRatingType
  valuelist = ["1", "2", "3", "4", "5"]
class g_reviewerTypeEnumeration(enumeration):
  error = InvalidReviewerType
  valuelist = ["editorial", "user"]
class g_salaryTypeEnumeration(enumeration):
  error = InvalidSalaryType
  valuelist = ["starting", "negotiable"]
class g_serviceTypeEnumeration(enumeration):
  error = InvalidServiceType
  valuelist = ['FedEx', 'UPS', 'DHL', 'Mail', 'Other', 'Overnight', 'Standard']
class g_float(text):
  """Validator for Google Base float values: digits, an optional
  fractional part, and an optional trailing unit."""
  def validate(self):
    import re
    # raw string for the regex (the original used '\d' in a plain string,
    # which only works by accident).  NOTE(review): unlike floatUnit, this
    # pattern is not $-anchored, so trailing garbage is tolerated -- verify
    # whether that is intentional before tightening it.
    if not re.match(r'\d+\.?\d*\s*\w*', self.value):
      self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class floatUnit(text):
  """Validator for float-with-optional-unit values (e.g. "1.5 kg"); the
  entire value must match ($-anchored)."""
  def validate(self):
    import re
    # raw string for the regex; pattern unchanged
    if not re.match(r'\d+\.?\d*\s*\w*$', self.value):
      self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class decimal(text):
  """Validator for signed decimal values: optional sign, digits, optional
  fractional part, optional trailing whitespace."""
  def validate(self):
    import re
    # raw string for the regex; pattern unchanged.
    # NOTE(review): logs InvalidFloatUnit rather than a decimal-specific
    # event -- looks copy-pasted from floatUnit; the message text it maps
    # to is generic, so behavior is kept as-is.
    if not re.match(r'[-+]?\d+\.?\d*\s*$', self.value):
      self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_year(text):
  """Validator for g:year: an integer between 1900 and four years after
  the current year."""
  def validate(self):
    import time
    try:
      year = int(self.value)
      # out-of-range years are funneled into the handler below by raising
      if year < 1900 or year > time.localtime()[0]+4: raise InvalidYear
    except:
      # bare except is deliberate: it catches both int()'s ValueError and
      # the InvalidYear raised above (InvalidYear is a logging event
      # class, not an Exception subclass)
      self.log(InvalidYear({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_intUnit(text):
  """Validator for integer-with-unit values: the first whitespace-separated
  token, with thousands-commas removed, must parse as a non-negative int."""
  def validate(self):
    try:
      if int(self.value.split(' ')[0].replace(',','')) < 0: raise InvalidIntUnit
    except:
      # bare except is deliberate: catches both int()'s ValueError and the
      # InvalidIntUnit raised above for negative values
      self.log(InvalidIntUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class maxten(validatorBase):
  """Logs TooMany exactly once: when the tenth sibling of the same name
  is already present as this element starts."""
  def textOK(self):
    # text content is acceptable; suppress unexpected-text warnings
    pass
  def prevalidate(self):
    same_name = sum(1 for child in self.parent.children if child == self.name)
    if same_name == 10:
      self.log(TooMany({"parent":self.parent.name, "element":self.name}))
class in_reply_to(validatorBase if False else canonicaluri, xmlbase):
  """Validator for thr:in-reply-to: each attribute is validated with the
  appropriate base-class validator by temporarily repointing self.value
  and self.name at that attribute (deliberate mutation)."""
  def getExpectedAttrNames(self):
    return [(None, u'href'), (None, u'ref'), (None, u'source'), (None, u'type')]
  def validate(self):
    if self.attrs.has_key((None, "href")):
      self.value = self.attrs.getValue((None, "href"))
      self.name = "href"
      xmlbase.validate(self)
    if self.attrs.has_key((None, "ref")):
      self.value = self.attrs.getValue((None, "ref"))
      self.name = "ref"
      canonicaluri.validate(self)
    if self.attrs.has_key((None, "source")):
      self.value = self.attrs.getValue((None, "source"))
      self.name = "source"
      xmlbase.validate(self)
    if self.attrs.has_key((None, "type")):
      # type is checked directly against the shared MIME-type regex
      self.value = self.attrs.getValue((None, "type"))
      if not mime_re.match(self.value):
        self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
      else:
        self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
########################################################################
# Extensions that you just gotta question #
########################################################################
class Questionable(extension_everywhere):
  """Atom/APP elements appearing in questionable places: accepted and
  validated, but their presence here is dubious."""
  # class-level default; NOTE(review): shared mutable class attribute --
  # presumably shadowed by a per-instance list in validatorBase; verify
  children = []
  def do_atom_author(self):
    from author import author
    return author()
  def do_atom_category(self):
    from category import category
    return category()
  def do_atom_content(self):
    from content import content
    return content()
  def do_atom_contributor(self):
    from author import author
    return author()
  def do_atom_generator(self):
    from generator import generator
    return generator()
  def do_atom_icon(self):
    return rfc2396(), noduplicates()
  def do_atom_id(self):
    return canonicaluri(), noduplicates()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_logo(self):
    return rfc2396(), noduplicates()
  def do_atom_published(self):
    return rfc3339(), noduplicates()
  def do_atom_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_subtitle(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_summary(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_updated(self):
    return rfc3339(), noduplicates()
  def do_app_workspace(self):
    from service import workspace
    return workspace()
  def do_app_collection(self):
    from service import collection
    return collection()
  def do_app_categories(self):
    from categories import categories
    return categories()
| Python |
"""$Id: __init__.py 1027 2008-09-26 14:41:21Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1027 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import socket
# Cap all socket operations at 10 seconds.  setdefaulttimeout appeared in
# Python 2.3; older interpreters fall back to the timeoutsocket module.
# Timeout is exported so callers can catch either flavor uniformly.
if hasattr(socket, 'setdefaulttimeout'):
  socket.setdefaulttimeout(10)
  Timeout = socket.timeout
else:
  import timeoutsocket
  timeoutsocket.setDefaultSocketTimeout(10)
  Timeout = timeoutsocket.Timeout
import urllib2
import logging
from logging import *
from xml.sax import SAXException
from xml.sax.xmlreader import InputSource
import re
import xmlEncoding
import mediaTypes
from httplib import BadStatusLine
# Hard cap on feed size in bytes; larger feeds fail with ValidatorLimit.
MAXDATALENGTH = 2000000
def _validate(aString, firstOccurrenceOnly, loggedEvents, base, encoding, selfURIs=None, mediaType=None):
  """validate RSS from string, returns validator object"""
  from xml.sax import make_parser, handler
  from base import SAXDispatcher
  from exceptions import UnicodeError
  from cStringIO import StringIO
  # Special case: WordPress feeds with a blank line before the XML
  # declaration.  Log the problem, then move the declaration to the front
  # so the rest of the document can still be checked.
  if re.match("^\s+<\?xml",aString) and re.search("<generator.*wordpress.*</generator>",aString):
    lt = aString.find('<'); gt = aString.find('>')
    if lt > 0 and gt > 0 and lt < gt:
      loggedEvents.append(logging.WPBlankLine({'line':1,'column':1}))
      # rearrange so that other errors can be found
      aString = aString[lt:gt+1]+aString[0:lt]+aString[gt+1:]
  # By now, aString should be Unicode
  source = InputSource()
  source.setByteStream(StringIO(xmlEncoding.asUTF8(aString)))
  validator = SAXDispatcher(base, selfURIs or [base], encoding)
  validator.setFirstOccurrenceOnly(firstOccurrenceOnly)
  # Atom publishing-protocol documents are identified by media type alone
  if mediaType == 'application/atomsvc+xml':
    validator.setFeedType(TYPE_APP_SERVICE)
  elif mediaType == 'application/atomcat+xml':
    validator.setFeedType(TYPE_APP_CATEGORIES)
  validator.loggedEvents += loggedEvents
  # experimental RSS-Profile support
  validator.rssCharData = [s.find('&#x')>=0 for s in aString.split('\n')]
  xmlver = re.match("^<\?\s*xml\s+version\s*=\s*['\"]([-a-zA-Z0-9_.:]*)['\"]",aString)
  if xmlver and xmlver.group(1)<>'1.0':
    validator.log(logging.BadXmlVersion({"version":xmlver.group(1)}))
  # Prefer an expat parser that tolerates external DTD references; fall
  # back to whatever make_parser() provides.
  try:
    from xml.sax.expatreader import ExpatParser
    class fake_dtd_parser(ExpatParser):
      def reset(self):
        ExpatParser.reset(self)
        self._parser.UseForeignDTD(1)
    parser = fake_dtd_parser()
  except:
    parser = make_parser()
  parser.setFeature(handler.feature_namespaces, 1)
  parser.setContentHandler(validator)
  parser.setErrorHandler(validator)
  parser.setEntityResolver(validator)
  if hasattr(parser, '_ns_stack'):
    # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
    # PyXML doesn't have this problem, and it doesn't have _ns_stack either
    parser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
  # Optional DTD validation via libxml2, exposed as a callback that feeds
  # libxml2's error lines (stripped of the random prefix) to log().
  def xmlvalidate(log):
    import libxml2
    from StringIO import StringIO
    from random import random
    prefix="...%s..." % str(random()).replace('0.','')
    msg=[]
    libxml2.registerErrorHandler(lambda msg,str: msg.append(str), msg)
    input = libxml2.inputBuffer(StringIO(xmlEncoding.asUTF8(aString)))
    reader = input.newTextReader(prefix)
    reader.SetParserProp(libxml2.PARSER_VALIDATE, 1)
    ret = reader.Read()
    while ret == 1: ret = reader.Read()
    msg=''.join(msg)
    for line in msg.splitlines():
      if line.startswith(prefix): log(line.split(':',4)[-1].strip())
  validator.xmlvalidator=xmlvalidate
  try:
    parser.parse(source)
  except SAXException:
    # parse errors were already reported through the error handler
    pass
  except UnicodeError:
    import sys
    exctype, value = sys.exc_info()[:2]
    validator.log(logging.UnicodeError({"exception":value}))
  # RSS 1.0 feeds get a second pass through rdflib's RDF/XML parser,
  # purely to surface RDF syntax errors; triples are discarded.
  if validator.getFeedType() == TYPE_RSS1:
    try:
      from rdflib.syntax.parsers.RDFXMLHandler import RDFXMLHandler
      class Handler(RDFXMLHandler):
        ns_prefix_map = {}
        prefix_ns_map = {}
        def add(self, triple): pass
        def __init__(self, dispatcher):
          RDFXMLHandler.__init__(self, self)
          self.dispatcher=dispatcher
        def error(self, message):
          self.dispatcher.log(InvalidRDF({"message": message}))
      source.getByteStream().reset()
      parser.reset()
      parser.setContentHandler(Handler(parser.getContentHandler()))
      parser.setErrorHandler(handler.ErrorHandler())
      parser.parse(source)
    except:
      # rdflib may be absent; best effort only
      pass
  return validator
def validateStream(aFile, firstOccurrenceOnly=0, contentType=None, base=""):
  """Validate a feed read from a file-like object; returns a dict with
  the detected feedType and the list of loggedEvents."""
  loggedEvents = []
  if contentType:
    (mediaType, charset) = mediaTypes.checkValid(contentType, loggedEvents)
  else:
    (mediaType, charset) = (None, None)
  rawdata = aFile.read(MAXDATALENGTH)
  # if a single extra byte remains, the feed exceeds the size limit
  if aFile.read(1):
    raise ValidationFailure(logging.ValidatorLimit({'limit': 'feed length > ' + str(MAXDATALENGTH) + ' bytes'}))
  encoding, rawdata = xmlEncoding.decode(mediaType, charset, rawdata, loggedEvents, fallback='utf-8')
  validator = _validate(rawdata, firstOccurrenceOnly, loggedEvents, base, encoding, mediaType=mediaType)
  # warn about mismatches between media type and detected feed version
  if mediaType and validator.feedType:
    mediaTypes.checkAgainstFeedType(mediaType, validator.feedType, validator.loggedEvents)
  return {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
def validateString(aString, firstOccurrenceOnly=0, fallback=None, base=""):
  """Validate a feed supplied as a string; returns a dict with the
  detected feedType (when validation ran) and the loggedEvents."""
  loggedEvents = []
  if type(aString) == unicode:
    # already decoded; assume a sane (?) default
    encoding = "utf-8"
  else:
    encoding, aString = xmlEncoding.decode("", None, aString, loggedEvents, fallback)
  if aString is None:
    # decoding failed; report only the events gathered so far
    return {"loggedEvents": loggedEvents}
  validator = _validate(aString, firstOccurrenceOnly, loggedEvents, base, encoding)
  return {"feedType": validator.feedType, "loggedEvents": validator.loggedEvents}
def validateURL(url, firstOccurrenceOnly=1, wantRawData=0):
  """validate RSS from URL, returns events list, or (events, rawdata) tuple"""
  loggedEvents = []
  request = urllib2.Request(url)
  request.add_header("Accept-encoding", "gzip, deflate")
  request.add_header("User-Agent", "FeedValidator/1.3")
  usock = None
  try:
    try:
      usock = urllib2.urlopen(request)
      rawdata = usock.read(MAXDATALENGTH)
      # a single extra byte means the feed exceeds the size limit
      if usock.read(1):
        raise ValidationFailure(logging.ValidatorLimit({'limit': 'feed length > ' + str(MAXDATALENGTH) + ' bytes'}))
      # check for temporary redirects
      if usock.geturl()<>request.get_full_url():
        from httplib import HTTPConnection
        spliturl=url.split('/',3)
        if spliturl[0]=="http:":
          conn=HTTPConnection(spliturl[2])
          conn.request("GET",'/'+spliturl[3].split("#",1)[0])
          resp=conn.getresponse()
          if resp.status<>301:
            loggedEvents.append(TempRedirect({}))
    except BadStatusLine, status:
      raise ValidationFailure(logging.HttpError({'status': status.__class__}))
    except urllib2.HTTPError, status:
      # error responses that still look like a feed (small body, compressed,
      # or ending in a feed close tag) are validated anyway, with a warning
      rawdata = status.read()
      if len(rawdata) < 512 or 'content-encoding' in status.headers:
        loggedEvents.append(logging.HttpError({'status': status}))
        usock = status
      else:
        rawdata=re.sub('<!--.*?-->','',rawdata)
        lastline = rawdata.strip().split('\n')[-1].strip()
        if lastline in ['</rss>','</feed>','</rdf:RDF>', '</kml>']:
          loggedEvents.append(logging.HttpError({'status': status}))
          usock = status
        else:
          raise ValidationFailure(logging.HttpError({'status': status}))
    except urllib2.URLError, x:
      raise ValidationFailure(logging.HttpError({'status': x.reason}))
    except Timeout, x:
      raise ValidationFailure(logging.IOError({"message": 'Server timed out', "exception":x}))
    except Exception, x:
      raise ValidationFailure(logging.IOError({"message": x.__class__.__name__,
        "exception":x}))
    if usock.headers.get('content-encoding', None) == None:
      loggedEvents.append(Uncompressed({}))
    # transparently decompress gzip / deflate response bodies
    if usock.headers.get('content-encoding', None) == 'gzip':
      import gzip, StringIO
      try:
        rawdata = gzip.GzipFile(fileobj=StringIO.StringIO(rawdata)).read()
      except:
        import sys
        exctype, value = sys.exc_info()[:2]
        event=logging.IOError({"message": 'Server response declares Content-Encoding: gzip', "exception":value})
        raise ValidationFailure(event)
    if usock.headers.get('content-encoding', None) == 'deflate':
      import zlib
      try:
        rawdata = zlib.decompress(rawdata, -zlib.MAX_WBITS)
      except:
        import sys
        exctype, value = sys.exc_info()[:2]
        event=logging.IOError({"message": 'Server response declares Content-Encoding: deflate', "exception":value})
        raise ValidationFailure(event)
    # KMZ is a zip archive; extract the first .kml member and validate it
    if usock.headers.get('content-type', None) == 'application/vnd.google-earth.kmz':
      import tempfile, zipfile, os
      try:
        (fd, tempname) = tempfile.mkstemp()
        os.write(fd, rawdata)
        os.close(fd)
        zfd = zipfile.ZipFile(tempname)
        namelist = zfd.namelist()
        for name in namelist:
          if name.endswith('.kml'):
            rawdata = zfd.read(name)
        zfd.close()
        os.unlink(tempname)
      except:
        import sys
        value = sys.exc_info()[:1]
        event=logging.IOError({"message": 'Problem decoding KMZ', "exception":value})
        raise ValidationFailure(event)
    mediaType = None
    charset = None
    # Is the Content-Type correct?
    contentType = usock.headers.get('content-type', None)
    if contentType:
      (mediaType, charset) = mediaTypes.checkValid(contentType, loggedEvents)
    # Check for malformed HTTP headers
    for (h, v) in usock.headers.items():
      if (h.find(' ') >= 0):
        loggedEvents.append(HttpProtocolError({'header': h}))
    selfURIs = [request.get_full_url()]
    baseURI = usock.geturl()
    if not baseURI in selfURIs: selfURIs.append(baseURI)
    # Get baseURI from content-location and/or redirect information
    if usock.headers.get('content-location', None):
      from urlparse import urljoin
      baseURI=urljoin(baseURI,usock.headers.get('content-location', ""))
    elif usock.headers.get('location', None):
      from urlparse import urljoin
      baseURI=urljoin(baseURI,usock.headers.get('location', ""))
    if not baseURI in selfURIs: selfURIs.append(baseURI)
    usock.close()
    usock = None
    mediaTypes.contentSniffing(mediaType, rawdata, loggedEvents)
    encoding, rawdata = xmlEncoding.decode(mediaType, charset, rawdata, loggedEvents, fallback='utf-8')
    if rawdata is None:
      return {'loggedEvents': loggedEvents}
    rawdata = rawdata.replace('\r\n', '\n').replace('\r', '\n') # normalize EOL
    validator = _validate(rawdata, firstOccurrenceOnly, loggedEvents, baseURI, encoding, selfURIs, mediaType=mediaType)
    # Warn about mismatches between media type and feed version
    if mediaType and validator.feedType:
      mediaTypes.checkAgainstFeedType(mediaType, validator.feedType, validator.loggedEvents)
    params = {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
    if wantRawData:
      params['rawdata'] = rawdata
    return params
  finally:
    # best-effort close; usock is None once already closed above
    try:
      if usock: usock.close()
    except:
      pass
# Public names exported by `from feedvalidator import *`.
__all__ = [
  'base', 'channel', 'compatibility', 'image', 'item',
  'logging', 'rdf', 'root', 'rss', 'skipHours',
  'textInput', 'util', 'validators', 'validateURL', 'validateString',
]
| Python |
"""$Id: en.py 1041 2009-02-18 00:19:04Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1041 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
from feedvalidator.logging import *
# %-style templates used when rendering an event's location and count
line = "line %(line)s"
column = "column %(column)s"
occurances = " (%(msgcount)s occurrences)"  # NOTE: name is misspelled but is part of this module's public interface
messages = {
SAXError: "XML parsing error: %(exception)s",
WPBlankLine: "Blank line before XML declaration",
NotHtml: "%(message)s",
UnicodeError: "%(exception)s (maybe a high-bit character?)",
UndefinedElement: "Undefined %(parent)s element: %(element)s",
MissingNamespace: "Missing namespace for %(element)s",
MissingElement: "Missing %(parent)s element: %(element)s",
MissingRecommendedElement: "%(parent)s should contain a %(element)s element",
MissingAttribute: "Missing %(element)s attribute: %(attr)s",
MissingRecommendedAttribute: "Missing recommended %(element)s attribute: %(attr)s",
UnexpectedAttribute: "Unexpected %(attribute)s attribute on %(element)s element",
NoBlink: "There is no blink element in RSS; use blogChannel:blink instead",
NoThrWhen: "thr:when attribute obsolete; use thr:updated instead",
NoBlink: "There is no thr:when attribute in Atom; use thr:updated instead",
InvalidWidth: "%(element)s must be between 1 and 144",
InvalidHeight: "%(element)s must be between 1 and 400",
InvalidHour: "%(element)s must be an integer between 0 and 23",
InvalidDay: "%(element)s must be Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday",
InvalidInteger: "%(element)s must be an integer",
InvalidNonNegativeInteger: "%(element)s must be a non-negative integer",
InvalidPositiveInteger: "%(element)s must be a positive integer",
InvalidAlphanum: "%(element)s must be alphanumeric",
InvalidLatitude: "%(element)s must be between -90 and 90",
InvalidLongitude: "%(element)s must be between -180 and 180",
InvalidCommaSeparatedIntegers: "%(element)s must be comma-separated integers",
InvalidHttpGUID: "guid must be a full URL, unless isPermaLink attribute is false",
InvalidUpdatePeriod: "%(element)s must be hourly, daily, weekly, monthly, or yearly",
NotBlank: "%(element)s should not be blank",
AttrNotBlank: "The %(attr)s attribute of %(element)s should not be blank",
DuplicateElement: "%(parent)s contains more than one %(element)s",
DuplicateSemantics: "A channel should not include both %(core)s and %(ext)s",
DuplicateItemSemantics: "An item should not include both %(core)s and %(ext)s",
DuplicateValue: "%(element)s values must not be duplicated within a feed",
NonstdPrefix: '"%(preferred)s" is the preferred prefix for the namespace "%(ns)s"',
ReservedPrefix: 'The prefix "%(prefix)s" generally is associated with the namespace "%(ns)s"',
MediaRssNamespace: 'Missing trailing slash in mediaRSS namespace',
InvalidContact: "Invalid email address",
InvalidAddrSpec: "%(element)s must be an email address",
InvalidLink: "%(element)s must be a valid URI",
InvalidIRI: "%(element)s must be a valid IRI",
InvalidFullLink: "%(element)s must be a full and valid URL",
InvalidUriChar: "Invalid character in a URI",
InvalidISO8601Date: "%(element)s must be an ISO8601 date",
InvalidISO8601DateTime: "%(element)s must be an ISO8601 date-time",
InvalidW3CDTFDate: "%(element)s must be an W3CDTF date",
InvalidRFC2822Date: "%(element)s must be an RFC-822 date-time",
IncorrectDOW: "Incorrect day of week",
InvalidRFC3339Date: "%(element)s must be an RFC-3339 date-time",
InvalidNPTTime: "%(attr)s must be an NPT-time",
InvalidLanguage: "%(element)s must be an ISO-639 language code",
InvalidURIAttribute: "%(attr)s attribute of %(element)s must be a valid URI",
InvalidURLAttribute: "%(element)s must be a full URL",
InvalidIntegerAttribute: "%(attr)s attribute of %(element)s must be a positive integer",
InvalidBooleanAttribute: "%(attr)s attribute of %(element)s must be 'true' or 'false'",
InvalidMIMEAttribute: "%(attr)s attribute of %(element)s must be a valid MIME type",
ItemMustContainTitleOrDescription: "item must contain either title or description",
ContainsHTML: "%(element)s should not contain HTML",
ContainsEmail: "%(element)s should not include email address",
ContainsUndeclaredHTML: "%(element)s should not contain HTML unless declared in the type attribute",
NotEnoughHoursInTheDay: "skipHours can not contain more than 24 hour elements",
EightDaysAWeek: "skipDays can not contain more than 7 day elements",
SecurityRisk: "%(element)s should not contain %(tag)s tag",
SecurityRiskAttr: "%(element)s should not contain %(attr)s attribute",
ContainsRelRef: "%(element)s should not contain relative URL references",
ContainsSystemEntity: "Feeds must not contain SYSTEM entities",
InvalidContentMode: "mode must be 'xml', 'escaped', or 'base64'",
InvalidMIMEType: "Not a valid MIME type",
NotEscaped: "%(element)s claims to be escaped, but isn't",
NotInline: "%(element)s claims to be inline, but may contain html",
NotBase64: "%(element)s claims to be base64-encoded, but isn't",
InvalidURN: "%(element)s is not a valid URN",
InvalidUUID: "%(element)s is not a valid UUID",
InvalidTAG: "%(element)s is not a valid TAG",
InvalidURI: "%(element)s is not a valid URI",
ObsoleteVersion: "This feed is an obsolete version",
ObsoleteNamespace: "This feed uses an obsolete namespace",
InvalidNamespace: "%(element)s is in an invalid namespace: %(namespace)s",
InvalidDoctype: "This feed contains conflicting DOCTYPE and version information",
DuplicateAtomLink: "Duplicate alternate links with the same type and hreflang",
MissingHref: "%(element)s must have an href attribute",
AtomLinkNotEmpty: "%(element)s should not have text (all data is in attributes)",
BadCharacters: '%(element)s contains bad characters',
BadXmlVersion: "Incorrect XML Version: %(version)s",
UnregisteredAtomLinkRel: "Unregistered link relationship",
HttpError: "Server returned %(status)s",
IOError: "%(exception)s (%(message)s; misconfigured server?)",
ObscureEncoding: "Obscure XML character encoding: %(encoding)s",
NonstdEncoding: "This encoding is not mandated by the XML specification: %(encoding)s",
UnexpectedContentType: '%(type)s should not be served with the "%(contentType)s" media type',
EncodingMismatch: 'Your feed appears to be encoded as "%(encoding)s", but your server is reporting "%(charset)s"',
UnknownEncoding: "Unknown XML character encoding: %(encoding)s",
NotSufficientlyUnique: "The specified guid is not sufficiently unique",
MissingEncoding: "No character encoding was specified",
UnexpectedText: "Unexpected Text",
ValidatorLimit: "Unable to validate, due to hardcoded resource limits (%(limit)s)",
TempRedirect: "Temporary redirect",
TextXml: "Content type of text/xml with no charset",
Uncompressed: "Response is not compressed",
HttpProtocolError: 'Response includes bad HTTP header name: "%(header)s"',
NonCanonicalURI: 'Identifier "%(uri)s" is not in canonical form (the canonical form would be "%(curi)s")',
InvalidRDF: 'RDF parsing error: %(message)s',
InvalidDuration: 'Invalid duration',
InvalidYesNo: '%(element)s must be "yes", "no"',
InvalidYesNoClean: '%(element)s must be "yes", "no", or "clean"',
TooLong: 'length of %(len)d exceeds the maximum allowable for %(element)s of %(max)d',
InvalidItunesCategory: '%(text)s is not one of the predefined iTunes categories or sub-categories',
ObsoleteItunesCategory: '%(text)s is an obsolete iTunes category or sub-category',
InvalidKeywords: 'Use commas to separate keywords',
InvalidTextType: 'type attribute must be "text", "html", or "xhtml"',
MissingXhtmlDiv: 'Missing xhtml:div element',
MissingSelf: 'Missing atom:link with rel="self"',
MissingAtomSelfLink: 'Missing atom:link with rel="self"',
DuplicateEntries: 'Two entries with the same id',
DuplicateIds: 'All entries have the same id',
MisplacedMetadata: '%(element)s must appear before all entries',
MissingSummary: 'Missing summary',
MissingTextualContent: 'Missing textual content',
MissingContentOrAlternate: 'Missing content or alternate link',
MissingSourceElement: "Missing %(parent)s element: %(element)s",
MissingTypeAttr: "Missing %(element)s attribute: %(attr)s",
HtmlFragment: "%(type)s type used for a document fragment",
DuplicateUpdated: "Two entries with the same value for atom:updated",
UndefinedNamedEntity: "Undefined named entity",
ImplausibleDate: "Implausible date",
UnexpectedWhitespace: "Whitespace not permitted here",
SameDocumentReference: "Same-document reference",
SelfDoesntMatchLocation: "Self reference doesn't match document location",
InvalidOPMLVersion: 'The "version" attribute for the opml element must be 1.0 or 1.1.',
MissingXmlURL: 'An <outline> element whose type is "rss" must have an "xmlUrl" attribute.',
InvalidOutlineVersion: 'An <outline> element whose type is "rss" may have a version attribute, whose value must be RSS, RSS1, RSS2, or scriptingNews.',
InvalidOutlineType: 'The type attribute on an <outline> element should be a known type.',
InvalidExpansionState: '<expansionState> is a comma-separated list of line numbers.',
InvalidTrueFalse: '%(element)s must be "true" or "false"',
MissingOutlineType: 'An <outline> element with more than just a "text" attribute should have a "type" attribute indicating how the other attributes are to be interpreted.',
MissingTitleAttr: 'Missing outline attribute: title',
MissingUrlAttr: 'Missing outline attribute: url',
NotUTF8: 'iTunes elements should only be present in feeds encoded as UTF-8',
MissingItunesElement: 'Missing recommended iTunes %(parent)s element: %(element)s',
UnsupportedItunesFormat: 'Format %(extension)s is not supported by iTunes',
InvalidCountryCode: "Invalid country code: \"%(value)s\"",
InvalidCurrencyUnit: "Invalid value for %(attr)s",
InvalidFloat: "Invalid value for %(attr)s",
InvalidFloatUnit: "Invalid value for %(attr)s",
InvalidFullLocation: "Invalid value for %(attr)s",
InvalidGender: "Invalid value for %(attr)s",
InvalidIntUnit: "Invalid value for %(attr)s",
InvalidLabel: "Invalid value for %(attr)s",
InvalidLocation: "Invalid value for %(attr)s",
InvalidMaritalStatus: "Invalid value for %(attr)s",
InvalidPaymentMethod: "Invalid value for %(attr)s",
InvalidPercentage: '%(element)s must be a percentage',
InvalidPriceType: "Invalid value for %(attr)s",
InvalidRatingType: "Invalid value for %(attr)s",
InvalidReviewerType: "Invalid value for %(attr)s",
InvalidSalaryType: "Invalid value for %(attr)s",
InvalidServiceType: "Invalid value for %(attr)s",
InvalidValue: "Invalid value for %(attr)s",
InvalidYear: "Invalid value for %(attr)s",
TooMany: "%(parent)s contains more than ten %(element)s elements",
InvalidPermalink: "guid must be a full URL, unless isPermaLink attribute is false",
NotInANamespace: "Missing namespace for %(element)s",
UndeterminableVocabulary:"Missing namespace for %(element)s",
SelfNotAtom: '"self" link references a non-Atom representation',
InvalidFormComponentName: 'Invalid form component name',
ImageLinkDoesntMatch: "Image link doesn't match channel link",
ImageUrlFormat: "Image not in required format",
ProblematicalRFC822Date: "Problematical RFC 822 date-time value",
DuplicateEnclosure: "item contains more than one enclosure",
MissingItunesEmail: "The recommended <itunes:email> element is missing",
MissingGuid: "%(parent)s should contain a %(element)s element",
UriNotIri: "IRI found where URL expected",
ObsoleteWikiNamespace: "Obsolete Wiki Namespace",
DuplicateDescriptionSemantics: "Avoid %(element)s",
InvalidCreditRole: "Invalid Credit Role",
InvalidMediaTextType: 'type attribute must be "plain" or "html"',
InvalidMediaHash: 'Invalid Media Hash',
InvalidMediaRating: 'Invalid Media Rating',
InvalidMediaRestriction: "media:restriction must be 'all' or 'none'",
InvalidMediaRestrictionRel: "relationship must be 'allow' or 'disallow'",
InvalidMediaRestrictionType: "type must be 'country' or 'uri'",
InvalidMediaMedium: 'Invalid content medium: "%(value)s"',
InvalidMediaExpression: 'Invalid content expression: "%(value)s"',
DeprecatedMediaAdult: 'media:adult is deprecated',
MediaGroupWithoutAlternatives: 'media:group must have multiple media:content children',
CommentRSS: 'wfw:commentRSS should be wfw:commentRss',
NonSpecificMediaType: '"%(contentType)s" media type is not specific enough',
DangerousStyleAttr: "style attribute contains potentially dangerous content",
NotURLEncoded: "%(element)s must be URL encoded",
InvalidLocalRole: "Invalid local role",
InvalidEncoding: "Invalid character encoding",
ShouldIncludeExample: "OpenSearchDescription should include an example Query",
InvalidAdultContent: "Non-boolean value for %(element)s",
InvalidLocalParameter: "Invalid local parameter name",
UndeclaredPrefix: "Undeclared %(element)s prefix",
UseOfExtensionAttr: "Use of extension attribute on RSS 2.0 core element: %(attribute)s",
DeprecatedDTD: "The use of this DTD has been deprecated by Netscape",
MisplacedXHTMLContent: "Misplaced XHTML content",
SchemeNotIANARegistered: "URI scheme not IANA registered",
InvalidCoord: "Invalid coordinates",
InvalidCoordList: "Invalid coordinate list",
CoordComma: "Comma found in coordinate pair",
AvoidNamespacePrefix: "Avoid Namespace Prefix: %(prefix)s",
Deprecated: "%(element)s has been superceded by %(replacement)s.",
DeprecatedRootHref: "root:// URLs have been superceded by full http:// URLs",
InvalidAltitudeMode: "Invalid altitudeMode",
InvalidAngle: "%(element)s must be between -360 and 360",
InvalidColor: "Not a valid color",
InvalidColorMode: "Invalid colorMode.",
InvalidItemIconState: "Invalid state for Icon",
InvalidListItemType: "Invalid list item type",
InvalidKmlCoordList: "Invalid coordinate list. Make sure that coordinates are of the form longitude,latitude or longitude,latitude,altitude and seperated by a single space. It is also a good idea to avoid line breaks or other extraneous white space",
InvalidKmlLatitude: "Invalid latitude found within coordinates. Latitudes have to be between -90 and 90.",
InvalidKmlLongitude: "Invalid longitude found within coordinates. Longitudes have to be between -180 and 180.",
InvalidKmlMediaType: "%(contentType)s is an invalid KML media type. Use application/vnd.google-earth.kml+xml or application/vnd.google-earth.kmz",
InvalidKmlUnits: "Invalid units.",
InvalidRefreshMode: "Invalid refreshMode",
InvalidSchemaFieldType: "Invalid Schema field type",
InvalidStyleState: "Invalid key for StyleMap.",
InvalidViewRefreshMode: "Invalid viewRefreshMode.",
InvalidZeroOne: "Invalid value. Should be 0 or 1.",
MissingId: "%(parent)s should contain a %(element)s attribute. This is important if you want to link directly to features.",
InvalidSseType: "sx:related type must be either 'aggregated' or 'compete'",
FeedHistoryRelInEntry: "%(rel)s link relation found in entry",
LinkPastEnd: "%(rel)s link in %(self)s entry in list",
FeedRelInCompleteFeed: "%(rel)s link relation found in complete feed",
MissingCurrentInArchive: "Current link not found in archive feed",
CurrentNotSelfInCompleteFeed: "Current not self in complete feed",
ArchiveIncomplete: "Archive incomplete",
RelativeSelf: "Relative href value on self link",
ConflictingCatAttr: "Categories can't have both href and %(attr)s attributes",
ConflictingCatChildren: "Categories can't have both href attributes and children",
UndefinedParam: "Undefined media-range parameter",
CharacterData: 'Encode "&" and "<" in plain text using hexadecimal character references.',
EmailFormat: 'Email address is not in the recommended format',
MissingRealName: 'Email address is missing real name',
MisplacedItem: 'Misplaced Item',
ImageTitleDoesntMatch: "Image title doesn't match channel title",
AvoidTextInput: "Avoid Text Input",
NeedDescriptionBeforeContent: "Ensure description precedes content:encoded",
SlashDate: "Ensure lastBuildDate is present when slash:comments is used",
UseZeroForMidnight: "Use zero for midnight",
UseZeroForUnknown: "Use zero for unknown length",
UnknownHost: "Unknown host",
UnknownNamespace: "Use of unknown namespace: %(namespace)s",
IntegerOverflow: "%(element)s value too large",
InvalidNSS: "Invalid Namespace Specific String: %(element)s",
SinceAfterUntil: "Since After until",
MissingByAndWhenAttrs: "Missing by and when attributes",
QuestionableUsage: "Undocumented use of %(element)s",
InvalidRSSVersion: "Invalid RSS Version",
}
| Python |
"""$Id: __init__.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
| Python |
from base import validatorBase
from validators import *
from extension import extension_everywhere
class service(validatorBase, extension_everywhere):
    """Validates the root element of an APP (Atom Publishing Protocol)
    service document."""

    def getExpectedAttrNames(self):
        # <service> carries no attributes of its own.
        return []

    def validate(self):
        # A service document must declare at least one workspace.
        if "app_workspace" not in self.children:
            self.log(MissingElement({"parent": self.name, "element": "app:workspace"}))

    def do_app_workspace(self):
        return workspace()
class workspace(validatorBase, extension_everywhere):
    """Validates an <app:workspace> element."""

    def validate(self):
        # Every workspace needs a human-readable atom:title.
        if "atom_title" not in self.children:
            self.log(MissingElement({"parent": self.name, "element": "atom:title"}))

    def do_app_collection(self):
        return collection()

    def do_atom_title(self):
        # Imported lazily to avoid a circular import at module load time.
        from content import textConstruct
        return textConstruct(), noduplicates()
class collection(validatorBase, extension_everywhere):
    """Validates an <app:collection> element.

    A collection must carry an href attribute holding an IRI, and must
    contain an atom:title child.
    """

    def getExpectedAttrNames(self):
        return [(None, u'href')]

    def prevalidate(self):
        # href is mandatory and must be a valid IRI (RFC 3987).
        self.validate_required_attribute((None, 'href'), rfc3987)

    def validate(self):
        if "atom_title" not in self.children:
            self.log(MissingElement({"parent": self.name, "element": "atom:title"}))

    def do_atom_title(self):
        # Imported lazily to avoid a circular import at module load time.
        from content import textConstruct
        return textConstruct(), noduplicates()

    def do_title(self):
        # An unprefixed <title> is only legal here when the Atom namespace
        # is a default namespace; treat it as atom:title.
        from root import atom_namespace
        assert(atom_namespace in self.dispatcher.defaultNamespaces)
        self.child = 'atom_title'
        return self.do_atom_title()

    def do_app_categories(self):
        from categories import categories
        return categories()

    def do_app_accept(self):
        # app:accept holds a media-range, not a category document.
        # (Removed an unused, copy-pasted 'from categories import categories';
        # MediaRange comes from the validators star-import.)
        return MediaRange()
| Python |
"""$Id: author.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# author element.
#
class author(validatorBase):
    """Validates an author element (RSS 2.0, Atom and RSS 1.0 flavours)."""

    def getExpectedAttrNames(self):
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]

    def validate(self):
        # Either a plain <name> or an <atom:name> child is required.
        named = ("name" in self.children) or ("atom_name" in self.children)
        if not named:
            self.log(MissingElement({"parent": self.name, "element": "name"}))

    def do_name(self):
        return nonhtml(), nonemail(), nonblank(), noduplicates()

    def do_email(self):
        return addr_spec(), noduplicates()

    def do_uri(self):
        return nonblank(), rfc3987(), nows(), noduplicates()

    def do_foaf_workplaceHomepage(self):
        return rdfResourceURI()

    def do_foaf_homepage(self):
        return rdfResourceURI()

    def do_foaf_weblog(self):
        return rdfResourceURI()

    def do_foaf_plan(self):
        return text()

    def do_foaf_firstName(self):
        return text()

    def do_xhtml_div(self):
        from content import diveater
        return diveater()

    # Atom spells these children atom:name, atom:email and atom:uri;
    # the validation rules are identical.
    do_atom_name = do_name
    do_atom_email = do_email
    do_atom_uri = do_uri
| Python |
"""$Id: image.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from extension import extension_everywhere
#
# image element.
#
class image(validatorBase, extension_everywhere):
    # Validates an RSS <image> element across RSS 0.9x/2.0, RSS 1.0
    # (rdf:resource form) and RSS 1.1 (rdf:parseType form).
    def getExpectedAttrNames(self):
        # rdf:* attributes appear on RSS 1.x images only.
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
    def validate(self):
        # <image> itself carries no character data, only children/attributes.
        if self.value.strip():
            self.log(UnexpectedText({"parent":self.parent.name, "element":"image"}))
        if self.attrs.has_key((rdfNS,"resource")):
            return # looks like an RSS 1.0 feed
        if not "title" in self.children:
            self.log(MissingTitle({"parent":self.name, "element":"title"}))
        if not "url" in self.children:
            self.log(MissingElement({"parent":self.name, "element":"url"}))
        if self.attrs.has_key((rdfNS,"parseType")):
            return # looks like an RSS 1.1 feed
        # Only plain RSS 0.9x/2.0 images additionally require <link>.
        if not "link" in self.children:
            self.log(MissingLink({"parent":self.name, "element":"link"}))
    def do_title(self):
        return title(), noduplicates()
    def do_link(self):
        return link(), noduplicates()
    def do_url(self):
        return url(), noduplicates()
    def do_width(self):
        return width(), noduplicates()
    def do_height(self):
        return height(), noduplicates()
    def do_description(self):
        return nonhtml(), noduplicates()
    def do_dc_creator(self):
        return text()
    def do_dc_subject(self):
        return text() # duplicates allowed
    def do_dc_date(self):
        return w3cdtf(), noduplicates()
    def do_cc_license(self):
        return eater()
class link(rfc2396_full):
    """Validates <image><link>: a full URI that should match the channel link."""

    def validate(self):
        rfc2396_full.validate(self)
        # Grandparent is the channel; warn when the two links disagree.
        channel_link = getattr(self.parent.parent, 'link', None)
        if channel_link and channel_link != self.value:
            self.log(ImageLinkDoesntMatch({"parent": self.parent.name, "element": self.name}))
class url(rfc2396_full):
    """Validates <image><url>: a full URI that should point at a JPEG, GIF or PNG."""

    def validate(self):
        rfc2396_full.validate(self)
        import re
        suffix = self.value.split('.')[-1].lower()
        # Only complain when the URL actually ends in a word-like extension.
        if re.match("^\w+$", suffix) and suffix not in ('jpg', 'jpeg', 'gif', 'png'):
            self.log(ImageUrlFormat({"parent": self.parent.name, "element": self.name}))
class title(nonhtml, noduplicates):
    # Validates <image><title>: must be non-blank, HTML-free, and should
    # match the channel's own title.
    def validate(self):
        if not self.value.strip():
            self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
        else:
            self.log(ValidTitle({"parent":self.parent.name, "element":self.name}))
        nonhtml.validate(self)
        # Grandparent is the channel; warn when the two titles disagree.
        if hasattr(self.parent.parent, 'title') and \
            self.parent.parent.title and self.parent.parent.title != self.value:
            self.log(ImageTitleDoesntMatch({"parent":self.parent.name, "element":self.name}))
class width(text, noduplicates):
    """Validates <image><width>: an integer from 1 to 144 (the RSS maximum)."""

    def validate(self):
        try:
            pixels = int(self.value)
        except ValueError:
            # Not an integer at all.
            self.log(InvalidWidth({"parent": self.parent.name, "element": self.name, "value": self.value}))
            return
        if 0 < pixels <= 144:
            self.log(ValidWidth({"parent": self.parent.name, "element": self.name}))
        else:
            self.log(InvalidWidth({"parent": self.parent.name, "element": self.name, "value": self.value}))
class height(text, noduplicates):
    """Validates <image><height>: an integer from 1 to 400 (the RSS maximum)."""

    def validate(self):
        try:
            pixels = int(self.value)
        except ValueError:
            # Not an integer at all.
            self.log(InvalidHeight({"parent": self.parent.name, "element": self.name, "value": self.value}))
            return
        if 0 < pixels <= 400:
            self.log(ValidHeight({"parent": self.parent.name, "element": self.name}))
        else:
            self.log(InvalidHeight({"parent": self.parent.name, "element": self.name, "value": self.value}))
| Python |
from validators import *
class media_elements:
    # Mix-in providing handlers for Media RSS (media:*) children; shared by
    # item-level containers, media:group and media:content.
    def do_media_adult(self):
        # media:adult was superseded by media:rating; flag it as deprecated.
        self.log(DeprecatedMediaAdult({"parent":self.name, "element":"media:adult"}))
        return truefalse(), noduplicates()
    def do_media_category(self):
        return media_category()
    def do_media_copyright(self):
        return media_copyright(), noduplicates()
    def do_media_credit(self):
        return media_credit()
    def do_media_description(self):
        # media:description shares media:title's validation rules
        # (optional type="plain|html" plus markup checking).
        return media_title(), noduplicates()
    def do_media_keywords(self):
        return text()
    def do_media_hash(self):
        return media_hash()
    def do_media_player(self):
        return media_player()
    def do_media_rating(self):
        return media_rating()
    def do_media_restriction(self):
        return media_restriction()
    def do_media_text(self):
        return media_text()
    def do_media_title(self):
        return media_title(), noduplicates()
    def do_media_thumbnail(self):
        return media_thumbnail()
class media_category(nonhtml,rfc2396_full):
    # Validates media:category: an optional HTML-free 'label' attribute and
    # an optional 'scheme' attribute that must be a full URI.
    def getExpectedAttrNames(self):
        return [(None,u'label'),(None, u'scheme')]
    def prevalidate(self):
        # Inherited validators operate on self.name/self.value, so each
        # attribute is checked by temporarily posing as that attribute.
        self.name = "label"
        self.value = self.attrs.get((None,u'label'))
        if self.value: nonhtml.validate(self)
        self.name = "scheme"
        self.value = self.attrs.get((None,u'scheme'))
        if self.value: rfc2396_full.validate(self)
        # Restore state before element text is accumulated.
        self.name = "media_category"
        self.value = ""
class media_copyright(nonhtml,rfc2396_full):
    # Validates media:copyright: the optional 'url' attribute must be a
    # full URI (pointing at the terms of use).
    def getExpectedAttrNames(self):
        return [(None,u'url')]
    def prevalidate(self):
        # Pose as the attribute so the inherited validator reports correctly.
        self.name = "url"
        self.value = self.attrs.get((None,u'url'))
        if self.value: rfc2396_full.validate(self)
        # Restore state before element text is accumulated.
        self.name = "media_copyright"
        self.value = ""
class media_credit(text,rfc2396_full):
    # Validates media:credit.  With the default scheme (urn:ebu) the 'role'
    # attribute must be one of the EBU role codes below; with any other
    # scheme the role must merely be lower case and the scheme a full URI.
    # EBU role-code vocabulary (European Broadcasting Union).
    EBU = [
        "actor", "adaptor", "anchor person", "animal trainer", "animator",
        "announcer", "armourer", "art director", "artist/performer",
        "assistant camera", "assistant chief lighting technician",
        "assistant director", "assistant producer", "assistant visual editor",
        "author", "broadcast assistant", "broadcast journalist", "camera operator",
        "carpenter", "casting", "causeur", "chief lighting technician", "choir",
        "choreographer", "clapper loader", "commentary or commentator",
        "commissioning broadcaster", "composer", "computer programmer",
        "conductor", "consultant", "continuity checker", "correspondent",
        "costume designer", "dancer", "dialogue coach", "director",
        "director of photography", "distribution company", "draughtsman",
        "dresser", "dubber", "editor/producer", "editor", "editor", "ensemble",
        "executive producer", "expert", "fight director", "floor manager",
        "focus puller", "foley artist", "foley editor", "foley mixer",
        "graphic assistant", "graphic designer", "greensman", "grip",
        "hairdresser", "illustrator", "interviewed guest", "interviewer",
        "key character", "key grip", "key talents", "leadman", "librettist",
        "lighting director", "lighting technician", "location manager",
        "lyricist", "make up artist", "manufacturer", "matte artist",
        "music arranger", "music group", "musician", "news reader", "orchestra",
        "participant", "photographer", "post", "producer", "production assistant",
        "production company", "production department", "production manager",
        "production secretary", "programme production researcher",
        "property manager", "publishing company", "puppeteer", "pyrotechnician",
        "reporter", "rigger", "runner", "scenario", "scenic operative",
        "script supervisor", "second assistant camera",
        "second assistant director", "second unit director", "set designer",
        "set dresser", "sign language", "singer", "sound designer", "sound mixer",
        "sound recordist", "special effects", "stunts", "subtitles",
        "technical director", "term", "translation", "transportation manager",
        "treatment/programme proposal", "vision mixer", "visual editor",
        "visual effects", "wardrobe", "witness",
        # awaiting confirmation
        "artist", "performer", "editor", "producer", "treatment",
        "treatment proposal", "programme proposal",
        ]
    def getExpectedAttrNames(self):
        return [(None, u'role'),(None,u'scheme')]
    def prevalidate(self):
        # An absent scheme defaults to the EBU role-code vocabulary.
        scheme = self.attrs.get((None, 'scheme')) or 'urn:ebu'
        role = self.attrs.get((None, 'role'))
        if role:
            if scheme=='urn:ebu' and role not in self.EBU:
                self.log(InvalidCreditRole({"parent":self.parent.name, "element":self.name, "attr":"role", "value":role}))
            elif role != role.lower():
                # Roles in any scheme are defined to be lower case.
                self.log(InvalidCreditRole({"parent":self.parent.name, "element":self.name, "attr":"role", "value":role}))
        # Non-default schemes must be full URIs (validated by posing as the
        # 'scheme' attribute).
        self.value = scheme
        self.name = "scheme"
        if scheme != 'urn:ebu': rfc2396_full.validate(self)
        # Restore state before element text is accumulated.
        self.name = "media_credit"
        self.value = ""
class media_hash(text):
    # Validates media:hash: hex-digest text whose length must match the
    # algorithm named by the 'algo' attribute (md5 when absent).
    def getExpectedAttrNames(self):
        return [(None,u'algo')]
    def prevalidate(self):
        self.algo = self.attrs.get((None, 'algo'))
        if self.algo and self.algo not in ['md5', 'sha-1']:
            self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "attr":"algo", "value":self.algo}))
    def validate(self):
        self.value = self.value.strip()
        if not re.match("^[0-9A-Za-z]+$",self.value):
            # Not plausibly a digest at all.
            self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "value":self.value}))
        else:
            if self.algo == 'sha-1':
                # SHA-1 digests are 40 hex characters long.
                if len(self.value) != 40:
                    self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "algo":self.algo, "value":self.value}))
            else:
                # md5 (the default) digests are 32 hex characters long.
                if len(self.value) != 32:
                    self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "algo":self.algo, "value":self.value}))
class media_rating(rfc2396_full):
    """Validates media:rating against the vocabulary identified by its
    'scheme' attribute (urn:simple when absent); unrecognised schemes must
    at least be full URIs.

    (Removed a dead, no-op ``pass`` statement from the urn:icra branch.)
    """
    def getExpectedAttrNames(self):
        return [(None, u'scheme')]
    def validate(self):
        scheme = self.attrs.get((None, 'scheme')) or 'urn:simple'
        if scheme == 'urn:simple':
            if self.value not in ['adult', 'nonadult']:
                self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
        elif scheme == 'urn:mpaa':
            if self.value not in ['g', 'm', 'nc-17', 'pg', 'pg-13', 'r', 'x']:
                self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
        elif scheme == 'urn:v-chip':
            if self.value not in ['14+', '18+', 'c', 'c8', 'g', 'pg',
                'tv-14', 'tv-g', 'tv-ma', 'tv-pg', 'tv-y', 'tv-y7', 'tv-y7-fv']:
                self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
        elif scheme == 'urn:icra':
            # ICRA content labels look like "r (xz 0 na 1 ...)".
            code = '([nsvlocx]z [01]|(n[a-c]|s[a-f]|v[a-j]|l[a-c]|o[a-h]|c[a-b]|x[a-e]) 1)'
            if not re.match(r"^r \(%s( %s)*\)$" %(code,code),self.value):
                self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
        else:
            # Unknown scheme: require it to be a valid absolute URI
            # (validated by posing as the 'scheme' attribute).
            self.value = scheme
            self.name = 'scheme'
            rfc2396_full.validate(self)
class media_restriction(text,rfc2396_full,iso3166):
    # Validates media:restriction: relationship="allow|disallow" is required;
    # the element content is interpreted per the 'type' attribute (ISO 3166
    # country codes, URIs, or the keywords all/none when type is absent).
    def getExpectedAttrNames(self):
        return [(None, u'relationship'),(None,u'type')]
    def validate(self):
        relationship = self.attrs.get((None, 'relationship'))
        if not relationship:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"relationship"}))
        elif relationship not in ['allow','disallow']:
            self.log(InvalidMediaRestrictionRel({"parent":self.parent.name, "element":self.name, "attr":"relationship", "value":relationship}))
        type = self.attrs.get((None, 'type'))
        if not type:
            # Without a type the only valid element values are all/none.
            if self.value and self.value not in ['all','none']:
                self.log(InvalidMediaRestriction({"parent":self.parent.name, "element":self.name, "value":self.value}))
        elif type == 'country':
            # Space-separated list of ISO 3166 country codes; each one is
            # validated by rebinding self.value for the inherited validator.
            self.name = 'country'
            countries = self.value.upper().split(' ')
            for self.value in countries:
                iso3166.validate(self)
        elif type == 'uri':
            rfc2396_full.validate(self)
        else:
            self.log(InvalidMediaRestrictionType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":type}))
class media_player(validatorBase,positiveInteger,rfc2396_full):
    # Validates media:player: 'url' is required and must be a full URI;
    # 'height' and 'width' are optional positive integers.
    def getExpectedAttrNames(self):
        return [(None,u'height'),(None,u'url'),(None, u'width')]
    def validate(self):
        # Each attribute is checked by posing as it: the inherited
        # validators read self.name/self.value.
        self.value = self.attrs.get((None, 'url'))
        if self.value:
            self.name = "url"
            rfc2396_full.validate(self)
        else:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"url"}))
        self.value = self.attrs.get((None, 'height'))
        self.name = "height"
        if self.value: positiveInteger.validate(self)
        self.value = self.attrs.get((None, 'width'))
        self.name = "width"
        if self.value: positiveInteger.validate(self)
class media_text(nonhtml):
    # Validates media:text: type is "plain" or "html", start/end are NPT
    # (normal play time) offsets, lang is an ISO 639 language code.
    def getExpectedAttrNames(self):
        return [(None,u'end'),(None,u'lang'),(None,u'start'),(None, u'type')]
    def prevalidate(self):
        # Remember the type for validate(); it decides the markup check.
        self.type = self.attrs.get((None, 'type'))
        if self.type and self.type not in ['plain', 'html']:
            self.log(InvalidMediaTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
        start = self.attrs.get((None, 'start'))
        if start and not media_thumbnail.npt_re.match(start):
            self.log(InvalidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"start", "value":start}))
        else:
            self.log(ValidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"start", "value":start}))
        end = self.attrs.get((None, 'end'))
        if end and not media_thumbnail.npt_re.match(end):
            self.log(InvalidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"end", "value":end}))
        else:
            self.log(ValidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"end", "value":end}))
        lang = self.attrs.get((None, 'lang'))
        if lang: iso639_validate(self.log,lang,'lang',self.parent)
    def validate(self):
        if self.type == 'html':
            # Declared HTML: check only for unsafe markup.
            self.validateSafe(self.value)
        else:
            # Plain (or unspecified): must not contain markup at all.
            nonhtml.validate(self, ContainsUndeclaredHTML)
class media_title(nonhtml):
    # Validates media:title (and, via reuse, media:description): optional
    # type attribute of "plain" or "html" decides the markup check.
    def getExpectedAttrNames(self):
        return [(None, u'type')]
    def prevalidate(self):
        self.type = self.attrs.get((None, 'type'))
        if self.type and self.type not in ['plain', 'html']:
            self.log(InvalidMediaTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    def validate(self):
        if self.type == 'html':
            # Declared HTML: check only for unsafe markup.
            self.validateSafe(self.value)
        else:
            # Plain (or unspecified): must not contain markup at all.
            nonhtml.validate(self, ContainsUndeclaredHTML)
class media_thumbnail(validatorBase,positiveInteger,rfc2396_full):
    # Validates media:thumbnail.  npt_re matches "normal play time":
    # "now", seconds (with optional fraction), or h:mm:ss(.fraction).
    npt_re = re.compile("^(now)|(\d+(\.\d+)?)|(\d+:\d\d:\d\d(\.\d+)?)$")
    def getExpectedAttrNames(self):
        return [(None,u'height'),(None,u'time'),(None,u'url'),(None, u'width')]
    def validate(self):
        time = self.attrs.get((None, 'time'))
        if time and not media_thumbnail.npt_re.match(time):
            self.log(InvalidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"time", "value":time}))
        else:
            self.log(ValidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"time", "value":time}))
        # url is required; height/width are optional positive integers.
        # Each attribute is checked by posing as it (inherited validators
        # read self.name/self.value).
        self.value = self.attrs.get((None, 'url'))
        if self.value:
            self.name = "url"
            rfc2396_full.validate(self)
        else:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"url"}))
        self.value = self.attrs.get((None, 'height'))
        self.name = "height"
        if self.value: positiveInteger.validate(self)
        self.value = self.attrs.get((None, 'width'))
        self.name = "width"
        if self.value: positiveInteger.validate(self)
from extension import extension_everywhere
class media_content(validatorBase, media_elements, extension_everywhere,
        positiveInteger, rfc2396_full, truefalse, nonNegativeInteger):
    # Validates a media:content element.  All information is carried in
    # attributes; each is checked in turn, either inline or by binding
    # self.name/self.value and delegating to an inherited validator.
    def getExpectedAttrNames(self):
        return [
            (None,u'bitrate'),
            (None,u'channels'),
            (None,u'duration'),
            (None,u'expression'),
            (None,u'fileSize'),
            (None,u'framerate'),
            (None,u'height'),
            (None,u'isDefault'),
            (None,u'lang'),
            (None,u'medium'),
            (None,u'samplingrate'),
            (None,u'type'),
            (None,u'url'),
            (None,u'width')
            ]
    def validate(self):
        # bitrate: decimal number.
        self.value = self.attrs.get((None,u'bitrate'))
        if self.value and not re.match('\d+\.?\d*', self.value):
            self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
                "attr": 'bitrate', "value":self.value}))
        # channels: non-negative integer.
        self.value = self.attrs.get((None, 'channels'))
        self.name = "channels"
        if self.value: nonNegativeInteger.validate(self)
        # duration: decimal number.
        self.value = self.attrs.get((None,u'duration'))
        if self.value and not re.match('\d+\.?\d*', self.value):
            self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
                "attr": 'duration', "value":self.value}))
        # expression: one of the fixed Media RSS keywords.
        self.value = self.attrs.get((None,u'expression'))
        if self.value and self.value not in ['sample', 'full', 'nonstop']:
            self.log(InvalidMediaExpression({"parent":self.parent.name, "element":self.name, "value": self.value}))
        # fileSize: positive integer.
        self.value = self.attrs.get((None, 'fileSize'))
        self.name = "fileSize"
        if self.value: positiveInteger.validate(self)
        # framerate: decimal number.
        self.value = self.attrs.get((None,u'framerate'))
        if self.value and not re.match('\d+\.?\d*', self.value):
            self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
                "attr": 'framerate', "value":self.value}))
        # height: positive integer.
        self.value = self.attrs.get((None, 'height'))
        self.name = "height"
        if self.value: positiveInteger.validate(self)
        # isDefault: boolean.
        self.value = self.attrs.get((None, 'isDefault'))
        if self.value: truefalse.validate(self)
        # lang: ISO 639 language code.
        self.value = self.attrs.get((None, 'lang'))
        if self.value: iso639_validate(self.log,self.value,'lang',self.parent)
        # medium: one of the fixed Media RSS keywords.
        self.value = self.attrs.get((None,u'medium'))
        if self.value and self.value not in ['image', 'audio', 'video', 'document', 'executable']:
            self.log(InvalidMediaMedium({"parent":self.parent.name, "element":self.name, "value": self.value}))
        # samplingrate: decimal number.
        self.value = self.attrs.get((None,u'samplingrate'))
        if self.value and not re.match('\d+\.?\d*', self.value):
            self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
                "attr": 'samplingrate', "value":self.value}))
        # type: MIME media type.
        self.value = self.attrs.get((None,u'type'))
        if self.value and not mime_re.match(self.value):
            self.log(InvalidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
        # url: full URI.
        self.name = "url"
        self.value = self.attrs.get((None,u'url'))
        if self.value: rfc2396_full.validate(self)
        # width: positive integer.
        self.value = self.attrs.get((None, 'width'))
        self.name = "width"
        if self.value: positiveInteger.validate(self)
class media_group(validatorBase, media_elements):
    """Validates media:group, which must offer at least two media:content
    alternatives (otherwise the grouping is pointless)."""

    def do_media_content(self):
        return media_content()

    def validate(self):
        if self.children.count('media_content') < 2:
            self.log(MediaGroupWithoutAlternatives({}))
| Python |
#!/usr/bin/python
"""$Id: testXmlEncodingDecode.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
# Make the package under test importable when this script is run directly:
# prepend the source directory (the parent of this test directory) to sys.path.
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
    sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
from feedvalidator import xmlEncoding
from feedvalidator.logging import *
# Content-Type used throughout these tests: generic XML.
ctAX='application/xml'
class TestDecode(unittest.TestCase):
    # Exercises xmlEncoding.decode: the (encoding, unicode-text) result and
    # the events logged for missing/unknown/mismatched encodings.
    def _assertEqualUnicode(self, a, b):
        # Helper: a must be a unicode string equal to the expected value b.
        self.assertNotEqual(a, None, 'Decoded strings should not equal None')
        self.assertEqual(type(a), unicode, 'Decoded strings should be Unicode (was ' + str(type(a)) + ')')
        self.assertEqual(type(b), unicode, 'Test suite error: test strings must be Unicode')
        self.assertEqual(a, b)
    def testProvidedEncoding(self):
        # An externally supplied charset matching the document is accepted
        # silently, with or without an XML declaration.
        loggedEvents=[]
        (encoding, decoded) = xmlEncoding.decode(ctAX, 'UTF-8', '<x/>', loggedEvents)
        self.assertEquals('UTF-8', encoding)
        self._assertEqualUnicode(decoded, u'<x/>')
        self.assertEqual(loggedEvents, [])
        loggedEvents=[]
        (encoding, decoded) = xmlEncoding.decode(ctAX, 'UTF-8', '<?xml version="1.0" encoding="utf-8"?><x/>', loggedEvents)
        self.assertEquals('UTF-8', encoding)
        self._assertEqualUnicode(decoded, u'<?xml version="1.0" encoding="utf-8"?><x/>')
        self.assertEquals(loggedEvents, [])
    def testNoDeclarationOrBOM(self):
        # No external charset, no declaration, no BOM: decoding fails and a
        # MissingEncoding warning is logged.
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, None, '<x/>', loggedEvents)[-1], None)
        self.assertEquals(len(loggedEvents), 1)
        self.assertEquals(loggedEvents[0].__class__, MissingEncoding, "Must warn if there's no clue as to encoding")
    # This document is currently detected as UTF-8, rather than None.
    #
    # def testMissingEncodingDeclaration(self):
    #   loggedEvents=[]
    #   self._assertEqualUnicode(xmlEncoding.decode(ctAX, None, '<?xml version="1.0"?><x/>', loggedEvents), u'<?xml version="1.0"?><x/>')
    #   self.assertEquals(len(loggedEvents), 1)
    #   self.assertEquals(loggedEvents[0].__class__, MissingEncoding, "Must warn if there's no clue as to encoding")
    def testJustDeclaration(self):
        # The declared encoding alone is sufficient.
        loggedEvents=[]
        (encoding, decoded) = xmlEncoding.decode(ctAX, None, '<?xml version="1.0" encoding="utf-8"?><x/>', loggedEvents)
        self.assertEquals(encoding, 'utf-8')
        self._assertEqualUnicode(decoded, u'<?xml version="1.0" encoding="utf-8"?><x/>')
        self.assertEquals(loggedEvents, [])
    def testSupplyUnknownEncoding(self):
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, 'X-FAKE', '<x/>', loggedEvents)[-1], None)
        self.assertEquals(len(loggedEvents), 1)
        self.assertEquals(loggedEvents[0].__class__, UnknownEncoding, 'Must fail if an unknown encoding is used')
    def testDeclareUnknownEncoding(self):
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, None, '<?xml version="1.0" encoding="X-FAKE"?><x/>', loggedEvents)[-1], None)
        self.assert_(loggedEvents)
        self.assertEquals(loggedEvents[-1].__class__, UnknownEncoding)
    def testWarnMismatch(self):
        # External charset contradicting the declaration: decoding still
        # succeeds, but an EncodingMismatch event is logged.
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, 'US-ASCII', '<?xml version="1.0" encoding="UTF-8"?><x/>', loggedEvents)[-1], u'<?xml version="1.0" encoding="UTF-8"?><x/>')
        self.assert_(loggedEvents)
        self.assertEquals(loggedEvents[-1].__class__, EncodingMismatch)
    def testDecodeUTF8(self):
        # A valid multi-byte UTF-8 sequence decodes to the expected character.
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, 'utf-8', '<x>\xc2\xa3</x>', loggedEvents)[-1], u'<x>\u00a3</x>')
        self.assertEquals(loggedEvents, [])
    def testDecodeBadUTF8(self):
        """Ensure bad UTF-8 is flagged as such, but still decoded."""
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, 'utf-8', '<x>\xa3</x>', loggedEvents)[-1], u'<x>\ufffd</x>')
        self.assert_(loggedEvents)
        self.assertEquals(loggedEvents[-1].__class__, UnicodeError)
    def testRemovedBOM(self):
        """Make sure the initial BOM signature is not in the decoded string."""
        loggedEvents=[]
        self.assertEquals(xmlEncoding.decode(ctAX, 'UTF-16', '\xff\xfe\x3c\x00\x78\x00\x2f\x00\x3e\x00', loggedEvents)[-1], u'<x/>')
        self.assertEquals(loggedEvents, [])
class TestRemoveDeclaration(unittest.TestCase):
    # Exercises xmlEncoding.removeDeclaration, which strips the encoding
    # pseudo-attribute from a document's XML declaration.
    def testRemoveSimple(self):
        self.assertEqual(xmlEncoding.removeDeclaration(
            '<?xml version="1.0" encoding="utf-8"?>'),
            '<?xml version="1.0" ?>')
        self.assertEqual(xmlEncoding.removeDeclaration(
            "<?xml version='1.0' encoding='us-ascii' ?>"),
            "<?xml version='1.0' ?>")
    def testNotRemoved(self):
        """Make sure that invalid, or missing, declarations aren't affected."""
        for x in [
            '<?xml encoding="utf-8"?>', # Missing version
            '<doc />', # No declaration
            ' <?xml version="1.0" encoding="utf-8"?>' # Space before declaration
            ]:
            self.assertEqual(xmlEncoding.removeDeclaration(x), x)
def buildTestSuite():
    """Assemble this module's test cases into a single unittest suite."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (TestDecode, TestRemoveDeclaration):
        suite.addTest(loader.loadTestsFromTestCase(case))
    return suite
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| Python |
#!/usr/bin/python
"""$Id: testMediaTypes.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
from feedvalidator import mediaTypes
from feedvalidator.logging import TYPE_RSS1, TYPE_RSS2, TYPE_ATOM
def l(x):
  """Lower-case x when it is truthy; pass None or '' through unchanged."""
  return x.lower() if x else x
class MediaTypesTest(unittest.TestCase):
  """Parameterized checks of the mediaTypes module.

  Instances are created by buildTestSuite() below, which injects the
  attributes read here: contentType, mediaType, charset, feedType, error.
  """
  def testCheckValid(self):
    # checkValid parses a Content-Type header into (media type, charset)
    el = []
    (t, c) = mediaTypes.checkValid(self.contentType, el)
    self.assertEqual(l(t), l(self.mediaType), 'Media type should be ' + self.mediaType)
    self.assertEqual(l(c), l(self.charset), 'Charset should be ' + str(self.charset) + ' for ' + self.mediaType + ' was ' + str(c))
    if (self.error):
      self.assertEqual(len(el), 1, 'Expected errors to be logged')
    else:
      self.assertEqual(len(el), 0, 'Did not expect errors to be logged')
  def testCheckAgainstFeedType(self):
    # FT maps the feed-type constants to names for readable failure messages
    FT=['Unknown', 'RSS 1.0', 'RSS 2.0', 'Atom', 'Atom 0.3']
    el = []
    r = mediaTypes.checkAgainstFeedType(self.mediaType, self.feedType, el)
    if (self.error):
      self.assertEqual(len(el), 1, 'Expected errors to be logged (' + self.mediaType + ',' + FT[self.feedType] + ')')
    else:
      self.assertEqual(len(el), 0, 'Did not expect errors to be logged (' + self.mediaType + ',' + FT[self.feedType] + ')')
# Each row: Content-Type header value, expected media type, expected
# charset (None when the header names none), and whether an error is expected.
cvCases = [
  ['text/xml', 'text/xml', None, False],
  ['text/xml; charset=UTF-8', 'text/xml', 'utf-8', False],
  ['application/xml', 'application/xml', None, False],
  ['text/plain', 'text/plain', None, True],
  ['application/octet-stream', 'application/octet-stream', None, True]
]
# Each row: media type, feed type, and whether an error is expected.
caftCases = [
  ['text/xml', TYPE_RSS1, False],
  ['application/xml', TYPE_RSS1, False],
  ['application/rss+xml', TYPE_RSS1, False],
  ['application/rdf+xml', TYPE_RSS1, False],
  ['application/x.atom+xml', TYPE_RSS1, True],
  ['application/atom+xml', TYPE_RSS1, True],
  ['text/xml', TYPE_RSS2, False],
  # This row belongs to the RSS 2.0 group; it previously repeated the
  # TYPE_RSS1 case above, leaving application/xml untested against RSS 2.0.
  ['application/xml', TYPE_RSS2, False],
  ['application/rss+xml', TYPE_RSS2, False],
  ['application/rdf+xml', TYPE_RSS2, True],
  ['application/x.atom+xml', TYPE_RSS2, True],
  ['application/atom+xml', TYPE_RSS2, True],
  ['text/xml', TYPE_ATOM, False],
  ['application/xml', TYPE_ATOM, False],
  ['application/rss+xml', TYPE_ATOM, True],
  ['application/rdf+xml', TYPE_ATOM, True],
  ['application/x.atom+xml', TYPE_ATOM, False],
  ['application/atom+xml', TYPE_ATOM, False],
  ]
def buildTestSuite():
  """Build a suite with one parameterized MediaTypesTest per table row."""
  suite = unittest.TestSuite()
  for contentType, mediaType, charset, error in cvCases:
    case = MediaTypesTest('testCheckValid')
    case.contentType = contentType
    case.mediaType = mediaType
    case.charset = charset
    case.error = error
    suite.addTest(case)
  for mediaType, feedType, error in caftCases:
    case = MediaTypesTest('testCheckAgainstFeedType')
    case.mediaType = mediaType
    case.feedType = feedType
    case.error = error
    suite.addTest(case)
  return suite
if __name__ == "__main__":
  # The tests are parameterized via injected attributes, so the suite must
  # be built explicitly rather than discovered by unittest.main().
  s = buildTestSuite()
  unittest.TextTestRunner().run(s)
| Python |
#!/usr/bin/python
import os, sys, unittest
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
from feedvalidator.base import namespaces
from os.path import dirname,join
class HowtoNsTest(unittest.TestCase):
  """Check that docs/howto/declare_namespaces.html mentions an xmlns
  declaration for every namespace prefix the validator knows about,
  apart from a few deliberately skipped entries."""
  def test_howto_declare_namespaces(self):
    base=dirname(dirname(dirname(os.path.abspath(__file__))))
    filename=join(join(join(base,'docs'),'howto'),'declare_namespaces.html')
    # try/finally guarantees the handle is closed even if read() raises
    handle=open(filename)
    try:
      page=handle.read()
    finally:
      handle.close()
    for uri,prefix in namespaces.items():
      # 'xml' and 'soap' prefixes and the ModWiki URI are intentionally
      # not documented in the howto page
      if prefix=='xml': continue
      if prefix=='soap': continue
      if uri.find('ModWiki')>0: continue
      xmlns = 'xmlns:%s="%s"' % (prefix,uri)
      self.assertTrue(page.find(xmlns)>=0,xmlns)
def buildTestSuite():
  """Load every test from HowtoNsTest into a fresh suite."""
  suite = unittest.TestSuite()
  suite.addTest(unittest.TestLoader().loadTestsFromTestCase(HowtoNsTest))
  return suite
if __name__ == '__main__':
  # Plain discovery suffices here; the single test needs no parameterization.
  unittest.main()
| Python |
#!/usr/bin/python
"""$Id: testUri.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
class UriTest(unittest.TestCase):
  # Intentionally empty: one test method per table entry is attached
  # dynamically by buildTestSuite() below.
  pass
# Pairs of URI references that must compare equal under feedvalidator.uri.Uri.
testsEqual = [
  ['http://example.com/', 'http://example.com'],
  ['HTTP://example.com/', 'http://example.com/'],
  ['http://example.com/', 'http://example.com:/'],
  ['http://example.com/', 'http://example.com:80/'],
  ['http://example.com/', 'http://Example.com/'],
  ['http://example.com/~smith/', 'http://example.com/%7Esmith/'],
  ['http://example.com/~smith/', 'http://example.com/%7esmith/'],
  ['http://example.com/%7Esmith/', 'http://example.com/%7esmith/'],
  ['http://example.com/%C3%87', 'http://example.com/C%CC%A7'],
  ['tag:example.com,2004:Test', 'TAG:example.com,2004:Test'],
  ['ftp://example.com/', 'ftp://EXAMPLE.COM/'],
  ['ftp://example.com/', 'ftp://example.com:21/'],
  ['mailto:user@example.com', 'mailto:user@EXAMPLE.COM'],
  ['../%C3%87', '../C%CC%A7'],
]
# Pairs of URI references that must NOT compare equal.
testsDifferent = [
  ['http://example.com/', 'http://example.org/'],
  ['http://example.com/index.html', 'http://example.com'],
  ['FTP://example.com/', 'http://example.com/'],
  ['http://example.com/', 'http://example.com:8080/'],
  ['http://example.com:8080/', 'http://example.com:80/'],
  ['http://example.com/index.html', 'http://example.com/INDEX.HTML'],
  ['http://example.com/~smith/', 'http://example.com/%7Esmith'],
  ['http://example.com/~smith/', 'http://example.com/%2fsmith/'],
  ['http://user:password@example.com/', 'http://USER:PASSWORD@example.com/'],
  # Not a valid HTTP URL
  ['http://example.com:x', 'http://example.com/'],
  ['tag:example.com,2004:Test', 'tag:EXAMPLE.COM,2004:Test'],
  ['tag:user@example.com,2004:Test', 'tag:user@EXAMPLE.COM,2004:Test'],
  ['tag:example.com,2004:test', 'Tag:example.com,2004:TEST'],
  ['tag:example.com,2004:Test', 'Tag:example.com,2004-01:Test'],
  ['tag:user@example.com,2004:Test', 'tag:USER@example.com,2004:Test'],
  ['ftp://example.com/', 'ftp://example.com/test'],
  ['mailto:user@example.com', 'mailto:USER@example.com'],
  ['mailto:user@example.com?subject=test', 'mailto:user@example.com?subject=TEST']
]
# Examples from PaceCanonicalIds
# Each entry is [input] (input is already canonical) or
# [input, expected canonical form].
testsCanonical = [
  ['HTTP://example.com/', 'http://example.com/'],
  ['http://EXAMPLE.COM/', 'http://example.com/'],
  ['http://example.com/%7Ejane', 'http://example.com/~jane'],
  ['http://example.com/?q=1%2f2', 'http://example.com/?q=1%2F2'],
  ['http://example.com/?q=1/2'],
  ['http://example.com/a/./b', 'http://example.com/a/b'],
  ['http://example.com/a/../a/b', 'http://example.com/a/b'],
  ['http://user:password@example.com/', 'http://user:password@example.com/'],
  ['http://User:Password@Example.com/', 'http://User:Password@example.com/'],
  ['http://@example.com/', 'http://example.com/'],
  ['http://@Example.com/', 'http://example.com/'],
  ['http://:@example.com/', 'http://example.com/'],
  ['http://:@Example.com/', 'http://example.com/'],
  ['http://example.com', 'http://example.com/'],
  ['http://example.com:80/', 'http://example.com/'],
  ['http://www.w3.org/2000/01/rdf-schema#'],
  ['http://example.com/?q=C%CC%A7', 'http://example.com/?q=%C3%87'],
  ['http://example.com/?q=%E2%85%A0'],
  ['http://example.com/?'],
  [u'http://example.com/%C3%87'],
  # Other tests
  ['mailto:user@EXAMPLE.COM', 'mailto:user@example.com'],
  ['TAG:example.com,2004:Test', 'tag:example.com,2004:Test'],
  ['ftp://Example.Com:21/', 'ftp://example.com/'],
  ['http://example.com/?q=%E2%85%A0'],
  ['ldap://[2001:db8::7]/c=GB?objectClass?one'],
  ['mailto:John.Doe@example.com'],
  ['news:comp.infosystems.www.servers.unix'],
  ['tel:+1-816-555-1212'],
  ['telnet://192.0.2.16:80/'],
  ['urn:oasis:names:specification:docbook:dtd:xml:4.1.2'],
  ['http://example.com:081/', 'http://example.com:81/'],
  ['/test#test#test', '/test#test%23test'],
  ['http://com./'],
  ['http://example.com./', 'http://example.com/'],
  ['http://www.example.com//a//', 'http://www.example.com//a//'],
  ['http://www.example.com/./a//', 'http://www.example.com/a//'],
  ['http://www.example.com//a/./', 'http://www.example.com//a/'],
  ['http://example.com/%2F/'],
  ["aa1+-.:///?a1-._~!$&'()*+,;=:@/?#a1-._~!$&'()*+,;=:@/?"],
  ['http://example.com/?a+b'],
  ['http://a/b/c/../../../../g', 'http://a/g'],
  ['/.foo', '/.foo'],
  ['/foo/bar/.', '/foo/bar/'],
  ['/foo/bar/..', '/foo/'],
  ['http:test'],
  ['tag:'],
  ['file://', 'file:///'],
  ['telnet://example.com:23/', 'telnet://example.com/'],
  ['x://:@a/', 'x://a/'],
  ['tag:www.stanleysy.com,2005://1.119'],
  ['tag:timothy@hpl.hp.com,2001:web/externalHome'],
  ['http://xxx/read?id=abc%26x%3Dz&x=y'],
  ['tag:www.stanleysy.com,2005:%2F%2F1.119'],
  # IPv6 literals should be accepted
  ['http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:80/tests/atom/ipv6/',
    'http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:8080/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:/tests/atom/ipv6/',
    'http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
]
# These are invalid URI references, but we can still sensibly
# normalise them (percent-escaping the offending characters).
testNormalisableBadUris = [
  ['http://example.com/\\/', 'http://example.com/%5C/'],
  ['http://example.com/?a b', 'http://example.com/?a%20b'],
]
# URI references for which canonicalForm() must return None.
testsInvalid = [
  # This URI is not in canonical form, and cannot be normalised
  # (a missing trailing comma here used to silently concatenate this
  # literal with the next one, merging two test cases into one)
  'http://example.com/?q=%C7',
  # Don't try to deal with relative URI references
  'foo/../bar',
  './http://',
  './\\/',
  # Bad IPv6 literals
  'http://fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/',
  'http://[fe80::290:4bff:fe1e:4374/tests/atom/ipv6/',
]
import feedvalidator.uri
from feedvalidator.validators import rfc2396
def buildTestSuite():
  """Attach one generated test method to UriTest per table entry, then
  return the resulting suite.

  Each lambda binds the table values as default arguments so that every
  generated method captures its own data rather than the loop variable.
  """
  i = 0
  # Equality cases: both forms must construct equal Uri objects
  for t in testsEqual:
    i+=1
    def tstEqual(self, a, b):
      self.assertEqual(feedvalidator.uri.Uri(a), feedvalidator.uri.Uri(b))
    func = lambda self, a=t[0], b=t[1]: tstEqual(self, a, b)
    func.__doc__ = 'Test ' + t[0] + " == " + t[1]
    setattr(UriTest, 'test' + str(i), func)
  # Inequality cases
  for t in testsDifferent:
    i+=1
    def tstDifferent(self, a, b):
      self.assertNotEqual(feedvalidator.uri.Uri(a), feedvalidator.uri.Uri(b))
    func = lambda self, a=t[0], b=t[1]: tstDifferent(self, a, b)
    func.__doc__ = 'Test ' + t[0] + " != " + t[1]
    setattr(UriTest, 'test' + str(i), func)
  # Canonicalization cases: single-element entries are already canonical
  for t in testsCanonical + testNormalisableBadUris:
    i+=1
    o = t[0]
    if len(t) > 1:
      c = t[1]
    else:
      c = o
    def tstCanonicalForm(self, a, b):
      cf = feedvalidator.uri.canonicalForm(a)
      self.assertEqual(cf, b, 'Became: ' + str(cf))
    func = lambda self, a=o, b=c: tstCanonicalForm(self, a, b)
    func.__doc__ = 'Test ' + o + ' becomes ' + c
    setattr(UriTest, 'test' + str(i), func)
  # Invalid references: canonicalForm must return None
  for a in testsInvalid:
    i+= 1
    def tstCanFindCanonicalForm(self, a):
      self.assertEquals(feedvalidator.uri.canonicalForm(a), None)
    func = lambda self, a=a: tstCanFindCanonicalForm(self, a)
    func.__doc__ = 'Test ' + a + ' cannot be canonicalised'
    setattr(UriTest, 'test' + str(i), func)
  # Test everything against the rfc2396 matcher
  r2 = feedvalidator.validators.rfc2396()
  for t in testsEqual + testsDifferent + testsCanonical:
    i+=1
    def tstMatchesRe(self, a):
      self.assertTrue(r2.rfc2396_re.match(a))
    func = lambda self, a=t[0]: tstMatchesRe(self, a)
    func.__doc__ = 'Test ' + t[0] + ' is matched by the URI regular expression'
    setattr(UriTest, 'test' + str(i), func)
  return unittest.TestLoader().loadTestsFromTestCase(UriTest)
if __name__ == '__main__':
  # buildTestSuite() attaches the generated test methods to UriTest so
  # that unittest.main() can then discover and run them.
  buildTestSuite()
  unittest.main()
| Python |
#!/usr/bin/python
"""$Id: testXmlEncoding.py 988 2008-03-12 18:22:48Z sa3ruby $
Test XML character decoding against a range of encodings, valid and not."""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004, 2006 Joseph Walton"
import os, sys
import codecs
import re
curdir = os.path.abspath(os.path.dirname(__file__))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
# Names of generated test documents skipped because this Python build
# lacks the required codec; reported when run as a script.
skippedNames = []
import unittest, new, glob, re
from feedvalidator import xmlEncoding
class EncodingTestCase(unittest.TestCase):
  """One generated XML document run against the encoding detector.

  buildTestSuite() injects the attributes read here: bytes (the raw
  document), filename (a descriptive name used in failure messages) and,
  for the valid cases, expectedEncoding.
  """
  def testEncodingMatches(self):
    """Detection must succeed and report exactly the expected encoding."""
    try:
      enc = xmlEncoding.detect(self.bytes)
    except UnicodeError,u:
      self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
    self.assert_(enc, 'An encoding must be returned for all valid files ('
      + self.filename + ')')
    self.assertEqual(enc, self.expectedEncoding, 'Encoding for '
      + self.filename + ' should be ' + self.expectedEncoding + ', but was ' + enc)
  def testEncodingFails(self):
    """Detection must return a false value and log at least one event."""
    eventLog = []
    try:
      encoding = xmlEncoding.detect(self.bytes, eventLog)
    except UnicodeError,u:
      self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
    if encoding:
      self.fail("'" + self.filename + "' should not parse successfully (as " + encoding + ")")
    if not(eventLog):
      self.fail("'" + self.filename + "' should give a reason for parse failure")
# Byte-order-mark signatures for each encoding family
bom8='\xEF\xBB\xBF'
bom16BE='\xFE\xFF'
bom16LE='\xFF\xFE'
bom32BE='\x00\x00\xFE\xFF'
bom32LE='\xFF\xFE\x00\x00'
# Some fairly typical Unicode text. It should survive XML roundtripping.
docText=u'<x>\u201c"This\uFEFF" is\na\r\u00A3t\u20Acst\u201D</x>'
# Sanity pattern for encoding names used in generated XML declarations
validDecl = re.compile('[A-Za-z][-A-Za-z0-9._]*')
def makeDecl(enc=None):
  """Return an XML declaration, naming an encoding only when enc is given."""
  if not enc:
    return "<?xml version='1.0'?>"
  assert validDecl.match(enc), "'" + enc + "' is not a valid encoding name"
  return "<?xml version='1.0' encoding='" + enc + "'?>"
def encoded(enc, txt=docText):
  """Encode txt with the named codec, substituting XML character
  references for anything the target encoding cannot represent."""
  encoder = codecs.getencoder(enc)
  return encoder(txt, 'xmlcharrefreplace')[0]
def genValidXmlTestCases():
someFailed = False
# Required
yield('UTF-8', ['BOM', 'declaration'],
bom8 + makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-8', [],
encoded('UTF-8'))
yield('UTF-8', ['noenc'],
makeDecl() + encoded('UTF-8'))
yield('UTF-8', ['declaration'],
makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-8', ['BOM'],
bom8 + encoded('UTF-8'))
yield('UTF-8', ['BOM', 'noenc'],
bom8 + makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-16', ['BOM', 'declaration', 'BE'],
bom16BE + encoded('UTF-16BE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['BOM', 'declaration', 'LE'],
bom16LE + encoded('UTF-16LE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['BOM', 'BE'],
bom16BE + encoded('UTF-16BE'))
yield('UTF-16', ['BOM', 'BE', 'noenc'],
bom16BE + encoded('UTF-16BE', makeDecl() + docText))
yield('UTF-16', ['BOM', 'LE'],
bom16LE + encoded('UTF-16LE'))
yield('UTF-16', ['BOM', 'LE', 'noenc'],
bom16LE + encoded('UTF-16LE', makeDecl() + docText))
yield('UTF-16', ['declaration', 'BE'],
encoded('UTF-16BE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['declaration', 'LE'],
encoded('UTF-16LE', makeDecl('UTF-16') + docText))
# Standard wide encodings
try:
yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'BE'],
bom16BE + encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'LE'],
bom16LE + encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
yield('UTF-32', ['BOM', 'declaration', 'BE'],
bom32BE + encoded('UTF-32BE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['BOM', 'declaration', 'LE'],
bom32LE + encoded('UTF-32LE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['declaration', 'BE'],
encoded('UTF-32BE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['declaration', 'LE'],
encoded('UTF-32LE', makeDecl('UTF-32') + docText))
yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'BE'],
bom32BE + encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'LE'],
bom32LE + encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
except LookupError, e:
print e
someFailed = True
# Encodings that don't have BOMs, and require declarations
withDeclarations = [
# Common ASCII-compatible encodings
'US-ASCII', 'ISO-8859-1', 'ISO-8859-15', 'WINDOWS-1252',
# EBCDIC
'IBM037', 'IBM038',
# Encodings with explicit endianness
'UTF-16BE', 'UTF-16LE',
'UTF-32BE', 'UTF-32LE',
# (UCS doesn't seem to define endian'd encodings)
]
for enc in withDeclarations:
try:
yield(enc, ['declaration'], encoded(enc, makeDecl(enc) + docText))
except LookupError, e:
print e
someFailed = True
# 10646-UCS encodings, with no BOM but with a declaration
try:
yield('ISO-10646-UCS-2', ['declaration', 'BE'],
encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-2', ['declaration', 'LE'],
encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-4', ['declaration', 'BE'],
encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
yield('ISO-10646-UCS-4', ['declaration', 'LE'],
bom32LE + encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
except LookupError, e:
print e
someFailed = True
# Files with aliases for declarations. The declared alias should be
# reported back, rather than the canonical form.
try:
yield('csUnicode', ['alias', 'BOM', 'BE'],
bom16BE + encoded('UCS-2BE', makeDecl('csUnicode') + docText))
yield('csUnicode', ['alias', 'LE'],
encoded('UCS-2LE', makeDecl('csUnicode') + docText))
yield('csucs4', ['alias', 'BE'],
encoded('csucs4', makeDecl('csucs4') + docText))
except LookupError, e:
print e
someFailed = True
if someFailed:
print "Unable to generate some tests; see README for details"
def genInvalidXmlTestCases():
  """Yield (encoding, tag list, document bytes) for documents whose
  encoding markup is inconsistent, so detection must fail."""
  # Invalid files
  someFailed = False
  # UTF-32 with a non-four-byte declaration
  try:
    yield('UTF-32', ['BOM', 'BE', 'declaration'],
      encoded('UTF-32', makeDecl('US-ASCII') + docText))
  except LookupError, e:
    print e
    someFailed = True
  # UTF-16 with a non-two-byte declaration
  yield('UTF-16', ['BOM', 'BE', 'declaration'],
    encoded('UTF-16', makeDecl('UTF-8') + docText))
  # UTF-16BE, with a BOM
  yield('UTF-16BE', ['BOM', 'declaration'],
    bom16BE + encoded('UTF-16BE', makeDecl('UTF-16BE') + docText))
  # UTF-8, with a BOM, declaring US-ASCII
  yield('UTF-8', ['BOM', 'declaration'],
    bom8 + encoded('UTF-8', makeDecl('US-ASCII') + docText))
  try:
    # UTF-32, with a BOM, beginning without a declaration
    yield('UTF-32', ['BOM', 'BE'],
      bom32BE + encoded('UTF-32BE'))
    # UTF-32, with a BOM, and a declaration with no encoding
    yield('UTF-32', ['BOM', 'BE', 'noenc'],
      bom32BE + encoded('UTF-32BE', makeDecl() + docText))
  except LookupError, e:
    print e
    someFailed = True
  # UTF-16, no BOM, no declaration
  # yield('UTF-16', ['BE'], encoded('UTF-16BE'))
  # This case falls through, and is identified as UTF-8; leave it out
  # until we're doing decoding as well as detection.
  if someFailed:
    print "Unable to generate some tests; see README for details"
def genXmlTestCases():
  """Yield (encoding, tags, document bytes, isValid) for every generated
  document, valid and invalid alike."""
  for validity, generator in ((True, genValidXmlTestCases),
                              (False, genInvalidXmlTestCases)):
    for (enc, t, x) in generator():
      yield (enc, t, x, validity)
def buildTestSuite():
  """Turn every generated document into an EncodingTestCase.

  Documents whose codec is unavailable in this Python build are skipped
  and their synthesized names recorded in skippedNames.
  """
  import codecs
  suite = unittest.TestSuite()
  for (enc, t, x, valid) in genXmlTestCases():
    t.sort()
    if valid: pfx = 'valid_'
    else: pfx = 'invalid_'
    # Synthesize a descriptive file-style name from the encoding and tags
    name = pfx + '_'.join([enc] + t) + '.xml'
    # name, x is content
    try:
      # For ISO-10646-* names, look up the trailing UCS-* portion instead
      alias = enc
      if enc.startswith('ISO-10646-'):
        alias = enc[10:]
      c = codecs.lookup(alias)
      if valid:
        t = EncodingTestCase('testEncodingMatches')
        t.expectedEncoding = enc
      else:
        t = EncodingTestCase('testEncodingFails')
      t.filename = name
      t.bytes = x
      suite.addTest(t)
    except LookupError,e:
      print "Skipping " + name + ": " + str(e)
      skippedNames.append(name)
  return suite
if __name__ == "__main__":
  s = buildTestSuite()
  unittest.TextTestRunner().run(s)
  # Report any generated documents that had to be skipped because this
  # Python build lacks the needed codec.
  if skippedNames:
    print "Tests skipped:",len(skippedNames)
    print "Please see README for details"
| Python |
#!/usr/bin/python
"""$Id$"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
from feedvalidator import mediaTypes
from feedvalidator.logging import TYPE_RSS1, TYPE_RSS2, TYPE_ATOM
def l(x):
  """Return x lower-cased when truthy; otherwise return it unchanged."""
  if not x:
    return x
  return x.lower()
class MediaTypesTest(unittest.TestCase):
  """Parameterized checks of the mediaTypes module.

  Instances are created by buildTestSuite() below, which injects the
  attributes read here: contentType, mediaType, charset, feedType, error.
  """
  def testCheckValid(self):
    # checkValid parses a Content-Type header into (media type, charset)
    el = []
    (t, c) = mediaTypes.checkValid(self.contentType, el)
    self.assertEqual(l(t), l(self.mediaType), 'Media type should be ' + self.mediaType)
    self.assertEqual(l(c), l(self.charset), 'Charset should be ' + str(self.charset) + ' for ' + self.mediaType + ' was ' + str(c))
    if (self.error):
      self.assertEqual(len(el), 1, 'Expected errors to be logged')
    else:
      self.assertEqual(len(el), 0, 'Did not expect errors to be logged')
  def testCheckAgainstFeedType(self):
    # FT maps the feed-type constants to names for readable failure messages
    FT=['Unknown', 'RSS 1.0', 'RSS 2.0', 'Atom', 'Atom 0.3']
    el = []
    r = mediaTypes.checkAgainstFeedType(self.mediaType, self.feedType, el)
    if (self.error):
      self.assertEqual(len(el), 1, 'Expected errors to be logged (' + self.mediaType + ',' + FT[self.feedType] + ')')
    else:
      self.assertEqual(len(el), 0, 'Did not expect errors to be logged (' + self.mediaType + ',' + FT[self.feedType] + ')')
# Each row: Content-Type header value, expected media type, expected
# charset (None when the header names none), and whether an error is expected.
cvCases = [
  ['text/xml', 'text/xml', None, False],
  ['text/xml; charset=UTF-8', 'text/xml', 'utf-8', False],
  ['application/xml', 'application/xml', None, False],
  ['text/plain', 'text/plain', None, True],
  ['application/octet-stream', 'application/octet-stream', None, True]
]
# Each row: media type, feed type, and whether an error is expected.
caftCases = [
  ['text/xml', TYPE_RSS1, False],
  ['application/xml', TYPE_RSS1, False],
  ['application/rss+xml', TYPE_RSS1, False],
  ['application/rdf+xml', TYPE_RSS1, False],
  ['application/x.atom+xml', TYPE_RSS1, True],
  ['application/atom+xml', TYPE_RSS1, True],
  ['text/xml', TYPE_RSS2, False],
  # This row belongs to the RSS 2.0 group; it previously repeated the
  # TYPE_RSS1 case above, leaving application/xml untested against RSS 2.0.
  ['application/xml', TYPE_RSS2, False],
  ['application/rss+xml', TYPE_RSS2, False],
  ['application/rdf+xml', TYPE_RSS2, True],
  ['application/x.atom+xml', TYPE_RSS2, True],
  ['application/atom+xml', TYPE_RSS2, True],
  ['text/xml', TYPE_ATOM, False],
  ['application/xml', TYPE_ATOM, False],
  ['application/rss+xml', TYPE_ATOM, True],
  ['application/rdf+xml', TYPE_ATOM, True],
  ['application/x.atom+xml', TYPE_ATOM, False],
  ['application/atom+xml', TYPE_ATOM, False],
  ]
def buildTestSuite():
  """Build a suite with one parameterized MediaTypesTest per table row."""
  suite = unittest.TestSuite()
  for contentType, mediaType, charset, error in cvCases:
    case = MediaTypesTest('testCheckValid')
    case.contentType = contentType
    case.mediaType = mediaType
    case.charset = charset
    case.error = error
    suite.addTest(case)
  for mediaType, feedType, error in caftCases:
    case = MediaTypesTest('testCheckAgainstFeedType')
    case.mediaType = mediaType
    case.feedType = feedType
    case.error = error
    suite.addTest(case)
  return suite
if __name__ == "__main__":
  # The tests are parameterized via injected attributes, so the suite must
  # be built explicitly rather than discovered by unittest.main().
  s = buildTestSuite()
  unittest.TextTestRunner().run(s)
| Python |
#!/usr/bin/python
"""$Id$"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
from feedvalidator import xmlEncoding
from feedvalidator.logging import *
# Content-Type passed to every xmlEncoding.decode() call in these tests
ctAX='application/xml'
class TestDecode(unittest.TestCase):
  """Tests for xmlEncoding.decode(), which returns (encoding, unicode text)
  and appends any problems it finds to the supplied event list."""
  def _assertEqualUnicode(self, a, b):
    # Helper: a must be a real unicode string equal to the expected b
    self.assertNotEqual(a, None, 'Decoded strings should not equal None')
    self.assertEqual(type(a), unicode, 'Decoded strings should be Unicode (was ' + str(type(a)) + ')')
    self.assertEqual(type(b), unicode, 'Test suite error: test strings must be Unicode')
    self.assertEqual(a, b)
  def testProvidedEncoding(self):
    """An externally supplied encoding is honoured, with or without a declaration."""
    loggedEvents=[]
    (encoding, decoded) = xmlEncoding.decode(ctAX, 'UTF-8', '<x/>', loggedEvents)
    self.assertEquals('UTF-8', encoding)
    self._assertEqualUnicode(decoded, u'<x/>')
    self.assertEqual(loggedEvents, [])
    loggedEvents=[]
    (encoding, decoded) = xmlEncoding.decode(ctAX, 'UTF-8', '<?xml version="1.0" encoding="utf-8"?><x/>', loggedEvents)
    self.assertEquals('UTF-8', encoding)
    self._assertEqualUnicode(decoded, u'<?xml version="1.0" encoding="utf-8"?><x/>')
    self.assertEquals(loggedEvents, [])
  def testNoDeclarationOrBOM(self):
    """With no external encoding, declaration or BOM, decoding must fail."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, None, '<x/>', loggedEvents)[-1], None)
    self.assertEquals(len(loggedEvents), 1)
    self.assertEquals(loggedEvents[0].__class__, MissingEncoding, "Must warn if there's no clue as to encoding")
  # This document is currently detected as UTF-8, rather than None.
  #
  # def testMissingEncodingDeclaration(self):
  #   loggedEvents=[]
  #   self._assertEqualUnicode(xmlEncoding.decode(ctAX, None, '<?xml version="1.0"?><x/>', loggedEvents), u'<?xml version="1.0"?><x/>')
  #   self.assertEquals(len(loggedEvents), 1)
  #   self.assertEquals(loggedEvents[0].__class__, MissingEncoding, "Must warn if there's no clue as to encoding")
  def testJustDeclaration(self):
    """The declaration alone is enough to pick the encoding, as declared."""
    loggedEvents=[]
    (encoding, decoded) = xmlEncoding.decode(ctAX, None, '<?xml version="1.0" encoding="utf-8"?><x/>', loggedEvents)
    self.assertEquals(encoding, 'utf-8')
    self._assertEqualUnicode(decoded, u'<?xml version="1.0" encoding="utf-8"?><x/>')
    self.assertEquals(loggedEvents, [])
  def testSupplyUnknownEncoding(self):
    """An unrecognised externally supplied encoding must fail with UnknownEncoding."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, 'X-FAKE', '<x/>', loggedEvents)[-1], None)
    self.assertEquals(len(loggedEvents), 1)
    self.assertEquals(loggedEvents[0].__class__, UnknownEncoding, 'Must fail if an unknown encoding is used')
  def testDeclareUnknownEncoding(self):
    """An unrecognised declared encoding must also fail with UnknownEncoding."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, None, '<?xml version="1.0" encoding="X-FAKE"?><x/>', loggedEvents)[-1], None)
    self.assert_(loggedEvents)
    self.assertEquals(loggedEvents[-1].__class__, EncodingMismatch if False else UnknownEncoding)
  def testWarnMismatch(self):
    """Supplied and declared encodings disagreeing must log EncodingMismatch."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, 'US-ASCII', '<?xml version="1.0" encoding="UTF-8"?><x/>', loggedEvents)[-1], u'<?xml version="1.0" encoding="UTF-8"?><x/>')
    self.assert_(loggedEvents)
    self.assertEquals(loggedEvents[-1].__class__, EncodingMismatch)
  def testDecodeUTF8(self):
    """A well-formed UTF-8 sequence must decode cleanly, logging nothing."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, 'utf-8', '<x>\xc2\xa3</x>', loggedEvents)[-1], u'<x>\u00a3</x>')
    self.assertEquals(loggedEvents, [])
  def testDecodeBadUTF8(self):
    """Ensure bad UTF-8 is flagged as such, but still decoded."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, 'utf-8', '<x>\xa3</x>', loggedEvents)[-1], u'<x>\ufffd</x>')
    self.assert_(loggedEvents)
    self.assertEquals(loggedEvents[-1].__class__, UnicodeError)
  def testRemovedBOM(self):
    """Make sure the initial BOM signature is not in the decoded string."""
    loggedEvents=[]
    self.assertEquals(xmlEncoding.decode(ctAX, 'UTF-16', '\xff\xfe\x3c\x00\x78\x00\x2f\x00\x3e\x00', loggedEvents)[-1], u'<x/>')
    self.assertEquals(loggedEvents, [])
class TestRemoveDeclaration(unittest.TestCase):
  """Tests for stripping the encoding pseudo-attribute from an XML declaration."""
  def testRemoveSimple(self):
    # (input, expected) pairs covering both quote styles
    cases = [
      ('<?xml version="1.0" encoding="utf-8"?>', '<?xml version="1.0" ?>'),
      ("<?xml version='1.0' encoding='us-ascii' ?>", "<?xml version='1.0' ?>"),
    ]
    for src, expected in cases:
      self.assertEqual(xmlEncoding.removeDeclaration(src), expected)
  def testNotRemoved(self):
    """Make sure that invalid, or missing, declarations aren't affected."""
    unchanged = (
      '<?xml encoding="utf-8"?>', # Missing version
      '<doc />', # No declaration
      ' <?xml version="1.0" encoding="utf-8"?>' # Space before declaration
    )
    for doc in unchanged:
      self.assertEqual(xmlEncoding.removeDeclaration(doc), doc)
def buildTestSuite():
  """Collect every test from this module's two TestCase classes."""
  loader = unittest.TestLoader()
  suite = unittest.TestSuite()
  for testCase in (TestDecode, TestRemoveDeclaration):
    suite.addTest(loader.loadTestsFromTestCase(testCase))
  return suite
if __name__ == "__main__":
  # Discover and run all tests in this module when executed directly.
  unittest.main()
| Python |
#!/usr/bin/python
"""$Id$"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import os, sys
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
import unittest
class UriTest(unittest.TestCase):
  # Intentionally empty: one test method per table entry is attached
  # dynamically by buildTestSuite() below.
  pass
# Pairs of URI references that must compare equal under feedvalidator.uri.Uri.
testsEqual = [
  ['http://example.com/', 'http://example.com'],
  ['HTTP://example.com/', 'http://example.com/'],
  ['http://example.com/', 'http://example.com:/'],
  ['http://example.com/', 'http://example.com:80/'],
  ['http://example.com/', 'http://Example.com/'],
  ['http://example.com/~smith/', 'http://example.com/%7Esmith/'],
  ['http://example.com/~smith/', 'http://example.com/%7esmith/'],
  ['http://example.com/%7Esmith/', 'http://example.com/%7esmith/'],
  ['http://example.com/%C3%87', 'http://example.com/C%CC%A7'],
  ['tag:example.com,2004:Test', 'TAG:example.com,2004:Test'],
  ['ftp://example.com/', 'ftp://EXAMPLE.COM/'],
  ['ftp://example.com/', 'ftp://example.com:21/'],
  ['mailto:user@example.com', 'mailto:user@EXAMPLE.COM'],
  ['../%C3%87', '../C%CC%A7'],
]
# Pairs of URI references that must NOT compare equal.
testsDifferent = [
  ['http://example.com/', 'http://example.org/'],
  ['http://example.com/index.html', 'http://example.com'],
  ['FTP://example.com/', 'http://example.com/'],
  ['http://example.com/', 'http://example.com:8080/'],
  ['http://example.com:8080/', 'http://example.com:80/'],
  ['http://example.com/index.html', 'http://example.com/INDEX.HTML'],
  ['http://example.com/~smith/', 'http://example.com/%7Esmith'],
  ['http://example.com/~smith/', 'http://example.com/%2fsmith/'],
  ['http://user:password@example.com/', 'http://USER:PASSWORD@example.com/'],
  # Not a valid HTTP URL
  ['http://example.com:x', 'http://example.com/'],
  ['tag:example.com,2004:Test', 'tag:EXAMPLE.COM,2004:Test'],
  ['tag:user@example.com,2004:Test', 'tag:user@EXAMPLE.COM,2004:Test'],
  ['tag:example.com,2004:test', 'Tag:example.com,2004:TEST'],
  ['tag:example.com,2004:Test', 'Tag:example.com,2004-01:Test'],
  ['tag:user@example.com,2004:Test', 'tag:USER@example.com,2004:Test'],
  ['ftp://example.com/', 'ftp://example.com/test'],
  ['mailto:user@example.com', 'mailto:USER@example.com'],
  ['mailto:user@example.com?subject=test', 'mailto:user@example.com?subject=TEST']
]
# Examples from PaceCanonicalIds
# Each entry is [input] (input is already canonical) or
# [input, expected canonical form].
testsCanonical = [
  ['HTTP://example.com/', 'http://example.com/'],
  ['http://EXAMPLE.COM/', 'http://example.com/'],
  ['http://example.com/%7Ejane', 'http://example.com/~jane'],
  ['http://example.com/?q=1%2f2', 'http://example.com/?q=1%2F2'],
  ['http://example.com/?q=1/2'],
  ['http://example.com/a/./b', 'http://example.com/a/b'],
  ['http://example.com/a/../a/b', 'http://example.com/a/b'],
  ['http://user:password@example.com/', 'http://user:password@example.com/'],
  ['http://User:Password@Example.com/', 'http://User:Password@example.com/'],
  ['http://@example.com/', 'http://example.com/'],
  ['http://@Example.com/', 'http://example.com/'],
  ['http://:@example.com/', 'http://example.com/'],
  ['http://:@Example.com/', 'http://example.com/'],
  ['http://example.com', 'http://example.com/'],
  ['http://example.com:80/', 'http://example.com/'],
  ['http://www.w3.org/2000/01/rdf-schema#'],
  ['http://example.com/?q=C%CC%A7', 'http://example.com/?q=%C3%87'],
  ['http://example.com/?q=%E2%85%A0'],
  ['http://example.com/?'],
  [u'http://example.com/%C3%87'],
  # Other tests
  ['mailto:user@EXAMPLE.COM', 'mailto:user@example.com'],
  ['TAG:example.com,2004:Test', 'tag:example.com,2004:Test'],
  ['ftp://Example.Com:21/', 'ftp://example.com/'],
  ['http://example.com/?q=%E2%85%A0'],
  ['ldap://[2001:db8::7]/c=GB?objectClass?one'],
  ['mailto:John.Doe@example.com'],
  ['news:comp.infosystems.www.servers.unix'],
  ['tel:+1-816-555-1212'],
  ['telnet://192.0.2.16:80/'],
  ['urn:oasis:names:specification:docbook:dtd:xml:4.1.2'],
  ['http://example.com:081/', 'http://example.com:81/'],
  ['/test#test#test', '/test#test%23test'],
  ['http://com./'],
  ['http://example.com./', 'http://example.com/'],
  ['http://www.example.com//a//', 'http://www.example.com//a//'],
  ['http://www.example.com/./a//', 'http://www.example.com/a//'],
  ['http://www.example.com//a/./', 'http://www.example.com//a/'],
  ['http://example.com/%2F/'],
  ["aa1+-.:///?a1-._~!$&'()*+,;=:@/?#a1-._~!$&'()*+,;=:@/?"],
  ['http://example.com/?a+b'],
  ['http://a/b/c/../../../../g', 'http://a/g'],
  ['/.foo', '/.foo'],
  ['/foo/bar/.', '/foo/bar/'],
  ['/foo/bar/..', '/foo/'],
  ['http:test'],
  ['tag:'],
  ['file://', 'file:///'],
  ['telnet://example.com:23/', 'telnet://example.com/'],
  ['x://:@a/', 'x://a/'],
  ['tag:www.stanleysy.com,2005://1.119'],
  ['tag:timothy@hpl.hp.com,2001:web/externalHome'],
  ['http://xxx/read?id=abc%26x%3Dz&x=y'],
  ['tag:www.stanleysy.com,2005:%2F%2F1.119'],
  # IPv6 literals should be accepted
  ['http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:80/tests/atom/ipv6/',
    'http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:8080/tests/atom/ipv6/'],
  ['http://[fe80::290:4bff:fe1e:4374]:/tests/atom/ipv6/',
    'http://[fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/'],
]
# These are invalid URI references, but we can still sensibly
# normalise them (percent-escaping the offending characters).
testNormalisableBadUris = [
  ['http://example.com/\\/', 'http://example.com/%5C/'],
  ['http://example.com/?a b', 'http://example.com/?a%20b'],
]
# URI references for which canonicalForm() must return None.
testsInvalid = [
  # This URI is not in canonical form, and cannot be normalised
  # (a missing trailing comma here used to silently concatenate this
  # literal with the next one, merging two test cases into one)
  'http://example.com/?q=%C7',
  # Don't try to deal with relative URI references
  'foo/../bar',
  './http://',
  './\\/',
  # Bad IPv6 literals
  'http://fe80::290:4bff:fe1e:4374]/tests/atom/ipv6/',
  'http://[fe80::290:4bff:fe1e:4374/tests/atom/ipv6/',
]
import feedvalidator.uri
from feedvalidator.validators import rfc2396
def buildTestSuite():
  """Generate one test method per data row, attach them all to UriTest,
  and return a suite loaded from the populated class.

  Fixed: assertEquals (a long-deprecated alias) replaced by assertEqual.
  """
  i = 0
  # Pairs that must compare equal as Uri objects.
  for t in testsEqual:
    i+=1
    def tstEqual(self, a, b):
      self.assertEqual(feedvalidator.uri.Uri(a), feedvalidator.uri.Uri(b))
    # Default arguments bind the current row; a bare closure would see
    # only the final loop value.
    func = lambda self, a=t[0], b=t[1]: tstEqual(self, a, b)
    func.__doc__ = 'Test ' + t[0] + " == " + t[1]
    setattr(UriTest, 'test' + str(i), func)
  # Pairs that must compare unequal.
  for t in testsDifferent:
    i+=1
    def tstDifferent(self, a, b):
      self.assertNotEqual(feedvalidator.uri.Uri(a), feedvalidator.uri.Uri(b))
    func = lambda self, a=t[0], b=t[1]: tstDifferent(self, a, b)
    func.__doc__ = 'Test ' + t[0] + " != " + t[1]
    setattr(UriTest, 'test' + str(i), func)
  # Canonicalisation rows: [input] means input is already canonical.
  for t in testsCanonical + testNormalisableBadUris:
    i+=1
    o = t[0]
    if len(t) > 1:
      c = t[1]
    else:
      c = o
    def tstCanonicalForm(self, a, b):
      cf = feedvalidator.uri.canonicalForm(a)
      self.assertEqual(cf, b, 'Became: ' + str(cf))
    func = lambda self, a=o, b=c: tstCanonicalForm(self, a, b)
    func.__doc__ = 'Test ' + o + ' becomes ' + c
    setattr(UriTest, 'test' + str(i), func)
  # Invalid references must not canonicalise at all.
  for a in testsInvalid:
    i+= 1
    def tstCanFindCanonicalForm(self, a):
      self.assertEqual(feedvalidator.uri.canonicalForm(a), None)
    func = lambda self, a=a: tstCanFindCanonicalForm(self, a)
    func.__doc__ = 'Test ' + a + ' cannot be canonicalised'
    setattr(UriTest, 'test' + str(i), func)
  # Test everything against the rfc2396 matcher
  r2 = feedvalidator.validators.rfc2396()
  for t in testsEqual + testsDifferent + testsCanonical:
    i+=1
    def tstMatchesRe(self, a):
      self.assertTrue(r2.rfc2396_re.match(a))
    func = lambda self, a=t[0]: tstMatchesRe(self, a)
    func.__doc__ = 'Test ' + t[0] + ' is matched by the URI regular expression'
    setattr(UriTest, 'test' + str(i), func)
  return unittest.TestLoader().loadTestsFromTestCase(UriTest)
if __name__ == '__main__':
  # buildTestSuite attaches the generated test methods to UriTest as a
  # side effect; unittest.main() then discovers and runs them.
  buildTestSuite()
  unittest.main()
| Python |
#!/usr/bin/python
"""$Id$
Test XML character decoding against a range of encodings, valid and not."""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004, 2006 Joseph Walton"
import os, sys
import codecs
import re
# Make the feedvalidator package (one directory up) importable when the
# tests are run from their own directory.
curdir = os.path.abspath(os.path.dirname(__file__))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
  sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
# Names of generated documents skipped because their codec is unavailable.
skippedNames = []
import unittest, new, glob, re
from feedvalidator import xmlEncoding
class EncodingTestCase(unittest.TestCase):
def testEncodingMatches(self):
try:
enc = xmlEncoding.detect(self.bytes)
except UnicodeError,u:
self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
self.assert_(enc, 'An encoding must be returned for all valid files ('
+ self.filename + ')')
self.assertEqual(enc, self.expectedEncoding, 'Encoding for '
+ self.filename + ' should be ' + self.expectedEncoding + ', but was ' + enc)
def testEncodingFails(self):
eventLog = []
try:
encoding = xmlEncoding.detect(self.bytes, eventLog)
except UnicodeError,u:
self.fail("'" + self.filename + "' should not cause an exception (" + str(u) + ")")
if encoding:
self.fail("'" + self.filename + "' should not parse successfully (as " + encoding + ")")
if not(eventLog):
self.fail("'" + self.filename + "' should give a reason for parse failure")
# Byte-order marks / signatures for the Unicode encoding forms.
bom8='\xEF\xBB\xBF'
bom16BE='\xFE\xFF'
bom16LE='\xFF\xFE'
bom32BE='\x00\x00\xFE\xFF'
bom32LE='\xFF\xFE\x00\x00'
# Some fairly typical Unicode text. It should survive XML roundtripping.
docText=u'<x>\u201c"This\uFEFF" is\na\r\u00A3t\u20Acst\u201D</x>'
# Shape of a legal encoding name (XML's EncName production).
validDecl = re.compile('[A-Za-z][-A-Za-z0-9._]*')
def makeDecl(enc=None):
  """Return an XML declaration, with an encoding pseudo-attribute only
  when a non-empty encoding name is supplied."""
  if not enc:
    return "<?xml version='1.0'?>"
  assert validDecl.match(enc), "'" + enc + "' is not a valid encoding name"
  return "<?xml version='1.0' encoding='" + enc + "'?>"
def encoded(enc, txt=docText):
  """Encode txt (default: the round-trip sample) with the named codec,
  escaping unmappable characters as XML character references."""
  encoder = codecs.getencoder(enc)
  return encoder(txt, 'xmlcharrefreplace')[0]
def genValidXmlTestCases():
someFailed = False
# Required
yield('UTF-8', ['BOM', 'declaration'],
bom8 + makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-8', [],
encoded('UTF-8'))
yield('UTF-8', ['noenc'],
makeDecl() + encoded('UTF-8'))
yield('UTF-8', ['declaration'],
makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-8', ['BOM'],
bom8 + encoded('UTF-8'))
yield('UTF-8', ['BOM', 'noenc'],
bom8 + makeDecl('UTF-8') + encoded('UTF-8'))
yield('UTF-16', ['BOM', 'declaration', 'BE'],
bom16BE + encoded('UTF-16BE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['BOM', 'declaration', 'LE'],
bom16LE + encoded('UTF-16LE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['BOM', 'BE'],
bom16BE + encoded('UTF-16BE'))
yield('UTF-16', ['BOM', 'BE', 'noenc'],
bom16BE + encoded('UTF-16BE', makeDecl() + docText))
yield('UTF-16', ['BOM', 'LE'],
bom16LE + encoded('UTF-16LE'))
yield('UTF-16', ['BOM', 'LE', 'noenc'],
bom16LE + encoded('UTF-16LE', makeDecl() + docText))
yield('UTF-16', ['declaration', 'BE'],
encoded('UTF-16BE', makeDecl('UTF-16') + docText))
yield('UTF-16', ['declaration', 'LE'],
encoded('UTF-16LE', makeDecl('UTF-16') + docText))
# Standard wide encodings
try:
yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'BE'],
bom16BE + encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-2', ['BOM', 'declaration', 'LE'],
bom16LE + encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
yield('UTF-32', ['BOM', 'declaration', 'BE'],
bom32BE + encoded('UTF-32BE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['BOM', 'declaration', 'LE'],
bom32LE + encoded('UTF-32LE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['declaration', 'BE'],
encoded('UTF-32BE', makeDecl('UTF-32') + docText))
yield('UTF-32', ['declaration', 'LE'],
encoded('UTF-32LE', makeDecl('UTF-32') + docText))
yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'BE'],
bom32BE + encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
yield('ISO-10646-UCS-4', ['BOM', 'declaration', 'LE'],
bom32LE + encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
except LookupError, e:
print e
someFailed = True
# Encodings that don't have BOMs, and require declarations
withDeclarations = [
# Common ASCII-compatible encodings
'US-ASCII', 'ISO-8859-1', 'ISO-8859-15', 'WINDOWS-1252',
# EBCDIC
'IBM037', 'IBM038',
# Encodings with explicit endianness
'UTF-16BE', 'UTF-16LE',
'UTF-32BE', 'UTF-32LE',
# (UCS doesn't seem to define endian'd encodings)
]
for enc in withDeclarations:
try:
yield(enc, ['declaration'], encoded(enc, makeDecl(enc) + docText))
except LookupError, e:
print e
someFailed = True
# 10646-UCS encodings, with no BOM but with a declaration
try:
yield('ISO-10646-UCS-2', ['declaration', 'BE'],
encoded('UCS-2BE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-2', ['declaration', 'LE'],
encoded('UCS-2LE', makeDecl('ISO-10646-UCS-2') + docText))
yield('ISO-10646-UCS-4', ['declaration', 'BE'],
encoded('UCS-4BE', makeDecl('ISO-10646-UCS-4') + docText))
yield('ISO-10646-UCS-4', ['declaration', 'LE'],
bom32LE + encoded('UCS-4LE', makeDecl('ISO-10646-UCS-4') + docText))
except LookupError, e:
print e
someFailed = True
# Files with aliases for declarations. The declared alias should be
# reported back, rather than the canonical form.
try:
yield('csUnicode', ['alias', 'BOM', 'BE'],
bom16BE + encoded('UCS-2BE', makeDecl('csUnicode') + docText))
yield('csUnicode', ['alias', 'LE'],
encoded('UCS-2LE', makeDecl('csUnicode') + docText))
yield('csucs4', ['alias', 'BE'],
encoded('csucs4', makeDecl('csucs4') + docText))
except LookupError, e:
print e
someFailed = True
if someFailed:
print "Unable to generate some tests; see README for details"
def genInvalidXmlTestCases():
# Invalid files
someFailed = False
# UTF-32 with a non-four-byte declaration
try:
yield('UTF-32', ['BOM', 'BE', 'declaration'],
encoded('UTF-32', makeDecl('US-ASCII') + docText))
except LookupError, e:
print e
someFailed = True
# UTF-16 with a non-two-byte declaration
yield('UTF-16', ['BOM', 'BE', 'declaration'],
encoded('UTF-16', makeDecl('UTF-8') + docText))
# UTF-16BE, with a BOM
yield('UTF-16BE', ['BOM', 'declaration'],
bom16BE + encoded('UTF-16BE', makeDecl('UTF-16BE') + docText))
# UTF-8, with a BOM, declaring US-ASCII
yield('UTF-8', ['BOM', 'declaration'],
bom8 + encoded('UTF-8', makeDecl('US-ASCII') + docText))
try:
# UTF-32, with a BOM, beginning without a declaration
yield('UTF-32', ['BOM', 'BE'],
bom32BE + encoded('UTF-32BE'))
# UTF-32, with a BOM, and a declaration with no encoding
yield('UTF-32', ['BOM', 'BE', 'noenc'],
bom32BE + encoded('UTF-32BE', makeDecl() + docText))
except LookupError, e:
print e
someFailed = True
# UTF-16, no BOM, no declaration
# yield('UTF-16', ['BE'], encoded('UTF-16BE'))
# This case falls through, and is identified as UTF-8; leave it out
# until we're doing decoding as well as detection.
if someFailed:
print "Unable to generate some tests; see README for details"
def genXmlTestCases():
  """Yield (encoding, tags, bytes, isValid) for every generated
  document, valid cases first."""
  for case in genValidXmlTestCases():
    yield case + (True,)
  for case in genInvalidXmlTestCases():
    yield case + (False,)
def buildTestSuite():
import codecs
suite = unittest.TestSuite()
for (enc, t, x, valid) in genXmlTestCases():
t.sort()
if valid: pfx = 'valid_'
else: pfx = 'invalid_'
name = pfx + '_'.join([enc] + t) + '.xml'
# name, x is content
try:
alias = enc
if enc.startswith('ISO-10646-'):
alias = enc[10:]
c = codecs.lookup(alias)
if valid:
t = EncodingTestCase('testEncodingMatches')
t.expectedEncoding = enc
else:
t = EncodingTestCase('testEncodingFails')
t.filename = name
t.bytes = x
suite.addTest(t)
except LookupError,e:
print "Skipping " + name + ": " + str(e)
skippedNames.append(name)
return suite
if __name__ == "__main__":
  s = buildTestSuite()
  unittest.TextTestRunner().run(s)
  # Report documents skipped because their codec was unavailable.
  if skippedNames:
    print "Tests skipped:",len(skippedNames)
    print "Please see README for details"
| Python |
#!/usr/bin/python
import os, sys, unittest
# Make the feedvalidator package (one directory up) importable when the
# tests are run from their own directory.
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.split(curdir)[0]
if srcdir not in sys.path:
  sys.path.insert(0, srcdir)
basedir = os.path.split(srcdir)[0]
from feedvalidator.base import namespaces
from os.path import dirname,join
class HowtoNsTest(unittest.TestCase):
  """Verify the 'declare namespaces' howto documents every namespace
  prefix the validator knows about."""
  def test_howto_declare_namespaces(self):
    base=dirname(dirname(dirname(os.path.abspath(__file__))))
    filename=join(join(join(base,'docs'),'howto'),'declare_namespaces.html')
    # Fixed: close the handle even if read() raises.
    handle=open(filename)
    try:
      page=handle.read()
    finally:
      handle.close()
    for uri,prefix in namespaces.items():
      # Prefixes the howto deliberately omits.
      if prefix=='xml': continue
      if prefix=='soap': continue
      if uri.find('ModWiki')>0: continue
      xmlns = 'xmlns:%s="%s"' % (prefix,uri)
      self.assertTrue(page.find(xmlns)>=0,xmlns)
def buildTestSuite():
  """Collect the howto namespace tests into a single suite."""
  loader = unittest.TestLoader()
  suite = unittest.TestSuite()
  suite.addTest(loader.loadTestsFromTestCase(HowtoNsTest))
  return suite
if __name__ == '__main__':
  # Run the howto namespace tests directly.
  unittest.main()
| Python |
from os import environ
# This is a test config, used by the runtests script, to ensure check.cgi
# runs without requiring a web server.
# Public URL of the checker and filesystem layout constants.
HOMEURL = 'http://localhost/check'
PYDIR = '/usr/lib/python/'
# Web root comes from the environment so the tests need no real server.
WEBDIR = environ['FEEDVALIDATOR_HOME']
SRCDIR = WEBDIR + '/src'
# URLs (relative) for static documentation and stylesheets.
DOCSURL = 'docs'
CSSURL = 'css'
| Python |
#!/usr/bin/python
# This is a simple demo of validation through the web service.
# Validator web-service endpoint.
WS_HOST = 'www.feedvalidator.org'
WS_URI = '/check.cgi'
import urllib, httplib
from xml.dom import minidom
from sys import exit
# Fetch the feed to validate
rawData = open('../testcases/rss/may/image_height_recommended.xml').read()
# Specify the content type, including the charset if known
hdrs = {'Content-Type': 'application/xml'}
# Simply POST the feed contents to the validator URL
connection=httplib.HTTPConnection(WS_HOST, 80)
connection.request('POST', WS_URI, rawData, hdrs)
response=connection.getresponse()
# The response is a SOAP message, as XML (otherwise there's a problem
# with the validator)
try:
  document=minidom.parseString(response.read())
except:
  # Exit code 20: the service itself misbehaved.
  print "Server error, unable to validate:",response.status,response.reason
  print "(Unable to parse response as XML.)"
  exit(20)
# If the status is OK, validation took place.
if response.status == 200:
  errors = document.getElementsByTagName("text")
  if not errors:
    # Exit code 0: valid feed.
    print "The feed is valid!"
    exit(0)
  else:
    # Errors were found; exit code 5.
    for node in errors:
      print "".join([child.data for child in node.childNodes])
    exit(5)
# If there was a problem on the server, show details
elif response.status >= 500:
  errors = document.getElementsByTagName("faultstring")
  for node in errors:
    print "".join([child.data for child in node.childNodes])
  traceback = document.getElementsByTagNameNS("http://www.python.org/doc/current/lib/module-traceback.html", "traceback")
  if traceback:
    print "".join([child.data for child in traceback[0].childNodes])
  exit(10)
# The unexpected happened...
else:
  print "Unexpected server response:",response.status,response.reason
  exit(20)
| Python |
#!/usr/bin/python
"""$Id: validtest.py 1014 2008-05-21 20:43:22Z joe.walton.gglcd $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1014 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import unittest, new, os, sys, glob, re
from feedvalidator.logging import Message,SelfDoesntMatchLocation,MissingSelf
from feedvalidator import compatibility
from feedvalidator.formatter.application_test import Formatter
class TestCase(unittest.TestCase):
  """unittest.TestCase with assertions over feedvalidator logged-event
  lists; validtest's buildTestSuite attaches generated tests to it."""
  def failIfNoMessage(self, theList):
    """Fail if any AA-filtered event cannot be formatted into a message."""
    filterFunc = compatibility.AA
    events = filterFunc(theList)
    output = Formatter(events)
    for e in events:
      if not output.format(e):
        # Fixed: typo 'contruct' in the failure text, and the
        # Python-2-only 'raise E, msg' statement form.
        raise self.failureException('could not construct message for %s' % e)
  def failUnlessContainsInstanceOf(self, theClass, params, theList, msg=None):
    """Fail if there are no instances of theClass in theList with given params"""
    self.failIfNoMessage(theList)
    # repr() replaces the backquote syntax removed in Python 3.
    failure=(msg or 'no %s instances in %s' % (theClass.__name__, repr(theList)))
    for item in theList:
      if issubclass(item.__class__, theClass):
        if not params: return
        for k, v in params.items():
          # '!=' replaces the deprecated '<>' operator.
          if str(item.params[k]) != v:
            failure=("%s.%s value was %s, expected %s" %
                     (theClass.__name__, k, item.params[k], v))
            break
        else:
          # Every param matched.
          return
    raise self.failureException(failure)
  def failIfContainsInstanceOf(self, theClass, params, theList, msg=None):
    """Fail if there are instances of theClass in theList with given params"""
    self.failIfNoMessage(theList)
    for item in theList:
      # Self-link advisories are never counted as generic Messages.
      if theClass==Message and isinstance(item,SelfDoesntMatchLocation):
        continue
      if theClass==Message and isinstance(item,MissingSelf):
        continue
      if issubclass(item.__class__, theClass):
        if not params:
          raise self.failureException(msg or 'unexpected %s' % (theClass.__name__))
        allmatch = 1
        for k, v in params.items():
          if item.params[k] != v:
            allmatch = 0
        if allmatch:
          raise self.failureException(
            "unexpected %s.%s with a value of %s" %
            (theClass.__name__, k, v))
# Matches '<!-- Description: ... Expect: [!]ExcName{k:v,...} -->' headers.
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
# Alternate validome-style header: '<!-- Description: ... Message: [!]name -->'.
validome_re = re.compile("<!--\s*Description:\s*(.*?)\s*Message:\s*(!?)(\w*).*?\s*-->", re.S)
def getDescription(xmlfile):
  """Extract description and exception from XML file

  The deal here is that each test case is an XML file which contains
  not only a possibly invalid RSS feed but also the description of the
  test, i.e. the exception that we would expect the RSS validator to
  raise (or not) when it validates the feed. The expected exception and
  the human-readable description are placed into an XML comment like this:

  <!--
  Description: channel must include title
  Expect: MissingTitle
  -->
  """
  # Fixed: close the handle even if read() raises.
  stream = open(xmlfile)
  try:
    xmldoc = stream.read()
  finally:
    stream.close()
  search_results = desc_re.search(xmldoc)
  if search_results:
    description, cond, excName, plist = list(search_results.groups())
  else:
    # Fall back to the validome-style header.
    search_results = validome_re.search(xmldoc)
    if search_results:
      plist = ''
      description, cond, excName = list(search_results.groups())
      excName = excName.capitalize()
      # 'Valid' means "expect no Message at all".
      if excName=='Valid': cond,excName = '!', 'Message'
    else:
      # Fixed: the Python-2-only 'raise E, msg' statement form.
      raise RuntimeError("can't parse %s" % xmlfile)
  # '' means "expect the exception"; '!' means "expect its absence".
  if cond == "":
    method = TestCase.failUnlessContainsInstanceOf
  else:
    method = TestCase.failIfContainsInstanceOf
  params = {}
  if plist:
    # plist is 'k1:v1,k2:v2'; values may themselves contain ':'.
    for entry in plist.split(','):
      name,value = entry.lstrip().split(':',1)
      params[name] = value
  exc = getattr(feedvalidator, excName)
  description = xmlfile + ": " + description
  return method, description, params, exc
def buildTestCase(xmlfile, xmlBase, description, method, exc, params):
  """factory to create functions which validate `xmlfile`

  the returned function asserts that validating `xmlfile` (an XML file)
  will return a list of exceptions that include an instance of
  `exc` (an Exception class)
  """
  # Default arguments bind the current values; xmlBase and method are
  # captured by closure exactly as the original lambda did.
  def testFunc(self, xmlfile=xmlfile, exc=exc, params=params):
    events = feedvalidator.validateString(open(xmlfile).read(), fallback='US-ASCII', base=xmlBase)['loggedEvents']
    return method(self, exc, params, events)
  testFunc.__doc__ = description
  return testFunc
def buildTestSuite():
  """Build one generated test per testcase file (or per file named on
  the command line), attach them to TestCase, and return the suite."""
  curdir = os.path.dirname(os.path.abspath(__file__))
  basedir = os.path.split(curdir)[0]
  for xmlfile in sys.argv[1:] or (glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')) + glob.glob(os.path.join(basedir, 'testcases', 'opml', '**', '*.opml'))):
    method, description, params, exc = getDescription(xmlfile)
    # Map the on-disk path into the validator's URL space so that
    # base-URI-sensitive checks behave as they would online.
    xmlBase = os.path.abspath(xmlfile).replace(basedir,"http://www.feedvalidator.org")
    testName = 'test_' + xmlBase.replace(os.path.sep, "/")
    testFunc = buildTestCase(xmlfile, xmlBase, description, method, exc, params)
    # new.instancemethod is the Python 2 way to bind a function as an
    # (unbound) method on an existing class.
    instanceMethod = new.instancemethod(testFunc, None, TestCase)
    setattr(TestCase, testName, instanceMethod)
  return unittest.TestLoader().loadTestsFromTestCase(TestCase)
if __name__ == '__main__':
  suite = buildTestSuite()
  # Strip file arguments (already consumed by buildTestSuite) so
  # unittest.main doesn't try to interpret them as test names.
  unittest.main(argv=sys.argv[:1])
| Python |
class Identifier(unicode):
    """
    See http://www.w3.org/2002/07/rdf-identifer-terminology/
    regarding choice of terminology.
    """
    # Common base for RDF node identifiers (URIRef, BNode, Literal):
    # a plain (Python 2) unicode string; subclasses add behaviour.
| Python |
from rdflib.URIRef import URIRef
class Namespace(URIRef):
    # A URI prefix: indexing appends a local name, so NS["title"] yields
    # the full term URI.
    # NOTE(review): 'default' is not part of the __getitem__ protocol and
    # is never used here — presumably vestigial; confirm before removing.
    def __getitem__(self, key, default=None):
        return URIRef(self + key)
| Python |
from rdflib.Namespace import Namespace
# The RDF Namespace
# http://ilrt.org/discovery/2001/07/rdf-syntax-grammar/#section-Namespace
RDFNS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
# Syntax names
RDF = RDFNS["RDF"]
DESCRIPTION = RDFNS["Description"]
ID = RDFNS["ID"]
ABOUT = RDFNS["about"]
PARSE_TYPE = RDFNS["parseType"]
RESOURCE = RDFNS["resource"]
LI = RDFNS["li"]
NODE_ID = RDFNS["nodeID"]
DATATYPE = RDFNS["datatype"]
# RDF Classes
SEQ = RDFNS["Seq"]
BAG = RDFNS["Bag"]
ALT = RDFNS["Alt"]
STATEMENT = RDFNS["Statement"]
PROPERTY = RDFNS["Property"]
XMLLiteral = RDFNS["XMLLiteral"]
LIST = RDFNS["List"]
# RDF Properties
SUBJECT = RDFNS["subject"]
PREDICATE = RDFNS["predicate"]
OBJECT = RDFNS["object"]
TYPE = RDFNS["type"]
VALUE = RDFNS["value"]
FIRST = RDFNS["first"]
REST = RDFNS["rest"]
# and _n where n is a non-negative integer
# RDF Resources
NIL = RDFNS["nil"]
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified version.
UNQUALIFIED = {"about" : ABOUT, "ID" : ID,
               "type" : TYPE, "resource": RESOURCE, "parseType": PARSE_TYPE}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
CORE_SYNTAX_TERMS = [RDF, ID, ABOUT, PARSE_TYPE, RESOURCE, NODE_ID, DATATYPE]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [DESCRIPTION, LI]
# http://www.w3.org/TR/rdf-syntax-grammar/#oldTerms
OLD_TERMS = [RDFNS["aboutEach"], RDFNS["aboutEachPrefix"], RDFNS["bagID"]]
# SCHEMA
RDFSNS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
RDFS_CLASS = RDFSNS["Class"]
RDFS_RESOURCE = RDFSNS["Resource"]
RDFS_SUBCLASSOF = RDFSNS["subClassOf"]
RDFS_SUBPROPERTYOF = RDFSNS["subPropertyOf"]
RDFS_ISDEFINEDBY = RDFSNS["isDefinedBy"]
RDFS_LABEL = RDFSNS["label"]
RDFS_COMMENT = RDFSNS["comment"]
RDFS_RANGE = RDFSNS["range"]
RDFS_DOMAIN = RDFSNS["domain"]
RDFS_LITERAL = RDFSNS["Literal"]
RDFS_CONTAINER = RDFSNS["Container"]
RDFS_SEEALSO = RDFSNS["seeAlso"]
| Python |
from string import ascii_letters
from random import choice
from rdflib.Identifier import Identifier
from rdflib.Literal import Literal
# Create a (hopefully) unique prefix so that BNode values do not
# collide with ones created with a different instance of this module.
# Eight random letters; "".join replaces the original quadratic
# string-+= loop (range is used instead of Python-2-only xrange).
prefix = "".join(choice(ascii_letters) for _ in range(8))
# Counter appended to the prefix; incremented once per generated BNode.
node_id = 0
class BNode(Identifier):
    """A blank node: an Identifier of the form '_:<prefix><n>'.

    When constructed without a value, a fresh module-unique name is
    generated from the random prefix and a global counter."""
    def __new__(cls, value=None):
        # 'is None' (identity) instead of '==' — equality against None is
        # both slower and can be spoofed by a custom __eq__.
        if value is None:
            global node_id
            node_id += 1
            value = "_:%s%s" % (prefix, node_id)
        return Identifier.__new__(cls, value)
    def n3(self):
        """Return the N3 serialization (the identifier itself)."""
        return str(self)
| Python |
from sys import version_info
# unicodedata.normalize appeared in Python 2.3; on older interpreters we
# skip NFC normalization checks entirely (see Literal.__init__).
if version_info[0:2] > (2, 2):
    from unicodedata import normalize
else:
    normalize = None
from rdflib.Identifier import Identifier
from rdflib.exceptions import Error
class Literal(Identifier):
    """
    http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal

    A literal node: the unicode value itself, plus an optional language
    tag or datatype URI (mutually exclusive; datatype wins).
    """
    def __new__(cls, value, lang='', datatype=''):
        # lang/datatype are ignored here; __init__ records them.
        value = unicode(value)
        return Identifier.__new__(cls, value)
    def __init__(self, value, lang='', datatype=''):
        # Reject values that are not NFC-normalized (only when
        # unicodedata.normalize is available on this interpreter).
        if normalize and value:
            if not isinstance(value, unicode):
                value = unicode(value)
            if value != normalize("NFC", value):
                raise Error("value must be in NFC normalized form.")
        if datatype:
            # A datatyped literal carries no language tag.
            lang = ''
        self.language = lang
        self.datatype = datatype
    def __add__(self, val):
        # Concatenation preserves this literal's language/datatype.
        s = super(Literal, self).__add__(val)
        return Literal(s, self.language, self.datatype)
    def n3(self):
        """Serialize as N3: "value" plus optional @lang / ^^<datatype>."""
        language = self.language
        datatype = self.datatype
        encoded = self.encode('unicode-escape')
        if language:
            if datatype:
                # NOTE(review): language and datatype are made mutually
                # exclusive in __init__, so this branch looks unreachable.
                return '"%s"@%s^^<%s>' % (encoded, language, datatype)
            else:
                return '"%s"@%s' % (encoded, language)
        else:
            if datatype:
                return '"%s"^^<%s>' % (encoded, datatype)
            else:
                return '"%s"' % encoded
    def __eq__(self, other):
        # Returns 0/1 (Python 2 idiom). NOTE(review): language is compared
        # but datatype is NOT, and __ne__/__hash__ are not overridden to
        # match — confirm against upstream rdflib before relying on this.
        if other==None:
            return 0
        elif isinstance(other, Literal):
            result = self.__cmp__(other)==0
            if result==1:
                if self.language==other.language:
                    return 1
                else:
                    return 0
            else:
                return result
        elif isinstance(other, Identifier):
            return 0
        else:
            return unicode(self)==other
| Python |
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
# Lt, Nl.
#
# * Name characters other than Name-start characters must have one of
# the categories Mc, Me, Mn, Lm, or Nd.
#
# * Characters in the compatibility area (i.e. with character code
# greater than #xF900 and less than #xFFFE) are not allowed in XML
# names.
#
# * Characters which have a font or compatibility decomposition
# (i.e. those with a "compatibility formatting tag" in field 5 of the
# database -- marked by field 5 beginning with a "<") are not allowed.
#
# * The following characters are treated as name-start characters rather
# than name characters, because the property file classifies them as
# Alphabetic: [#x02BB-#x02C1], #x0559, #x06E5, #x06E6.
#
# * Characters #x20DD-#x20E0 are excluded (in accordance with Unicode
# 2.0, section 5.14).
#
# * Character #x00B7 is classified as an extender, because the property
# list so identifies it.
#
# * Character #x0387 is added as a name character, because #x00B7 is its
# canonical equivalent.
#
# * Characters ':' and '_' are allowed as name-start characters.
#
# * Characters '-' and '.' are allowed as name characters.
from unicodedata import category, decomposition
# Unicode general categories allowed at the start / in the body of an
# XML name (see the long derivation comment above).
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = [u"\u00B7", u"\u0387", u"-", u".", u"_"]
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# [4] NCName ::= (Letter | '_') (NCNameChar)* /* An XML Name, minus
# the ":" */
# [5] NCNameChar ::= Letter | Digit | '.' | '-' | '_' | CombiningChar
# | Extender
def is_ncname(name):
    """Return 1 if name is a valid XML NCName, 0 otherwise.

    The first character must be '_' or in a name-start category; the
    rest must be name-category characters or ALLOWED_NAME_CHARS.
    """
    # Fixed: the empty string is not an NCName (name[0] used to raise
    # IndexError here).
    if not name:
        return 0
    first = name[0]
    if first == "_" or category(first) in NAME_START_CATEGORIES:
        # Iterate the remaining characters directly instead of indexing
        # with the Python-2-only xrange.
        for c in name[1:]:
            if not category(c) in NAME_CATEGORIES:
                if c in ALLOWED_NAME_CHARS:
                    continue
                return 0
            #if in compatibility area
            #if decomposition(c)!='':
            #    return 0
        return 1
    else:
        return 0
def split_uri(predicate):
    """Split a predicate URI into (namespace, local-name).

    Scans backwards for the first non-name character, then forwards for
    a name-start character to begin the local part."""
    predicate = predicate  # no-op self-assignment; kept as-is
    length = len(predicate)
    for i in xrange(0, length):
        if not category(predicate[-i-1]) in NAME_CATEGORIES:
            for j in xrange(-1-i, length):
                if category(predicate[j]) in NAME_START_CATEGORIES:
                    ns = predicate[:j]
                    # An empty namespace part is unusable; give up.
                    if not ns:
                        break
                    ln = predicate[j:]
                    return (ns, ln)
            break
    # NOTE(review): 'Error' is not imported in this module, so reaching
    # this raise actually produces a NameError; presumably
    # 'from rdflib.exceptions import Error' was intended — confirm.
    raise Error("This graph cannot be serialized in RDF/XML. Could not split predicate: '%s'" % predicate)
| Python |
# RDF Library
| Python |
# Copyright (c) 2002, Daniel Krech, http://eikeon.com/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of Daniel Krech nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
from urlparse import urljoin, urldefrag
from xml.sax.saxutils import handler, quoteattr, escape
from urllib import quote
from rdflib.URIRef import URIRef
from rdflib.BNode import BNode
from rdflib.Literal import Literal
from rdflib.Namespace import Namespace
from rdflib.exceptions import ParserError, Error
from rdflib.constants import RDFNS
from rdflib.constants import UNQUALIFIED, CORE_SYNTAX_TERMS, OLD_TERMS
from rdflib.constants import RDF, DESCRIPTION, ID, ABOUT
from rdflib.constants import PARSE_TYPE, RESOURCE, LI
from rdflib.constants import NODE_ID, DATATYPE
from rdflib.constants import SEQ, BAG, ALT
from rdflib.constants import STATEMENT, PROPERTY, XMLLiteral, LIST
from rdflib.constants import SUBJECT, PREDICATE, OBJECT
from rdflib.constants import TYPE, VALUE, FIRST, REST
from rdflib.constants import NIL
from rdflib.syntax.xml_names import is_ncname
# URIs that must NOT appear as node elements / property elements /
# property attributes (rdf-syntax-grammar sections 7.2.5, 7.2.14).
NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [LI,] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [ID, NODE_ID, ABOUT]
PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [DESCRIPTION,] + OLD_TERMS
PROPERTY_ATTRIBUTE_EXCEPTIONS = CORE_SYNTAX_TERMS + [DESCRIPTION, LI] + OLD_TERMS
PROPERTY_ELEMENT_ATTRIBUTES = [ID, RESOURCE, NODE_ID]
# The xml: namespace; BASE/LANG are the expanded-name keys SAX hands us.
XMLNS = Namespace("http://www.w3.org/XML/1998/namespace")
BASE = (XMLNS, "base")
LANG = (XMLNS, "lang")
class BagID(URIRef):
    """A URIRef that also counts rdf:li children, handing out rdf:_n
    membership properties in order."""
    __slots__ = ['li']
    def __init__(self, val):
        # Fixed: super() was called with URIRef as the first argument,
        # which skips URIRef itself in the MRO; the defining class
        # (BagID) must be passed instead.
        super(BagID, self).__init__(val)
        self.li = 0
    def next_li(self):
        """Return the next rdf:_n property URI for this container."""
        self.li += 1
        return URIRef(RDFNS + "_%s" % self.li)
class ElementHandler(object):
    """Per-element parse frame: event callbacks plus inherited RDF/XML
    state. One instance is pushed per open element; __slots__ keeps the
    many short-lived frames cheap."""
    __slots__ = ['start', 'char', 'end', 'li', 'id',
                 'base', 'subject', 'predicate', 'object',
                 'list', 'language', 'datatype', 'declared']
    def __init__(self):
        self.start = None      # start-element callback
        self.char = None       # character-data callback
        self.end = None        # end-element callback
        self.li = 0            # counter for rdf:li -> rdf:_n renumbering
        self.id = None
        self.base = None       # effective xml:base
        self.subject = None
        # Fixed: 'predicate' is declared in __slots__ but was never
        # initialised, so reading it before assignment raised
        # AttributeError.
        self.predicate = None
        self.object = None
        self.list = None
        self.language = ""     # effective xml:lang
        self.datatype = ""
        self.declared = None
    def next_li(self):
        """Return the next rdf:_n membership property URI."""
        self.li += 1
        return URIRef(RDFNS + "_%s" % self.li)
class RDFXMLHandler(handler.ContentHandler):
    def __init__(self, store):
        # store: triple sink exposing add((s, p, o)) plus the
        # ns_prefix_map / prefix_ns_map dicts used by startPrefixMapping.
        self.store = store
        self.reset()
    def reset(self):
        """Reset all per-document parse state so the handler is reusable."""
        document_element = ElementHandler()
        document_element.start = self.document_element_start
        document_element.end = lambda name, qname: None
        # stack[0] is padding so current/parent indexing never underflows.
        self.stack = [None, document_element,]
        self.ids = {} # remember IDs we have already seen
        self.bnode = {}  # nodeID -> BNode, so repeats map to one node
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
# ContentHandler methods
    def setDocumentLocator(self, locator):
        # Saved so error() can report system-id/line/column positions.
        self.locator = locator
    def startDocument(self):
        # All initialisation happens in reset(); nothing per-document.
        pass
    def startPrefixMapping(self, prefix, uri):
        """Record a namespace binding and teach the store a (possibly
        renamed) prefix for later serialisation."""
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        ns_prefix = self.store.ns_prefix_map
        prefix_ns = self.store.prefix_ns_map
        if prefix in prefix_ns:
            if ns_prefix.get(uri, None) != prefix:
                # The prefix is already bound to a different URI: invent
                # prefix1, prefix2, ... until a free name is found.
                num = 1
                while 1:
                    new_prefix = "%s%s" % (prefix, num)
                    if new_prefix not in prefix_ns:
                        break
                    num +=1
                ns_prefix[uri] = new_prefix
                prefix_ns[new_prefix] = uri
        # Only bind when we do not already have a binding, so defaults
        # like rdf and rdfs are not clobbered.
        elif uri not in ns_prefix:
            ns_prefix[uri] = prefix
            prefix_ns[prefix] = uri
    def endPrefixMapping(self, prefix):
        # Restore the snapshot pushed by startPrefixMapping and discard
        # it from the stack.
        # NOTE(review): the dict left behind in _ns_contexts is the one
        # mutated by startPrefixMapping, so the save/restore pairing
        # looks asymmetric for nested mappings — confirm against
        # upstream rdflib before relying on _current_context here.
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]
    def startElementNS(self, name, qname, attrs):
        """Push a new handler frame, resolve inherited xml:base and
        xml:lang, then hand off to the frame's start callback."""
        stack = self.stack
        stack.append(ElementHandler())
        current = self.current
        parent = self.parent
        base = attrs.get(BASE, None)
        if base is not None:
            # Explicit xml:base: any fragment part is discarded.
            base, frag = urldefrag(base)
        else:
            if parent:
                base = parent.base
            if base is None:
                # Fall back to the document's own identifier.
                systemId = self.locator.getPublicId() or self.locator.getSystemId()
                if systemId:
                    base, frag = urldefrag(systemId)
        current.base = base
        language = attrs.get(LANG, None)
        if language is None:
            # xml:lang inherits from the parent element.
            if parent:
                language = parent.language
            else:
                language = ''
        current.language = language
        current.start(name, qname, attrs)
    def endElementNS(self, name, qname):
        # Delegate to the frame's end callback, then pop the frame.
        self.current.end(name, qname)
        self.stack.pop()
    def characters(self, content):
        # Forward text to the current frame's character callback, if any.
        char = self.current.char
        if char:
            char(content)
    def ignorableWhitespace(self, content):
        # Whitespace between elements carries no RDF meaning.
        pass
    def processingInstruction(self, target, data):
        # Processing instructions are ignored by the RDF/XML grammar.
        pass
    def add_reified(self, sid, (s, p, o)):
        """Emit the four reification triples describing statement
        (s, p, o) under the statement id sid.

        (Note the Python-2-only tuple parameter in the signature.)"""
        self.store.add((sid, TYPE, STATEMENT))
        self.store.add((sid, SUBJECT, s))
        self.store.add((sid, PREDICATE, p))
        self.store.add((sid, OBJECT, o))
def error(self, message):
locator = self.locator
info = "%s:%s:%s: " % (locator.getSystemId(),
locator.getLineNumber(), locator.getColumnNumber())
raise ParserError(info + message)
    def get_current(self):
        return self.stack[-2]
    # Create a read only property called current so that self.current
    # gives the current element handler.
    current = property(get_current)
    def get_next(self):
        return self.stack[-1]
    # Create a read only property that gives the element handler to be
    # used for the next element.
    next = property(get_next)
    def get_parent(self):
        return self.stack[-3]
    # Create a read only property that gives the current parent
    # element handler.
    parent = property(get_parent)
    def absolutize(self, uri):
        """Resolve uri against the current xml:base and wrap as URIRef.

        A bare trailing '#' is re-appended afterwards — presumably
        because urljoin drops an empty fragment (NOTE: confirm)."""
        s = urljoin(self.current.base, uri, allow_fragments=1)
        if uri and uri[-1]=="#":
            return URIRef(''.join((s, "#")))
        else:
            return URIRef(s)
    def convert(self, name, qname, attrs):
        """Flatten an (ns-uri, localname) pair and its attributes into
        plain URI strings.

        xmlns/xml-prefixed attributes are dropped; unqualified rdf terms
        (about, ID, type, resource, parseType) are promoted into the RDF
        namespace."""
        if name[0] is None:
            name = name[1]
        else:
            name = "".join(name)
        atts = {}
        for (n, v) in attrs.items(): #attrs._attrs.iteritems(): #
            if n[0] is None:
                att = n[1]
            else:
                att = "".join(n)
            if att.startswith(XMLNS) or att[0:3].lower()=="xml":
                pass
            elif att in UNQUALIFIED:
                #if not RDFNS[att] in atts:
                atts[RDFNS[att]] = v
            else:
                atts[att] = v
        return name, atts
    def document_element_start(self, name, qname, attrs):
        # An explicit rdf:RDF wrapper: its children are node elements.
        if name[0] and "".join(name) == RDF:
            next = self.next
            next.start = self.node_element_start
            next.end = self.node_element_end
        else:
            # rdf:RDF is optional: a bare node element at the top level.
            self.node_element_start(name, qname, attrs)
            #self.current.end = self.node_element_end
            # TODO... set end to something that sets start such that
            # another element will cause error
def node_element_start(self, name, qname, attrs):
    """Parse a node element: determine its subject (rdf:ID / rdf:about /
    rdf:nodeID / fresh blank node), assert its type, and turn remaining
    property attributes into literal triples."""
    name, atts = self.convert(name, qname, attrs)
    current = self.current
    absolutize = self.absolutize
    next = self.next
    next.start = self.property_element_start
    next.end = self.property_element_end
    if name in NODE_ELEMENT_EXCEPTIONS:
        self.error("Invalid node element URI: %s" % name)
    # rdf:ID, rdf:about and rdf:nodeID are mutually exclusive.
    if ID in atts:
        if ABOUT in atts or NODE_ID in atts:
            self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
        id = atts[ID]
        if not is_ncname(id):
            self.error("rdf:ID value is not a valid NCName: %s" % id)
        subject = absolutize("#%s" % id)
        if subject in self.ids:
            self.error("two elements cannot use the same ID: '%s'" % subject)
        self.ids[subject] = 1 # IDs can only appear once within a document
    elif NODE_ID in atts:
        if ID in atts or ABOUT in atts:
            self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
        nodeID = atts[NODE_ID]
        if not is_ncname(nodeID):
            self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
        # The same nodeID must map to the same blank node across the document.
        if nodeID in self.bnode:
            subject = self.bnode[nodeID]
        else:
            subject = BNode()
            self.bnode[nodeID] = subject
    elif ABOUT in atts:
        if ID in atts or NODE_ID in atts:
            self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
        subject = absolutize(atts[ABOUT])
    else:
        subject = BNode()
    if name!=DESCRIPTION: # S1
        self.store.add((subject, TYPE, absolutize(name)))
    if TYPE in atts: # S2
        self.store.add((subject, TYPE, absolutize(atts[TYPE])))
    language = current.language
    # Remaining (non-syntactic) attributes become property attributes.
    for att in atts:
        if not att.startswith(RDFNS):
            predicate = absolutize(att)
            try:
                object = Literal(atts[att], language)
            except Error, e:
                self.error(e.msg)
        elif att==TYPE: #S2
            predicate = TYPE
            object = absolutize(atts[TYPE])
        elif att in NODE_ELEMENT_ATTRIBUTES:
            continue
        elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS: #S3
            self.error("Invalid property attribute URI: %s" % att)
            continue # for when error does not throw an exception
        else:
            predicate = absolutize(att)
            try:
                object = Literal(atts[att], language)
            except Error, e:
                self.error(e.msg)
        self.store.add((subject, predicate, object))
    current.subject = subject
def node_element_end(self, name, qname):
    """Expose the finished node's subject to its parent as that frame's object."""
    finished_subject = self.current.subject
    self.parent.object = finished_subject
def property_element_start(self, name, qname, attrs):
    """Parse a property element: establish its predicate, optional rdf:ID
    reification id, and its object (rdf:resource, rdf:nodeID, one of the
    rdf:parseType forms, or an inline literal/node).

    Fixes two error-message typos: "not a value NCName" -> "not a valid
    NCName" and "now allowed here" -> "not allowed here".
    """
    name, atts = self.convert(name, qname, attrs)
    current = self.current
    absolutize = self.absolutize
    next = self.next
    object = None
    current.list = None
    if not name.startswith(RDFNS):
        current.predicate = absolutize(name)
    elif name==LI:
        # rdf:li expands to the next _1, _2, ... membership predicate.
        current.predicate = current.next_li()
    elif name in PROPERTY_ELEMENT_EXCEPTIONS:
        self.error("Invalid property element URI: %s" % name)
    else:
        current.predicate = absolutize(name)
    id = atts.get(ID, None)
    if id is not None:
        if not is_ncname(id):
            self.error("rdf:ID value is not a valid NCName: %s" % id)
        current.id = absolutize("#%s" % id)
    else:
        current.id = None
    resource = atts.get(RESOURCE, None)
    nodeID = atts.get(NODE_ID, None)
    parse_type = atts.get(PARSE_TYPE, None)
    if resource is not None and nodeID is not None:
        self.error("Property element cannot have both rdf:nodeID and rdf:resource")
    if resource is not None:
        object = absolutize(resource)
        next.start = self.node_element_start
        next.end = self.node_element_end
    elif nodeID is not None:
        if not is_ncname(nodeID):
            self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
        if nodeID in self.bnode:
            object = self.bnode[nodeID]
        else:
            subject = BNode()
            self.bnode[nodeID] = subject
            object = subject
        next.start = self.node_element_start
        next.end = self.node_element_end
    else:
        if parse_type is not None:
            # parseType admits no other property attributes besides rdf:ID.
            for att in atts:
                if att!=PARSE_TYPE and att!=ID:
                    self.error("Property attr '%s' not allowed here" % att)
            if parse_type=="Resource":
                current.subject = object = BNode()
                current.char = self.property_element_char
                next.start = self.property_element_start
                next.end = self.property_element_end
            elif parse_type=="Collection":
                current.char = None
                next.start = self.node_element_start
                next.end = self.list_node_element_end
            else: #if parse_type=="Literal":
                # All other values are treated as Literal
                # See: http://www.w3.org/TR/rdf-syntax-grammar/#parseTypeOtherPropertyElt
                #object = Literal("", current.language, XMLLiteral)
                object = Literal("", "", XMLLiteral)
                current.char = self.literal_element_char
                current.declared = {}
                next.start = self.literal_element_start
                next.char = self.literal_element_char
                next.end = self.literal_element_end
            current.object = object
            return
        else:
            object = None
            current.char = self.property_element_char
            next.start = self.node_element_start
            next.end = self.node_element_end
    datatype = current.datatype = atts.get(DATATYPE, None)
    language = current.language
    if datatype is not None:
        # TODO: check that there are no atts other than datatype and id
        pass
    else:
        # Property attributes hang off a fresh blank-node object.
        for att in atts:
            if not att.startswith(RDFNS):
                predicate = absolutize(att)
            elif att in PROPERTY_ELEMENT_ATTRIBUTES:
                continue
            elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS:
                self.error("""Invalid property attribute URI: %s""" % att)
            else:
                predicate = absolutize(att)
            if att==TYPE:
                o = URIRef(atts[att])
            else:
                o = Literal(atts[att], language, datatype)
            if object is None:
                object = BNode()
            self.store.add((object, predicate, o))
    if object is None:
        object = Literal("", language, datatype)
    current.object = object
def property_element_char(self, data):
    """Accumulate character content into the current property's literal object."""
    current = self.current
    if current.object is None:
        try:
            # First chunk: create the literal with the active language/datatype.
            current.object = Literal(data, current.language, current.datatype)
        except Error, e:
            self.error(e.msg)
    else:
        # Only extend if we are still building a Literal (not a resource object).
        if isinstance(current.object, Literal):
            try:
                current.object += data
            except Error, e:
                self.error(e.msg)
def property_element_end(self, name, qname):
    """Emit the (subject, predicate, object) triple for a finished property
    element, closing any parseType="Collection" list with rdf:nil and
    reifying the statement when rdf:ID was given."""
    current = self.current
    if self.next.end==self.list_node_element_end:
        self.store.add((current.list, REST, NIL))
    if current.object is not None:
        self.store.add((self.parent.subject, current.predicate, current.object))
        if current.id is not None:
            self.add_reified(current.id, (self.parent.subject,
                current.predicate, current.object))
    current.subject = None
def list_node_element_end(self, name, qname):
    """Link a parseType="Collection" member into the rdf:first/rdf:rest chain."""
    current = self.current
    if not self.parent.list:
        # First member: start the list and expose it as the property's object.
        list = BNode()
        # Removed between 20030123 and 20030905
        #self.store.add((list, TYPE, LIST))
        self.parent.list = list
        self.store.add((self.parent.list, FIRST, current.subject))
        self.parent.object = list
        self.parent.char = None
    else:
        # Subsequent member: append a fresh cons cell via rdf:rest.
        list = BNode()
        # Removed between 20030123 and 20030905
        #self.store.add((list, TYPE, LIST))
        self.store.add((self.parent.list, REST, list))
        self.store.add((list, FIRST, current.subject))
        self.parent.list = list
def literal_element_start(self, name, qname, attrs):
    """Serialize the start tag of an element inside an XML literal,
    re-declaring any namespace not already declared in this subtree."""
    current = self.current
    self.next.start = self.literal_element_start
    self.next.char = self.literal_element_char
    self.next.end = self.literal_element_end
    # Inherit the set of namespaces already declared by ancestors.
    current.declared = self.parent.declared.copy()
    if name[0]:
        prefix = self._current_context[name[0]]
        if prefix:
            current.object = "<%s:%s" % (prefix, name[1])
        else:
            current.object = "<%s" % name[1]
        if not name[0] in current.declared:
            current.declared[name[0]] = prefix
            if prefix:
                current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
            else:
                current.object += (' xmlns="%s"' % name[0])
    else:
        current.object = "<%s" % name[1]
    # Re-serialize attributes, qualifying namespaced ones with their prefix.
    for (name, value) in attrs.items():
        if name[0]:
            if not name[0] in current.declared:
                current.declared[name[0]] = self._current_context[name[0]]
            name = current.declared[name[0]] + ":" + name[1]
        else:
            name = name[1]
        current.object += (' %s=%s' % (name, quoteattr(value)))
    current.object += ">"
def literal_element_char(self, data):
    """Append raw character data to the XML literal under construction."""
    frame = self.current
    frame.object += data
def literal_element_end(self, name, qname):
    """Close an XML-literal element and fold its text into the parent's buffer."""
    if name[0]:
        prefix = self._current_context[name[0]]
        if prefix:
            closing = u"</%s:%s>" % (prefix, name[1])
        else:
            closing = u"</%s>" % name[1]
    else:
        closing = u"</%s>" % name[1]
    self.parent.object += self.current.object + closing
| Python |
# Public parser entry points exported via ``from ... import *``.
__all__ = ["RDFXMLParser", "NTParser"]
| Python |
class Error(Exception):
    """Root of the rdflib exception hierarchy.

    The message is kept on ``msg`` for callers that format errors themselves.
    """
    def __init__(self, msg=None):
        # Populate Exception's args as well as our own attribute.
        Exception.__init__(self, msg)
        self.msg = msg
class TypeCheckError(Error):
    """Raised when part of an assertion fails a type check."""
    def __init__(self, node):
        Error.__init__(self, node)
        # Record the offending node and its type for message formatting.
        self.type = type(node)
        self.node = node
class SubjectTypeError(TypeCheckError):
    """Subject of an assertion must be a URIRef or BNode."""
    def __init__(self, node):
        TypeCheckError.__init__(self, node)
        self.msg = ("Subject must be instance of URIRef or BNode: %s(%s)"
                    % (self.node, self.type))
class PredicateTypeError(TypeCheckError):
    """Predicate of an assertion must be a URIRef."""
    def __init__(self, node):
        TypeCheckError.__init__(self, node)
        self.msg = ("Predicate must be a URIRef instance: %s(%s)"
                    % (self.node, self.type))
class ObjectTypeError(TypeCheckError):
    """Object of an assertion must be a URIRef, Literal, or BNode."""
    def __init__(self, node):
        TypeCheckError.__init__(self, node)
        self.msg = ("Object must be instance of URIRef, Literal, or BNode: %s(%s)"
                    % (self.node, self.type))
class ContextTypeError(TypeCheckError):
    """Context of an assertion must be a URIRef or BNode."""
    def __init__(self, node):
        TypeCheckError.__init__(self, node)
        self.msg = ("Context must be instance of URIRef or BNode: %s(%s)"
                    % (self.node, self.type))
class ParserError(Error):
    """RDF parser error carrying a plain-string message."""
    def __init__(self, msg):
        # NOTE: deliberately stores only msg; Error.__init__ is not called.
        self.msg = msg
    def __str__(self):
        return self.msg
class SerializerDispatchNameError(Error):
    """Raised when no serializer name has been set."""
    def __init__(self, msg):
        Error.__init__(self)
        self.msg = msg
class SerializerDispatchNameClashError(Error):
    """Raised on a serializer name clash.

    Bug fix: the constructor was spelled ``__init`` (missing trailing
    underscores), so it was never invoked and ``msg`` was never stored.
    """
    def __init__(self, msg):
        Error.__init__(self)
        self.msg = msg
class ParserDispatchNameError(Error):
    """Raised when no parser name has been set."""
    def __init__(self, msg):
        Error.__init__(self)
        self.msg = msg
class ParserDispatchNameClashError(Error):
    """Raised on a parser name clash.

    Bug fix: the constructor was spelled ``__init`` (missing trailing
    underscores), so it was never invoked and ``msg`` was never stored.
    """
    def __init__(self, msg):
        Error.__init__(self)
        self.msg = msg
| Python |
# RDF Library
# Package version string for this bundled copy of rdflib.
__version__ = "2.0.6"
| Python |
from sys import version_info
# unicodedata.normalize is only relied upon on Python > 2.2; on older
# interpreters NFC normalization is skipped entirely (normalize = None).
if version_info[0:2] > (2, 2):
    from unicodedata import normalize
else:
    normalize = None
from rdflib.Identifier import Identifier
from rdflib.Literal import Literal
class URIRef(Identifier):
    """Immutable URI reference node (a string-based Identifier subclass)."""
    def __new__(cls, value):
        return Identifier.__new__(cls, value)
    def __init__(self, value):
        # Enforce NFC normalization when unicodedata is available.
        if normalize and value:
            if not isinstance(value, unicode):
                value = unicode(value)
            if value != normalize("NFC", value):
                # NOTE(review): `Error` is not imported in this module, so this
                # raise would itself fail with NameError — confirm the intended
                # exception source (likely rdflib's exceptions module).
                raise Error("value must be in NFC normalized form.")
    def n3(self):
        # N3/N-Triples serialization of a URI reference.
        return "<%s>" % self
| Python |
#!/usr/bin/python
"""$Id$"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import unittest, new, os, sys, glob, re
from feedvalidator.logging import Message,SelfDoesntMatchLocation,MissingSelf
from feedvalidator import compatibility
from feedvalidator.formatter.application_test import Formatter
class TestCase(unittest.TestCase):
    """Base test case: assertion helpers over a list of logged validator events."""
    def failIfNoMessage(self, theList):
        # Every surviving event must be formattable; an unformattable
        # event is itself considered a validator bug.
        filterFunc = compatibility.AA
        events = filterFunc(theList)
        output = Formatter(events)
        for e in events:
            if not output.format(e):
                raise self.failureException, 'could not contruct message for %s' % e
    def failUnlessContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are no instances of theClass in theList with given params"""
        self.failIfNoMessage(theList)
        failure=(msg or 'no %s instances in %s' % (theClass.__name__, `theList`))
        for item in theList:
            if issubclass(item.__class__, theClass):
                if not params: return
                # All requested params must match (compared as strings);
                # the for/else returns only when no mismatch broke the loop.
                for k, v in params.items():
                    if str(item.params[k]) <> v:
                        failure=("%s.%s value was %s, expected %s" %
                            (theClass.__name__, k, item.params[k], v))
                        break
                else:
                    return
        raise self.failureException, failure
    def failIfContainsInstanceOf(self, theClass, params, theList, msg=None):
        """Fail if there are instances of theClass in theList with given params"""
        self.failIfNoMessage(theList)
        for item in theList:
            # Self-link events are exempt from the generic Message check.
            if theClass==Message and isinstance(item,SelfDoesntMatchLocation):
                continue
            if theClass==Message and isinstance(item,MissingSelf):
                continue
            if issubclass(item.__class__, theClass):
                if not params:
                    raise self.failureException, \
                        (msg or 'unexpected %s' % (theClass.__name__))
                allmatch = 1
                for k, v in params.items():
                    if item.params[k] != v:
                        allmatch = 0
                if allmatch:
                    raise self.failureException, \
                        "unexpected %s.%s with a value of %s" % \
                        (theClass.__name__, k, v)
# Test-case header of the form:
#   <!-- Description: ... Expect: [!]ExcName{k:v,...} -->
desc_re = re.compile("<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
# Alternative (validome-style) header: <!-- Description: ... Message: [!]Name -->
validome_re = re.compile("<!--\s*Description:\s*(.*?)\s*Message:\s*(!?)(\w*).*?\s*-->", re.S)
def getDescription(xmlfile):
"""Extract description and exception from XML file
The deal here is that each test case is an XML file which contains
not only a possibly invalid RSS feed but also the description of the
test, i.e. the exception that we would expect the RSS validator to
raise (or not) when it validates the feed. The expected exception and
the human-readable description are placed into an XML comment like this:
<!--
Description: channel must include title
Expect: MissingTitle
-->
"""
stream = open(xmlfile)
xmldoc = stream.read()
stream.close()
search_results = desc_re.search(xmldoc)
if search_results:
description, cond, excName, plist = list(search_results.groups())
else:
search_results = validome_re.search(xmldoc)
if search_results:
plist = ''
description, cond, excName = list(search_results.groups())
excName = excName.capitalize()
if excName=='Valid': cond,excName = '!', 'Message'
else:
raise RuntimeError, "can't parse %s" % xmlfile
if cond == "":
method = TestCase.failUnlessContainsInstanceOf
else:
method = TestCase.failIfContainsInstanceOf
params = {}
if plist:
for entry in plist.split(','):
name,value = entry.lstrip().split(':',1)
params[name] = value
exc = getattr(feedvalidator, excName)
description = xmlfile + ": " + description
return method, description, params, exc
def buildTestCase(xmlfile, xmlBase, description, method, exc, params):
    """Build a test function that validates `xmlfile` and applies `method`
    (a TestCase assertion) to the logged events, expecting `exc`/`params`.
    """
    def func(self, xmlfile=xmlfile, exc=exc, params=params):
        events = feedvalidator.validateString(
            open(xmlfile).read(), fallback='US-ASCII',
            base=xmlBase)['loggedEvents']
        return method(self, exc, params, events)
    func.__doc__ = description
    return func
def buildTestSuite():
    """Generate one test method per test-case file, attach them to TestCase,
    and load them all as a unittest suite."""
    curdir = os.path.dirname(os.path.abspath(__file__))
    basedir = os.path.split(curdir)[0]
    # Explicit files on the command line override the full testcase glob.
    for xmlfile in sys.argv[1:] or (glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')) + glob.glob(os.path.join(basedir, 'testcases', 'opml', '**', '*.opml'))):
        method, description, params, exc = getDescription(xmlfile)
        # Map the local path onto the canonical feedvalidator.org base URI.
        xmlBase = os.path.abspath(xmlfile).replace(basedir,"http://www.feedvalidator.org")
        testName = 'test_' + xmlBase.replace(os.path.sep, "/")
        testFunc = buildTestCase(xmlfile, xmlBase, description, method, exc, params)
        instanceMethod = new.instancemethod(testFunc, None, TestCase)
        setattr(TestCase, testName, instanceMethod)
    return unittest.TestLoader().loadTestsFromTestCase(TestCase)
if __name__ == '__main__':
    # Build the suite first so the generated tests exist on TestCase; argv is
    # trimmed so file arguments are not parsed as unittest options.
    suite = buildTestSuite()
    unittest.main(argv=sys.argv[:1])
| Python |
#!/usr/bin/python
"""$Id$"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import sys
import os
import urllib
import urllib2
import urlparse
if __name__ == '__main__':
    # arg 1 is URL to validate
    link = sys.argv[1:] and sys.argv[1] or 'http://www.intertwingly.net/blog/index.atom'
    # Resolve relative arguments against the current working directory.
    link = urlparse.urljoin('file:' + urllib.pathname2url(os.getcwd()) + '/', link)
    try:
        # Best-effort IDNA encoding of international hostnames.
        link = link.decode('utf-8').encode('idna')
    except:
        pass
    print 'Validating %s' % link
    curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
    basedir = urlparse.urljoin('file:' + curdir, ".")
    try:
        if link.startswith(basedir):
            # Local test files are validated as if served from feedvalidator.org.
            events = feedvalidator.validateStream(urllib.urlopen(link), firstOccurrenceOnly=1,base=link.replace(basedir,"http://www.feedvalidator.org/"))['loggedEvents']
        else:
            events = feedvalidator.validateURL(link, firstOccurrenceOnly=1)['loggedEvents']
    except feedvalidator.logging.ValidationFailure, vf:
        events = [vf.event]
    # (optional) arg 2 is compatibility level
    # "A" is most basic level
    # "AA" mimics online validator
    # "AAA" is experimental; these rules WILL change or disappear in future versions
    from feedvalidator import compatibility
    filter = sys.argv[2:] and sys.argv[2] or "AA"
    filterFunc = getattr(compatibility, filter)
    events = filterFunc(events)
    from feedvalidator.formatter.text_plain import Formatter
    output = Formatter(events)
    if output:
        print "\n".join(output)
        # Non-zero exit status lets shell scripts detect validation failures.
        sys.exit(1)
    else:
        print "No errors or warnings"
| Python |
#!/usr/bin/python
# This is a simple demo of validation through the web service.
WS_HOST = 'www.feedvalidator.org'
WS_URI = '/check.cgi'
import urllib, httplib
from xml.dom import minidom
from sys import exit
# Fetch the feed to validate
rawData = open('../testcases/rss/may/image_height_recommended.xml').read()
# Specify the content type, including the charset if known
hdrs = {'Content-Type': 'application/xml'}
# Simply POST the feed contents to the validator URL
connection=httplib.HTTPConnection(WS_HOST, 80)
connection.request('POST', WS_URI, rawData, hdrs)
response=connection.getresponse()
# The response is a SOAP message, as XML (otherwise there's a problem
# with the validator)
try:
    document=minidom.parseString(response.read())
except:
    print "Server error, unable to validate:",response.status,response.reason
    print "(Unable to parse response as XML.)"
    exit(20)
# If the status is OK, validation took place.
if response.status == 200:
    # "text" elements carry individual validation error messages.
    errors = document.getElementsByTagName("text")
    if not errors:
        print "The feed is valid!"
        exit(0)
    else:
        # Errors were found
        for node in errors:
            print "".join([child.data for child in node.childNodes])
        exit(5)
# If there was a problem on the server, show details
elif response.status >= 500:
    # SOAP fault: show the faultstring and, if present, the Python traceback.
    errors = document.getElementsByTagName("faultstring")
    for node in errors:
        print "".join([child.data for child in node.childNodes])
    traceback = document.getElementsByTagNameNS("http://www.python.org/doc/current/lib/module-traceback.html", "traceback")
    if traceback:
        print "".join([child.data for child in traceback[0].childNodes])
    exit(10)
# The unexpected happened...
else:
    print "Unexpected server response:",response.status,response.reason
    exit(20)
| Python |
#!/usr/bin/python
"""
$Id$
Show any logging events without explanatory web pages
"""
from sys import path, argv, exit
from os.path import isfile
import inspect
import os.path
curdir = os.path.abspath(os.path.dirname(argv[0]))
BASE = os.path.split(curdir)[0]
path.insert(0, os.path.join(BASE, 'src'))
import feedvalidator.logging
# Logic from text_html.py
def getRootClass(aClass):
    """Walk the first-base inheritance chain of aClass and return its
    ancestor that derives directly from feedvalidator.logging.LoggedEvent,
    or None if there is no such ancestor."""
    bases = aClass.__bases__
    if not bases:
        return None
    candidate = bases[0]
    bases = candidate.__bases__
    while bases:
        parent = bases[0]
        if parent == feedvalidator.logging.LoggedEvent:
            return candidate
        candidate = parent
        bases = candidate.__bases__
    return None
# Only report pages for the requested severities (default: warning, error).
show = argv[1:] or ['warning', 'error']
areMissing=False
for n, o in inspect.getmembers(feedvalidator.logging, inspect.isclass):
    rc = getRootClass(o)
    if not(rc):
        continue
    # Severity bucket name, e.g. 'error' from feedvalidator.logging.Error.
    rcname = rc.__name__.split('.')[-1].lower()
    if rcname in show:
        fn = os.path.join('docs', rcname, n + '.html')
        if not(isfile(os.path.join(BASE, fn))):
            print fn
            areMissing=True
# Exit non-zero when documentation pages are missing (for CI scripting).
if areMissing:
    exit(5)
| Python |
#!/usr/bin/python
"""
$Id: missingWebPages.py 75 2004-03-28 07:48:21Z josephw $
Show any logging events without explanatory web pages
"""
from sys import path, argv, exit
from os.path import isfile
import inspect
import os.path
curdir = os.path.abspath(os.path.dirname(argv[0]))
BASE = os.path.split(curdir)[0]
path.insert(0, os.path.join(BASE, 'src'))
import feedvalidator.logging
# Logic from text_html.py
def getRootClass(aClass):
    """Return the ancestor of aClass sitting directly below
    feedvalidator.logging.LoggedEvent in its first-base chain, if any."""
    chain = aClass.__bases__
    if not chain:
        return None
    current = chain[0]
    chain = current.__bases__
    while chain:
        ancestor = chain[0]
        if ancestor == feedvalidator.logging.LoggedEvent:
            return current
        current = ancestor
        chain = current.__bases__
    return None
# Only report pages for the requested severities (default: warning, error).
show = argv[1:] or ['warning', 'error']
areMissing=False
for n, o in inspect.getmembers(feedvalidator.logging, inspect.isclass):
    rc = getRootClass(o)
    if not(rc):
        continue
    # Severity bucket name, e.g. 'error' from feedvalidator.logging.Error.
    rcname = rc.__name__.split('.')[-1].lower()
    if rcname in show:
        fn = os.path.join('docs', rcname, n + '.html')
        if not(isfile(os.path.join(BASE, fn))):
            print fn
            areMissing=True
# Exit non-zero when documentation pages are missing (for CI scripting).
if areMissing:
    exit(5)
| Python |
import feedvalidator
import sys
def escapeURL(url):
    """Return url with each component re-quoted and the whole HTML-escaped."""
    import cgi, urllib, urlparse
    pieces = urlparse.urlparse(url)
    requoted = map(urllib.quote, map(urllib.unquote, pieces))
    return cgi.escape(urlparse.urlunparse(requoted))
def sanitizeURL(url):
    """Normalize a user-supplied URL: unwrap feed: URIs, force an http/https
    scheme, trim whitespace, and strip embedded credentials."""
    # Allow feed: URIs, as described by draft-obasanjo-feed-URI-scheme-02
    if url.lower().startswith('feed:'):
        url = url[5:]
        if url.startswith('//'):
            url = 'http:' + url
    scheme = url.split(':')[0].lower()
    if scheme not in ['http', 'https']:
        url = 'http://%s' % url
    url = url.strip()
    # strip user and password
    import re
    return re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1', url)
def index(req,url="",out="xml"):
    """mod_python entry point: validate `url` and render the events as
    HTML (out="html") or a minimal XML document (default)."""
    # Without a URL, just show the input form.
    if not url:
        s = """<html><head><title>Feed Validator</title></head><body>
Enter the URL to validate:
<p>
<form method="GET">
URL: <input type="text" name="url"><br>
<input type="submit">
<input type="hidden" name="out" value="html">
</form>
</html>"""
        return s
    url = sanitizeURL(url)
    events = feedvalidator.validateURL(url, firstOccurrenceOnly=1)['loggedEvents']
    # (optional) arg 2 is compatibility level
    # "A" is most basic level
    # "AA" mimics online validator
    # "AAA" is experimental; these rules WILL change or disappear in future versions
    from feedvalidator import compatibility
    filter = "AA"
    filterFunc = getattr(compatibility, filter)
    events = filterFunc(events)
    if out == "html":
        s = "<html><body><p>Validating " + escapeURL(url) + "...</p><pre>"
        from feedvalidator.formatter.text_plain import Formatter
        output = Formatter(events)
        if output:
            s += "\n".join(output)
        else:
            s += "No errors or warnings"
        s += "</pre></body></html>"
        return s
    else:
        from feedvalidator.formatter.text_xml import Formatter
        s = "\n".join(Formatter(events)) or ""
        s = '<?xml version="1.0"?>\n<validationErrors>\n' + s + "</validationErrors>"
        req.content_type = "application/xml"
        return s
if __name__=="__main__":
    # Command-line fallback: render HTML output for each URL argument
    # (req is unused in the html branch, so 0 is passed as a dummy).
    import sys
    for url in sys.argv[1:]:
        print index(0,url=url,out="html")
| Python |
#!/usr/bin/python
"""$Id: demo.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import sys
import os
import urllib
import urllib2
import urlparse
if __name__ == '__main__':
    # arg 1 is URL to validate
    link = sys.argv[1:] and sys.argv[1] or 'http://www.intertwingly.net/blog/index.atom'
    # Resolve relative arguments against the current working directory.
    link = urlparse.urljoin('file:' + urllib.pathname2url(os.getcwd()) + '/', link)
    try:
        # Best-effort IDNA encoding of international hostnames.
        link = link.decode('utf-8').encode('idna')
    except:
        pass
    print 'Validating %s' % link
    curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
    basedir = urlparse.urljoin('file:' + curdir, ".")
    try:
        if link.startswith(basedir):
            # Local test files are validated as if served from feedvalidator.org.
            events = feedvalidator.validateStream(urllib.urlopen(link), firstOccurrenceOnly=1,base=link.replace(basedir,"http://www.feedvalidator.org/"))['loggedEvents']
        else:
            events = feedvalidator.validateURL(link, firstOccurrenceOnly=1)['loggedEvents']
    except feedvalidator.logging.ValidationFailure, vf:
        events = [vf.event]
    # (optional) arg 2 is compatibility level
    # "A" is most basic level
    # "AA" mimics online validator
    # "AAA" is experimental; these rules WILL change or disappear in future versions
    from feedvalidator import compatibility
    filter = sys.argv[2:] and sys.argv[2] or "AA"
    filterFunc = getattr(compatibility, filter)
    events = filterFunc(events)
    from feedvalidator.formatter.text_plain import Formatter
    output = Formatter(events)
    if output:
        print "\n".join(output)
        # Non-zero exit status lets shell scripts detect validation failures.
        sys.exit(1)
    else:
        print "No errors or warnings"
| Python |
#!/usr/bin/env python
from config import *
import cgi, sys, os, urlparse, sys, re, urllib
import cgitb
cgitb.enable()
import codecs
ENCODING='UTF-8'
sys.stdout = codecs.getwriter(ENCODING)(sys.stdout)
# Used for CGI parameters
decUTF8 = codecs.getdecoder('utf-8')
decW1252 = codecs.getdecoder('windows-1252')
if PYDIR not in sys.path:
sys.path.insert(0, PYDIR)
if WEBDIR not in sys.path:
sys.path.insert(0, WEBDIR)
if SRCDIR not in sys.path:
sys.path.insert(0, SRCDIR)
import feedvalidator
from feedvalidator.logging import FEEDTYPEDISPLAY, VALIDFEEDGRAPHIC
from feedvalidator.logging import Info, Warning, Error, ValidationFailure
from feedvalidator.logging import TYPE_ATOM_ENTRY, TYPE_OPENSEARCH, TYPE_XRD
from feedvalidator.logging import TYPE_APP_SERVICE, TYPE_APP_CATEGORIES
def applyTemplate(templateFile, params=None):
    """Render templates/<templateFile> by %-interpolating params.

    Returns the rendered template encoded as UTF-8 bytes. CSSURL is always
    injected into the parameter set.

    Bug fix: the previous mutable default dict was mutated in place
    (params['CSSURL'] = ...), altering both the shared default and any
    caller-supplied dict; the parameters are now copied first.
    """
    params = dict(params or {})
    params['CSSURL'] = CSSURL
    fsock = open(os.path.join(WEBDIR, 'templates', templateFile))
    data = fsock.read() % params
    fsock.close()
    return data.encode('utf-8')
def sanitizeURL(url):
    """Normalize a user-supplied URL: unwrap feed: URIs, force an http/https
    scheme, trim whitespace, and strip embedded credentials."""
    # Allow feed: URIs, as described by draft-obasanjo-feed-URI-scheme-02
    if url.lower().startswith('feed:'):
        url = url[5:]
        if url.startswith('//'):
            url = 'http:' + url
    scheme = url.split(':')[0].lower()
    if scheme not in ['http', 'https']:
        url = 'http://%s' % url
    url = url.strip()
    # strip user and password
    return re.sub(r'^(\w*://)[-+.\w]*(:[-+.\w]+)?@', r'\1', url)
def escapeURL(url):
    """Re-quote each URL component with its own safe-character set,
    HTML-escape the result, and IDNA-decode the host when possible."""
    parts = list(urlparse.urlparse(url))
    # Characters that must survive quoting, per urlparse component.
    safe = ['/', '/:@', '/', '/', '/?&=;', '/']
    for i in range(len(parts)):
        parts[i] = urllib.quote(urllib.unquote(parts[i]), safe[i])
    url = cgi.escape(urlparse.urlunparse(parts))
    try:
        return url.decode('idna')
    except:
        return url
import feedvalidator.formatter.text_html
def buildCodeListing(events, rawdata, url):
    """Render the validated feed as an HTML line-by-line listing, styling
    lines that triggered logged events differently."""
    # print feed
    codelines = []
    linenum = 1
    linesWithErrors = [e.params.get('line', 0) for e in events]
    for line in rawdata.split('\n'):
        line = feedvalidator.formatter.text_html.escapeAndMark(line)
        # Substitute a placeholder so empty rows are not collapsed.
        if not line: line = ' '
        # "b" marks a line with an event; "a" is a normal line.
        linetype = linenum in linesWithErrors and "b" or "a"
        codelines.append(applyTemplate('code_listing_line.tmpl', {"line":line, "linenum":linenum, "linetype":linetype}).decode('utf-8'))
        linenum += 1
    codelisting = "".join(codelines)
    return applyTemplate('code_listing.tmpl', {"codelisting":codelisting, "url":escapeURL(url)})
def yieldEventList(output):
    """Yield the formatted error section, then the warning section, from a
    Formatter-like object.

    When both sections are present they are separated by a footer, an
    "and N warning(s)" template, and a fresh header.

    Fix: getErrors()/getWarnings() are now called once each; previously the
    cached `errors` list was fetched and then getErrors() was re-called for
    both loops.
    """
    errors, warnings = output.getErrors(), output.getWarnings()
    yield output.header()
    for o in errors:
        yield o.encode('utf-8')
    if errors and warnings:
        # Close the error section and open a new one for the warnings.
        yield output.footer()
        if len(warnings) == 1:
            yield applyTemplate('andwarn1.tmpl')
        else:
            yield applyTemplate('andwarn2.tmpl')
        yield output.header()
    for o in warnings:
        yield o.encode('utf-8')
    yield output.footer()
from feedvalidator.formatter.text_html import Formatter
def postvalidate(url, events, rawdata, feedType, autofind=1):
    """returns dictionary including 'url', 'events', 'rawdata', 'output', 'specialCase', 'feedType'"""
    # filter based on compatibility level
    from feedvalidator import compatibility
    filterFunc = compatibility.AA # hardcoded for now
    events = filterFunc(events)
    specialCase = None
    formattedOutput = Formatter(events, rawdata)
    if formattedOutput:
        # check for special cases
        specialCase = compatibility.analyze(events, rawdata)
        # If the URL pointed at an HTML page, attempt feed autodiscovery
        # exactly once; autofind=0 on the recursive call prevents loops.
        if (specialCase == 'html') and autofind:
            try:
                try:
                    import feedfinder
                    rssurls = feedfinder.getLinks(rawdata,url)
                except:
                    # feedfinder unavailable or failed: fall back to the URL itself.
                    rssurls = [url]
                if rssurls:
                    url = rssurls[0]
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    return postvalidate(url, events, rawdata, feedType, autofind=0)
            except:
                # Best-effort: any autodiscovery failure leaves the original result.
                pass
    return {"url":url, "events":events, "rawdata":rawdata, "output":formattedOutput, "specialCase":specialCase, "feedType":feedType}
def checker_app(environ, start_response):
    """WSGI application: validate a feed given by the 'url' or 'rawdata'
    form field and yield the response body incrementally — HTML by default,
    or a SOAP 1.2 envelope when output=soap12."""
    method = environ['REQUEST_METHOD'].lower()
    contentType = environ.get('CONTENT_TYPE', None)
    output_option = ''
    if (method == 'get') or (contentType and cgi.parse_header(contentType)[0].lower() == 'application/x-www-form-urlencoded'):
        fs = cgi.FieldStorage(fp=environ.get('wsgi.input',None), environ=environ)
        url = fs.getvalue("url") or ''
        try:
            # Best-effort IDNA encoding of international hostnames.
            if url: url = url.decode('utf-8').encode('idna')
        except:
            pass
        manual = fs.getvalue("manual") or 0
        rawdata = fs.getvalue("rawdata") or ''
        output_option = fs.getvalue("output") or ''
        # XXX Should use 'charset'
        try:
            rawdata = decUTF8(rawdata)[0]
        except UnicodeError:
            rawdata = decW1252(rawdata)[0]
        # Cap submitted data and normalize line endings.
        rawdata = rawdata[:feedvalidator.MAXDATALENGTH].replace('\r\n', '\n').replace('\r', '\n')
    else:
        url = None
        manual = None
        rawdata = None
    if (output_option == "soap12"):
        # SOAP
        try:
            if ((method == 'post') and (not rawdata)):
                params = feedvalidator.validateStream(sys.stdin, contentType=contentType)
            elif rawdata :
                params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
            elif url:
                url = sanitizeURL(url)
                params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
            # NOTE(review): if none of the branches above ran, `params` is
            # unbound here; the resulting NameError is caught by the except
            # below and reported as a SOAP fault — confirm this is intended.
            events = params['loggedEvents']
            feedType = params['feedType']
            # filter based on compatibility level
            from feedvalidator import compatibility
            filterFunc = compatibility.AA # hardcoded for now
            events = filterFunc(events)
            events_error = list()
            events_warn = list()
            events_info = list()
            # format as xml
            from feedvalidator.formatter.text_xml import Formatter as xmlformat
            output = xmlformat(events)
            for event in events:
                if isinstance(event,Error): events_error.append(output.format(event))
                if isinstance(event,Warning): events_warn.append(output.format(event))
                if isinstance(event,Info): events_info.append(output.format(event))
            # The feed is "valid" iff no error-level events were logged.
            if len(events_error) > 0:
                validation_bool = "false"
            else:
                validation_bool = "true"
            from datetime import datetime
            right_now = datetime.now()
            validationtime = str( right_now.isoformat())
            body = applyTemplate('soap.tmpl', {
                'errorlist':"\n".join( events_error), 'errorcount': str(len(events_error)),
                'warninglist':"\n".join( events_warn), 'warningcount': str(len(events_warn)),
                'infolist':"\n".join( events_info), 'infocount': str(len(events_info)),
                'home_url': HOMEURL, 'url': url, 'date_time': validationtime, 'validation_bool': validation_bool
                })
            start_response('200 OK', [('Content-type', 'application/soap+xml; charset=' + ENCODING)])
            yield body
        except:
            # Any failure becomes a SOAP fault carrying the traceback.
            import traceback
            tb = ''.join(apply(traceback.format_exception, sys.exc_info()))
            from feedvalidator.formatter.text_xml import xmlEncode
            start_response('500 Internal Error', [('Content-type', 'text/xml; charset=' + ENCODING)])
            yield applyTemplate('fault.tmpl', {'code':sys.exc_info()[0],
                'string':sys.exc_info()[1], 'traceback':xmlEncode(tb)})
    else:
        start_response('200 OK', [('Content-type', 'text/html; charset=' + ENCODING)])
        if url or rawdata:
            # validate
            goon = 0
            if rawdata:
                # validate raw data (from text form)
                try:
                    params = feedvalidator.validateString(rawdata, firstOccurrenceOnly=1)
                    events = params['loggedEvents']
                    feedType = params['feedType']
                    goon = 1
                except ValidationFailure, vfv:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('manual.tmpl', {'rawdata':escapeURL(url)})
                    output = Formatter([vfv.event], None)
                    for item in yieldEventList(output):
                        yield item
                    yield applyTemplate('error.tmpl')
                except:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('manual.tmpl', {'rawdata':escapeURL(url)})
                    yield applyTemplate('error.tmpl')
            else:
                url = sanitizeURL(url)
                try:
                    params = feedvalidator.validateURL(url, firstOccurrenceOnly=1, wantRawData=1)
                    events = params['loggedEvents']
                    rawdata = params['rawdata']
                    feedType = params['feedType']
                    goon = 1
                except ValidationFailure, vfv:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                    output = Formatter([vfv.event], None)
                    for item in yieldEventList(output):
                        yield item
                    yield applyTemplate('error.tmpl')
                except:
                    yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                    yield applyTemplate('error.tmpl')
            if goon:
                # post-validate (will do RSS autodiscovery if needed)
                validationData = postvalidate(url, events, rawdata, feedType)
                # write output header
                url = validationData['url']
                feedType = validationData['feedType']
                rawdata = validationData['rawdata']
                htmlUrl = escapeURL(urllib.quote(url))
                try:
                    htmlUrl = htmlUrl.encode('idna')
                except:
                    pass
                # Choose the noun used in the verdict text per document type.
                docType = 'feed'
                if feedType == TYPE_ATOM_ENTRY: docType = 'entry'
                if feedType == TYPE_XRD: docType = 'document'
                if feedType == TYPE_APP_CATEGORIES: docType = 'Document'
                if feedType == TYPE_APP_SERVICE: docType = 'Document'
                if feedType == TYPE_OPENSEARCH: docType = 'description document'
                yield applyTemplate('header.tmpl', {'title':'Feed Validator Results: %s' % escapeURL(url)})
                if manual:
                    yield applyTemplate('manual.tmpl', {'rawdata':cgi.escape(rawdata)})
                else:
                    yield applyTemplate('index.tmpl', {'value':escapeURL(url)})
                output = validationData.get('output', None)
                # print special case, if any
                specialCase = validationData.get('specialCase', None)
                if specialCase:
                    yield applyTemplate('%s.tmpl' % specialCase)
                msc = output.mostSeriousClass()
                # Explain the overall verdict
                if msc == Error:
                    from feedvalidator.logging import ObsoleteNamespace
                    if len(output.getErrors())==1 and \
                       isinstance(output.data[0],ObsoleteNamespace):
                        yield applyTemplate('notsupported.tmpl')
                    elif specialCase != 'html':
                        yield applyTemplate('invalid.tmpl')
                else:
                    yield applyTemplate('congrats.tmpl', {"feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType], "docType":docType})
                    if msc == Warning:
                        yield applyTemplate('warning.tmpl')
                    elif msc == Info:
                        yield applyTemplate('info.tmpl')
                # Print any issues, whether or not the overall feed is valid
                if output:
                    if specialCase != 'html':
                        for item in yieldEventList(output):
                            yield item
                    # print code listing
                    yield buildCodeListing(validationData['events'], validationData['rawdata'], url)
                # As long as there were no errors, show that the feed is valid
                if msc != Error:
                    # valid
                    yield applyTemplate('valid.tmpl', {"url":htmlUrl, "srcUrl":htmlUrl, "feedType":FEEDTYPEDISPLAY[feedType], "graphic":VALIDFEEDGRAPHIC[feedType], "HOMEURL":HOMEURL, "docType":docType})
        else:
            # nothing to validate, just write basic form
            yield applyTemplate('header.tmpl', {'title':'Feed Validator for Atom and RSS'})
            if manual:
                yield applyTemplate('manual.tmpl', {'rawdata':''})
            else:
                yield applyTemplate('index.tmpl', {'value':'http://'})
            yield applyTemplate('special.tmpl', {})
        yield applyTemplate('navbar.tmpl')
        yield applyTemplate('footer.tmpl')
# Command-line entry point for the validator front end.
# With no argument (or a non-numeric one) behave as a one-shot CGI:
# print the headers and body produced by checker_app to stdout.
# With a numeric argument, serve the same WSGI app over FastCGI on
# that localhost port instead.
if __name__ == "__main__":
    if len(sys.argv)==1 or not sys.argv[1].isdigit():
        # Minimal WSGI start_response that emits CGI-style headers.
        def start_response(status, headers):
            print 'Status: %s\r\n' % status,
            for header,value in headers:
                print '%s: %s\r\n' % (header, value),
            print  # blank line terminates the header block
        for output in checker_app(os.environ, start_response):
            print output.decode('utf-8')
    else:
        # export HTTP_HOST=http://feedvalidator.org/
        # export SCRIPT_NAME=check.cgi
        # export SCRIPT_FILENAME=/home/rubys/svn/feedvalidator/check.cgi
        import fcgi
        port=int(sys.argv[1])
        fcgi.WSGIServer(checker_app, bindAddress=("127.0.0.1", port)).run()
| Python |
#!/usr/bin/python
# Tiny diagnostic CGI: report the server's current time in both the
# RFC 2822 format used by RSS and the RFC 3339 profile used by Atom.
print "Content-type: text/plain\r\n\r\n",
import rfc822
import time
print "Current time:\n"
print " RFC 2822: " + rfc822.formatdate()
print " RFC 3339: " + time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
| Python |
#!/usr/bin/env python
# CGI wrapper around index(): reads 'url' and 'out' form fields, runs the
# validator, and prints the result with the content type chosen by index().
from config import *
import cgi, cgitb, sys
cgitb.enable()  # render tracebacks as HTML instead of a bare 500
import codecs
ENCODING='UTF-8'
# Transparently encode everything written to stdout as UTF-8.
sys.stdout = codecs.getwriter(ENCODING)(sys.stdout)
if SRCDIR not in sys.path:
    sys.path.insert(0, SRCDIR)
# Minimal stand-in for a web-framework request object; index() may
# overwrite content_type before we emit headers.
class request:
    content_type = "text/html"
from index import index
fs = cgi.FieldStorage()
req = request()
url = fs.getvalue('url') or ''
out = fs.getvalue('out') or 'xml'  # output format, defaults to xml
result=index(req,url,out)
print "Content-type: %s\r\n\r\n%s" % (req.content_type, result)
| Python |
#!/usr/bin/python
# Put a header and a footer on a list of all documented diagnostics,
# linking to their pages.
# Note that this script has lots of hardcoded paths, needs to be run
# from the docs-xml directory, and modifies the index.html in docs.
from os import listdir
from os import path
import re
from sys import stderr
basedir = '.'
messageRe = re.compile("<div id='message'>\n<p>(.*)</p>\n</div>")
def getMessage(fn):
    """Return the message text from the <div id='message'> block of file *fn*.

    Raises AttributeError (unchanged behaviour) when the file contains no
    message div; callers treat that as a fatal formatting error.
    """
    # Same pattern as the module-level messageRe, duplicated locally so the
    # function is self-contained; re.compile caches, so this is cheap.
    pattern = re.compile("<div id='message'>\n<p>(.*)</p>\n</div>")
    f = open(fn)
    try:
        # try/finally guarantees the handle is closed even if read() fails;
        # the original leaked the descriptor on error.
        txt = f.read()
    finally:
        f.close()
    return pattern.search(txt).group(1)
of = open('../docs/index.html', 'w')
def printLine(hr, msg):
    """Emit one index entry: a list item linking href *hr* with text *msg*."""
    entry = '<li><a href="%s">%s</a></li>' % (hr, msg)
    of.write(entry + "\n")
# Assemble docs/index.html: header boilerplate, one section per severity
# level with an alphabetised list of message pages, then footer boilerplate.
f = open('docs-index-header.html')
of.write(f.read())
f.close()
of.write("<h2>Validator messages</h2>\n")
# NOTE: 'type' and 'f' shadow a builtin / outer names; kept as-is.
for (type, title) in [('error', 'Errors'), ('warning', 'Warnings'), ('info', 'Information')]:
    p = path.join(basedir, type)
    allMsgs = []
    for f in listdir(p):
        (name,ext) = path.splitext(f)
        if ext != '.xml':
            continue  # only the XML message sources are indexed
        msg = getMessage(path.join(p, f))
        allMsgs.append([name, msg])
    allMsgs.sort()  # alphabetical by file name
    of.write("\n<h3>%s</h3>\n" % title)
    of.write("<ul>\n")
    for (f, msg) in allMsgs:
        printLine(type + '/' + f + '.html', msg)
    of.write("</ul>\n")
f = open('docs-index-footer.html')
of.write(f.read())
f.close()
| Python |
#!/usr/bin/python
# Given a template (with a specific format), a target document root and a set of formatted XML
# documents, generate HTML documentation for public web access.
# Extracts information from XML using regular expression and proper parsing
from sys import argv, stderr, exit
# Require at least a template and a target directory on the command line.
if len(argv) < 3:
    print >>stderr,"Usage:",argv[0]," <template.html> <target-doc-directory> [source XML document ... ]"
    exit(5)
template = argv[1]
targetDir = argv[2]
# Read the page template once; writeDoc() starts from a copy of it ('bp')
# for every generated document.
f = open(template)
bp = f.read()
f.close()
doc = bp  # NOTE(review): shadowed inside writeDoc(); appears redundant here
import libxml2
import os.path
libxml2.substituteEntitiesDefault(True)
def asText(x):
    # Parse the XML string *x* and cast the resulting document to a string.
    # NOTE(review): xpathCastNodeToString is invoked on the document node
    # itself; presumably this yields the concatenated text content --
    # confirm against the libxml2 Python bindings.
    d = libxml2.parseDoc(x)
    return d.xpathCastNodeToString()
import re
wsRE = re.compile('\s+')
def trimWS(s):
    """Collapse each run of whitespace in *s* to one space and trim the ends.

    After collapsing, at most a single leading and trailing space can remain,
    so strip(' ') reproduces the original one-character trimming exactly
    (including the all-whitespace and empty-string cases).
    """
    return re.sub(r'\s+', ' ', s).strip(' ')
secRe = re.compile("<div id='(\w+)'>\n(.*?\n)</div>\n", re.DOTALL)
import codecs
def writeDoc(x, h):
    """Render source XML document *x* into HTML page *h* via template 'bp'."""
    f = open(x)
    t = f.read()
    f.close()
    doc = bp  # start from a fresh copy of the page template
    # Get the title
    xd = libxml2.parseFile(x)
    ctxt = xd.xpathNewContext()
    ctxt.xpathRegisterNs('html', 'http://www.w3.org/1999/xhtml')
    title = ctxt.xpathEvalExpression('string(/fvdoc//html:div[@id="message"])')
    title = trimWS(title)
    doc = doc.replace('<title></title>', '<title>' + title + '</title>')
    # Splice each named section of the source into the matching empty
    # "docbody" placeholder of the template (group 1 marks the insert point).
    for (sec, txt) in secRe.findall(t):
        r = re.compile('<h2>' + sec + '</h2>\s*<div class="docbody">\s*()</div>', re.IGNORECASE)
        idx = r.search(doc).start(1)
        doc = doc[:idx] + txt + doc[idx:]
    # Transcode UTF-8 input to Latin-1 output, turning anything
    # unrepresentable into numeric character references.
    c = codecs.getdecoder('utf-8')
    doc = c(doc)[0]
    c = codecs.getencoder('iso-8859-1')
    f = open(h, 'w')
    f.write(c(doc, 'xmlcharrefreplace')[0])
    f.close()
# Generate one HTML page per XML source given on the command line, writing
# to targetDir/<category>/<name>.html where <category> is the source's
# parent directory name (error/warning/info).
for f in argv[3:]:
    sp = os.path.abspath(f)
    if not(os.path.isfile(sp)):
        continue  # silently skip directories and dangling paths
    category = os.path.split(os.path.dirname(sp))[1]
    filename = os.path.basename(sp)
    if not(category):
        continue  # file at filesystem root: no category to map to
    (name, ext) = os.path.splitext(filename)
    if ext == '.xml':
        writeDoc(sp, os.path.join(targetDir, category, name + '.html'))
    else:
        print >>stderr,"Ignoring",f
| Python |
import sys
import os.path as path
basename = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(path.join(basename,'src'))
from feedvalidator.i18n.en import messages
from feedvalidator.logging import Warning, Error
template = '''
<fvdoc>
<div xmlns='http://www.w3.org/1999/xhtml'>
<div id='message'>
<p>%s</p>
</div>
<div id='explanation'>
<p>XXX</p>
</div>
<div id='solution'>
<p>XXX</p>
</div>
</div>
</fvdoc>
'''
def missing():
    """Return (dir, name, message, xml-path, html-path) tuples for validator
    messages that lack documentation, skipping any whose ancestor class is
    already documented."""
    result = []
    for key, value in messages.items():
        # Map the message class to its docs directory by severity.
        if issubclass(key,Error):
            dir = 'error'
        elif issubclass(key,Warning):
            dir = 'warning'
        else:
            continue  # informational messages are not tracked here
        xml = path.join(basename, 'docs-xml', dir, key.__name__+'.xml')
        html = path.join(basename, 'docs', dir, key.__name__+'.html')
        if not path.exists(html) or not path.exists(xml):
            print xml
            # Walk up the class hierarchy: if any ancestor (short of the
            # Error/Warning roots) is documented, this message is covered.
            base = key.__bases__[0]
            while base not in [Error,Warning]:
                if path.exists(path.join(basename, 'docs', dir, base.__name__+'.html')) and \
                   path.exists(path.join(basename, 'docs-xml', dir, base.__name__+'.xml')):
                    break
                base = base.__bases__[0]
            else:
                # while/else: no documented ancestor found -> truly missing.
                result.append((dir, key.__name__, value, xml, html))
    return result
import unittest
class MissingMessagesTest(unittest.TestCase):
    """Fails when any validator message lacks documentation."""
    def test_messages(self):
        undocumented = ["%s/%s" % (dir, id)
                        for dir, id, msg, xml, html in missing()]
        self.assertEqual([], undocumented)
def buildTestSuite():
    """Return a TestSuite wrapping all MissingMessagesTest cases."""
    suite = unittest.TestSuite()
    cases = unittest.TestLoader().loadTestsFromTestCase(MissingMessagesTest)
    suite.addTest(cases)
    return suite
# Run directly: stub out every undocumented message -- an empty HTML page
# plus an XML skeleton from 'template' for the author to fill in.
if __name__ == '__main__':
    import re
    for dir, id, msg, xml, html in missing():
        # Replace printf-style %(name)s placeholders with a generic sample.
        msg = re.sub("%\(\w+\)\w?", "<code>foo</code>", msg)
        if not path.exists(html):
            open(html,'w').write('')
        if not path.exists(xml):
            print xml
            open(xml,'w').write(template.lstrip() % msg)
| Python |
#!/usr/bin/python
# Put a header and a footer on a list of all documented diagnostics,
# linking to their pages.
# Note that this script has lots of hardcoded paths, needs to be run
# from the docs-xml directory, and modifies the index.html in docs.
from os import listdir
from os import path
import re
from sys import stderr
basedir = '.'
messageRe = re.compile("<div id='message'>\n<p>(.*)</p>\n</div>")
def getMessage(fn):
    """Return the message text from the <div id='message'> block of file *fn*.

    Raises AttributeError (unchanged behaviour) when the file contains no
    message div; callers treat that as a fatal formatting error.
    """
    # Same pattern as the module-level messageRe, duplicated locally so the
    # function is self-contained; re.compile caches, so this is cheap.
    pattern = re.compile("<div id='message'>\n<p>(.*)</p>\n</div>")
    f = open(fn)
    try:
        # try/finally guarantees the handle is closed even if read() fails;
        # the original leaked the descriptor on error.
        txt = f.read()
    finally:
        f.close()
    return pattern.search(txt).group(1)
of = open('../docs/index.html', 'w')
def printLine(hr, msg):
    """Emit one index entry: a list item linking href *hr* with text *msg*."""
    entry = '<li><a href="%s">%s</a></li>' % (hr, msg)
    of.write(entry + "\n")
# Assemble docs/index.html: header boilerplate, one section per severity
# level with an alphabetised list of message pages, then footer boilerplate.
f = open('docs-index-header.html')
of.write(f.read())
f.close()
of.write("<h2>Validator messages</h2>\n")
# NOTE: 'type' and 'f' shadow a builtin / outer names; kept as-is.
for (type, title) in [('error', 'Errors'), ('warning', 'Warnings'), ('info', 'Information')]:
    p = path.join(basedir, type)
    allMsgs = []
    for f in listdir(p):
        (name,ext) = path.splitext(f)
        if ext != '.xml':
            continue  # only the XML message sources are indexed
        msg = getMessage(path.join(p, f))
        allMsgs.append([name, msg])
    allMsgs.sort()  # alphabetical by file name
    of.write("\n<h3>%s</h3>\n" % title)
    of.write("<ul>\n")
    for (f, msg) in allMsgs:
        printLine(type + '/' + f + '.html', msg)
    of.write("</ul>\n")
f = open('docs-index-footer.html')
of.write(f.read())
f.close()
| Python |
#!/usr/bin/python
# Given a template (with a specific format), a target document root and a set of formatted XML
# documents, generate HTML documentation for public web access.
# Extracts information from XML using regular expression and proper parsing
from sys import argv, stderr, exit
# Require at least a template and a target directory on the command line.
if len(argv) < 3:
    print >>stderr,"Usage:",argv[0]," <template.html> <target-doc-directory> [source XML document ... ]"
    exit(5)
template = argv[1]
targetDir = argv[2]
# Read the page template once; writeDoc() starts from a copy of it ('bp')
# for every generated document.
f = open(template)
bp = f.read()
f.close()
doc = bp  # NOTE(review): shadowed inside writeDoc(); appears redundant here
import libxml2
import os.path
libxml2.substituteEntitiesDefault(True)
def asText(x):
    # Parse the XML string *x* and cast the resulting document to a string.
    # NOTE(review): xpathCastNodeToString is invoked on the document node
    # itself; presumably this yields the concatenated text content --
    # confirm against the libxml2 Python bindings.
    d = libxml2.parseDoc(x)
    return d.xpathCastNodeToString()
import re
wsRE = re.compile('\s+')
def trimWS(s):
    """Collapse each run of whitespace in *s* to one space and trim the ends.

    After collapsing, at most a single leading and trailing space can remain,
    so strip(' ') reproduces the original one-character trimming exactly
    (including the all-whitespace and empty-string cases).
    """
    return re.sub(r'\s+', ' ', s).strip(' ')
secRe = re.compile("<div id='(\w+)'>\n(.*?\n)</div>\n", re.DOTALL)
import codecs
def writeDoc(x, h):
    """Render source XML document *x* into HTML page *h* via template 'bp'."""
    f = open(x)
    t = f.read()
    f.close()
    doc = bp  # start from a fresh copy of the page template
    # Get the title
    xd = libxml2.parseFile(x)
    ctxt = xd.xpathNewContext()
    ctxt.xpathRegisterNs('html', 'http://www.w3.org/1999/xhtml')
    title = ctxt.xpathEvalExpression('string(/fvdoc//html:div[@id="message"])')
    title = trimWS(title)
    doc = doc.replace('<title></title>', '<title>' + title + '</title>')
    # Splice each named section of the source into the matching empty
    # "docbody" placeholder of the template (group 1 marks the insert point).
    for (sec, txt) in secRe.findall(t):
        r = re.compile('<h2>' + sec + '</h2>\s*<div class="docbody">\s*()</div>', re.IGNORECASE)
        idx = r.search(doc).start(1)
        doc = doc[:idx] + txt + doc[idx:]
    # Transcode UTF-8 input to Latin-1 output, turning anything
    # unrepresentable into numeric character references.
    c = codecs.getdecoder('utf-8')
    doc = c(doc)[0]
    c = codecs.getencoder('iso-8859-1')
    f = open(h, 'w')
    f.write(c(doc, 'xmlcharrefreplace')[0])
    f.close()
# Generate one HTML page per XML source given on the command line, writing
# to targetDir/<category>/<name>.html where <category> is the source's
# parent directory name (error/warning/info).
for f in argv[3:]:
    sp = os.path.abspath(f)
    if not(os.path.isfile(sp)):
        continue  # silently skip directories and dangling paths
    category = os.path.split(os.path.dirname(sp))[1]
    filename = os.path.basename(sp)
    if not(category):
        continue  # file at filesystem root: no category to map to
    (name, ext) = os.path.splitext(filename)
    if ext == '.xml':
        writeDoc(sp, os.path.join(targetDir, category, name + '.html'))
    else:
        print >>stderr,"Ignoring",f
| Python |
# Site-local configuration derived from the CGI environment.
# Default URL of the validator itself... feel free to beautify as you like
import os
# NOTE(review): assumes HTTP_HOST and SCRIPT_NAME are set by the web server;
# importing this module outside a CGI context raises KeyError.
HOMEURL = os.environ['HTTP_HOST'] + os.environ['SCRIPT_NAME']
if not HOMEURL.startswith('http://'): HOMEURL = 'http://' + HOMEURL
# This is where the CGI itself is... other supporting scripts (like
# feedfinder) may be placed here.
WEBDIR = '/'.join(os.environ['SCRIPT_FILENAME'].split('/')[0:-1])
# This following value is primarily used for setting up the other values...
HOMEDIR = WEBDIR
# This is where local python libraries are installed. This may be useful
# for locating a locally installed libxml2 library, for example...
PYDIR = HOMEDIR + r'/lib/python/'
# This is where the feedvalidator code lives...
SRCDIR = WEBDIR + r'/src'
# The web location prefix of the docs and CSS, relative to check.cgi
DOCSURL='docs'
CSSURL='css'
| Python |
#!/usr/bin/python
# Tiny diagnostic CGI: report the server's current time in both the
# RFC 2822 format used by RSS and the RFC 3339 profile used by Atom.
print "Content-type: text/plain\r\n\r\n",
import rfc822
import time
print "Current time:\n"
print " RFC 2822: " + rfc822.formatdate()
print " RFC 3339: " + time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 1.144 $"[11:16] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/. At the moment, it appears
# that there is a version incompatibility, so the import is replaced with
# a 'None'. Restoring the try/import/except/none will renable the MF tests.
BeautifulSoup = None
# ---------- don't touch these ----------
# Marker exceptions for non-fatal parsing conditions.  Subclasses of
# ThingsNobodyCaresAboutButMe are (by name) informational rather than fatal.
class ThingsNobodyCaresAboutButMe(Exception): pass
# Declared character encoding was overridden (e.g. by HTTP headers).
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# No usable character encoding could be determined.
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# The HTTP Content-Type was not an XML media type.
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# A namespace prefix was used without being declared.
class UndeclaredNamespace(Exception): pass
# Loosen sgmllib's lexer so real-world (often ill-formed) feeds parse:
# allow '-', '_', '.' and ':' inside tag names, treat every '<!' as a
# declaration, and recognise hex character references.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
# Probe whether this sgmllib's endbracket regex mis-scans ' <'; if so,
# install a quote-aware replacement exposing the same search()/start() API.
if sgmllib.endbracket.search(' <').start(0):
    class EndBracketMatch:
        endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            # Mimic re's search() but return self so start() can be chained.
            self.match = self.endbracket.match(string,index)
            if self.match: return self
        def start(self,n):
            # NOTE(review): deliberately returns end(), not start() -- the
            # caller wants the position where the bracket scan stopped.
            return self.match.end(n)
    sgmllib.endbracket = EndBracketMatch()
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
# Python 2.1 compatibility: provide a UserDict base class and a dict()
# builder (from a list of key/value pairs) when the builtin is missing.
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary with attribute-style access and legacy key aliasing.

    Callers may use either the old RSS-era vocabulary ('channel', 'items',
    'modified', ...) or the normalized names actually stored ('feed',
    'entries', 'updated', ...); both spellings reach the same value.
    """
    # legacy key -> canonical key; a list means "first of these that exists"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category', 'categories' and 'enclosures' are synthesized views
        # over the stored 'tags' and 'links' lists, not stored values.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # enclosure links minus their 'rel' attribute
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            # alias maps to several candidates; return the first present
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # prefer the literal key when stored, else fall back to the alias
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes are always redirected to the canonical key
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # attribute presence counts as key presence (attribute-style access)
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        # fall back to item access for non-private names
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # private names and 'data' (used by UserDict) are real attributes;
        # everything else is stored as a dictionary key
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Replace FeedParserDict module-wide with a plain-dict factory.

    Zope cannot cope with the attribute-access subclass, so after calling
    this, FeedParserDict(aDict) simply returns an ordinary dict copy.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# EBCDIC -> ASCII translation table, built lazily on first use.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate the EBCDIC-encoded byte string *s* to ASCII."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # emap[i] is the ASCII code point for EBCDIC byte value i.
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        # Python 2 string.maketrans: builds a 256-byte str.translate table.
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
_cp1252 = {
unichr(128): unichr(8364), # euro sign
unichr(130): unichr(8218), # single low-9 quotation mark
unichr(131): unichr( 402), # latin small letter f with hook
unichr(132): unichr(8222), # double low-9 quotation mark
unichr(133): unichr(8230), # horizontal ellipsis
unichr(134): unichr(8224), # dagger
unichr(135): unichr(8225), # double dagger
unichr(136): unichr( 710), # modifier letter circumflex accent
unichr(137): unichr(8240), # per mille sign
unichr(138): unichr( 352), # latin capital letter s with caron
unichr(139): unichr(8249), # single left-pointing angle quotation mark
unichr(140): unichr( 338), # latin capital ligature oe
unichr(142): unichr( 381), # latin capital letter z with caron
unichr(145): unichr(8216), # left single quotation mark
unichr(146): unichr(8217), # right single quotation mark
unichr(147): unichr(8220), # left double quotation mark
unichr(148): unichr(8221), # right double quotation mark
unichr(149): unichr(8226), # bullet
unichr(150): unichr(8211), # en dash
unichr(151): unichr(8212), # em dash
unichr(152): unichr( 732), # small tilde
unichr(153): unichr(8482), # trade mark sign
unichr(154): unichr( 353), # latin small letter s with caron
unichr(155): unichr(8250), # single right-pointing angle quotation mark
unichr(156): unichr( 339), # latin small ligature oe
unichr(158): unichr( 382), # latin small letter z with caron
unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* against *base*, tolerating malformed absolute URIs."""
    # Strip spurious extra slashes after the scheme (e.g. 'http:////host').
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        # urljoin can raise on pathological input; retry with every URI
        # component percent-quoted.  (Bare except kept: best-effort by design.)
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
    """Reset all parser state; *baseuri*/*baselang* seed xml:base/xml:lang."""
    if _debug: sys.stderr.write('initializing FeedParser\n')
    if not self._matchnamespaces:
        # First instantiation: build the shared lowercase namespace lookup.
        for k, v in self.namespaces.items():
            self._matchnamespaces[k.lower()] = v
    self.feeddata = FeedParserDict() # feed-level data
    self.encoding = encoding # character encoding
    self.entries = [] # list of entry-level data
    self.version = '' # feed type/version, see SUPPORTED_VERSIONS
    self.namespacesInUse = {} # dictionary of namespaces defined by the feed
    # the following are used internally to track state;
    # this is really out of control and should be refactored
    self.infeed = 0
    self.inentry = 0
    self.incontent = 0
    self.intextinput = 0
    self.inimage = 0
    self.inauthor = 0
    self.incontributor = 0
    self.inpublisher = 0
    self.insource = 0
    self.sourcedata = FeedParserDict()
    self.contentparams = FeedParserDict()
    self._summaryKey = None
    self.namespacemap = {}
    self.elementstack = []
    self.basestack = []
    self.langstack = []
    self.baseuri = baseuri or ''
    self.lang = baselang or None
    if baselang:
        # normalize RFC 3066 style: underscores become hyphens
        self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
    """Generic start-tag handler: tracks xml:base/xml:lang/namespaces, then
    dispatches to a _start_<prefix><name> method if one exists."""
    if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
    # normalize attrs
    attrs = [(k.lower(), v) for k, v in attrs]
    attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
    # track xml:base and xml:lang
    attrsD = dict(attrs)
    baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
    self.baseuri = _urljoin(self.baseuri, baseuri)
    lang = attrsD.get('xml:lang', attrsD.get('lang'))
    if lang == '':
        # xml:lang could be explicitly set to '', we need to capture that
        lang = None
    elif lang is None:
        # if no xml:lang is specified, use parent lang
        lang = self.lang
    if lang:
        if tag in ('feed', 'rss', 'rdf:RDF'):
            self.feeddata['language'] = lang.replace('_','-')
    self.lang = lang
    self.basestack.append(self.baseuri)
    self.langstack.append(lang)
    # track namespaces
    for prefix, uri in attrs:
        if prefix.startswith('xmlns:'):
            self.trackNamespace(prefix[6:], uri)
        elif prefix == 'xmlns':
            self.trackNamespace(None, uri)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        # element declared itself as escaped markup, but it isn't really
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        # Note: probably shouldn't simply recreate localname here, but
        # our namespace handling isn't actually 100% correct in cases where
        # the feed redefines the default namespace (which is actually
        # the usual case for inline content, thanks Sam), so here we
        # cheat and just reconstruct the element based on localname
        # because that compensates for the bugs in our namespace handling.
        # This will horribly munge inline content with non-empty qnames,
        # but nobody actually does that, so I'm not fixing it.
        if tag.find(':') <> -1:
            prefix, tag = tag.split(':', 1)
            namespace = self.namespacesInUse.get(prefix, '')
            if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrs.append(('xmlns',namespace))
            if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrs.append(('xmlns',namespace))
        return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
    # match namespaces
    if tag.find(':') <> -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    # special hack for better tracking of empty textinput/image elements in illformed feeds
    if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
        self.intextinput = 0
    if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
        self.inimage = 0
    # call special handler (if defined) or default handler
    methodname = '_start_' + prefix + suffix
    try:
        method = getattr(self, methodname)
        return method(attrsD)
    except AttributeError:
        return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
    """Generic end-tag handler: dispatches to _end_<prefix><name> if defined,
    then unwinds xml:base/xml:lang scope."""
    if _debug: sys.stderr.write('end %s\n' % tag)
    # match namespaces
    if tag.find(':') <> -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    # call special handler (if defined) or default handler
    methodname = '_end_' + prefix + suffix
    try:
        method = getattr(self, methodname)
        method()
    except AttributeError:
        self.pop(prefix + suffix)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        # element declared itself as escaped markup, but it isn't really
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        # re-emit the close tag as literal text inside inline XHTML content
        tag = tag.split(':')[-1]
        self.handle_data('</%s>' % tag, escape=0)
    # track xml:base and xml:lang going out of scope
    if self.basestack:
        self.basestack.pop()
        if self.basestack and self.basestack[-1]:
            self.baseuri = self.basestack[-1]
    if self.langstack:
        self.langstack.pop()
        if self.langstack: # and (self.langstack[-1] is not None):
            self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        # markup-significant characters (quote, amp, apos, lt, gt) are kept
        # as references; everything else is decoded to UTF-8 text
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        # the five XML-predefined entities stay encoded; DTD-declared
        # entities are expanded (recursing for numeric replacements);
        # known HTML entities are decoded to UTF-8; unknown ones pass through
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a declared XML namespace: infer the feed version from
        well-known root namespaces and map known URIs to canonical prefixes."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the innermost element frame if it matches `element`,
        post-process its buffered text (base64 decoding, relative URI
        resolution, entity decoding, microformats, sanitization, encoding
        repair), store the result on the feed or entry being built, and
        return the processed value."""
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            # <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0: break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # loop completed without break: all content nested in the div
                    pieces = pieces[1:-1]
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # nominally plain-text elements routinely carry HTML in the wild
        if self.lookslikehtml(output):
            self.contentparams['type']='text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard
        # sanitize embedded markup
        if is_htmlish:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding=='utf-8' and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass
        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Open a content-bearing element: bump the inline-content depth,
        snapshot type/language/base for later processing in pop(), and
        note whether the payload is base64-encoded."""
        self.incontent += 1
        if self.lang: self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored. This is an attempt to detect
    # the most common cases. As false positives often result in silent
    # data loss, this function errs on the conservative side.
    def lookslikehtml(self, str):
        """Return 1 when str smells like real HTML: it must contain a
        close tag or entity reference, every tag must be an acceptable
        HTML element, and every entity must be a defined HTML entity."""
        if self.version.startswith('atom'): return
        if self.contentparams.get('type','text/html') != 'text/plain': return
        # must have a close tag or a entity reference to qualify
        if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
            re.findall(r'</?(\w+)',str)): return
        # all entities must have been defined as valid HTML entities
        from htmlentitydefs import entitydefs
        if filter(lambda e: e not in entitydefs.keys(),
            re.findall(r'&(\w+);',str)): return
        return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        """CDF carries modification date and link as attributes rather than
        child elements; synthesize the equivalent start/data/end events."""
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
    def _end_category(self):
        """Close a category element: element text fills in the term of the
        attribute-only tag created in _start_category, or becomes a new tag."""
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        # 'tags' was created by _addTag during _start_category
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Enter a <link>: default rel/type, resolve href, append to the
        context's 'links' list, and promote an alternate HTML link to
        context['link']; element text is only expected when href is absent."""
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
            # an enclosure href can serve as a fallback entry id
            if attrsD.get('rel')=='enclosure' and not context.get('id'):
                context['id'] = attrsD.get('href')
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
    def _end_guid(self):
        """Close a <guid>, storing it as the id and possibly as the link."""
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
def _start_title(self, attrsD):
if self.incontent: return self.unknown_starttag('title', attrsD)
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
if not value: return
context = self._getContext()
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel']='enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href and not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
    def _start_content(self, attrsD):
        """Open an atom:content construct; out-of-line content is flagged
        via its src attribute."""
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX-driven parser for well-formed XML feeds; translates SAX
        events into the generic unknown_starttag/unknown_endtag/handle_data
        callbacks provided by _FeedParserMixin."""
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            # bozo/exc record whether (and why) strict parsing failed
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # recoverable SAX error: flag the feed as ill-formed ('bozo')
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    # characters that are special within markup/attribute values
    special = re.compile('''[<>'"]''')
    # an '&' that does not begin a character or entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: always empty, take no closing tag
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding, type):
self.encoding = encoding
self.type = type
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
def parse_starttag(self,i):
j=sgmllib.SGMLParser.parse_starttag(self, i)
if self.type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'<!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
data = data.replace(''', "'")
data = data.replace('"', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs: return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
value=value.replace('>','>').replace('<','<').replace('"','"')
value = self.bare_ampersand.sub("&", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if type(value) != type(u''):
try:
value = unicode(value, self.encoding)
except:
value = unicode(value, 'iso-8859-1')
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs=strattrs.encode(self.encoding)
except:
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = unichr(int(ref[1:],16))
else:
value = unichr(int(ref))
if value in _cp1252.keys():
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
# Reconstruct the original entity reference.
if name2codepoint.has_key(ref):
self.pieces.append('&%(ref)s;' % locals())
else:
self.pieces.append('&%(ref)s' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
    # more liberal declaration-name matcher than sgmllib's default:
    # also allows '.', ':' and digits after the first character
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        '''Scan a declaration name starting at rawdata[i].

        Returns (name, end-position) on success, or (None, -1) when the
        buffer ends mid-name (sgmllib will retry with more data).  When the
        name cannot be parsed at all, the raw data is emitted verbatim via
        handle_data as a recovery measure instead of raising.
        '''
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            # unparseable declaration: dump the raw text into the output so
            # bad markup degrades gracefully rather than aborting the parse
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    '''Feed parser for ill-formed feeds, driven by sgmllib instead of a
    strict XML parser.'''
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        '''Normalize numeric references for markup-significant characters to
        their named-entity form; when the current content type is not an XML
        type, additionally decode those named entities to literal characters.

        Fix: the entity string literals had been collapsed to their decoded
        characters (e.g. replace('<','<')), making every line a no-op;
        restored the real &#NN; / &name; targets.
        '''
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # non-XML content: hand back literal characters
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        '''Serialize (name, value) pairs as attribute text; double quotes
        delimit the values, so embedded quotes are escaped (this replace had
        also collapsed to a no-op and is restored here).'''
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
    '''Extract microformat data from an HTML document using BeautifulSoup:
    hCard (rendered as vCard 3.0 text), rel-tag, rel-enclosure, and XFN
    relationships.  Results accumulate in self.tags, self.enclosures,
    self.xfn and self.vcard; the caller drives parsing (see
    _parseMicroformats).'''
    # property-type codes understood by getPropertyValue
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5
    # rel values defined by the XHTML Friends Network profile
    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
    # file extensions treated as "probably downloadable" by isProbablyDownloadable
    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
    def __init__(self, data, baseuri, encoding):
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        # NOTE(review): this encode happens after the document has already
        # been parsed and the rebound local is never used again — looks like
        # dead code; confirm before relying on it
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None
    def vcardEscape(self, s):
        '''Backslash-escape vCard-reserved characters (',', ';', newline).'''
        if type(s) in (type(''), type(u'')):
            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
        return s
    def vcardFold(self, s):
        '''Fold a vCard content line at 75 characters; continuation lines
        begin with a single space (hence 74 content characters afterwards).
        Trailing semicolons are stripped first.'''
        s = re.sub(';+$', '', s)
        sFolded = ''
        iMax = 75
        sPrefix = ''
        while len(s) > iMax:
            sFolded += sPrefix + s[:iMax] + '\n'
            s = s[iMax:]
            sPrefix = ' '
            iMax = 74
        sFolded += sPrefix + s
        return sFolded
    def normalize(self, s):
        '''Collapse runs of whitespace to single spaces and trim the ends.'''
        return re.sub(r'\s+', ' ', s).strip()
    def unique(self, aList):
        '''Return aList with duplicates removed, preserving first-seen order.'''
        results = []
        for element in aList:
            if element not in results:
                results.append(element)
        return results
    def toISO8601(self, dt):
        '''Format a time tuple as ISO 8601 with a trailing Z — i.e. dt is
        assumed to already be UTC (not converted here).'''
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        '''Look up the microformat property sProperty beneath elmRoot.

        iPropertyType is one of STRING/DATE/URI/NODE/EMAIL (default NODE);
        bAllowMultiple returns a list of every match instead of the first;
        bAutoEscape applies vCard escaping to string values.  When nothing
        is found, returns '' / [] / BeautifulSoup.Null depending on the
        requested type and multiplicity.
        '''
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        # class attributes may hold several space-separated tokens, so match
        # sProperty as a whole word anywhere in the attribute value
        propertyMatch = re.compile(r'\b%s\b' % sProperty)
        if bAllowMultiple and (iPropertyType != self.NODE):
            # multiple values may be expressed as <li> items of a marked list
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            # <pre> content is preformatted; skip whitespace normalization
            bNormalize = not bFound
        if not bFound:
            # fall back to elmRoot itself as the property carrier
            snapResults = [elmRoot]
            bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # exclude vcard nodes nested inside another vcard match
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # type-appropriate "not found" sentinels
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return BeautifulSoup.Null
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return BeautifulSoup.Null
            else: return BeautifulSoup.Null
        arValues = []
        for elmResult in arResults:
            sValue = BeautifulSoup.Null
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                # mailto: link — strip the scheme and any ?subject=... query
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'abbr'):
                # abbr design pattern: machine-readable value lives in title=
                sValue = elmResult.get('title')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                # last resort: the element's rendered content with tags stripped
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
                if sValue:
                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        '''Serialize every hCard under elmRoot as vCard 3.0 text.

        With bAgentParsing set, elmRoot itself is treated as a single card
        (used when recursing into an embedded AGENT card).'''
        sVCards = ''
        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]
        for elmCard in arCards:
            arLines = []
            def processSingleString(sProperty):
                # emit PROPERTY:value for a single string-valued property
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or ''
            def processSingleURI(sProperty):
                # emit PROPERTY for a URI-valued property, handling inline
                # data: URIs (base64) and type/VALUE parameters
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                # emit PROPERTY;TYPE=...:value entries (TEL, EMAIL, LABEL)
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    elmAgent['class'] = ''
                    elmAgent.contents = BeautifulSoup.Null
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
            # FN (full name)
            sFN = processSingleString('fn')
            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                         sGivenName + ';' +
                                         ','.join(arAdditionalNames) + ';' +
                                         ','.join(arHonorificPrefixes) + ';' +
                                         ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    # "Lastname, F." or single-initial heuristics
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
            # PHOTO
            processSingleURI('photo')
            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                         sPostOfficeBox + ';' +
                                         sExtendedAddress + ';' +
                                         sStreetAddress + ';' +
                                         sLocality + ';' +
                                         sRegion + ';' +
                                         sPostalCode + ';' +
                                         sCountryName))
            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])
            # TEL (phone number)
            processTypeValue('tel', ['voice'])
            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])
            # MAILER
            processSingleString('mailer')
            # TZ (timezone)
            processSingleString('tz')
            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
            # TITLE
            processSingleString('title')
            # ROLE
            processSingleString('role')
            # LOGO
            processSingleURI('logo')
            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
            # NOTE
            processSingleString('note')
            # REV
            processSingleString('rev')
            # SOUND
            processSingleURI('sound')
            # UID
            processSingleString('uid')
            # URL
            processSingleURI('url')
            # CLASS
            processSingleString('class')
            # KEY
            processSingleURI('key')
            if arLines:
                arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
                sVCards += '\n'.join(arLines) + '\n'
        return sVCards.strip()
    def isProbablyDownloadable(self, elm):
        '''Heuristic: does this element's href point at a downloadable media
        file, judged by its type attribute or its file extension?'''
        attrsD = elm.attrMap
        if not attrsD.has_key('href'): return 0
        linktype = attrsD.get('type', '').strip()
        if linktype.startswith('audio/') or \
           linktype.startswith('video/') or \
           (linktype.startswith('application/') and not linktype.endswith('xml')):
            return 1
        path = urlparse.urlparse(attrsD['href'])[2]
        if path.find('.') == -1: return 0
        fileext = path.split('.').pop().lower()
        return fileext in self.known_binary_extensions
    def findTags(self):
        '''Collect rel-tag links into self.tags as term/scheme/label dicts.'''
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
            href = elm.get('href')
            if not href: continue
            urlscheme, domain, path, params, query, fragment = \
                       urlparse.urlparse(_urljoin(self.baseuri, href))
            segments = path.split('/')
            tag = segments.pop()
            if not tag:
                # trailing slash: the tag is the last non-empty path segment
                tag = segments.pop()
            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
            if not tagscheme.endswith('/'):
                tagscheme += '/'
            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
    def findEnclosures(self):
        '''Collect rel-enclosure (or probably-downloadable) links into
        self.enclosures, deduplicated by attribute map.'''
        all = lambda x: 1
        enclosure_match = re.compile(r'\benclosure\b')
        for elm in self.document(all, {'href': re.compile(r'.+')}):
            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
            if elm.attrMap not in self.enclosures:
                self.enclosures.append(elm.attrMap)
                if elm.string and not elm.get('title'):
                    self.enclosures[-1]['title'] = elm.string
    def findXFN(self):
        '''Collect links whose rel value contains known XFN relationship
        terms into self.xfn.'''
        all = lambda x: 1
        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
            rels = elm.get('rel', '').split()
            xfn_rels = []
            for rel in rels:
                if rel in self.known_xfn_relationships:
                    xfn_rels.append(rel)
            if xfn_rels:
                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    '''Parse htmlSource for microformats and return a dict with keys
    "tags", "enclosures", "xfn" and "vcard".  Returns None when
    BeautifulSoup is not available.'''
    if not BeautifulSoup:
        return
    if _debug:
        sys.stderr.write('entering _parseMicroformats\n')
    parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures,
            "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    '''HTML processor that rewrites relative URI attributes so they are
    absolute with respect to a given base URI.'''
    # (tag, attribute) pairs whose values are URIs subject to resolution
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]
    def __init__(self, baseuri, encoding, type):
        _BaseHTMLProcessor.__init__(self, encoding, type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        '''Return uri (stripped of surrounding whitespace) resolved against
        the base URI.'''
        return _urljoin(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        '''Resolve any URI-bearing attributes on this tag, then emit it via
        the base processor.'''
        resolved = []
        for key, value in self.normalize_attrs(attrs):
            if (tag, key) in self.relative_uris:
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
    '''Run htmlSource through _RelativeURIResolver, returning HTML with all
    relative URIs resolved against baseURI.'''
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, type)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    '''HTML processor that whitelists elements, attributes and CSS and drops
    everything else.  MathML and SVG islands are permitted once their
    namespace declaration has been seen on the math/svg root element.'''
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b',
      'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
      'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
      'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
      'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
      'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
      'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
      'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
      'ul', 'var']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding',
      'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
      'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
      'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
      'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
      'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
      'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
      'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
      'type', 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang']
    # the entire content of these elements is suppressed, not just the tags
    unacceptable_elements_with_end_tag = ['script', 'applet']
    acceptable_css_properties = ['azimuth', 'background-color',
      'border-bottom-color', 'border-collapse', 'border-color',
      'border-left-color', 'border-right-color', 'border-top-color', 'clear',
      'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
      'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
      'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
      'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
      'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
      'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
      'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
      'white-space', 'width']
    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
      'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
      'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
      'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
      'transparent', 'underline', 'white', 'yellow']
    # matches hex colors, rgb() triples, and dimension/percentage tokens
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
      'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
      'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
      'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
      'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
      'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
      'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
      'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
      'xlink:type', 'xmlns', 'xmlns:xlink']
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
      'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image',
      'linearGradient', 'line', 'metadata', 'missing-glyph', 'mpath', 'path',
      'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg',
      'switch', 'text', 'title', 'use']
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
       'arabic-form', 'ascent', 'attributeName', 'attributeType',
       'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
       'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd',
       'descent', 'display', 'dur', 'end', 'fill', 'fill-rule', 'font-family',
       'font-size', 'font-stretch', 'font-style', 'font-variant',
       'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'hanging',
       'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
       'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'max',
       'min', 'name', 'offset', 'opacity', 'origin', 'overline-position',
       'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
       'preserveAspectRatio', 'r', 'repeatCount', 'repeatDur',
       'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx',
       'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity',
       'strikethrough-position', 'strikethrough-thickness', 'stroke',
       'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
       'stroke-linejoin', 'stroke-miterlimit', 'stroke-width',
       'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type',
       'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode',
       'unicode-range', 'units-per-em', 'values', 'version', 'viewBox',
       'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2',
       'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role',
       'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang',
       'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']
    # lazily-built maps from lowercased SVG names back to their camelCase forms
    svg_attr_map = None
    svg_elem_map = None
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
      'stroke-opacity']
    def reset(self):
        # reset() is called by the base constructor, so sanitizer state
        # exists before any markup is processed
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0
    def unknown_starttag(self, tag, attrs):
        '''Emit the tag only if whitelisted (plain HTML, or MathML/SVG while
        inside a declared island), with attributes filtered and any style
        attribute sanitized.'''
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK = 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK = 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            else:
                return
        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        '''Emit the end tag only for whitelisted elements; also track the
        close of suppressed elements and of MathML/SVG islands.'''
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math': self.mathmlOK = 0
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg': self.svgOK = 0
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # declarations (DOCTYPE etc.) are dropped entirely
        pass
    def handle_data(self, text):
        # text inside a suppressed element (script/applet) is discarded
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        '''Return style with only whitelisted CSS properties and values
        kept; return the empty string if the declaration fails the syntax
        gauntlet at all.'''
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand properties: every token must be a known keyword
                # or a syntactically valid CSS value
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                       not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, type):
    '''Strip unacceptable tags and attributes from htmlSource via
    _HTMLSanitizer; when TIDY_MARKUP is set and one of the preferred Tidy
    bindings is importable, additionally tidy the result into XHTML and
    keep only the <body> contents.'''
    p = _HTMLSanitizer(encoding, type)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # binding not installed; try the next preferred interface
                pass
        if _tidy:
            # despite the name, utf8 flags "input was a unicode object" so it
            # can be round-tripped through tidy as UTF-8 bytes
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # tidy emits a full document; keep only what is inside <body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    '''urllib2 handler that follows all 3xx redirects (except 304), records
    the final HTTP status code on the response object, and upgrades basic
    auth to digest auth when the server answers 401 with a digest
    challenge.'''
    def http_error_default(self, req, fp, code, msg, headers):
        # treat any 3xx other than 304 as a redirect; everything else is
        # returned as-is with its status attached for the caller to inspect
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # follow the redirect only when a Location header is present;
        # otherwise wrap the response so the status is still observable
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # same handling as 302, delegating to the 301 redirect machinery
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # remaining 3xx codes reuse the generic redirect handling
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # anything goes wrong (no digest support, malformed headers):
            # fall back to the default error path
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: hand it straight back
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    # '-' conventionally means standard input
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL; they are sent
                    # via an Authorization: Basic header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # iri support
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
        except:
            # not representable as an IDNA URL; use it as given
            pass
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if type(modified) == type(''):
            # date string: convert to a 9-tuple first
            modified = _parse_date(modified)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise only the compression schemes we can actually decode
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
# Registry of date-parsing functions; handlers registered later take
# precedence because they are inserted at the front of the list.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}(\.\d*)?))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    # templates are ordered greedy-first; take the first that matches
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    # the catch-all '' template matches anything with span (0, 0);
    # treat that as no match at all
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # year omitted: default to the current year
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        # fold the UTC offset into the hour/minute fields so the tuple
        # represents UTC; mktime normalizes any over/underflow
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# OnBlog: 'YYYY<year> MM<month> DD<day> HH:MM:SS' using the Korean
# year/month/day characters above
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
# Nate: 'YYYY-MM-DD <AM/PM marker> H:M:S' (time fields may be 1-2 digits)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if not match:
        return
    # OnBlog stamps are wall-clock Korean time; rebuild them as a W3DTF
    # string with a fixed +09:00 (KST) offset and delegate.
    year, month, day, hour, minute, second = match.groups()
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': year,
        'month': month,
        'day': day,
        'hour': hour,
        'minute': minute,
        'second': second,
        'zonediff': '+09:00',
    }
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match:
        return
    # Nate uses a 12-hour clock plus a Korean AM/PM marker (group 4);
    # convert to 24-hour and zero-pad before building a W3DTF string.
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': match.group(1),
        'month': match.group(2),
        'day': match.group(3),
        'hour': str(hour24).zfill(2),
        'minute': match.group(6),
        'second': match.group(7),
        'zonediff': '+09:00',
    }
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# MS SQL-style dates: 'YYYY-MM-DD hh:mm:ss' with optional fractional seconds
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # rebuilt as W3DTF with a hard-coded +09:00 offset, same as the Korean
    # handlers above -- presumably intentional for the feeds this targets;
    # TODO confirm
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps abbreviated Greek month names (several accent/spelling variants)
# to English three-letter abbreviations for _parse_date_greek.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9efeb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }
# Maps abbreviated Greek weekday names to English three-letter
# abbreviations for _parse_date_greek.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# RFC-822-shaped date with Greek weekday/month names:
# '<wday>, DD <month> YYYY HH:MM:SS <zone>'
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.

    Translates the Greek weekday and month names to their English
    abbreviations, rebuilds an RFC 822 date string, and delegates to
    _parse_date_rfc822.  Returns None if the string does not match.
    '''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    # only a failed dict lookup is expected here; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit
    except KeyError:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
# Hungarian dates: 'YYYY-<month name>-DDTHH:MM+ZZ:ZZ' (day and hour may be
# one or two digits)
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        # translate the month name and zero-pad one-digit day/hour
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        # unknown month name: not a Hungarian date after all
        return
    # rebuild as W3DTF (no seconds in this format) and delegate
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals 'no usable date'.
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # day-of-year ordinal: start from a 30-day-month estimate and
            # iteratively nudge month/day until gmtime agrees on the
            # ordinal (integer division -- Python 2)
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
        day = m.group('day')
        if day:
            day = int(day)
        else:
            day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hour, minute, second); missing time means midnight.
        # NOTE(review): fractional seconds (e.g. '12.5') make int() raise;
        # the error propagates to _parse_date, which swallows it.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # sign is inverted so that ADDING the result to the stamped time
        # yields UTC
        if tzd[0] == '+':
            return -offset
        return offset
    # date part: year, then either a 3-digit julian ordinal or month/day,
    # with an optional '-' separator that must be used consistently
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the whole string must be consumed, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # drop a leading day-of-week token ('Thu,' / 'Thu.' / 'thu')
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # four tokens: either the zone is glued onto the time with '+',
        # or the zone is missing entirely
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    # date-only input: assume midnight GMT
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # try each registered handler in turn; first valid 9-tuple wins
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # sanity check: every element must be convertible to int
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # a failing handler just means we move on to the next one
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # look for an encoding attribute in the (now ASCII-sniffable)
        # XML declaration
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # a BOM-bearing document declaring a generic multi-byte encoding:
        # trust the sniffed byte order instead of the vague declaration
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    # apply the RFC 3023 precedence rules described in the docstring
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        # other text/* types: charset parameter or us-ascii; the in-body
        # declaration is ignored per RFC 3023
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        # HTTP response with no Content-Type: HTTP default (RFC 2616)
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        # non-HTTP source (local file, string): trust the XML declaration
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns a UTF-8 byte string with the XML declaration rewritten (or
    inserted) to say encoding='utf-8'.  Raises if the data cannot be
    decoded with the given (or BOM-corrected) encoding.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); if the BOM contradicts the
    # caller-supplied encoding, the BOM wins
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        # UTF-16 big-endian BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        # UTF-16 little-endian BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        # UTF-8 BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        # UTF-32 big-endian BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        # UTF-32 little-endian BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # replace the existing XML declaration, or prepend one
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
entity_results=entity_pattern.findall(data)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=''
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities)
data = doctype_pattern.sub(replacement, data)
return version, data, dict(replacement and safe_pattern.findall(replacement))
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    # NOTE(review): mutable default for `handlers` -- harmless here since
    # it is only rebound below, never mutated.
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # allow a single handler instance in place of a list
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        # download problems are reported via bozo, not raised
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data, entities = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): 'documented declared as' is a typo for 'document
        # declared as'; left untouched because it is a runtime string.
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            # strict parse failed: record the error, fall back to loose
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        # forgiving sgmllib-based parser used when strict parsing is
        # unavailable or failed
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    """Base class for result serializers.

    Holds the parsed-feed results dictionary; subclasses render it in
    their own format via a write() method.
    """

    def __init__(self, results):
        # Keep a reference to the parse results for subclasses to render.
        self.results = results
class TextSerializer(Serializer):
    """Serializer that emits the results tree as flat 'dotted.path=value' lines."""

    def write(self, stream=sys.stdout):
        """Write the entire results tree to *stream*, one leaf value per line."""
        self._writer(stream, self.results, '')

    def _writer(self, stream, node, prefix):
        # Recursive walker: dicts recurse with 'key.' appended to the prefix,
        # lists recurse with '[index].', and scalars are printed as prefix=value.
        if not node: return
        if hasattr(node, 'keys'):
            # Sort keys for deterministic output order.
            keys = node.keys()
            keys.sort()
            for k in keys:
                # 'description' and 'link' are skipped, as are any keys that
                # have a *_detail or *_parsed companion -- presumably because
                # the companion entries carry the same information in richer
                # form (TODO confirm against the feedparser content model).
                if k in ('description', 'link'): continue
                if node.has_key(k + '_detail'): continue
                if node.has_key(k + '_parsed'): continue
                self._writer(stream, node[k], prefix + k + '.')
        elif type(node) == types.ListType:
            index = 0
            for n in node:
                # prefix[:-1] drops the trailing '.' so entries render as
                # parent[0].child rather than parent.[0].child.
                self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
                index += 1
        else:
            try:
                # Scalar leaf: UTF-8 encode and escape backslashes/newlines so
                # every value fits on a single output line.
                s = str(node).encode('utf-8')
                s = s.replace('\\', '\\\\')
                s = s.replace('\r', '')
                s = s.replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(s)
                stream.write('\n')
            except:
                # Best effort: values that cannot be stringified or encoded
                # are silently skipped.
                pass
class PprintSerializer(Serializer):
    """Serializer that pretty-prints the raw results dictionary."""

    def write(self, stream=sys.stdout):
        """Write the source URL, a blank line, then a pprint dump of results."""
        header = self.results['href'] + '\n\n'
        stream.write(header)
        from pprint import pprint
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Prefer optparse for rich command-line handling; fall back to a bare
    # sys.argv interface when optparse is unavailable.
    try:
        from optparse import OptionParser
    except:
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        # No optparse: every argument is a URL and all options use defaults.
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        class _Options:
            # Minimal stand-in exposing the attributes optparse would provide.
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    # Turn FeedParserDict results into regular dictionaries (Zope
    # compatibility), which also makes pprint output cleaner.
    zopeCompatibilityHack()
    # Map --format to a serializer class, e.g. 'text' -> TextSerializer.
    # NOTE(review): unknown formats fall back to the base Serializer, which
    # defines no write() method -- that path would raise AttributeError.
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if no xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
#4.2 - MAP - added support for parsing microformats within content elements:
# currently supports rel-tag (maps to 'tags'), rel-enclosure (maps to
# 'enclosures'), XFN links within content elements (maps to 'xfn'),
# and hCard (parses as vCard); bug [ 1481975 ] Misencoded utf-8/win-1252
| Python |
import sys
from docutils import core, io
# The following two functions are copied from:
#
# http://docutils.sourceforge.net/docutils/examples.py
def html_parts(input_string, source_path=None, destination_path=None,
               input_encoding='unicode', doctitle=1, initial_header_level=1):
    """
    Given an input string, returns a dictionary of HTML document parts.

    Dictionary keys are the names of parts, and values are Unicode strings;
    encoding is up to the client.

    Parameters:

    - `input_string`: A multi-line text string; required.
    - `source_path`: Path to the source file or object. Optional, but useful
      for diagnostic output (system messages).
    - `destination_path`: Path to the file or object which will receive the
      output; optional. Used for determining relative paths (stylesheets,
      source links, etc.).
    - `input_encoding`: The encoding of `input_string`. If it is an encoded
      8-bit string, provide the correct encoding. If it is a Unicode string,
      use "unicode", the default.
    - `doctitle`: Disable the promotion of a lone top-level section title to
      document title (and subsequent section title to document subtitle
      promotion); enabled by default.
    - `initial_header_level`: The initial level for header elements (e.g. 1
      for "<h1>").
    """
    # Translate our keyword arguments into docutils settings overrides.
    settings = {
        'input_encoding': input_encoding,
        'doctitle_xform': doctitle,
        'initial_header_level': initial_header_level,
    }
    return core.publish_parts(source=input_string,
                              source_path=source_path,
                              destination_path=destination_path,
                              writer_name='html',
                              settings_overrides=settings)
def html_body(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', output_encoding='unicode',
              doctitle=1, initial_header_level=1):
    """
    Given an input string, returns an HTML fragment as a string.

    The return value is the contents of the <body> element.

    Parameters (see `html_parts()` for the remainder):

    - `output_encoding`: The desired encoding of the output. If a Unicode
      string is desired, use the default value of "unicode" .
    """
    document_parts = html_parts(input_string=input_string,
                                source_path=source_path,
                                destination_path=destination_path,
                                input_encoding=input_encoding,
                                doctitle=doctitle,
                                initial_header_level=initial_header_level)
    body = document_parts['html_body']
    if output_encoding == 'unicode':
        return body
    # Caller asked for an encoded byte string rather than Unicode.
    return body.encode(output_encoding)
name = sys.argv[1].rsplit(".")[0]
module = __import__(name, globals(), locals())
skel = file("skeleton.html").read()
print skel % (name, html_body(unicode(module.__doc__)))
| Python |
#!/usr/bin/env python
"""Discover and run every test module under base/tests/."""
import glob, unittest, os, sys
from trace import fullmodname

# The shared cleanup helper is optional; degrade to a no-op when the tests
# package is not importable.
try:
    from tests.utils import cleanup
except:
    def cleanup():
        pass

# try to start in a consistent, predictable location
if sys.path[0]:
    os.chdir(sys.path[0])

# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('base', 'tests', 'test_*.py')))

# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message: importing each module
    # individually makes the first broken import raise directly
    for module in modules:
        __import__(module)
    raise

# translate -q/--quiet and -v/--verbose flags into unittest verbosity levels
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2

# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
from distutils.core import setup
# Release version for the atompubbase distribution.
VERSION = '0.1.0'

setup(name='atompubbase',
      version=VERSION,
      author='Joe Gregorio',
      author_email='joe@bitworking.org',
      url='http://code.google.com/p/feedvalidator/',
      description='An Atom Publishing Protocol client library.',
      license='MIT',
      long_description="""
The atompubbase library is an Atom Publishing Protocol client library.
""",
      # Ship both the main package and its bundled mimeparse subpackage.
      packages=['atompubbase', 'atompubbase.mimeparse'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries',
      ],
      )
| Python |
from urllib import urlencode
import atompubbase
import atompubbase.events
from atompubbase.model import init_event_handlers
# Hook the atompubbase model classes into the eventing system at import time.
init_event_handlers()
def apply_credentials_file(filename, http, error):
    """
    Load credentials from *filename* and install them on the *http* client.

    The file contains either two lines (name, password -> HTTP basic
    credentials) or three lines (name, password, "ClientLogin <service>"
    -> Google ClientLogin).  Any other shape is reported through the
    *error* callback.

    NOTE(review): the error calls reference a module-level name ``msg``
    that is not imported in this module's visible imports -- confirm it is
    provided elsewhere, otherwise these error paths raise NameError.
    """
    parts = file(filename, "r").read().splitlines()
    if len(parts) == 2:
        name, password = parts
        http.add_credentials(name, password)
    elif len(parts) == 3:
        name, password, authtype = parts
        authname, service = authtype.split()
        if authname != "ClientLogin":
            error(msg.CRED_FILE, "Unknown type of authentication: %s ['ClientLogin' is the only good value at this time.]" % authname)
            return
        # ClientLogin registers itself as a PRE callback in its constructor,
        # so the instance does not need to be retained (the previous unused
        # local binding has been dropped).
        ClientLogin(http, name, password, service)
    else:
        error(msg.CRED_FILE, "Wrong format for credentials file")
class ClientLogin:
    """
    Perform ClientLogin up front, save the auth token, and then
    register for all the PRE events so that we can add the auth token
    to all requests.
    """

    def __init__(self, http, name, password, service):
        # Build the form body for the ClientLogin handshake.
        request_fields = dict(
            accountType="HOSTED_OR_GOOGLE",
            Email=name,
            Passwd=password,
            service=service,
            source='AtomPubBase-1.0')
        resp, content = http.request(
            "https://www.google.com/accounts/ClientLogin",
            method="POST",
            body=urlencode(request_fields),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # The response body is newline-separated key=value pairs.
        pairs = [tuple(line.split("=", 1)) for line in content.split('\n') if line]
        tokens = dict(pairs)
        if resp.status == 403:
            # Login refused: keep an empty token instead of failing here.
            self.Auth = ""
        else:
            self.Auth = tokens['Auth']
        # Ride the token along on every subsequent request via PRE events.
        atompubbase.events.register_callback("PRE", self.pre_cb)

    def pre_cb(self, headers, body, filters):
        """Attach the saved GoogleLogin token to an outgoing request's headers."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
| Python |
# Copyright Google 2007
"""
An eventing system for atompubbase objects.
Each class that is registered with add_event_handlers()
will be hooked into the event system. Clients can then
register for callbacks when a member function is called,
filtering on when to trigger the callback. There
are several axes that can be used to filter on:
Time PRE|POST
Method Name GET|PUT|DELETE|CREATE
Media MEDIA|NEXT
Class SERVICE|COLLECTION|ENTRY
Note that Class is really driven by which
classes use the eventing system. Media is triggered
if the method name ends in "_media".
For example, given the following class:
class Entry(object):
def get(self, headers, body = None):
pass
def put_media(self, headers, body = None):
pass
It can be added to the event system by calling:
add_event_handlers(Entry)
Now you can register callbacks for when methods of instances
of the class Entry are called. For example:
def mycb(headers, body, attributes):
pass
register_callback("PRE_ENTRY", mycb)
The 'mycb' callback will be called before any
method is called in the Entry class. The headers
and body parameters will be passed along. The
headers can be changed by the callback.
You can construct a filter string by selecting zero
or one value across each axis and concatenating
them with underscores, order is not important.
PRE calls contain the header and body of the request,
POST calls contain the header and body of the response.
If you wish to receive all the events then register
with the ANY filter.
Presuming registered classes:
class Service:
def get(headers, body=None): pass
class Entry:
def get(headers, body=None): pass
def get_media(headers, body=None): pass
def delete(headers, body=None): pass
def put(headers, body): pass
def put_media(headers, body): pass
class Collection:
def get(headers, body=None): pass
def create(headers, body): pass
These are all valid filters:
PRE_GET_MEDIA - Called before Entry.get_media() is called
PRE - Called before any classes member function is called.
COLLECTION - Called before any Collection classes member function is called.
POST_COLLECTION - Called after any Collection classes member function is called.
POST_COLLECTION_CREATE - Called after Collection.create() is called.
ANY - Called before and after every classes member function is called.
"""
import sys
# Request-phase axis values recognized in callback filters.
PREPOST = set(["PRE", "POST"])
# Method-name prefixes that add_event_handlers() will wrap with event triggers.
WRAPPABLE = set(["get", "put", "delete", "create"])
class Events(object):
    """
    Registry of event callbacks.

    Each callback is stored with a filter -- a set of attribute names such
    as {"PRE", "ENTRY"} -- and is invoked by trigger() whenever its filter
    is a subset of the attributes describing the current method call.
    """

    def __init__(self):
        # List of (filter_set, callback) pairs; filter_set is the set of
        # method attributes used to select that callback.
        self.callbacks = []

    def register(self, filter, cb):
        """
        Add a callback (cb) to be invoked when a triggered event matches
        the filter, an underscore-separated string of attribute names.

        Example:
          events.register("PRE_ENTRY", mycb)
        calls mycb before any method of the Entry class.
        """
        filter_set = set(filter.upper().split("_"))
        # A filter naming neither PRE nor POST (nor ANY) defaults to PRE.
        if not PREPOST.intersection(filter_set) and "ANY" not in filter_set:
            filter_set.add("PRE")
        self.callbacks.append((filter_set, cb))

    def clear(self):
        """Drop every registered callback."""
        self.callbacks = []

    def trigger(self, when, methodname, instance, headers, body):
        """Fire every callback whose filter matches this method call."""
        # Describe the call: method-name words, class name, and phase.
        attributes = set(methodname.upper().split("_"))
        attributes.add(instance.__class__.__name__.upper())
        attributes.add(when)
        # "ANY" matches every call, but is not part of the attribute set
        # handed to the callbacks themselves.
        candidates = attributes | set(["ANY"])
        for filter_set, cb in self.callbacks:
            if filter_set.issubset(candidates):
                cb(headers, body, attributes)
# Module-level singleton through which all wrapped methods fire their events.
events = Events()
def _wrap(method, methodname):
    """
    Create a closure around the given method that calls into the eventing
    system: a PRE trigger fires before the call and a POST trigger after,
    with the (headers, body) pair passed through both.
    """
    def wrapped(self, headers=None, body=None):
        if headers is None:
            headers = {}
        # Record the request URI when the instance can supply one.
        try:
            headers["-request-uri"] = self.uri()
        except AttributeError:
            pass
        events.trigger("PRE", methodname, self, headers, body)
        headers, body = method(self, headers, body)
        events.trigger("POST", methodname, self, headers, body)
        return (headers, body)
    return wrapped
# Classes already processed by add_event_handlers(); guards double-wrapping.
_wrapped = set()
def add_event_handlers(theclass):
    """
    Wrap each callable non-internal member function of the class with a
    wrapper that calls into the eventing system.  Idempotent: a class is
    only wrapped once.
    """
    if theclass in _wrapped:
        return
    for methodname in dir(theclass):
        method = getattr(theclass, methodname)
        # Only wrap public callables whose leading name segment is one of
        # the recognized verbs (get/put/delete/create).
        prefix = methodname.split("_")[0]
        should_wrap = (prefix in WRAPPABLE
                       and callable(method)
                       and not methodname.startswith("_"))
        if should_wrap:
            setattr(theclass, methodname, _wrap(method, methodname))
    _wrapped.add(theclass)
def register_callback(filter, cb):
    """
    Add a callback (cb) to be called when it matches
    the filter. The filter is a string of attribute
    names separated by underscores.

    Example:
      register_callback("PRE_ENTRY", mycb)

    The 'mycb' callback will be called before any
    method is called in the Entry class.
    """
    events.register(filter, cb)
def clear():
    """
    Unregister all callbacks from the module-level event registry.
    """
    events.clear()
# Public API; Events, the events singleton, and _wrap remain internal.
__all__ = ["add_event_handlers", "register_callback", "clear"]
| Python |
#!/usr/bin/env python
"""Discover and run every test module under tests/."""
import glob, unittest, os, sys
from trace import fullmodname

# The shared cleanup helper is optional; degrade to a no-op when the tests
# package is not importable.
try:
    from tests.utils import cleanup
except:
    def cleanup():
        pass

# try to start in a consistent, predictable location
if sys.path[0]:
    os.chdir(sys.path[0])

# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))
print "Running the tests found in the following modules:"
print modules

# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message: importing each module
    # individually makes the first broken import raise directly
    for module in modules:
        __import__(module)
    raise

# translate -q/--quiet and -v/--verbose flags into unittest verbosity levels
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2

# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
# Package metadata for the mimeparse module.
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parses a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

       ('application', 'xhtml', {'q': '0.5'})
    """
    pieces = mime_type.split(';')
    # Each ';'-separated piece after the first is a 'key=value' parameter.
    params = dict([tuple([part.strip() for part in piece.split('=', 1)])
                   for piece in pieces[1:]])
    full_type = pieces[0].strip()
    # The Java URLConnection class sends an Accept header that includes a
    # single '*'. Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    main_type, sub_type = full_type.split('/')
    return (main_type.strip(), sub_type.strip(), params)
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the
    media range.  For example, the media range 'application/*;q=0.5' would
    get parsed into:

       ('application', '*', {'q': '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.
    """
    media_type, media_subtype, params = parse_mime_type(range)
    q = params.get('q')
    # Reset 'q' to the default when it is missing, empty, zero, or outside
    # the legal 0..1 range.  (A non-numeric q still raises ValueError, as
    # in the original float() conversion.)
    if not q or not float(q) or float(q) > 1 or float(q) < 0:
        params['q'] = '1'
    return (media_type, media_subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range(). Returns a tuple of
    the fitness value and the value of the 'q' quality parameter of the best
    match, or (-1, 0) if no match was found. Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.
    """
    best_fitness = -1
    best_fit_q = 0
    target_type, target_subtype, target_params = parse_media_range(mime_type)
    for range_type, range_subtype, range_params in parsed_ranges:
        # A range matches when each side is equal or either is a wildcard.
        if range_type != target_type and range_type != '*' and target_type != '*':
            continue
        if range_subtype != target_subtype and range_subtype != '*' and target_subtype != '*':
            continue
        # Score the match: exact type is worth 100, exact subtype 10, and
        # each matching non-'q' parameter adds 1.
        param_matches = 0
        for key, value in target_params.items():
            if key != 'q' and key in range_params and value == range_params[key]:
                param_matches += 1
        fitness = 0
        if range_type == target_type:
            fitness += 100
        if range_subtype == target_subtype:
            fitness += 10
        fitness += param_matches
        if fitness > best_fitness:
            best_fitness = fitness
            best_fit_q = range_params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Return the 'q' quality of the best match for 'mime_type'.

    Behaves the same as quality() except that 'parsed_ranges' must
    already have been run through parse_media_range(). Returns 0 when
    no range matches.
    """
    fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    'ranges' is a raw HTTP Accept-style header value. For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
    text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed = []
    for media_range in ranges.split(','):
        parsed.append(parse_media_range(media_range))
    return quality_parsed(mime_type, parsed)
def best_match(supported, header):
    """Return the supported mime-type best matching an Accept header.

    'supported' is a list of mime-types sorted in order of increasing
    desirability (later entries win ties); 'header' must conform to the
    format of the HTTP Accept: header. Returns '' when nothing in
    'supported' is acceptable.

    >>> best_match(['application/xbel+xml', 'text/xml'],
    'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    parsed_header = [parse_media_range(r)
                     for r in _filter_blank(header.split(','))]
    weighted = []
    for index, mime_type in enumerate(supported):
        # Sort key is ((fitness, q), position): position breaks ties so
        # the later, more desirable candidate ends up last after sorting.
        weighted.append((fitness_and_quality_parsed(mime_type, parsed_header),
                         index, mime_type))
    weighted.sort()
    (fitness, q), index, mime_type = weighted[-1]
    return q and mime_type or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# -*- coding: utf-8 -*-
# Packaging script for the single-module 'mimeparse' distribution.
#old way
from distutils.core import setup
#new way
#from setuptools import setup, find_packages

setup(name='mimeparse',
      version='0.1.4',
      description='A module provides basic functions for parsing mime-type names and matching them against a list of media-ranges.',
      long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
""",
      classifiers=[
          # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.5',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
      ],
      keywords='mime-type',
      author='Joe Gregorio',
      author_email='joe@bitworking.org',
      maintainer='Joe Gregorio',
      maintainer_email='joe@bitworking.org',
      url='http://code.google.com/p/mimeparse/',
      license='MIT',
      py_modules=['mimeparse']
      )
| Python |
"""
Python tests for Mime-Type Parser.
This module loads a json file and converts the tests specified therein to a set
of PyUnitTestCases. Then it uses PyUnit to run them and report their status.
"""
__version__ = "0.1"
__author__ = 'Ade Oshineye'
__email__ = "ade@oshineye.com"
__credits__ = ""
import mimeparse
import unittest
from functools import partial
# Conditional import to support Python 2.5
try:
import json
except ImportError:
import simplejson as json
def test_parse_media_range(args, expected):
    """Check one table-driven case for mimeparse.parse_media_range."""
    want = tuple(expected)
    got = mimeparse.parse_media_range(args)
    assert want == got, "Expected: '%s' but got %s" % (want, got)
def test_quality(args, expected):
    """Check one table-driven case for mimeparse.quality."""
    got = mimeparse.quality(args[0], args[1])
    assert expected == got, "Expected: '%s' but got %s" % (expected, got)
def test_best_match(args, expected):
    """Check one table-driven case for mimeparse.best_match."""
    got = mimeparse.best_match(args[0], args[1])
    assert expected == got, "Expected: '%s' but got %s" % (expected, got)
def test_parse_mime_type(args, expected):
    """Check one table-driven case for mimeparse.parse_mime_type."""
    want = tuple(expected)
    got = mimeparse.parse_mime_type(args)
    assert want == got, "Expected: '%s' but got %s" % (want, got)
def add_tests(suite, json_object, func_name, test_func):
    """Wrap every [args, expected(, note)] datum listed under 'func_name'
    in 'json_object' as a FunctionTestCase and add it to 'suite'."""
    for datum in json_object[func_name]:
        args, expected = datum[0], datum[1]
        desc = "%s(%s) with expected result: %s" % (func_name, str(args), str(expected))
        # An optional third element is a human-readable note for the case.
        if len(datum) == 3:
            desc = datum[2] + " : " + desc
        bound = partial(test_func, *(args, expected))
        bound.__name__ = test_func.__name__
        suite.addTest(unittest.FunctionTestCase(bound, description=desc))
def run_tests():
    """Load the test table from testdata.json and run every case in it."""
    json_object = json.load(open("testdata.json"))
    suite = unittest.TestSuite()
    # One suite entry per JSON section, each with its matching checker.
    add_tests(suite, json_object, "parse_media_range", test_parse_media_range)
    add_tests(suite, json_object, "quality", test_quality)
    add_tests(suite, json_object, "best_match", test_best_match)
    add_tests(suite, json_object, "parse_mime_type", test_parse_mime_type)
    test_runner = unittest.TextTestRunner(verbosity=1)
    test_runner.run(suite)

if __name__ == "__main__":
    run_tests()
| Python |
"""
There are four classes that make up the core
of the atompub model.
class Context
class Service
class Collection
class Entry
Context represents the current state, as represented
by a service document, a collection and an entry.
Each atompub object (Service, Collection, or Entry)
is just instantiated with a URI (or with a Context)
that it then uses to perform its work. Each object can produce
a list of URIs (actually Context objects) (possibly filtered)
for the next level down. The only parsing done will be xpaths to
pick out URIs, e.g. collections from service documents.
Here is an example of how the classes are used together:
# Note that httplib2.Http is passed in so you
# can pass in your own instrumented version, etc.
from httplib2 import Http
h = Http()
c = Context(h, service_document_uri)
service = Service(c)
collection = Collection(service.iter()[0])
entry = Entry(collection.iter()[0])
(headers, body) = entry.get()
body = "<entry>...some updated stuff </entry>"
entry.put(body)
# saving and restoring is a matter of pickling/unpickling the Context.
import pickle
f = file("somefile", "w")
pickle.dump(entry.context(), f)
import pickle
f = file("somefile", "r")
context = pickle.load(f)
# You pass the class names into restore() for it to use to restore the context.
(service, collection, entry) = context.restore(Service, Collection, Entry)
# You don't have to use the context, Entries
# and Collections can be instantiated from URIs instead
# of Context instances.
entry = Entry(entry_edit_uri)
"""
import events
from mimeparse import mimeparse
import urlparse
import httplib2
import copy
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
from xml.parsers.expat import ExpatError
# Namespace URIs for the vocabularies this module deals with.
ATOM = "http://www.w3.org/2005/Atom"
XHTML = "http://www.w3.org/1999/xhtml"
APP = "http://www.w3.org/2007/app"
# Pre-built Clark-notation ("{uri}local") tag names for ElementTree lookups.
ATOM_ENTRY = "{%s}entry" % ATOM
LINK = "{%s}link" % ATOM
ATOM_TITLE= "{%s}title" % ATOM
APP_COLL = "{%s}collection" % APP
APP_MEMBER_TYPE = "{%s}accept" % APP
XHTML_DIV = "{%s}div" % XHTML
class ParseException(Exception):
    """Raised when an HTTP response body cannot be parsed as XML.

    The offending response headers and body are kept on the instance
    so the caller can inspect them.
    """
    def __init__(self, headers, body):
        self.headers = headers
        self.body = body

    def __str__(self):
        return "XML is non-well-formed"
def get_child_title(node):
    """Return the text of the first atom:title below 'node'.

    Titles of type 'text' or 'html' are returned as-is; 'xhtml' titles
    are flattened from their wrapping xhtml:div. Returns "" when no
    title element exists.
    """
    title = node.find(".//" + ATOM_TITLE)
    if title is None:
        return ""
    if title.get('type', 'text') in ['text', 'html']:
        return title.text
    div = title.find(".//" + XHTML_DIV)
    return div.text + "".join([c.text + c.tail for c in div.getchildren()])
def absolutize(baseuri, uri):
    """
    Given a baseuri, return the absolute
    version of the given uri. Works whether
    uri is relative or absolute. A None uri is passed through as None.
    """
    # Identity test, not equality: '== None' invokes comparison protocols
    # and is the non-idiomatic form.
    if uri is None:
        return None
    (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
    # Only URIs lacking an authority component need resolving.
    if not authority:
        uri = urlparse.urljoin(baseuri, uri)
    return uri
def link_value(etree, xpath, relation):
    """
    Return the @href of the first atom:link element found under 'xpath'
    (relative to elementtree element 'etree') whose @rel equals
    'relation', or None when no such link exists.
    """
    for link in etree.findall(xpath + "/" + LINK):
        if link.get('rel') == relation:
            return link.get('href')
    return None
class Context(object):
    """
    Encapsulates the current service document,
    the current collection and the current
    entry. Can be pickled and un-pickled to
    achieve persistence of context.
    """
    # Class-level defaults; __init__ rebinds each of these per instance.
    _service = None
    _collection = None
    _entry = None
    http = None
    _collection_stack = []
    def __init__(self, http = None, service=None, collection=None, entry=None):
        """http is either an instance of httplib2.Http() or something that
        acts like it. For this module the only two functions that need to
        be implemented are request() and add_credentials().
        """
        self._collection_stack = []
        if http:
            self.http = http
        else:
            self.http = httplib2.Http()
        self._service = service
        self._collection = collection
        self._entry = entry
    def _get_service(self):
        # Accessor backing the 'service' property.
        return self._service
    def _set_service(self, service):
        # Selecting a new service document invalidates the levels below it.
        self._service = service
        self._collection = None
        self._collection_stack = []
        self._entry = None
    service = property(_get_service, _set_service, None, "The URI of the Service Document. None if not set yet.")
    def _get_collection(self):
        return self._collection
    def _set_collection(self, collection):
        # Selecting a new collection resets the pushed stack and the entry.
        self._collection = collection
        self._collection_stack = []
        self._entry = None
    collection = property(_get_collection, _set_collection, None, "The URI of the collection. None if not set yet.")
    def _get_entry(self):
        return self._entry
    def _set_entry(self, entry):
        self._entry = entry
    entry = property(_get_entry, _set_entry, None, "The URI of the entry. None if not set yet.")
    def restore(self, service_type, collection_type, entry_type):
        """
        Restore the state from a Context. The types of the objects
        to be instantiated for the service, collection and entry
        are passed in. If no URI is set for a specific level
        then None is returned for that instance.
        """
        service = self._service and service_type(self) or None
        collection = self._collection and collection_type(self) or None
        entry = self._entry and entry_type(self) or None
        return (service, collection, entry)
    def collpush(self, uri):
        """
        The collpush and collpop members are similar to the
        command line 'pushd' and 'popd' commands. They let you
        change to a different collection and then pop back
        to the older collection when you are done.
        """
        # Assigns the private attributes directly (not via the property)
        # so the saved stack is not wiped by the property's reset logic.
        self._collection_stack.append((self._collection, self._entry))
        self._collection = uri
        self._entry = None
    def collpop(self):
        """
        See collpush.
        """
        self._collection, self._entry = self._collection_stack.pop()
class Service(object):
    """
    An Atom Publishing Protocol Service Document.
    """
    def __init__(self, context_or_uri):
        """Create a Service from a Context or a service-document URI."""
        # BUG FIX: the Context used to be stored in 'self.context', which
        # shadowed the context() accessor below and made it uncallable.
        # It now lives in '_context', consistent with Collection and Entry.
        self._context = context_or_uri if isinstance(context_or_uri, Context) else Context(service=context_or_uri)
        self.representation = None  # raw body of the last fetched document
        self._etree = None          # parsed ElementTree of that document

    def context(self):
        """
        Get the current Context associated with this Service Document.
        """
        return self._context

    def uri(self):
        """The URI of the Service Document."""
        return self._context.service

    def get(self, headers=None, body=None):
        """
        Retrieve the current Service Document from the server.
        Returns a tuple of the HTTP response headers
        and the body. Raises ParseException on a non-well-formed body.
        """
        headers, body = self._context.http.request(self._context.service, headers=headers)
        if headers.status == 200:
            self.representation = body
            try:
                self._etree = fromstring(body)
            except ExpatError:
                raise ParseException(headers, body)
        return (headers, body)

    def etree(self):
        """
        Returns an ElementTree representation of the Service Document,
        fetching it first if necessary.
        """
        if not self._etree:
            self.get()
        return self._etree

    def iter_match(self, mimerange):
        """
        Returns a generator that iterates over
        the collections in the service document
        that accept the given mimerange. The mimerange
        can be a specific mimetype - "image/png" - or
        a range - "image/*". Each yielded value is a Context
        positioned at the matching collection.
        """
        if not self.representation:
            headers, body = self.get()
        for coll in self._etree.findall(".//" + APP_COLL):
            accept_type = [t.text for t in coll.findall(APP_MEMBER_TYPE)]
            # RFC 5023: a collection without app:accept takes Atom entries.
            if len(accept_type) == 0:
                accept_type.append("application/atom+xml")
            coll_type = [t for t in accept_type if mimeparse.best_match([t], mimerange)]
            if coll_type:
                context = copy.copy(self._context)
                context.collection = absolutize(self._context.service, coll.get('href'))
                yield context

    def iter(self):
        """
        Returns a generator that iterates over all
        the collections in the service document.
        """
        return self.iter_match("*/*")

    def iter_info(self):
        """
        Returns a generator that iterates over all
        the collections in the service document.
        Each yielded tuple contains the workspace title,
        the collection title and the collection URI.
        """
        if not self.representation:
            headers, body = self.get()
        for workspace in self._etree.findall(".//{%s}workspace" % APP):
            workspace_title = get_child_title(workspace)
            for coll in workspace.findall(".//" + APP_COLL):
                coll_title = get_child_title(coll)
                coll_uri = absolutize(self._context.service, coll.get('href'))
                yield (workspace_title, coll_title, coll_uri)
class Collection(object):
    """An Atom Publishing Protocol collection, possibly paged across
    several Atom feed documents."""
    def __init__(self, context_or_uri):
        """
        Create a Collection from either the URI of the
        collection, or from a Context object.
        """
        # BUG FIX: a bare URI used to be stored via Context(service=...),
        # leaving the Context's 'collection' slot as None so every request
        # went to None. Store it as the collection URI instead.
        self._context = context_or_uri if isinstance(context_or_uri, Context) else Context(collection=context_or_uri)
        self.representation = None  # raw body of the current feed page
        self._etree = None          # parsed form of the current feed page
        self.next = None            # URI of the next page, or None

    def context(self):
        """
        The Context associated with this Collection.
        """
        return self._context

    def uri(self):
        """The URI of the collection."""
        return self._context.collection

    def etree(self):
        """
        Returns an ElementTree representation of the
        current page of the collection, fetching it if necessary.
        """
        if not self.representation:
            self.get()
        return self._etree

    def _record_next(self, base_uri, headers, body):
        """Cache a fetched page and remember its rel='next' link (made
        absolute against 'base_uri'); reset all state on a non-200."""
        if headers.status == 200:
            self.representation = body
            try:
                self._etree = fromstring(body)
            except ExpatError:
                raise ParseException(headers, body)
            self.next = link_value(self._etree, ".", "next")
            if self.next:
                self.next = absolutize(base_uri, self.next)
        else:
            # BUG FIX: this used to assign to a stray local 'selfnext'
            # (typo), so self.next was never cleared on failure.
            self.representation = self._etree = self.next = None

    def get(self, headers=None, body=None):
        """
        Retrieves the first feed in a paged series of
        collection documents.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self._context.collection, headers=headers, body=body)
        self._record_next(self._context.collection, headers, body)
        return (headers, body)

    def has_next(self):
        """
        Collections can be paged across many
        Atom feeds. Returns True if there is a
        'next' feed we can get.
        """
        return self.next is not None

    def get_next(self, headers=None, body=None):
        """
        Collections can be paged across many
        Atom feeds. Gets the next feed in the
        paging.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self.next, headers=headers, body=body)
        self._record_next(self.next, headers, body)
        return (headers, body)

    def create(self, headers=None, body=None):
        """
        Create a new member in the collection.
        Can be used to create members of regular
        and media collections. Be sure to set the
        'content-type' header appropriately.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self._context.collection, method="POST", headers=headers, body=body)
        return (headers, body)

    def entry_create(self, headers=None, body=None):
        """
        Convenience method that returns a Context positioned at the
        newly created member if the create succeeded, or None if it
        failed (no 201, or no Location header).
        """
        headers, body = self._context.http.request(self._context.collection, method="POST", headers=headers, body=body)
        if headers.status == 201 and 'location' in headers:
            context = copy.copy(self._context)
            context.entry = headers['location']
            return context
        else:
            return None

    def iter(self):
        """
        Returns an iterable that produces a Context
        object for every Entry in the collection,
        following 'next' links across pages.
        """
        self.get()
        while True:
            for entry in self._etree.findall(ATOM_ENTRY):
                context = copy.copy(self._context)
                edit_link = link_value(entry, ".", "edit")
                context.entry = absolutize(self._context.collection, edit_link)
                yield context
            if self.has_next():
                self.get_next()
            else:
                break

    def iter_entry(self):
        """
        Returns an iterable that produces an elementtree
        Entry for every Entry in the collection. Note that this
        Entry is the possibly incomplete Entry in the collection
        feed.
        """
        self.get()
        while True:
            for entry in self._etree.findall(ATOM_ENTRY):
                yield entry
            if self.has_next():
                self.get_next()
            else:
                break
class Entry(object):
    """An Atom Publishing Protocol member entry, addressed by its edit URI."""
    def __init__(self, context_or_uri):
        """
        Create an Entry from either the URI of the
        entry edit URI, or from a Context object.
        """
        self._context = isinstance(context_or_uri, Context) and context_or_uri or Context(entry=context_or_uri)
        self.representation = None  # raw body of the last GET
        self._etree = None          # parsed form of the last GET
        self.edit_media = None      # edit-media link URI, if a Media Link Entry
    def _clear(self):
        # Drop cached state after a mutation so the next read re-fetches.
        self.representation = None
        self._etree = None
        self.edit_media = None
    def etree(self):
        """
        Returns an ElementTree representation of the Entry,
        fetching it first if necessary.
        """
        if not self.representation:
            self.get()
        return self._etree
    def context(self):
        # The Context associated with this Entry.
        return self._context
    def uri(self):
        # The edit URI of this Entry.
        return self._context.entry
    def get(self, headers=None, body=None):
        """
        Retrieve the representation for this entry.
        Raises ParseException if the body is not well-formed XML.
        """
        # NOTE(review): the body is parsed regardless of response status --
        # confirm whether non-200 responses should be handled first.
        headers, body = self._context.http.request(self._context.entry, headers=headers)
        self.representation = body
        try:
            self._etree = fromstring(body)
        except ExpatError:
            raise ParseException(headers, body)
        # Remember the edit-media link so media operations know where to go.
        self.edit_media = absolutize(self._context.entry, link_value(self._etree, ".", "edit-media"))
        return (headers, body)
    def has_media(self):
        """
        Returns True if this is a Media Link Entry.
        """
        if not self.representation:
            self.get()
        return self.edit_media != None
    def get_media(self, headers=None, body=None):
        """
        If this entry is a Media Link Entry, then retrieve
        the associated media.
        """
        if not self.representation:
            self.get()
        headers, body = self._context.http.request(self.edit_media, headers=headers)
        return (headers, body)
    def put(self, headers=None, body=None):
        """
        Update the entry on the server. If the body to send
        is not supplied then the internal elementtree element
        will be serialized and sent to the server.
        """
        if headers == None:
            headers = {}
        if 'content-type' not in headers:
            headers['content-type'] = 'application/atom+xml;type=entry'
        if not self.representation:
            self.get()
        if body == None:
            body = tostring(self._etree)
        headers, body = self._context.http.request(self._context.entry, headers=headers, method="PUT", body=body)
        # Any 2xx response invalidates the cached representation.
        if headers.status < 300:
            self._clear()
        return (headers, body)
    def put_media(self, headers=None, body=None):
        """
        If this entry is a Media Link Entry, then update
        the associated media.
        """
        if not self.representation:
            self.get()
        headers, body = self._context.http.request(self.edit_media, headers=headers, method="PUT", body=body)
        if headers.status < 300:
            self._clear()
        return (headers, body)
    def delete(self, headers=None, body=None):
        """
        Delete the entry from the server.
        """
        headers, body = self._context.http.request(self._context.entry, headers=headers, method="DELETE")
        if headers.status < 300:
            self._clear()
        return (headers, body)
def init_event_handlers():
    """
    Hook the Events machinery into the Service, Collection
    and Entry classes.
    """
    for cls in (Service, Collection, Entry):
        events.add_event_handlers(cls)
| Python |
SERVICE1 = """<?xml version="1.0" encoding='utf-8'?>
<service xmlns="http://www.w3.org/2007/app">
<workspace title="Main Site" >
<collection
title="My Blog Entries"
href="http://example.org/reilly/main" />
<collection
title="Pictures"
href="http://example.org/reilly/pic" >
<accept>image/*</accept>
</collection>
</workspace>
<workspace title="Side Bar Blog">
<collection title="Remaindered Links"
href="http://example.org/reilly/list" />
</workspace>
</service>"""
ENTRY1 = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title type="text">third</title>
<id>http://bitworking.org/foo/app/main/third</id>
<author>
<name>Joe Gregorio</name>
</author>
<updated>2006-08-04T15:52:00-05:00</updated>
<summary type="html"><p>not much</p></summary>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml"><p>Some stuff</p>
<p><i>[Update: The Atom draft is finished.]</i></p>
outside a child element.
<p>More stuff.</p></div>
</content>
</entry>
"""
import apptools
import unittest
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
class parseAtomTest(unittest.TestCase):
    """Checks apptools.parse_atom_entry against the canned ENTRY1 entry."""
    def testSimple(self):
        parsed = apptools.parse_atom_entry(".", fromstring(ENTRY1))
        self.assertEqual(parsed['title'], "third")
        self.assertEqual(parsed['summary'], "<p>not much</p>")
        self.assertTrue(parsed['content'].startswith("""\n    <html:p xmlns:html="http://www.w3.org/1999/xhtml">Some stuff</html:p>"""))
class unparseAtomEntryTest(unittest.TestCase):
    """Round-trips an entry through unparse_atom_entry/parse_atom_entry."""
    def testEntry(self):
        element = fromstring(ENTRY1)
        parsed = apptools.parse_atom_entry(".", fromstring(ENTRY1))
        # Overwrite the text constructs, write them back into the element,
        # then re-parse the serialized result and check the values survived.
        parsed['content'] = "This is text"
        parsed['content__type'] = 'text'
        parsed['summary'] = "<p>This is text</p>"
        parsed['summary__type'] = 'xhtml'
        apptools.unparse_atom_entry(element, parsed)
        reparsed = apptools.parse_atom_entry(".", fromstring(tostring(element)))
        self.assertEqual("This is text", reparsed['content'])
        self.assertEqual('<html:p xmlns:html="http://www.w3.org/1999/xhtml">This is text</html:p>', reparsed['summary'])
class wrapTest(unittest.TestCase):
    """Exercises apptools.wrap's width-based line breaking."""
    def testWrap(self):
        wrap = apptools.wrap
        self.assertEqual("This\nis", wrap("This\nis", 80))
        self.assertEqual("This is ", wrap("This is", 80))
        self.assertEqual("This\nis ", wrap("This is", 3))
        self.assertEqual("This\nis\n", wrap("This is\n", 3))

unittest.main()
| Python |
from pretty import pretty
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
# Namespaced XHTML fragment used to exercise pretty() below.
src = """<html:div xmlns:html="http://www.w3.org/1999/xhtml">
<html:p >I took a couple of days off work
and we drove down to Florida to visit family in "The Villages",
a 55+ golf cart community that currently has about 50,000 residents.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">That is not a typo. Check out the <html:a href="http://en.wikipedia.org/wiki/The_Villages">wikipedia</html:a> <html:a href="http://en.wikipedia.org/wiki/The_Villages%2C_Florida">entries</html:a>.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">On Monday we went out to feed the ducks at a nearby pond, but well fed
by everyone else, they weren't interested in our bread. Instead the bread was
attacked from below by the fish in the pond, which wasn't very interesting, that is, until
a local heron came over and started feasting on the fish we'd attracted. There's nothing
like the sight of a still living fish wiggling down the throat of a heron to make
a young boy's day.
</html:p>
<html:table style="width: 194px;" xmlns:html="http://www.w3.org/1999/xhtml"><html:tr><html:td align="center" style="height: 194px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida"><html:img height="160" src="http://lh6.google.com/joe.gregorio/RoK-XGNIkuE/AAAAAAAAAA8/ePqbYyHlxvU/s160-c/TheVillagesFlorida.jpg" style="margin: 1px 0 0 4px;" width="160" /></html:a></html:td></html:tr><html:tr><html:td style="text-align: center; font-family: arial,sans-serif; font-size: 11px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida" style="color: #4D4D4D; font-weight: bold; text-decoration: none;">The Villages, Florida</html:a></html:td></html:tr>
</html:table>
</html:div>"""
# Parse and re-indent the fragment (Python 2 print statement).
print pretty(fromstring(src))
| Python |
#!/usr/bin/env python
import glob, unittest, os, sys
from trace import fullmodname
# Fall back to a no-op cleanup() when the test utilities are unavailable.
try:
    from tests.utils import cleanup
except:
    def cleanup():
        pass

# try to start in a consistent, predictable location
if sys.path[0]:
    os.chdir(sys.path[0])

# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))
print "Running the tests found in the following modules:"
print modules

# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message
    for module in modules:
        __import__(module)
    raise

# Map command-line switches onto unittest verbosity levels.
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2

# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
from model import init_event_handlers, Context, Service, Collection, Entry, ATOM, XHTML
from httplib2 import Http
import unittest
import events
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
# Clark-notation path to the xhtml div inside an atom:content element.
ATOM_CONTENT = "{%s}content/{%s}div" % (ATOM, XHTML)

class Test(unittest.TestCase):
    """Live round-trip test: create, update and delete an entry against a
    real AtomPub service, recording the events that fire along the way."""
    def test(self):
        c = Context(http = Http(".cache"), service = "http://bitworking.org/projects/apptestsite/app.cgi/service/;service_document")
        s = Service(c)
        # Pick the first collection that accepts Atom entries.
        collection = Collection(s.iter_match("application/atom+xml;type=entry").next())
        init_event_handlers()
        class EventListener(object):
            events = []
            def callback(self, headers, body, attributes):
                self.events.append(attributes)
        listener = EventListener()
        # "ANY" subscribes the listener to every event type.
        events.register_callback("ANY", listener.callback)
        CONTENT = """<entry xmlns="http://www.w3.org/2005/Atom">
<title>Test Post From AtomPubBase Live Test</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<summary>Hi!</summary>
<author>
<name>Joe Gregorio</name>
</author>
<content>Plain text content for this test.</content>
</entry>
"""
        entry_context = collection.entry_create(body=CONTENT, headers={'content-type':'application/atom+xml;type=entry'})
        self.assertNotEqual(None, entry_context)
        entry = Entry(entry_context)
        entry.etree().find(ATOM_CONTENT).text = "Bye"
        headers, body = entry.put()
        self.assertEqual(200, headers.status)
        headers, body = entry.delete()
        self.assertEqual(200, headers.status)
        print listener.events

unittest.main()
| Python |
try:
from xml.etree.ElementTree import fromstring, tostring, SubElement
import xml.etree.ElementTree as ElementTree
except:
from elementtree.ElementTree import fromstring, tostring, SubElement
import elementtree.ElementTree as ElementTree
class namespace(object):
    """Callable factory for Clark-notation ('{uri}local') tag names.

    Results are memoized so repeated lookups of the same element name
    reuse the already-built string.
    """
    def __init__(self, uri):
        self.ns_uri = uri
        self.memoized = {}

    def __call__(self, element):
        try:
            return self.memoized[element]
        except KeyError:
            qualified = "{%s}%s" % (self.ns_uri, element)
            self.memoized[element] = qualified
            return qualified
# Namespace helpers for the vocabularies this module deals with.
ATOM = namespace("http://www.w3.org/2005/Atom")
APP = namespace("http://www.w3.org/2007/app")
XHTML = namespace("http://www.w3.org/1999/xhtml")
# Preferred prefixes used when serializing these namespaces.
my_namespaces = {
    "http://www.w3.org/1999/xhtml": "xhtml",
    "http://www.w3.org/2007/app" : "app",
    "http://www.w3.org/2005/Atom" : "atom"
}
# NOTE(review): _namespace_map is a private ElementTree detail; this
# relies on implementation internals rather than a public API.
ElementTree._namespace_map.update(my_namespaces)
import re
from urlparse import urljoin
from xml.sax.saxutils import quoteattr, escape
import time
import calendar
def get_element(etree, name):
    """Return the text of the first child of 'etree' matching 'name'.

    Bare names (no '{uri}' prefix) are qualified into the Atom
    namespace. Returns "" when no matching element exists.
    """
    if '}' not in name:
        name = ATOM(name)
    matches = etree.findall(name)
    if not matches:
        return ""
    return matches[0].text
# RFC 3339 timestamp, e.g. "2005-09-02T10:30:00-05:00" or "...Z".
# Raw string literals so the '\d' sequences are real regex escapes rather
# than invalid string escapes (a DeprecationWarning/SyntaxWarning in
# modern Python).
RFC3339 = re.compile(r"^(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d)T(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)(\.\d*)?" +
                     r"(?P<timezone>Z|((?P<tzhour>[+-]\d\d):\d\d))$")
def get_date(etree, name):
    """
    Returns the Date Construct value as seconds from the epoch
    in UTC. The 'name' should be the element name of an
    RFC 4287 Date Construct, such as ATOM('published'), ATOM('updated')
    or APP('edited'). The parameter 'etree' is an elementtree
    element. Note that you don't need to add the namespace
    to elements in the ATOM namespace. Raises ValueError when the
    element's text is not a valid RFC 3339 timestamp.
    """
    date = get_element(etree, name)
    m = RFC3339.search(date)
    if not m:
        raise ValueError("Not a valid RFC 3339 format.")
    d = m.groupdict()
    # Build a struct_time-style tuple: (y, m, d, H, M, S, wday, yday, isdst).
    # timegm() only reads the first six fields, so the fillers are inert.
    ndate = [int(x) for x in [d['year'], d['month'], d['day'], d['hour'], d['minute'], d['second']]]
    ndate.append(0) # weekday
    ndate.append(1) # year day
    # Shift the hour back to UTC. NOTE(review): only whole-hour offsets
    # are honoured -- the minutes part of the timezone offset is ignored;
    # confirm whether fractional-hour zones matter for the feeds involved.
    if d['timezone'] != 'Z':
        ndate[3] -= int(d['tzhour'])
    ndate.append(0)
    return calendar.timegm(tuple(ndate))
def serialize_nons(element, top):
    """Serialize 'element' to unicode markup with the namespace portion
    stripped from every tag name.

    When 'top' is true only the element's text and children are emitted
    (no enclosing tag). Text and tails are XML-escaped; attribute values
    are quoted with quoteattr().
    """
    tag = element.tag.split("}", 1)[1]
    tail = u""
    if element.tail is not None:
        tail = escape(element.tail)
    text = u""
    if element.text is not None:
        text = element.text
    # .items() rather than the Python-2-only .iteritems().
    attribs = " ".join(["%s=%s" % (k, quoteattr(v)) for k, v in element.attrib.items()])
    if attribs:
        attribs = " " + attribs
    if top:
        value = escape(text)
        close = u""
    else:
        value = "<%s%s>%s" % (tag, attribs, escape(text))
        close = "</%s>" % tag
    # Iterate the element directly for its children: getchildren() was
    # deprecated and removed in Python 3.9, and iteration works in 2.x too.
    return value + "".join([serialize_nons(c, False) for c in element]) + close + tail
def get_text(name, entry):
    """Return (type, text) for the Atom text construct 'name' in 'entry'.

    'type' is one of "text", "html" or "xhtml"; xhtml constructs are
    re-serialized from their wrapping xhtml div. Missing elements and
    unknown types yield an empty string.
    """
    matches = entry.findall(ATOM(name))
    if not matches:
        return ("text", "")
    node = matches[0]
    value = node.text
    texttype = mime2atom(node.get('type', 'text'))
    if texttype == "xhtml":
        div = node.find("{http://www.w3.org/1999/xhtml}div")
        value = serialize_nons(div, True)
    elif texttype not in ["text", "html"]:
        value = ""
    if value is None:
        value = ""
    return (texttype, value)
def set_text(entry, name, ttype, value):
    """Set the Atom text construct 'name' on 'entry' to 'value'.

    'ttype' is "text", "html" or "xhtml". For xhtml the value is parsed
    into a namespaced div; when that parse fails the construct degrades
    to type "html" with the raw value as its text.
    """
    elements = entry.findall(ATOM(name))
    if not elements:
        element = SubElement(entry, ATOM(name))
    else:
        element = elements[0]
    element.set('type', ttype)
    # Drop existing children first. Iterate a snapshot (list(element)):
    # getchildren() was removed in Python 3.9, and mutating an element
    # while iterating it live is unsafe.
    for child in list(element):
        element.remove(child)
    if ttype in ["html", "text"]:
        element.text = value
    elif ttype == "xhtml":
        element.text = ""
        try:
            div = fromstring((u"<div xmlns='http://www.w3.org/1999/xhtml'>%s</div>" % value).encode('utf-8'))
            element.append(div)
        except Exception:
            # Not well-formed XHTML: deliberate best-effort fallback to an
            # html construct. (Narrowed from a bare except so Ctrl-C and
            # SystemExit still propagate.)
            element.text = value
            element.set('type', 'html')
# Mime-types that have a short Atom text-construct type name.
mime_to_atom = {
    "application/xhtml+xml": "xhtml",
    "text/html": "html",
    "text/plain": "text"
}

def mime2atom(t):
    """Return the Atom text-construct name for mime-type 't', or 't'
    unchanged when it has no short form."""
    # dict.get with a default replaces the manual membership test.
    return mime_to_atom.get(t, t)
def wrap(text, width):
    """Greedy re-wrap of 'text' to roughly 'width' characters per line.

    Splits on single spaces; a newline replaces the separator after the
    word that pushes the running length past 'width'. A word already
    containing a newline resets the count without adding a separator.
    """
    pieces = []
    line_len = 0
    for word in text.split(' '):
        pieces.append(word)
        line_len += len(word)
        has_newline = word.find('\n') >= 0
        if line_len > width or has_newline:
            line_len = 0
            if not has_newline:
                pieces.append('\n')
        else:
            pieces.append(' ')
    return "".join(pieces)
| Python |
import urlparse
import urllib
import httplib2
from email import message_from_string, message_from_file
import os
class MockHttp:
    """
    A mock for httplib2.Http that takes its
    response headers and bodies from files on disk.

    Responses live at <directory>/<METHOD>/<quoted-path>.file; a numbered
    "<name>.file.N" variant, when present, is served on the N-th request
    to the same method+uri. Unknown URIs get a synthesized 404.
    """
    def __init__(self, directory):
        self.directory = directory
        self.hit_counter = {}  # (method+uri) -> number of requests seen

    def request(self, uri, method="GET", body=None, headers=None, redirections=5):
        counter = self.hit_counter.get(method+uri, 0)
        counter += 1
        self.hit_counter[method+uri] = counter
        path = urlparse.urlparse(uri)[2]
        fname = os.path.join(self.directory, method, urllib.quote(path.strip("/")) + ".file")
        # Prefer the per-hit numbered variant when one was recorded.
        fname_next = fname + "." + str(counter)
        if os.path.exists(fname_next):
            fname = fname_next
        if os.path.exists(fname):
            # open() instead of the Python-2-only file() builtin.
            f = open(fname, "r")
            response = message_from_file(f)
            f.close()
            body = response.get_payload()
            headers = httplib2.Response(response)
            return (headers, body)
        else:
            return (httplib2.Response({"status": "404"}), "")

    def add_credentials(self, name, password):
        """Credentials are ignored by the mock."""
        pass
class MockRecorder(httplib2.Http):
    """Wrap a live httplib2.Http and record every response to disk in the
    fixture layout that MockHttp replays (headers, blank line, body)."""
    def __init__(self, h, directory):
        self.h = h
        self.directory = directory
        self.hit_counter = {}
    def request(self, uri, method="GET", body=None, headers=None, redirections=5):
        counter = self.hit_counter.get(method+uri, 0)
        counter += 1
        self.hit_counter[method+uri] = counter
        headers, body = self.h.request(uri, method, body, headers, redirections)
        path = urlparse.urlparse(uri)[2]
        fname = os.path.join(self.directory, method, urllib.quote(path.strip("/")) + ".file")
        if counter >= 2:
            # repeat hits get numbered fixtures, matching MockHttp's lookup
            fname = fname + "." + str(counter)
        dirname = os.path.dirname(fname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # open() instead of the file() builtin; close even if a write fails
        f = open(fname, "w")
        try:
            f.write(
                "\r\n".join(["%s: %s" % (key, value) for key, value in headers.iteritems()])
            )
            f.write("\r\n\r\n")
            f.write(body)
        finally:
            f.close()
        return (headers, body)
    def add_credentials(self, name, password):
        # BUG FIX: previously called the bare name "h" (NameError at call
        # time); delegate to the wrapped Http instance.
        self.h.add_credentials(name, password)
| Python |
#!/usr/bin/env python
"""Discover and run every planet test module under base/tests/."""
import glob, unittest, os, sys
from trace import fullmodname

# cleanup() removes test droppings; fall back to a no-op when the
# tests package is not importable (e.g. running from an odd cwd)
try:
    from tests.utils import cleanup
except:
    def cleanup():
        pass

# try to start in a consistent, predictable location
if sys.path[0]:
    os.chdir(sys.path[0])

# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('base', 'tests', 'test_*.py')))

# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message:
    # importing each module individually surfaces the failing one
    for module in modules:
        __import__(module)
    raise

# -q/--quiet and -v/--verbose adjust unittest's output level
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2

# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 1.144 $"[11:16] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/. At the moment, it appears
# that there is a version incompatibility, so the import is replaced with
# a 'None'. Restoring the try/import/except/none will renable the MF tests.
BeautifulSoup = None
# ---------- don't touch these ----------
# Internal exception types used to signal parsing anomalies.  The base
# class groups the non-fatal encoding/content-type conditions.
class ThingsNobodyCaresAboutButMe(Exception): pass
# a declared encoding was overridden by a more authoritative source
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# no usable character encoding could be determined
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# the HTTP Content-Type was not an XML type
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# a namespace prefix was used without being declared
class UndeclaredNamespace(Exception): pass
# Monkey-patch sgmllib's lexer patterns to be more tolerant of the
# malformed markup found in real-world feeds.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')

# If this Python's stock endbracket pattern matches past a bare '<'
# (the probe below returns a nonzero start), install a replacement that
# mimics the search()/start() interface sgmllib expects.
if sgmllib.endbracket.search(' <').start(0):
    class EndBracketMatch:
        endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            # stash the match and return self so .start() can be called on it
            self.match = self.endbracket.match(string,index)
            if self.match: return self
        def start(self,n):
            # deliberately returns end(): sgmllib only wants the position
            # where the bracket scan should resume
            return self.match.end(n)
    sgmllib.endbracket = EndBracketMatch()
# Internal version codes (stored in the parse result) mapped to
# human-readable feed format names.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
# Python 2.1 compatibility: alias UserDict to the builtin dict where it
# exists; otherwise fall back to UserDict.UserDict and provide a dict()
# replacement that builds a mapping from a list of (key, value) pairs.
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Mapping with attribute-style access and legacy key aliasing.

    Key names from earlier feedparser releases (e.g. 'channel', 'items',
    'modified') are transparently mapped onto their modern equivalents via
    `keymap`, so old client code keeps working.  `d.key` is equivalent to
    `d['key']` except for names starting with '_'.
    """
    # legacy key -> current key; a list value means "try these in order"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # a few keys are synthesized views over the stored data
        if key == 'category':
            # first tag's term
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # enclosures are the rel='enclosure' links, minus their 'rel' key
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        # otherwise resolve legacy aliases, preferring a real stored key
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes through legacy names land on the modern key
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # true for real attributes as well as stored/aliased keys
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # attribute access falls back to item access, except for
        # _private names, which must be real attributes
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # _private names and 'data' (UserDict storage) stay real attributes
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Globally replace FeedParserDict with a plain-dict factory.

    After calling this, parse results are ordinary dicts with no
    attribute access or legacy key aliasing.  NOTE(review): the rationale
    (Zope compatibility) is inferred from the name -- confirm before
    relying on the specifics.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc
# translation table built lazily on first use
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to ASCII/Latin-1."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point i maps to byte value emap[i]
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# Windows-1252 "extension" code points (0x80-0x9F) mapped to the Unicode
# characters they actually represent; used to repair text that was
# mislabeled as iso-8859-1/unicode but contains cp1252 bytes.
_cp1252 = {
    unichr(128): unichr(8364), # euro sign
    unichr(130): unichr(8218), # single low-9 quotation mark
    unichr(131): unichr( 402), # latin small letter f with hook
    unichr(132): unichr(8222), # double low-9 quotation mark
    unichr(133): unichr(8230), # horizontal ellipsis
    unichr(134): unichr(8224), # dagger
    unichr(135): unichr(8225), # double dagger
    unichr(136): unichr( 710), # modifier letter circumflex accent
    unichr(137): unichr(8240), # per mille sign
    unichr(138): unichr( 352), # latin capital letter s with caron
    unichr(139): unichr(8249), # single left-pointing angle quotation mark
    unichr(140): unichr( 338), # latin capital ligature oe
    unichr(142): unichr( 381), # latin capital letter z with caron
    unichr(145): unichr(8216), # left single quotation mark
    unichr(146): unichr(8217), # right single quotation mark
    unichr(147): unichr(8220), # left double quotation mark
    unichr(148): unichr(8221), # right double quotation mark
    unichr(149): unichr(8226), # bullet
    unichr(150): unichr(8211), # en dash
    unichr(151): unichr(8212), # em dash
    unichr(152): unichr( 732), # small tilde
    unichr(153): unichr(8482), # trade mark sign
    unichr(154): unichr( 353), # latin small letter s with caron
    unichr(155): unichr(8250), # single right-pointing angle quotation mark
    unichr(156): unichr( 339), # latin small ligature oe
    unichr(158): unichr( 382), # latin small letter z with caron
    unichr(159): unichr( 376)} # latin capital letter y with diaeresis
# collapses the slashes right after "scheme://" down to the authority.
# NOTE(review): the class [A-Za-z0-9+-.] contains the range '+'-'.'
# (which also admits ','); RFC 3986 scheme chars are [A-Za-z0-9+.-] --
# harmless in practice, but confirm before tightening.
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* against *base*, tolerating slightly malformed URIs.

    If urljoin itself chokes, retry with each URI component
    percent-quoted.
    """
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
    """Initialize per-parse state.

    baseuri -- base for resolving relative URIs (xml:base fallback)
    baselang -- document-level language (xml:lang fallback)
    encoding -- character encoding used when decoding byte output
    """
    if _debug: sys.stderr.write('initializing FeedParser\n')
    # lazily build the class-level lowercase namespace lookup table
    if not self._matchnamespaces:
        for k, v in self.namespaces.items():
            self._matchnamespaces[k.lower()] = v
    self.feeddata = FeedParserDict() # feed-level data
    self.encoding = encoding # character encoding
    self.entries = [] # list of entry-level data
    self.version = '' # feed type/version, see SUPPORTED_VERSIONS
    self.namespacesInUse = {} # dictionary of namespaces defined by the feed
    # the following are used internally to track state;
    # this is really out of control and should be refactored
    self.infeed = 0
    self.inentry = 0
    self.incontent = 0
    self.intextinput = 0
    self.inimage = 0
    self.inauthor = 0
    self.incontributor = 0
    self.inpublisher = 0
    self.insource = 0
    self.sourcedata = FeedParserDict()
    self.contentparams = FeedParserDict()
    self._summaryKey = None
    self.namespacemap = {}
    self.elementstack = []
    self.basestack = []
    self.langstack = []
    self.baseuri = baseuri or ''
    self.lang = baselang or None
    if baselang:
        self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
    """SGML start-tag dispatcher.

    Tracks xml:base/xml:lang scope, namespace declarations and inline
    XHTML content, then routes the tag to a _start_<prefix><name>
    handler if one exists, falling back to a generic push().
    """
    if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
    # normalize attrs
    attrs = [(k.lower(), v) for k, v in attrs]
    attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
    # track xml:base and xml:lang
    attrsD = dict(attrs)
    baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
    self.baseuri = _urljoin(self.baseuri, baseuri)
    lang = attrsD.get('xml:lang', attrsD.get('lang'))
    if lang == '':
        # xml:lang could be explicitly set to '', we need to capture that
        lang = None
    elif lang is None:
        # if no xml:lang is specified, use parent lang
        lang = self.lang
    if lang:
        if tag in ('feed', 'rss', 'rdf:RDF'):
            self.feeddata['language'] = lang.replace('_','-')
    self.lang = lang
    self.basestack.append(self.baseuri)
    self.langstack.append(lang)
    # track namespaces
    for prefix, uri in attrs:
        if prefix.startswith('xmlns:'):
            self.trackNamespace(prefix[6:], uri)
        elif prefix == 'xmlns':
            self.trackNamespace(None, uri)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        # element declared itself as escaped markup, but it isn't really
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        # Note: probably shouldn't simply recreate localname here, but
        # our namespace handling isn't actually 100% correct in cases where
        # the feed redefines the default namespace (which is actually
        # the usual case for inline content, thanks Sam), so here we
        # cheat and just reconstruct the element based on localname
        # because that compensates for the bugs in our namespace handling.
        # This will horribly munge inline content with non-empty qnames,
        # but nobody actually does that, so I'm not fixing it.
        if tag.find(':') <> -1:
            prefix, tag = tag.split(':', 1)
            namespace = self.namespacesInUse.get(prefix, '')
            if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrs.append(('xmlns',namespace))
            if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrs.append(('xmlns',namespace))
        # re-emit the tag verbatim into the inline content buffer
        return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
    # match namespaces
    if tag.find(':') <> -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    # special hack for better tracking of empty textinput/image elements in illformed feeds
    if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
        self.intextinput = 0
    if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
        self.inimage = 0
    # call special handler (if defined) or default handler
    methodname = '_start_' + prefix + suffix
    try:
        method = getattr(self, methodname)
        return method(attrsD)
    except AttributeError:
        return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
    """SGML end-tag dispatcher: mirror of unknown_starttag.

    Routes to a _end_<prefix><name> handler (default: pop), re-emits
    end tags inside inline XHTML content, and unwinds the
    xml:base/xml:lang scope stacks.
    """
    if _debug: sys.stderr.write('end %s\n' % tag)
    # match namespaces
    if tag.find(':') <> -1:
        prefix, suffix = tag.split(':', 1)
    else:
        prefix, suffix = '', tag
    prefix = self.namespacemap.get(prefix, prefix)
    if prefix:
        prefix = prefix + '_'
    # call special handler (if defined) or default handler
    methodname = '_end_' + prefix + suffix
    try:
        method = getattr(self, methodname)
        method()
    except AttributeError:
        self.pop(prefix + suffix)
    # track inline content
    if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
        # element declared itself as escaped markup, but it isn't really
        self.contentparams['type'] = 'application/xhtml+xml'
    if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
        tag = tag.split(':')[-1]
        self.handle_data('</%s>' % tag, escape=0)
    # track xml:base and xml:lang going out of scope
    if self.basestack:
        self.basestack.pop()
        if self.basestack and self.basestack[-1]:
            self.baseuri = self.basestack[-1]
    if self.langstack:
        self.langstack.pop()
        if self.langstack: # and (self.langstack[-1] is not None):
            self.lang = self.langstack[-1]
def handle_charref(self, ref):
    """Append a numeric character reference to the current element.

    XML-special characters are kept as references; everything else is
    decoded to its UTF-8 byte sequence.
    """
    # called for each character reference, e.g. for '&#160;', ref will be '160'
    if not self.elementstack: return
    ref = ref.lower()
    # the XML specials stay escaped so re-serialized markup stays valid
    if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
        text = '&#%s;' % ref
    else:
        if ref[0] == 'x':
            c = int(ref[1:], 16)
        else:
            c = int(ref)
        text = unichr(c).encode('utf-8')
    self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
    """Append a named entity reference to the current element.

    XML predefined entities stay escaped; known HTML entities are
    decoded to UTF-8; unknown names are passed through escaped.
    """
    # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
    if not self.elementstack: return
    if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
    if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
        text = '&%s;' % ref
    elif ref in self.entities.keys():
        # DTD-declared entity; may itself be a numeric reference
        text = self.entities[ref]
        if text.startswith('&#') and text.endswith(';'):
            return self.handle_entityref(text)
    else:
        try: name2codepoint[ref]
        except KeyError: text = '&%s;' % ref
        else: text = unichr(name2codepoint[ref]).encode('utf-8')
    self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
    """Append character data to the element currently being collected."""
    # called for each block of plain text, i.e. outside of any tag and
    # not containing any character or entity references
    if not self.elementstack: return
    # inside inline XHTML, text must be re-escaped for re-serialization
    if escape and self.contentparams.get('type') == 'application/xhtml+xml':
        text = _xmlescape(text)
    self.elementstack[-1][2].append(text)
def handle_comment(self, text):
    # called for each comment, e.g. <!-- insert message here -->
    # comments carry no feed data; deliberately ignored
    pass
def handle_pi(self, text):
    # called for each processing instruction, e.g. <?instruction>
    # processing instructions carry no feed data; deliberately ignored
    pass
def handle_decl(self, text):
    # markup declarations are ignored (see parse_declaration for CDATA)
    pass
def parse_declaration(self, i):
    """Handle '<!...' at rawdata[i]; returns the index to resume at."""
    # override internal declaration handler to handle CDATA blocks
    if _debug: sys.stderr.write('entering parse_declaration\n')
    if self.rawdata[i:i+9] == '<![CDATA[':
        # emit the CDATA payload as escaped character data
        k = self.rawdata.find(']]>', i)
        # unterminated CDATA: consume to end of input
        if k == -1: k = len(self.rawdata)
        self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
        return k+3
    else:
        # any other declaration is skipped wholesale
        k = self.rawdata.find('>', i)
        return k+1
def mapContentType(self, contentType):
    """Expand Atom shorthand type values to full MIME content types."""
    contentType = contentType.lower()
    shorthand = {'text': 'text/plain',
                 'html': 'text/html',
                 'xhtml': 'application/xhtml+xml'}
    return shorthand.get(contentType, contentType)
def trackNamespace(self, prefix, uri):
    """Record a namespace declaration and infer the feed version from it.

    Known namespaces are remapped to this parser's canonical prefixes
    via _matchnamespaces; everything else is kept under the declared
    prefix.
    """
    loweruri = uri.lower()
    # certain namespace declarations identify the feed format outright
    if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
        self.version = 'rss090'
    if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
        self.version = 'rss10'
    if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
        self.version = 'atom10'
    if loweruri.find('backend.userland.com/rss') <> -1:
        # match any backend.userland.com namespace
        uri = 'http://backend.userland.com/rss'
        loweruri = uri
    if self._matchnamespaces.has_key(loweruri):
        self.namespacemap[prefix] = self._matchnamespaces[loweruri]
        self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
    else:
        self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
    """Resolve *uri* against the current xml:base scope."""
    return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
    # identity here; strict/loose parser subclasses override this hook
    # NOTE(review): overriding subclasses are outside this chunk -- confirm
    return data
def strattrs(self, attrs):
    """Serialize [(name, value), ...] back to ' name="value"...' markup.

    BUG FIX: the double-quote was previously "escaped" to itself (an
    identity mapping), so attribute values containing '"' produced
    malformed markup; escape it to &quot; as upstream feedparser does.
    """
    return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
    # start collecting character data for element;
    # stack entry is [name, expectingText flag, list of text pieces]
    self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
    """Finish collecting *element*: post-process its text and store it.

    Joins the collected pieces, then (in order): strips an enclosing
    Atom 1.0 XHTML <div>, decodes base64 payloads, resolves relative
    URIs, decodes entities, resolves/sanitizes embedded markup, parses
    microformats, normalizes to unicode, repairs cp1252 mojibake, and
    finally files the value under the current feed/entry context.
    Returns the processed string.
    """
    if not self.elementstack: return
    # guard against mismatched end tags in ill-formed feeds
    if self.elementstack[-1][0] != element: return
    element, expectingText, pieces = self.elementstack.pop()
    if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
        # remove enclosing child element, but only if it is a <div> and
        # only if all the remaining content is nested underneath it.
        # This means that the divs would be retained in the following:
        #    <div>foo</div><div>bar</div>
        while pieces and len(pieces)>1 and not pieces[-1].strip():
            del pieces[-1]
        while pieces and len(pieces)>1 and not pieces[0].strip():
            del pieces[0]
        if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
            depth = 0
            for piece in pieces[:-1]:
                if piece.startswith('</'):
                    depth -= 1
                    if depth == 0: break
                elif piece.startswith('<') and not piece.endswith('/>'):
                    depth += 1
            else:
                # loop ran to completion: the first <div> wraps everything
                pieces = pieces[1:-1]
    output = ''.join(pieces)
    if stripWhitespace:
        output = output.strip()
    if not expectingText: return output
    # decode base64 content
    if base64 and self.contentparams.get('base64', 0):
        try:
            output = base64.decodestring(output)
        except binascii.Error:
            pass
        except binascii.Incomplete:
            pass
    # resolve relative URIs
    if (element in self.can_be_relative_uri) and output:
        output = self.resolveURI(output)
    # decode entities within embedded markup
    if not self.contentparams.get('base64', 0):
        output = self.decodeEntities(element, output)
    # some feeds mislabel HTML as plain text; sniff and correct
    if self.lookslikehtml(output):
        self.contentparams['type']='text/html'
    # remove temporary cruft from contentparams
    try:
        del self.contentparams['mode']
    except KeyError:
        pass
    try:
        del self.contentparams['base64']
    except KeyError:
        pass
    is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
    # resolve relative URIs within embedded markup
    if is_htmlish:
        if element in self.can_contain_relative_uris:
            output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
    # parse microformats
    # (must do this before sanitizing because some microformats
    # rely on elements that we sanitize)
    if is_htmlish and element in ['content', 'description', 'summary']:
        mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
        if mfresults:
            for tag in mfresults.get('tags', []):
                self._addTag(tag['term'], tag['scheme'], tag['label'])
            for enclosure in mfresults.get('enclosures', []):
                self._start_enclosure(enclosure)
            for xfn in mfresults.get('xfn', []):
                self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
            vcard = mfresults.get('vcard')
            if vcard:
                self._getContext()['vcard'] = vcard
    # sanitize embedded markup
    if is_htmlish:
        if element in self.can_contain_dangerous_markup:
            output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
    # normalize byte strings to unicode using the detected encoding
    if self.encoding and type(output) != type(u''):
        try:
            output = unicode(output, self.encoding)
        except:
            pass
    # address common error where people take data that is already
    # utf-8, presume that it is iso-8859-1, and re-encode it.
    if self.encoding=='utf-8' and type(output) == type(u''):
        try:
            output = unicode(output.encode('iso-8859-1'), 'utf-8')
        except:
            pass
    # map win-1252 extensions to the proper code points
    if type(output) == type(u''):
        output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
    # categories/tags/keywords/whatever are handled in _end_category
    if element == 'category':
        return output
    # store output in appropriate place(s)
    if self.inentry and not self.insource:
        if element == 'content':
            # entries may have multiple content elements
            self.entries[-1].setdefault(element, [])
            contentparams = copy.deepcopy(self.contentparams)
            contentparams['value'] = output
            self.entries[-1][element].append(contentparams)
        elif element == 'link':
            self.entries[-1][element] = output
            if output:
                self.entries[-1]['links'][-1]['href'] = output
        else:
            if element == 'description':
                element = 'summary'
            self.entries[-1][element] = output
            if self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element + '_detail'] = contentparams
    elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
        context = self._getContext()
        if element == 'description':
            element = 'subtitle'
        context[element] = output
        if element == 'link':
            context['links'][-1]['href'] = output
        elif self.incontent:
            contentparams = copy.deepcopy(self.contentparams)
            contentparams['value'] = output
            context[element + '_detail'] = contentparams
    return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
    """push() plus content bookkeeping for text-construct elements.

    Records the content's type, language and xml:base in contentparams
    so pop() can post-process the collected text correctly.
    """
    self.incontent += 1
    if self.lang: self.lang=self.lang.replace('_','-')
    self.contentparams = FeedParserDict({
        'type': self.mapContentType(attrsD.get('type', defaultContentType)),
        'language': self.lang,
        'base': self.baseuri})
    self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
    self.push(tag, expectingText)
def popContent(self, tag):
    """pop() counterpart of pushContent; returns the processed value."""
    value = self.pop(tag)
    self.incontent -= 1
    # contentparams only describes the element just closed
    self.contentparams.clear()
    return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored.  This is an attempt to detect
# the most common cases.  As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
    """Return 1 if nominally-plain text appears to contain HTML markup."""
    # Atom types are authoritative; only sniff when declared text/plain
    if self.version.startswith('atom'): return
    if self.contentparams.get('type','text/html') != 'text/plain': return
    # must have a close tag or a entity reference to qualify
    if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
    # all tags must be in a restricted subset of valid HTML tags
    if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
              re.findall(r'</?(\w+)',str)): return
    # all entities must have been defined as valid HTML entities
    from htmlentitydefs import entitydefs
    if filter(lambda e: e not in entitydefs.keys(),
              re.findall(r'&(\w+);',str)): return
    return 1
def _mapToStandardPrefix(self, name):
    """Rewrite 'prefix:suffix' using this parser's canonical prefixes."""
    colonpos = name.find(':')
    if colonpos <> -1:
        prefix = name[:colonpos]
        suffix = name[colonpos+1:]
        prefix = self.namespacemap.get(prefix, prefix)
        name = prefix + ':' + suffix
    return name
def _getAttribute(self, attrsD, name):
    """Look up an attribute, normalizing its namespace prefix first."""
    return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
    def _itsAnHrefDamnIt(self, attrsD):
        # Normalize the competing link-attribute spellings ('url', 'uri',
        # 'href') down to a single 'href' key, mutating and returning attrsD.
        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
        if href:
            try:
                del attrsD['url']
            except KeyError:
                pass
            try:
                del attrsD['uri']
            except KeyError:
                pass
            attrsD['href'] = href
        return attrsD
    def _save(self, key, value):
        # Record a value in the current context; first occurrence wins,
        # later duplicates are ignored.
        context = self._getContext()
        context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # Root element of an RSS feed: map its version attribute to an
        # internal token; unknown 2.x versions collapse to 'rss20', anything
        # else unrecognized to plain 'rss'.
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # Netscape 'hot titles' feeds are identified by their root element.
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # Enter the feed-level scope (RSS <channel>, CDF <feedinfo>).
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries metadata in attributes; replay them through the normal
        # element handlers by faking start/data/end events.
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # Root element of an Atom feed: map its version attribute; anything
        # without a recognized version is generic 'atom'.
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        # Leave the feed-level scope.
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # Enter <image>; child elements are routed into feed['image'].
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # Enter <textinput>/<textInput>; children go into feed['textinput'].
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # Author elements from the various vocabularies all funnel here.
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # reconcile the plain author string with author_detail
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> maps onto the 'publisher' slot.
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # Atom <contributor>: open a new entry in context['contributors'].
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries only a name, so push 'name' directly.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # Route the name to whichever construct is currently open:
        # publisher, author, contributor or textinput.
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        # <width> of an <image>; value parsed in _end_width.
        self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
    def _start_height(self, attrsD):
        # <height> of an <image>; value parsed in _end_height.
        self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        # url/homepage/uri all accumulate into 'href'.
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # Attach the email to whichever person construct is open.
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
    def _getContext(self):
        # Return the dict that parsed values should currently be written to;
        # precedence: source > image > textinput > current entry > feed.
        if self.insource:
            context = self.sourcedata
        elif self.inimage:
            context = self.feeddata['image']
        elif self.intextinput:
            context = self.feeddata['textinput']
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context
    def _save_author(self, key, value, prefix='author'):
        # Store one field of an author/publisher detail dict, then rebuild
        # the plain display string from it.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # Store one field of the most recently opened contributor.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # subtitle / tagline / itunes:subtitle are all plain-text content.
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # rights / dc:rights / copyright are all plain-text content.
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Begin a new entry (RSS <item>, Atom <entry>, hotrss <product>).
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        # RDF-based feeds identify items via rdf:about
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # the declared language also becomes the current parse language
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        # published / dcterms:issued / issued
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # store the parsed struct_time alongside the raw string
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        # updated / modified / dcterms:modified / pubDate / dc:date
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        # created / dcterms:created
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # cc:license is attribute-only: feed the rdf:resource value through
        # the element stack and close it immediately.
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
    def _addXFN(self, relationships, href, name):
        # Record an XFN relationship, de-duplicated, in context['xfn'].
        context = self._getContext()
        xfn = context.setdefault('xfn', [])
        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
        if value not in xfn:
            xfn.append(value)
    def _addTag(self, term, scheme, label):
        # Record a category/tag, de-duplicated, in context['tags'];
        # a completely empty tag is dropped.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(value)
    def _start_category(self, attrsD):
        # Atom <category> / RSS <category> / dc:subject / keywords.
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        # RSS uses 'domain' where Atom uses 'scheme'
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list of terms.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        # Element text can supply the term when the start tag did not.
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store its attributes verbatim.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Collect every link into context['links']; an alternate link of an
        # HTML type additionally becomes context['link'].
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        # an enclosure can stand in for a missing entry id
        if attrsD.get('rel')=='enclosure' and not context.get('id'):
            context['id'] = attrsD.get('href')
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        # NOTE(review): value/context are computed but unused here; the link
        # itself was already recorded in _start_link.
        value = self.pop('link')
        context = self._getContext()
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # RSS <guid>: remember whether it doubles as a permalink.
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # A <title> nested inside other content is treated as markup,
        # not as the feed/entry title.
        if self.incontent: return self.unknown_starttag('title', attrsD)
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        if not value: return
        context = self._getContext()
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        # A description arriving after a summary is promoted to content.
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        # <abstract> is plain text, unlike <description>.
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        # Atom 0.3 <info>; feedburner reuses it as browserFriendly.
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # <generator>: attributes describe it (url etc.), element text names it.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # admin:generatorAgent is attribute-only (rdf:resource holds the URL);
        # push/pop immediately to run it through the normal machinery.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # admin:errorReportsTo, same attribute-only pattern.
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        # A second summary-like element is promoted to content.
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # RSS <enclosure>: recorded as a rel='enclosure' link; its href
        # doubles as the entry id when none exists yet.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel']='enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href and not context.get('id'):
            context['id'] = href
    def _start_source(self, attrsD):
        # Atom <source>: children accumulate in self.sourcedata.
        self.insource = 1
    def _end_source(self):
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        # Atom out-of-line content via src=
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        # xhtml:body is inherently XHTML content
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # content:encoded / fullitem carry escaped HTML
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Text-ish content is mirrored into 'description' for callers that
        # only look there.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        # itunes:image carries its URL in the href attribute.
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        # normalize 'yes'/'no' text to 1/0
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX-based parser used for well-formed feeds.  Translates namespaced
        SAX events into the prefix:localname strings _FeedParserMixin expects;
        any parse error flips self.bozo and is stored in self.exc."""
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                    raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            # inline MathML/SVG keep their namespace declaration
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                     if name and value == namespace:
                         localname = name + ':' + localname
                         break
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            # mirror of startElementNS: rebuild the prefixed element name.
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                     if name and value == namespace:
                         localname = name + ':' + localname
                         break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # recoverable parse error: mark the feed as bozo but continue
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML-based HTML pass-through processor.

    Re-emits the parsed document (accumulated in self.pieces, joined by
    output()) while normalizing XML shorttags and escaping bare ampersands.
    Subclasses override the handle_*/unknown_* methods to filter or rewrite
    markup.  Several entity string literals below ('&lt;', '&amp;', '&#39;',
    ...) had been corrupted to their decoded characters, turning the replace
    calls into no-ops; they are restored here.
    """
    special = re.compile('''[<>'"]''')
    # an '&' not already starting a numeric or named entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # void elements: re-emitted as '<tag />', never given a close tag
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding, type):
        self.encoding = encoding
        self.type = type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        # output fragments accumulate here; joined by output()
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # expand XML shorttags: void elements to '<tag />', others to an
        # explicit open/close pair
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def parse_starttag(self,i):
        j=sgmllib.SGMLParser.parse_starttag(self, i)
        if self.type == 'application/xhtml+xml':
            # in XHTML mode a self-closing start tag is also an end tag
            if j>2 and self.rawdata[j-2:j]=='/>':
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # Escape markup declarations that are not DOCTYPE/comment/CDATA
        # (restored: the replacement must be '&lt;!\1', not the no-op '<!\1').
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        # pre-decode the two quote references sgmllib would mishandle
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs: return attrs
        # utility method to be called by descendants
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # re-escape markup characters and bare ampersands in values
                # (restored entity replacement strings)
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                uattrs.append((unicode(key, self.encoding), value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs=strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:],16))
        else:
            value = unichr(int(ref))
        # Windows-1252 code points are remapped to their Unicode equivalents
        if value in _cp1252.keys():
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # unknown entity: escape the ampersand so output stays well-formed
            # (restored: '&amp;', not the bare '&')
            self.pieces.append('&amp;%(ref)s' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # more lenient declaration-name scanner than sgmllib's default; on
        # failure the raw data is emitted instead of raising
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        # keep character references unexpanded in the output
        return '&#%s;' % name
    def convert_entityref(self, name):
        # keep entity references unexpanded in the output
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser built on sgmllib, used when strict XML parsing
    is unavailable or the feed is not well-formed.  The entity string
    literals in decodeEntities/strattrs had been corrupted to their decoded
    characters (making every replace a no-op); restored here."""
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # normalize numeric character references for the five XML special
        # characters to their named-entity forms
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # for non-XML content types, fully decode the named entities
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # serialize (name, value) attribute pairs, escaping embedded quotes
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
    """Extracts microformat data (tags, enclosures, XFN relationships,
    hCard vCards) from an HTML document using BeautifulSoup."""
    # property-type codes understood by getPropertyValue()
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5
    # XFN rel values recognized when collecting xfn relationships
    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me')]
    def __init__(self, data, baseuri, encoding):
        """Parse *data* (HTML) with BeautifulSoup and reset result holders."""
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        # NOTE(review): this re-encode happens after the soup was built and
        # the rebound local is never used again - looks like dead code;
        # confirm before removing.
        if type(data) == type(u''):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None
def vcardEscape(self, s):
if type(s) in (type(''), type(u'')):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        """Extract a microformat property from the soup under *elmRoot*.

        sProperty      -- class name identifying the property (e.g. 'fn').
        iPropertyType  -- one of STRING/DATE/URI/NODE/EMAIL (default NODE).
        bAllowMultiple -- return a list of every match instead of the first.
        bAutoEscape    -- vCard-escape string results.
        Returns ''/BeautifulSoup.Null/[] on no match, depending on the type.
        """
        # matches any element (deliberately shadows the builtin in this scope)
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        propertyMatch = re.compile(r'\b%s\b' % sProperty)
        # multi-valued non-node properties may be marked up as list items
        if bAllowMultiple and (iPropertyType != self.NODE):
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        # 'value' may fall back to a <pre> block, whose whitespace matters
        if (not bFound) and (sProperty == 'value'):
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
            if not bFound:
                snapResults = [elmRoot]
                bFound = (len(snapResults) != 0)
        arFilter = []
        # nested vcards must not contribute properties to the outer vcard
        if sProperty == 'vcard':
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # type-appropriate empty result
            if bAllowMultiple: return []
            elif iPropertyType == self.STRING: return ''
            elif iPropertyType == self.DATE: return BeautifulSoup.Null
            elif iPropertyType == self.URI: return ''
            elif iPropertyType == self.NODE: return BeautifulSoup.Null
            else: return BeautifulSoup.Null
        arValues = []
        for elmResult in arResults:
            sValue = BeautifulSoup.Null
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # EMAIL: prefer the mailto: href of an <a>
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # <abbr title="..."> carries the machine-readable value
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # URI: pull from the element's natural URL-bearing attribute
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a': sValue = elmResult.get('href')
                elif sNodeName == 'img': sValue = elmResult.get('src')
                elif sNodeName == 'object': sValue = elmResult.get('data')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            # last resort: the element's rendered text, markup stripped
            if not sValue:
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue: continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        """Serialize every hCard microformat under elmRoot as vCard 3.0 text.

        When bAgentParsing is true, elmRoot itself is treated as one vcard
        node (used when recursing into embedded AGENT cards); otherwise every
        class="vcard" element below elmRoot becomes one vCard.  Returns the
        concatenated BEGIN:vCard ... END:vCard blocks as a single string.
        """
        sVCards = ''
        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]
        for elmCard in arCards:
            arLines = []
            # Emit "PROP:value" for a simple single-valued string property;
            # returns the (unescaped-or-escaped) value for reuse by callers.
            def processSingleString(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or ''
            # Emit a URI-valued property; data: URIs are inlined with
            # base64 ENCODING, other URIs may get VALUE=uri and a TYPE
            # derived from the element's MIME type attribute.
            def processSingleURI(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    # generic octet-stream adds no information; drop it
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
            # Emit one "PROP;TYPE=...:value" line per matching child element,
            # with defaulted and/or forced TYPE parameters.
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    # embedded card: serialize recursively and escape for
                    # inclusion as a single vCard property value
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    elmAgent['class'] = ''
                    elmAgent.contents = BeautifulSoup.Null
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
            # FN (full name)
            sFN = processSingleString('fn')
            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                             sGivenName + ';' +
                                             ','.join(arAdditionalNames) + ';' +
                                             ','.join(arHonorificPrefixes) + ';' +
                                             ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    # heuristics for "Family, Given" / "Family G" / "Family G." forms
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
            # PHOTO
            processSingleURI('photo')
            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                             sPostOfficeBox + ';' +
                                             sExtendedAddress + ';' +
                                             sStreetAddress + ';' +
                                             sLocality + ';' +
                                             sRegion + ';' +
                                             sPostalCode + ';' +
                                             sCountryName))
            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])
            # TEL (phone number)
            processTypeValue('tel', ['voice'])
            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])
            # MAILER
            processSingleString('mailer')
            # TZ (timezone)
            processSingleString('tz')
            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
            # TITLE
            processSingleString('title')
            # ROLE
            processSingleString('role')
            # LOGO
            processSingleURI('logo')
            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
            # NOTE
            processSingleString('note')
            # REV
            processSingleString('rev')
            # SOUND
            processSingleURI('sound')
            # UID
            processSingleString('uid')
            # URL
            processSingleURI('url')
            # CLASS
            processSingleString('class')
            # KEY
            processSingleURI('key')
            if arLines:
                # wrap the accumulated property lines in a vCard 3.0 envelope
                arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
                sVCards += '\n'.join(arLines) + '\n'
        return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'): return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1: return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href: continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
tag = segments.pop()
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', '').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Extract microformat data (rel-tag, enclosures, XFN, hCard) from HTML.

    Returns a dict with 'tags', 'enclosures', 'xfn' and 'vcard' keys, or
    None when BeautifulSoup is not available.
    """
    if not BeautifulSoup: return
    if _debug: sys.stderr.write('entering _parseMicroformats\n')
    parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures, "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites relative URIs against a base URI.

    Only attributes known to carry URIs (the (tag, attribute) pairs below)
    are resolved; all other markup passes through untouched.
    """
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding, type):
        _BaseHTMLProcessor.__init__(self, encoding, type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # strip stray whitespace before joining with the document base
        return _urljoin(self.baseuri, uri.strip())

    def unknown_starttag(self, tag, attrs):
        normalized = self.normalize_attrs(attrs)
        rewritten = []
        for key, value in normalized:
            if (tag, key) in self.relative_uris:
                # keep the raw value if resolution yields an empty result
                value = self.resolveURI(value) or value
            rewritten.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, rewritten)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
    """Return htmlSource with every known relative URI made absolute against baseURI."""
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, type)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Whitelist-based HTML sanitizer.

    Drops any element or attribute not explicitly listed as acceptable,
    suppresses the text content of dangerous containers such as <script>,
    and filters inline style attributes down to a safe CSS subset.  MathML
    and SVG markup is admitted only after the corresponding xmlns
    declaration has been seen on a <math> or <svg> start tag.
    """
    # HTML elements allowed through; anything else is stripped.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b',
        'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
        'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
        'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
        'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
        'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
        'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
        'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
        'ul', 'var']
    # HTML attributes allowed on acceptable elements.
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'axis', 'border', 'cellpadding',
        'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
        'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
        'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
        'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
        'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
        'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
        'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
        'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
        'type', 'usemap', 'valign', 'value', 'vspace', 'width', 'xml:lang']
    # Elements whose entire content must be suppressed, not just the tags.
    unacceptable_elements_with_end_tag = ['script', 'applet']
    # CSS properties allowed through sanitize_style().
    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']
    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']
    # Non-keyword values must match this: hex colors, rgb() triplets,
    # or simple lengths/percentages.
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
        '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
        'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image',
        'linearGradient', 'line', 'metadata', 'missing-glyph', 'mpath', 'path',
        'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg',
        'switch', 'text', 'title', 'use']
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd',
        'descent', 'display', 'dur', 'end', 'fill', 'fill-rule', 'font-family',
        'font-size', 'font-stretch', 'font-style', 'font-variant',
        'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'max',
        'min', 'name', 'offset', 'opacity', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'repeatCount', 'repeatDur',
        'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx',
        'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity',
        'strikethrough-position', 'strikethrough-thickness', 'stroke',
        'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-width',
        'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type',
        'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode',
        'unicode-range', 'units-per-em', 'values', 'version', 'viewBox',
        'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2',
        'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role',
        'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang',
        'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']
    # Lazily-built lowercase -> camelCase maps for SVG names; populated on
    # first use in unknown_starttag().
    svg_attr_map = None
    svg_elem_map = None
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']
    def reset(self):
        # Reset parser state for reuse: no suppressed content, and neither
        # the MathML nor the SVG namespace has been declared yet.
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0
    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        # NOTE: operator precedence makes this (not tag in acceptable) or svgOK,
        # so while inside an <svg> even ordinary tag names take this path
        # (allowing the camelCase element remapping below).
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK = 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK = 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            else:
                return
        # declare xlink namespace, if needed
        # (Python 2 tuple-parameter lambda: any xlink:* attribute present?)
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                # restore camelCase spelling for SVG attributes
                key=keymap.get(key,key)
                clean_attrs.append((key,value))
            elif key=='style':
                # style attributes pass through a CSS whitelist
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            # leaving the root <math>/<svg> re-disables that vocabulary
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math': self.mathmlOK = 0
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg': self.svgOK = 0
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # declarations (doctypes etc.) are dropped entirely
        pass
    def handle_data(self, text):
        # text inside suppressed containers (script/applet) is discarded
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        """Return *style* reduced to whitelisted CSS properties/values, or ''."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # shorthand properties: every token must be a safe keyword
                # or match valid_css_values, else the whole declaration is dropped
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, type):
    """Sanitize an HTML fragment through _HTMLSanitizer and, when TIDY_MARKUP
    is enabled and a Tidy binding is installed, clean it up with HTML Tidy
    and return only the <body> content."""
    p = _HTMLSanitizer(encoding, type)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # this Tidy binding isn't importable; try the next one
                pass
        if _tidy:
            # NOTE(review): despite the name, `utf8` records "input was a
            # unicode string"; tidy operates on UTF-8 bytes, so encode
            # before the call and decode afterwards.
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the markup inside <body>...</body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that attaches the HTTP status code to the returned
    file-like object, follows 30x redirects, and retries basic-auth
    requests with digest auth when the server demands it."""
    def http_error_default(self, req, fp, code, msg, headers):
        # any 3xx other than 304 is treated as a redirect; every other
        # status is returned as-is with the code recorded on the result
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # only follow the redirect when a Location header was actually sent
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # same logic as http_error_302, delegating to the 301 handler
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # 300, 303 and 307 are handled exactly like 302
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # no basic credentials, too-old Python, or a malformed header:
            # fall back to default 401 handling
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: return it untouched
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL and send them
                    # in an Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # iri support
        try:
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
        except:
            # not IDNA-encodable (or plain ASCII): use the URL as-is
            pass
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if type(modified) == type(''):
            modified = _parse_date(modified)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise compression support so servers can send smaller payloads
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}(\.\d*)?))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each pre-built template regex in order; template order matters
    # because the list is arranged for a greedy match.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    # the last template is '' which matches anything with an empty span
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # no year in the input: default to the current (GMT) year
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        # shift the wall-clock fields toward UTC; mktime normalizes overflow
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    # NOTE(review): mktime() interprets tm as *local* time, so the result
    # is skewed by the local UTC offset -- confirm this is intended.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Hangul characters and AM/PM markers that appear in Korean date strings.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# "YYYY[year] MM[month] DD[day] hh:mm:ss" with Hangul unit markers
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
    (_korean_year, _korean_month, _korean_day))
# "YYYY-MM-DD [AM|PM] h:m:s" with Hangul AM/PM markers
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
    (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if not match: return
    # rebuild as a W3DTF date (OnBlog timestamps are Korean local time, +09:00)
    fields = {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
              'hour': match.group(4), 'minute': match.group(5), 'second': match.group(6),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match: return
    # convert the 12-hour clock reading to a zero-padded 24-hour value
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    hour = '%02d' % hour24
    # rebuild as a W3DTF date (Nate timestamps are Korean local time, +09:00)
    fields = {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
              'hour': hour, 'minute': match.group(6), 'second': match.group(7),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    match = _mssql_date_re.match(dateString)
    if not match: return
    # rebuild as a W3DTF date (note: assumes a +09:00 zone, like the
    # Korean handlers above)
    fields = {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
              'hour': match.group(4), 'minute': match.group(5), 'second': match.group(6),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Greek month-name abbreviations (several spelling variants each) mapped to
# the English abbreviations understood by the RFC822 parser.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }
# Greek weekday abbreviations mapped to English ones.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# "<wday>, DD <month> YYYY HH:MM:SS <zone>" with Greek day/month names
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.
    Translates the Greek weekday/month names to English and delegates to
    the RFC822 parser.  Returns None if the string does not match.
    '''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # Only the two table lookups can fail here; an unknown weekday or
        # month name means this is not a Greek date after all.  (Previously a
        # bare except, which also swallowed KeyboardInterrupt/SystemExit.)
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
        {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
         'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
         'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.
    Translates the Hungarian month name via _hungarian_months and delegates
    to the W3DTF parser.  Returns None if the string does not match.
    '''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
    except KeyError:
        # Unknown month name: not a Hungarian date after all.  (Previously a
        # bare except over the whole body; only this lookup can raise, and a
        # bare except also swallowed KeyboardInterrupt/SystemExit.)
        return
    # zero-pad single-digit day and hour so the W3DTF regex accepts them
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
        {'year': m.group(1), 'month': month, 'day': day,\
         'hour': hour, 'minute': m.group(5),\
         'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    # Parse a W3C Date and Time Format (ISO 8601 profile) string into a
    # 9-tuple in GMT.  Returns None when the string is not entirely a valid
    # W3DTF date.
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals an unusable year.
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Ordinal (day-of-year) date: start from a rough month/day guess
            # and let mktime/gmtime converge on the exact calendar date.
            julian = int(julian)
            month = julian / 30 + 1  # Python 2 integer division
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hours, minutes, seconds); all zero when no time was given.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        # NOTE: the sign is inverted ('+hh:mm' yields a negative value)
        # because the caller ADDS this offset to a mktime() result below.
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset
    # date part: YYYY[-MM[-DD]] or YYYY[-DDD] (ordinal); the separator may
    # be omitted but must be consistent (backreference to 'dsep')
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    # time part: HH:MM[:SS[.fff]] plus a mandatory zone designator
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the match must consume the whole string, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets the tuple as local time; subtracting time.timezone
    # and adding the (sign-inverted) zone offset yields UTC
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    tokens = dateString.split()
    # drop a leading day name ("Thu," / "Thu." / "Thu")
    if tokens[0][-1] in (',', '.') or tokens[0].lower() in rfc822._daynames:
        del tokens[0]
    if len(tokens) == 4:
        # four tokens: either the time and zone are fused ("12:00:00+0100")
        # or the zone is missing entirely
        last = tokens[3]
        plus = last.find('+')
        if plus > 0:
            tokens[3:] = [last[:plus], last[plus+1:]]
        else:
            tokens.append('')
        dateString = " ".join(tokens)
    if len(tokens) < 5:
        # date only: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
# (values follow rfc822._timezones' convention; e.g. 'ET' pairs with
# rfc822's 'EST' entry)
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    Returns a 5-tuple: (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        # stray single quotes around the charset value are invalid but common
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # NOTE: check order matters -- the 2-byte UTF-16 BOM tests exclude the
    # UTF-32 BOMs via the third/fourth-byte guard before the 4-byte tests run.
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # decoding blew up; treat the document as having no usable declaration
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # a declaration naming a generic 2/4-byte encoding is less specific
        # than the byte order we just sniffed, so the sniffed value wins
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # precedence rules below implement RFC 3023 as described in the docstring
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    Returns the document re-encoded as UTF-8 with a matching XML declaration.
    Raises (e.g. UnicodeDecodeError) if the data cannot be decoded.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    # NOTE: a BOM also overrides the caller-supplied encoding, since the BOM
    # determines the actual byte order; the UTF-16 tests must come before the
    # UTF-32 ones (the '\x00\x00' guard routes 4-byte BOMs past them)
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        # UTF-16BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        # UTF-16LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        # UTF-8 BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        # UTF-32BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        # UTF-32LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # rewrite the XML declaration to say utf-8 (or prepend one if missing)
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE
    Actually returns a 3-tuple; the third element is a dict of the 'safe'
    inline entity definitions that were preserved.
    '''
    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
    entity_results=entity_pattern.findall(data)
    data = entity_pattern.sub('', data)
    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
    doctype_results = doctype_pattern.findall(data)
    doctype = doctype_results and doctype_results[0] or ''
    # Netscape's RSS 0.91 DOCTYPE is the only version signal taken from here
    if doctype.lower().count('netscape'):
        version = 'rss091n'
    else:
        version = None
    # only allow in 'safe' inline entity definitions
    replacement=''
    if len(doctype_results)==1 and entity_results:
        # 'safe' means: a simple name mapped to a numeric character
        # reference or a string containing no '&' or '"'
        safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
        safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
        if safe_entities:
            replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities)
    data = doctype_pattern.sub(replacement, data)
    # NOTE: when replacement is '' the 'and' short-circuits to '' (-> empty
    # dict), so safe_pattern being unbound on that path is deliberate
    return version, data, dict(replacement and safe_pattern.findall(replacement))
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data, entities = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
try:
proposed_encoding = 'iso-8859-2'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'documented declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
class Serializer:
    '''Base class for output serializers; simply stores the parse results.'''
    def __init__(self, results):
        self.results = results
class TextSerializer(Serializer):
    # Writes the results as flat 'dotted.key.path=value' lines, one per leaf.
    def write(self, stream=sys.stdout):
        self._writer(stream, self.results, '')
    def _writer(self, stream, node, prefix):
        # Recursive worker: dicts recurse per sorted key, lists recurse per
        # index, anything else is written as a single 'prefix=value' line.
        if not node: return
        if hasattr(node, 'keys'):
            keys = node.keys()
            keys.sort()
            for k in keys:
                # 'description' and 'link' are skipped outright; a key is
                # also skipped when its '_detail'/'_parsed' twin exists
                if k in ('description', 'link'): continue
                if node.has_key(k + '_detail'): continue
                if node.has_key(k + '_parsed'): continue
                self._writer(stream, node[k], prefix + k + '.')
        elif type(node) == types.ListType:
            index = 0
            for n in node:
                self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
                index += 1
        else:
            try:
                # escape backslashes and fold newlines so every value stays
                # on a single output line
                s = str(node).encode('utf-8')
                s = s.replace('\\', '\\\\')
                s = s.replace('\r', '')
                s = s.replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(s)
                stream.write('\n')
            except:
                # best effort: values that cannot be stringified/encoded
                # are silently skipped
                pass
class PprintSerializer(Serializer):
    # Writes the source URL first, then a pretty-printed dump of the results.
    def write(self, stream=sys.stdout):
        stream.write(self.results['href'] + '\n\n')
        import pprint
        pprint.pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line interface: parse each URL/file argument and write the
    # results using the serializer selected by --format ('text' or 'pprint').
    try:
        from optparse import OptionParser
    except:
        # optparse is unavailable on very old Pythons; fall back below
        OptionParser = None
    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        # minimal stand-in for the optparse options object
        class _Options:
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]
    zopeCompatibilityHack()
    # 'text' -> TextSerializer, 'pprint' -> PprintSerializer; unknown
    # formats fall back to the base Serializer (which has no write method)
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if no xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
#4.2 - MAP - added support for parsing microformats within content elements:
# currently supports rel-tag (maps to 'tags'), rel-enclosure (maps to
# 'enclosures'), XFN links within content elements (maps to 'xfn'),
# and hCard (parses as vCard); bug [ 1481975 ] Misencoded utf-8/win-1252
| Python |
#!/bin/env python2.5
__version__ = "0.1.0"
import os
import sys
sys.path.append(os.getcwd())
from optparse import OptionParser, SUPPRESS_HELP, make_option
from atompubbase.model import Entry, Collection, Service, Context, APP, ATOM
from httplib2 import Http
import pickle
import itertools
import mimetypes
import copy
import httplib2
# NOTE(review): debuglevel=4 turns on verbose httplib2 wire logging for the
# whole process; presumably a leftover debugging aid -- confirm before release.
httplib2.debuglevel=4
class _Session(object):
    # State that persists between command invocations; pickled to the
    # session file by _save_session and read back by _restore_session.
    context = Context()       # atompubbase Context: service/collection/entry URIs + http client
    authtype = None           # reserved for future auth schemes; unused by the commands below
    credentials = None        # path to the credentials file, if any
    cache = None              # directory for the httplib2 HTTP cache, if any
def _find_session_file(options):
"""
command line options
$APEXER_CMD_LINE_SESSION
$HOME/.apexer/session
"""
dir = options.session
if not dir:
dir = os.environ.get("APEXER_CMD_LINE_SESSION")
if not dir:
home = os.environ.get("HOME")
if home:
home_dir = os.path.join(home, ".apexer")
if not os.path.exists(home_dir):
os.mkdir(home_dir, 0700)
dir = os.path.join(home, ".apexer", "session")
if not dir:
perror("Could not locate the session file.")
return dir
def _save_session(session, options):
    """Pickle *session* to the session file.

    The live Http object is not picklable, so it is dropped first;
    _restore_session recreates it on load.
    """
    session.context.http = None
    target = _find_session_file(options)
    out = file(target, "w")
    pickle.dump(session, out)
    out.close()
def apply_credentials(session, credentials):
    """Load a username/password pair from the *credentials* file and
    register it with the session's HTTP client.

    The first two whitespace-separated tokens in the file are taken as
    username and password. Aborts with a usage error if the file cannot
    be read.
    """
    try:
        src = file(credentials, "r")
    except IOError:
        perror("Unable to load the credentials file %s, file is missing or unreadable." % credentials)
    name, password = src.read().split()[:2]
    src.close()
    session.context.http.add_credentials(name, password)
def _restore_session(options):
    """Unpickle the session from disk and re-attach a live Http client
    (re-applying saved credentials, if any).

    Aborts with a usage error if the session file cannot be read.
    """
    fname = _find_session_file(options)
    try:
        stored = file(fname, "r")
    except IOError:
        perror("Unable to load the session file %s, file is missing or unreadable." % fname)
    session = pickle.load(stored)
    stored.close()
    session.context.http = Http(session.cache)
    if session.credentials:
        apply_credentials(session, session.credentials)
    return session
# Create a prototype OptionParser configured the way we
# want that will be cloned (copy.deepcopy) for each operation.
baseparser = OptionParser(usage="", conflict_handler="resolve")
# Add in options common to all commands
baseparser.add_option("-s", "--session", dest="session", help="A file for the session state.")
# -h is registered but suppressed from help output (SUPPRESS_HELP).
baseparser.add_option("-h", "--help", help=SUPPRESS_HELP)
# Define options to be used by multiple commands
RAW = make_option("-r", "--raw", dest="raw", action="store_true", help="Print the raw response body received.")
INCLUDE = make_option("-i", "--include", dest="include", action="store_true", help="Print the HTTP headers received.")
# NOTE(review): apply_credentials splits on any whitespace, so a same-line
# "user pass" credentials file also works despite this help text.
CREDENTIALS = make_option("-d", "--credentials", dest="credentials", help="File containing credentials. The file must contain a username on one line and the password on the next.")
CACHE = make_option("-c", "--cache", dest="cache", help="Directory to store the HTTP cache in.")
def require_service(service):
    """Exit with a usage error unless a service document has been selected."""
    if service is None:
        baseparser.error("Please select a service document to start with first.")
def require_collection(collection):
    """Exit with a usage error unless a collection has been selected."""
    if collection is None:
        baseparser.error("Please select a collection to work with first.")
def require_entry(entry):
    """Exit with a usage error unless an entry has been selected."""
    if entry is None:
        baseparser.error("Please select an entry to work with first.")
def perror(s):
    # Report *s* as a usage error; OptionParser.error prints and exits,
    # so callers can rely on perror never returning.
    baseparser.error(s)
def response_options(options, headers, body):
    # Honor the shared -i/--include and -r/--raw flags: optionally dump
    # the response headers and/or the raw response body to stdout.
    if options.include:
        print "\n".join(["%s: %s" % (k, v) for (k,v) in headers.iteritems()])
    if options.raw:
        if options.include:
            print
        print body
def service(args):
    """service: Set the service document to work from.
    usage: service [URI]

    Sets the service document to be the one located at URI.
    """
    # Starts a brand new session keyed off the given service document URI.
    (options, args) = service.parser.parse_args(args)
    if len(args) != 1:
        perror("Incorrect number of arguments.")
    session = _Session()
    session.cache = options.cache
    h = Http(session.cache)
    session.context = Context(http=h, service=args[0])
    if options.credentials:
        apply_credentials(session, options.credentials)
        session.credentials = options.credentials
    s = Service(session.context)
    headers, body = s.get()
    if headers.status == 200:
        # Only persist the session if the document really is an APP service
        # document (root element {http://www.w3.org/2007/app}service).
        if s.etree() and "{http://www.w3.org/2007/app}service" == s.etree().tag:
            _save_session(session, options)
        else:
            perror("The document at %s in not a valid service document" % args[0])
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)
# Each command owns a deep copy of the prototype parser plus the
# shared options it actually supports.
service.parser = copy.deepcopy(baseparser)
service.parser.add_option(CREDENTIALS)
service.parser.add_option(CACHE)
service.parser.add_option(RAW)
service.parser.add_option(INCLUDE)
def lc(args):
    """lc: List all the collections
    usage: lc

    List the collections enumerated in a service document.
    """
    (options, args) = lc.parser.parse_args(args)
    if len(args) != 0:
        perror("Incorrect number of arguments.")
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_service(service)
    # Print "index  title" for every app:collection in the service document;
    # the index is what the 'collection' command accepts.
    for index, c in enumerate(service.etree().findall(".//{%s}collection" % APP)):
        etitle = c.find("{%s}title" % ATOM)
        if None != etitle:
            title = etitle.text
        else:
            title = "untitled"
        print index, " ", title
lc.parser = copy.deepcopy(baseparser)
def collection(args):
    """collection: Select a collection from a service document to work with.
    usage: collection INDEX

    Select the collection by index number from all the
    collections listed in the service document.
    """
    this = collection
    (options, args) = this.parser.parse_args(args)
    if len(args) != 1:
        perror("Incorrect number of arguments.")
    try:
        # A numeric argument selects a collection by index from the current
        # session's service document; anything non-numeric raises ValueError
        # here and is treated as a collection URI in the handler below.
        int(args[0])
        session = _restore_session(options)
        if options.credentials:
            apply_credentials(session, options.credentials)
            session.credentials = options.credentials
        if options.cache:
            session.cache = options.cache
        service, c, entry = session.context.restore(Service, Collection, Entry)
        require_service(service)
        index = int(args[0])
        collections = list(service.iter())
        if index < 0 or index >= len(collections):
            # BUG FIX: the original used "% len(collections)-1", which applies
            # the % operator before the subtraction and raises TypeError.
            perror("INDEX out of range. Expected a number from 0 to %d" % (len(collections) - 1))
        c = Collection(collections[index])
    except ValueError:
        # Non-numeric argument: start a fresh session from the collection URI.
        session = _Session()
        session.cache = options.cache
        h = Http(session.cache)
        session.context = Context(http=h, collection=args[0])
        if options.credentials:
            apply_credentials(session, options.credentials)
            session.credentials = options.credentials
        c = Collection(session.context)
    headers, body = c.get()
    if headers.status == 200:
        # Persist the session only if the document is an Atom feed.
        if c.etree() and "{http://www.w3.org/2005/Atom}feed" == c.etree().tag:
            session.context = c.context()
            _save_session(session, options)
        else:
            # Also fixes the "in not" typo in the original error message.
            perror("The document at %s is not a valid collection document" % c.context().collection)
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)
collection.parser = copy.deepcopy(baseparser)
collection.parser.add_option(RAW)
collection.parser.add_option(INCLUDE)
collection.parser.add_option(CREDENTIALS)
collection.parser.add_option(CACHE)
def ls(args):
    """ls: List the entries
    usage: ls

    List the entries in a collection.
    """
    (options, args) = ls.parser.parse_args(args)
    if len(args) != 0:
        ls.parser.error("Incorrect number of arguments.")
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    # Show only the first 10 entries unless -l/--all was given
    # (islice(..., None) iterates everything).
    stop = 10
    if options.all:
        stop = None
    for index, e in itertools.islice(enumerate(collection.iter_entry()), stop):
        etitle = e.find("{%s}title" % ATOM)
        if None != etitle:
            title = etitle.text
        else:
            title = "untitled"
        print index, " ", title
ls.parser = copy.deepcopy(baseparser)
ls.parser.add_option("-l", "--all", dest="all", action="store_true", help="Print all the entries, not just a subset.")
def entry(args):
    """entry: Select an entry from a collection to work with.
    usage: entry [INDEX]

    Select the entry by index number from all the
    entries listed in the collection.
    """
    (options, args) = entry.parser.parse_args(args)
    if len(args) != 1:
        entry.parser.error("Incorrect number of arguments.")
    session = _restore_session(options)
    if options.credentials:
        session.credentials = options.credentials
        apply_credentials(session, options.credentials)
    if options.cache:
        session.cache = options.cache
    service, collection, e = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    # ROBUSTNESS FIX: a non-numeric or out-of-range INDEX used to surface
    # as a raw ValueError/StopIteration traceback; report usage errors instead.
    try:
        index = int(args[0])
    except ValueError:
        perror("INDEX must be an integer.")
    try:
        e = Entry(itertools.islice(collection.iter(), index, index + 1).next())
    except StopIteration:
        perror("INDEX out of range.")
    headers, body = e.get()
    if headers.status == 200:
        # Persist the session only if the document is an Atom entry.
        if e.etree() and "{http://www.w3.org/2005/Atom}entry" == e.etree().tag:
            session.context = e.context()
            _save_session(session, options)
        else:
            perror("The document at %s in not a valid entry document" % e.context().entry)
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)
entry.parser = copy.deepcopy(baseparser)
entry.parser.add_option(RAW)
entry.parser.add_option(INCLUDE)
entry.parser.add_option(CREDENTIALS)
entry.parser.add_option(CACHE)
def create(args):
    """create: Create a new member in a collection.
    usage: create [FILENAME]

    Create a new member in the collection. The FILENAME
    points to the body to POST to the collection. If FILENAME is not
    supplied then the input is read from from stdin. If no
    content-type is given then it is sniffed from the filename.
    """
    (options, args) = create.parser.parse_args(args)
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    filename = ''
    contenttype = options.contenttype
    if len(args) == 1:
        filename = args[0]
        if not contenttype:
            # BUG FIX: guess_type() returns a (type, encoding) tuple; the
            # original stored the whole tuple as the content type.
            contenttype = mimetypes.guess_type(filename)[0]
        f = file(filename, "r")
        body = f.read()
        f.close()
    else:
        body = sys.stdin.read()
    if not contenttype:
        perror("Could not determine the mime-type of the data. Please supply it via --content-type.")
    entry = collection.entry_create(headers={'content-type': contenttype}, body=body)
    if entry:
        session.context = entry
        _save_session(session, options)
    else:
        # BUG FIX: the original error message referenced an undefined
        # 'headers' variable here and would itself raise a NameError.
        perror("Failed to create a new member in the collection.")
create.parser = copy.deepcopy(baseparser)
create.parser.add_option("-c", "--content-type", dest="contenttype", help="Set the mime-type of the content being sent")
def get(args):
    """get: Retrieve the entry and store it locally.
    usage: get [FILENAME]

    Retrieve the entry and store it locally.
    Use the given filename, or 'entry' if none
    is given.
    """
    (options, args) = get.parser.parse_args(args)
    filename = args[0] if len(args) == 1 else 'entry'
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.get()
    if headers.status != 200:
        # perror never returns, so response_options below is skipped on error.
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    else:
        out = file(filename, "w")
        out.write(body)
        out.close()
    response_options(options, headers, body)
get.parser = copy.deepcopy(baseparser)
get.parser.add_option(RAW)
get.parser.add_option(INCLUDE)
def getmedia(args):
    """ getmedia: Retrieve the entry and store it locally.
    usage: getmedia [FILENAME]

    Retrieve the associate media entry and store it locally.
    Use the given filename, or 'media.{ext}' if none
    is given, where {ext} is the appropriate extension for
    the media type returned.
    """
    # BUG FIX: the original parsed with get.parser, so getmedia's own
    # parser (and its help output) were never used.
    (options, args) = getmedia.parser.parse_args(args)
    # NOTE(review): the docstring promises a 'media.{ext}' default but no
    # extension sniffing is implemented; the default is plain 'media'.
    filename = 'media'
    if len(args) == 1:
        filename = args[0]
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.get_media()
    if headers.status == 200:
        f = file(filename, "wb")
        f.write(body)
        f.close()
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
getmedia.parser = copy.deepcopy(baseparser)
def put(args):
    """put: Store the entry back to the server.
    usage: put [FILENAME]

    Store the entry back to the server.
    Use the given filename, or 'entry' if none
    is given.
    """
    (options, args) = put.parser.parse_args(args)
    filename = args[0] if len(args) == 1 else 'entry'
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    src = file(filename, "r")
    payload = src.read()
    src.close()
    headers, body = entry.put(headers={'content-type': 'application/atom+xml;type=entry'}, body=payload)
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
put.parser = copy.deepcopy(baseparser)
def putmedia(args):
    """putmedia: Store the entry back to the server.
    usage: putmedia [FILENAME]

    Store the media back to the server.
    Use the given filename, or 'media' if none
    is given.
    """
    # BUG FIX: the original parsed with put.parser, so putmedia's own
    # parser (and its help output) were never used.
    (options, args) = putmedia.parser.parse_args(args)
    filename = 'media'
    if len(args) == 1:
        filename = args[0]
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    f = file(filename, "rb")
    body = f.read()
    f.close()
    headers, body = entry.put_media(body=body)
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
putmedia.parser = copy.deepcopy(baseparser)
def delete(args):
    """delete: Delete the entry from the collection.
    usage: delete

    Delete the entry from the collection.
    """
    # CLEANUP: the original parsed an optional FILENAME argument (and
    # advertised it in the usage line) but never used it -- copy-paste
    # cruft from get/put. The argument was ignored, so dropping it is
    # behavior-preserving.
    (options, args) = delete.parser.parse_args(args)
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.delete()
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
delete.parser = copy.deepcopy(baseparser)
# Meta commands -------------------------------------------
def status(args):
    """status (st): Print the status of the session.
    usage: status

    Print the status of the session, including the
    URIs of the service document, the collection
    and the current entry.
    """
    (options, args) = status.parser.parse_args(args)
    session = _restore_session(options)
    print "Service Document : ", session.context.service or "(none)"
    print "Collection : ", session.context.collection or "(none)"
    print "Entry : ", session.context.entry or "(none)"
    print "HTTP Cache : ", session.cache or "(none)"
    fname = _find_session_file(options)
    print "Session File : ", fname or "(none)"
    print "Credentials File : ", session.credentials or "(none)"
status.parser = copy.deepcopy(baseparser)
# Create an alias of 'st' for 'status'
st = status
def help(args):
"""usage: apexer <subcommand> [options] [args]
Apexer command-line client, version %(__version__)s.
Type 'apexer help <subcommand>' for help on a specific subcommand.
Most subcommands take URI and/or index arguments.
Session State
Session state is search for in the following places
in the following order:
- command line argument
- $APEXER_CMD_LINE_SESSION
- $HOME/.apexer/session
Available subcommands:
"""
try:
name = args[0]
except:
name = "help"
if name in members:
print members[name].__doc__ % globals()
options = getattr(members[name], "parser", None)
if options and name != "help":
options.print_help()
if name == "help":
names = list(members.iterkeys())
names.sort()
print " ",
print "\n ".join(names)
else:
perror("'%s' is not a valid command.\nType 'apexer help' for usage." % cmd)
help.parser = copy.deepcopy(baseparser)
# Registry of user-visible subcommands: every public module-level callable
# that carries a .parser attribute (i.e. the command functions defined above).
members = dict([(k,v) for (k,v) in copy.copy(globals()).iteritems() if not k.startswith("_") and callable(v) and hasattr(v, "parser")])
if __name__ == "__main__":
    # Dispatch: the first CLI argument names the subcommand; everything
    # after it is handed to that subcommand's own option parser.
    try:
        cmd = sys.argv[1]
    except:
        cmd = "help"
    args = sys.argv[2:]
    if cmd not in members:
        perror("'%s' is not a valid command.\nType 'apexer help' for usage." % cmd)
    else:
        members[cmd](args)
| Python |
#!/bin/env python2.5
__version__ = "0.1.0"
import os
import sys
sys.path.append(os.getcwd())
from optparse import OptionParser, SUPPRESS_HELP, make_option
from atompubbase.model import Entry, Collection, Service, Context, APP, ATOM
from httplib2 import Http
import pickle
import itertools
import mimetypes
import copy
import httplib2
# NOTE(review): debuglevel=4 turns on verbose httplib2 wire logging for the
# whole process; presumably a leftover debugging aid -- confirm before release.
httplib2.debuglevel=4
class _Session(object):
    # State persisted between command invocations; pickled to the session
    # file by _save_session and read back by _restore_session.
    context = Context()       # service/collection/entry URIs plus http client
    authtype = None           # reserved; unused by the commands below
    credentials = None        # path to the credentials file, if any
    cache = None              # directory for the httplib2 HTTP cache, if any
def _find_session_file(options):
    """
    command line options
    $APEXER_CMD_LINE_SESSION
    $HOME/.apexer/session
    """
    # Lookup order: --session flag, then the environment variable, then
    # $HOME/.apexer/session (creating the directory on first use).
    dir = options.session
    if not dir:
        dir = os.environ.get("APEXER_CMD_LINE_SESSION")
    if not dir:
        home = os.environ.get("HOME")
        if home:
            home_dir = os.path.join(home, ".apexer")
            if not os.path.exists(home_dir):
                os.mkdir(home_dir, 0700)
            dir = os.path.join(home, ".apexer", "session")
    if not dir:
        perror("Could not locate the session file.")
    return dir
def _save_session(session, options):
    # The live Http object is not picklable; drop it (restore recreates it).
    session.context.http = None
    fname = _find_session_file(options)
    f = file(fname, "w")
    pickle.dump(session, f)
    f.close()
def apply_credentials(session, credentials):
    """Read a username/password pair from *credentials* and register it
    with the session's HTTP client."""
    try:
        f = file(credentials, "r")
    except IOError:
        perror("Unable to load the credentials file %s, file is missing or unreadable." % credentials)
    # First two whitespace-separated tokens are username and password.
    name, password = f.read().split()[0:2]
    f.close()
    session.context.http.add_credentials(name, password)
def _restore_session(options):
    """Unpickle the session and re-attach a live Http client."""
    fname = _find_session_file(options)
    try:
        f = file(fname, "r")
    except IOError:
        perror("Unable to load the session file %s, file is missing or unreadable." % fname)
    session = pickle.load(f)
    f.close()
    session.context.http = Http(session.cache)
    if session.credentials:
        apply_credentials(session, session.credentials)
    return session
# Create a prototype OptionParser configured the way we
# want that will be cloned (copy.deepcopy) for each operation.
baseparser = OptionParser(usage="", conflict_handler="resolve")
# Add in options common to all commands
baseparser.add_option("-s", "--session", dest="session", help="A file for the session state.")
# -h is registered but suppressed from help output (SUPPRESS_HELP).
baseparser.add_option("-h", "--help", help=SUPPRESS_HELP)
# Define options to be used by multiple commands
RAW = make_option("-r", "--raw", dest="raw", action="store_true", help="Print the raw response body received.")
INCLUDE = make_option("-i", "--include", dest="include", action="store_true", help="Print the HTTP headers received.")
CREDENTIALS = make_option("-d", "--credentials", dest="credentials", help="File containing credentials. The file must contain a username on one line and the password on the next.")
CACHE = make_option("-c", "--cache", dest="cache", help="Directory to store the HTTP cache in.")
def require_service(service):
    # Abort with a usage error unless a service document has been selected.
    if None == service:
        baseparser.error("Please select a service document to start with first.")
def require_collection(collection):
    # Abort with a usage error unless a collection has been selected.
    if None == collection:
        baseparser.error("Please select a collection to work with first.")
def require_entry(entry):
    # Abort with a usage error unless an entry has been selected.
    if None == entry:
        baseparser.error("Please select an entry to work with first.")
def perror(s):
    # Report *s* as a usage error; OptionParser.error prints and exits.
    baseparser.error(s)
def response_options(options, headers, body):
    # Honor -i/--include and -r/--raw: dump response headers and/or body.
    if options.include:
        print "\n".join(["%s: %s" % (k, v) for (k,v) in headers.iteritems()])
    if options.raw:
        if options.include:
            print
        print body
def service(args):
    """service: Set the service document to work from.
    usage: service [URI]

    Sets the service document to be the one located at URI.
    """
    # Starts a brand new session keyed off the given service document URI.
    (options, args) = service.parser.parse_args(args)
    if len(args) != 1:
        perror("Incorrect number of arguments.")
    session = _Session()
    session.cache = options.cache
    h = Http(session.cache)
    session.context = Context(http=h, service=args[0])
    if options.credentials:
        apply_credentials(session, options.credentials)
        session.credentials = options.credentials
    s = Service(session.context)
    headers, body = s.get()
    if headers.status == 200:
        # Only persist the session if the document really is an APP service
        # document (root element {http://www.w3.org/2007/app}service).
        if s.etree() and "{http://www.w3.org/2007/app}service" == s.etree().tag:
            _save_session(session, options)
        else:
            perror("The document at %s in not a valid service document" % args[0])
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)
# Each command owns a deep copy of the prototype parser plus the
# shared options it actually supports.
service.parser = copy.deepcopy(baseparser)
service.parser.add_option(CREDENTIALS)
service.parser.add_option(CACHE)
service.parser.add_option(RAW)
service.parser.add_option(INCLUDE)
def lc(args):
    """lc: List all the collections
    usage: lc

    List the collections enumerated in a service document.
    """
    (options, args) = lc.parser.parse_args(args)
    if len(args) != 0:
        perror("Incorrect number of arguments.")
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_service(service)
    # Print "index  title" for every app:collection in the service document;
    # the index is what the 'collection' command accepts.
    for index, c in enumerate(service.etree().findall(".//{%s}collection" % APP)):
        etitle = c.find("{%s}title" % ATOM)
        if None != etitle:
            title = etitle.text
        else:
            title = "untitled"
        print index, " ", title
lc.parser = copy.deepcopy(baseparser)
def collection(args):
    """collection: Select a collection from a service document to work with.
    usage: collection INDEX

    Select the collection by index number from all the
    collections listed in the service document.
    """
    this = collection
    (options, args) = this.parser.parse_args(args)
    if len(args) != 1:
        perror("Incorrect number of arguments.")
    try:
        # A numeric argument selects a collection by index from the current
        # session's service document; anything non-numeric raises ValueError
        # here and is treated as a collection URI in the handler below.
        int(args[0])
        session = _restore_session(options)
        if options.credentials:
            apply_credentials(session, options.credentials)
            session.credentials = options.credentials
        if options.cache:
            session.cache = options.cache
        service, c, entry = session.context.restore(Service, Collection, Entry)
        require_service(service)
        index = int(args[0])
        collections = list(service.iter())
        if index < 0 or index >= len(collections):
            # BUG FIX: the original used "% len(collections)-1", which applies
            # the % operator before the subtraction and raises TypeError.
            perror("INDEX out of range. Expected a number from 0 to %d" % (len(collections) - 1))
        c = Collection(collections[index])
    except ValueError:
        # Non-numeric argument: start a fresh session from the collection URI.
        session = _Session()
        session.cache = options.cache
        h = Http(session.cache)
        session.context = Context(http=h, collection=args[0])
        if options.credentials:
            apply_credentials(session, options.credentials)
            session.credentials = options.credentials
        c = Collection(session.context)
    headers, body = c.get()
    if headers.status == 200:
        # Persist the session only if the document is an Atom feed.
        if c.etree() and "{http://www.w3.org/2005/Atom}feed" == c.etree().tag:
            session.context = c.context()
            _save_session(session, options)
        else:
            # Also fixes the "in not" typo in the original error message.
            perror("The document at %s is not a valid collection document" % c.context().collection)
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)
collection.parser = copy.deepcopy(baseparser)
collection.parser.add_option(RAW)
collection.parser.add_option(INCLUDE)
collection.parser.add_option(CREDENTIALS)
collection.parser.add_option(CACHE)
def ls(args):
    """ls: List the entries
    usage: ls

    List the entries in a collection.
    """
    (options, args) = ls.parser.parse_args(args)
    if len(args) != 0:
        ls.parser.error("Incorrect number of arguments.")
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    # Show only the first 10 entries unless -l/--all was given
    # (islice(..., None) iterates everything).
    stop = 10
    if options.all:
        stop = None
    for index, e in itertools.islice(enumerate(collection.iter_entry()), stop):
        etitle = e.find("{%s}title" % ATOM)
        if None != etitle:
            title = etitle.text
        else:
            title = "untitled"
        print index, " ", title
ls.parser = copy.deepcopy(baseparser)
ls.parser.add_option("-l", "--all", dest="all", action="store_true", help="Print all the entries, not just a subset.")
def entry(args):
    """entry: Select an entry from a collection to work with.
    usage: entry [INDEX]
    Select the entry by index number from all the
    entries listed in the collection.
    """
    (options, args) = entry.parser.parse_args(args)
    if len(args) != 1:
        entry.parser.error("Incorrect number of arguments.")
    session = _restore_session(options)
    # Command-line credentials/cache override whatever the saved session had.
    if options.credentials:
        session.credentials = options.credentials
        apply_credentials(session, options.credentials)
    if options.cache:
        session.cache = options.cache
    service, collection, e = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    index = int(args[0])
    # Pull just the INDEX-th member element out of the collection feed.
    e= Entry(itertools.islice(collection.iter(), index, index+1).next())
    headers, body = e.get()
    if headers.status == 200:
        # Only record the new context if the resource really is an Atom entry.
        if e.etree() and "{http://www.w3.org/2005/Atom}entry" == e.etree().tag:
            session.context = e.context()
            _save_session(session, options)
        else:
            perror("The document at %s in not a valid entry document" % e.context().entry)
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    response_options(options, headers, body)

entry.parser = copy.deepcopy(baseparser)
entry.parser.add_option(RAW)
entry.parser.add_option(INCLUDE)
entry.parser.add_option(CREDENTIALS)
entry.parser.add_option(CACHE)
def create(args):
    """create: Create a new member in a collection.
    usage: create [FILENAME]
    Create a new member in the collection. The FILENAME
    points to the body to POST to the collection. If FILENAME is not
    supplied then the input is read from stdin. If no
    content-type is given then it is sniffed from the filename.
    """
    (options, args) = create.parser.parse_args(args)
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_collection(collection)
    filename = ''
    contenttype= options.contenttype
    if len(args) == 1:
        filename = args[0]
        if not contenttype:
            # Fixed: guess_type() returns a (type, encoding) tuple; the old
            # code used the whole tuple as the content-type header value.
            contenttype = mimetypes.guess_type(filename)[0]
        f = file(filename, "r")
        body = f.read()
        f.close()
    else:
        body = sys.stdin.read()
    if not contenttype:
        perror("Could not determine the mime-type of the data. Please supply it via --content-type.")
    entry = collection.entry_create(headers={'content-type': contenttype}, body=body)
    if entry:
        session.context = entry
        _save_session(session, options)
    else:
        # Fixed: no HTTP response object exists in this scope -- the old
        # message interpolated an undefined 'headers' and raised NameError.
        perror("Failed to create a new member in the collection.")

create.parser = copy.deepcopy(baseparser)
create.parser.add_option("-c", "--content-type", dest="contenttype", help="Set the mime-type of the content being sent")
def get(args):
    """get: Retrieve the entry and store it locally.
    usage: get [FILENAME]
    Retrieve the entry and store it locally.
    Use the given filename, or 'entry' if none
    is given.
    """
    (options, args) = get.parser.parse_args(args)
    # Destination defaults to 'entry' unless a filename was supplied.
    target = args[0] if len(args) == 1 else 'entry'
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.get()
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))
    else:
        out = file(target, "w")
        out.write(body)
        out.close()
    response_options(options, headers, body)

get.parser = copy.deepcopy(baseparser)
get.parser.add_option(RAW)
get.parser.add_option(INCLUDE)
def getmedia(args):
    """ getmedia: Retrieve the entry and store it locally.
    usage: getmedia [FILENAME]
    Retrieve the associate media entry and store it locally.
    Use the given filename, or 'media.{ext}' if none
    is given, where {ext} is the appropriate extension for
    the media type returned.
    """
    # Fixed: previously parsed with get.parser, so getmedia used the wrong
    # sub-command's option set and error messages.
    (options, args) = getmedia.parser.parse_args(args)
    filename = 'media'
    if len(args) == 1:
        filename = args[0]
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.get_media()
    if headers.status == 200:
        # Media resources are opaque bytes; write in binary mode.
        f = file(filename, "wb")
        f.write(body)
        f.close()
    else:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))

getmedia.parser = copy.deepcopy(baseparser)
def put(args):
    """put: Store the entry back to the server.
    usage: put [FILENAME]
    Store the entry back to the server.
    Use the given filename, or 'entry' if none
    is given.
    """
    (options, args) = put.parser.parse_args(args)
    # Source file defaults to 'entry' unless one was given.
    source = args[0] if len(args) == 1 else 'entry'
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    src = file(source, "r")
    body = src.read()
    src.close()
    headers, body = entry.put(headers={'content-type': 'application/atom+xml;type=entry'}, body=body)
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))

put.parser = copy.deepcopy(baseparser)
def putmedia(args):
    """putmedia: Store the entry back to the server.
    usage: putmedia [FILENAME]
    Store the media back to the server.
    Use the given filename, or 'media' if none
    is given.
    """
    # Fixed: previously parsed with put.parser, so putmedia used the wrong
    # sub-command's option set and error messages.
    (options, args) = putmedia.parser.parse_args(args)
    filename = 'media'
    if len(args) == 1:
        filename = args[0]
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    # Media is opaque bytes; read in binary mode.
    f = file(filename, "rb")
    body = f.read()
    f.close()
    headers, body = entry.put_media(body=body)
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))

putmedia.parser = copy.deepcopy(baseparser)
def delete(args):
    """delete: Delete the entry from the collection.
    usage: delete
    Delete the entry from the collection.
    """
    (options, args) = delete.parser.parse_args(args)
    # Fixed: the old code parsed an optional FILENAME argument (and
    # advertised it in the usage text) but never used it; delete operates
    # on the entry currently selected in the session.
    session = _restore_session(options)
    service, collection, entry = session.context.restore(Service, Collection, Entry)
    require_entry(entry)
    headers, body = entry.delete()
    if headers.status != 200:
        perror("Did not receive a good HTTP status code, expected 200, but received %d %s" % (headers.status, headers.reason))

delete.parser = copy.deepcopy(baseparser)
# Meta commands -------------------------------------------
def status(args):
    """status (st): Print the status of the session.
    usage: status
    Print the status of the session, including the
    URIs of the service document, the collection
    and the current entry.
    """
    (options, args) = status.parser.parse_args(args)
    session = _restore_session(options)
    # Report each piece of session state, or "(none)" for unset values.
    print "Service Document : ", session.context.service or "(none)"
    print "Collection : ", session.context.collection or "(none)"
    print "Entry : ", session.context.entry or "(none)"
    print "HTTP Cache : ", session.cache or "(none)"
    fname = _find_session_file(options)
    print "Session File : ", fname or "(none)"
    print "Credentials File : ", session.credentials or "(none)"

status.parser = copy.deepcopy(baseparser)
# Create an alias of 'st' for 'status'
st = status
def help(args):
"""usage: apexer <subcommand> [options] [args]
Apexer command-line client, version %(__version__)s.
Type 'apexer help <subcommand>' for help on a specific subcommand.
Most subcommands take URI and/or index arguments.
Session State
Session state is search for in the following places
in the following order:
- command line argument
- $APEXER_CMD_LINE_SESSION
- $HOME/.apexer/session
Available subcommands:
"""
try:
name = args[0]
except:
name = "help"
if name in members:
print members[name].__doc__ % globals()
options = getattr(members[name], "parser", None)
if options and name != "help":
options.print_help()
if name == "help":
names = list(members.iterkeys())
names.sort()
print " ",
print "\n ".join(names)
else:
perror("'%s' is not a valid command.\nType 'apexer help' for usage." % cmd)
help.parser = copy.deepcopy(baseparser)
# Sub-command registry: every public module-level callable carrying a
# .parser attribute is exposed as a CLI sub-command.
members = dict([(k,v) for (k,v) in copy.copy(globals()).iteritems() if not k.startswith("_") and callable(v) and hasattr(v, "parser")])

if __name__ == "__main__":
    # Dispatch: first argv token selects the sub-command, the rest are its
    # arguments; missing/unknown commands fall back to help/perror.
    try:
        cmd = sys.argv[1]
    except:
        cmd = "help"
    args = sys.argv[2:]
    if cmd not in members:
        perror("'%s' is not a valid command.\nType 'apexer help' for usage." % cmd)
    else:
        members[cmd](args)
| Python |
# py2exe build script: bundles appeditor.py into a Windows console
# executable.  Run as: python setup.py py2exe
from distutils.core import setup
import py2exe
setup(console=["appeditor.py"])
from subprocess import Popen, PIPE
import unittest
from appclienttest import msg
class Test(unittest.TestCase):
    """Runs appclienttest.py against recorded ("playback") HTTP sessions and
    asserts on the validator messages it logs."""
    def _parse(self, output):
        # Each output line starts with "Code:MessageName"; keep
        # (code, message-class) pairs and tally occurrences per code.
        parsed = [tuple(l.split(" ", 1)[0].split(":")) for l in output.splitlines()]
        parsed = [(code, getattr(msg, message)) for code, message in parsed]
        msg_count = {}
        for code, message in parsed:
            msg_count[code] = msg_count.get(code, 0) + 1
        return (parsed, msg_count)
    def testNonWellFormed(self):
        """
        Non-WellFormed output should be caught
        and a log message recording the malformed
        XML should be produced.
        """
        output = Popen(["python", "./validator/appclienttest.py", "--quiet", "--playback=./validator/rawtestdata/invalid-service/"], stdout=PIPE).communicate()[0]
        parsed, msg_count = self._parse(output)
        self.assertTrue(("Error", msg.WELL_FORMED_XML) in parsed)
        self.assertEqual(1, msg_count["Begin_Test"])
        self.assertEqual(0, msg_count.get("Warning", 0))
    def testNoLocation(self):
        """
        Test a complete path through the flow. The following errors
        have been injected into a good run:
        The service document does not return Etag or Last-Modified headers.
        On Entry creation neither a Location or Content-Location: header are returned.
        """
        output = Popen(["python", "./validator/appclienttest.py", "--quiet", "--playback=./validator/rawtestdata/nolocation/", "http://example.org/service"], stdout=PIPE).communicate()[0]
        parsed, msg_count = self._parse(output)
        self.assertTrue(("Warning", msg.HTTP_ETAG) in parsed)
        self.assertTrue(("Warning", msg.HTTP_LAST_MODIFIED) in parsed)
        self.assertTrue(("Error", msg.CREATE_RETURNS_LOCATION) in parsed)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| Python |
"""$Id: uri.py 988 2008-03-12 18:22:48Z sa3ruby $"""
"""
Code to test URI references for validity, and give their normalized
form, according to RFC 3986.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004, 2007 Joseph Walton"
from urlparse import urljoin
from urllib import quote, quote_plus, unquote, unquote_plus
from unicodedata import normalize
from codecs import lookup
import re
(enc, dec) = lookup('UTF-8')[:2]
SUBDELIMS='!$&\'()*+,;='
PCHAR='-._~' + SUBDELIMS + ':@'
GENDELIMS=':/?#[]@'
RESERVED=GENDELIMS + SUBDELIMS
# Default TCP port for each scheme; a port equal to its scheme's default
# is dropped during authority normalisation.
default_port = {
    'ftp': 21,
    'telnet': 23,
    'http': 80,
    'gopher': 70,
    'news': 119,
    'nntp': 119,
    'prospero': 191,
    'https': 443,
    'snews': 563,
    'snntp': 563,
}
class BadUri(Exception):
    """Raised when a URI reference cannot be parsed or normalised."""
    pass
def _n(s):
    # Unicode-normalise (NFC) a UTF-8 byte string; returns UTF-8 bytes.
    return enc(normalize('NFC', dec(s)[0]))[0]
# One URI "octet": either a single non-'%' character or a %XX escape
# with exactly two hex digits.
octetRe = re.compile(r'([^%]|%[a-fA-F0-9]{2})')

def asOctets(s):
    """Yield (text, octet) pairs for each encoded octet of *s*.

    ``text`` is the source spelling (percent-escapes upper-cased) and
    ``octet`` is the decoded character.  Raises BadUri on a malformed
    percent-escape.
    """
    rest = s
    while rest:
        m = octetRe.match(rest)
        if m is None:
            raise BadUri()
        tok = m.group(1)
        if tok.startswith('%'):
            yield (tok.upper(), chr(int(tok[1:], 16)))
        else:
            yield (tok, tok)
        rest = rest[m.end(1):]
def _qnu(s,safe=''):
    """Normalise percent-encoding: decode unnecessary escapes, upper-case
    the remaining ones, NFC-normalise, then re-quote.  Reserved characters
    listed in *safe* are passed through in their source spelling."""
    if s == None:
        return None
    # unquote{,_plus} leave high-bit octets unconverted in Unicode strings
    # This conversion will, correctly, cause UnicodeEncodeError if there are
    # non-ASCII characters present in the string
    s = str(s)
    res = ''
    b = ''
    for (c,x) in asOctets(s):
        if x in RESERVED and x in safe:
            # Flush the accumulated run through unquote/normalise/quote,
            # then emit the reserved character verbatim.
            res += quote(_n(unquote(b)), safe)
            b = ''
            res += c
        else:
            b += x
    res += quote(_n(unquote(b)), safe)
    return res
# Match an optional port specification
portRe = re.compile(':(\d*)$')
def _normPort(netloc,defPort):
nl = netloc.lower()
p = defPort
m = portRe.search(nl)
if m:
if m.group(1) != '':
p = int(m.group(1))
nl = nl[:m.start(1) - 1]
if nl and nl[-1] == '.' and nl.rfind('.', 0, -2) >= 0:
nl = nl[:-1]
# Square brackets are allowed, and only allowed, delimiting IPv6 addresses
if nl.startswith('[') != nl.endswith(']'):
raise BadUri()
if p != defPort:
nl = nl + ':' + str(p)
return nl
def _normAuth(auth, port):
    """Normalise an authority: drop an empty ':' userinfo and normalise
    the host/port part with _normPort."""
    userinfo, sep, hostpart = auth.rpartition('@')
    if sep:
        # A bare ':' means empty user and empty password; drop it.
        if userinfo == ':':
            userinfo = ''
    else:
        userinfo = None
        hostpart = auth
    host = _normPort(hostpart, port)
    if userinfo:
        return userinfo + '@' + host
    return host
def _normPath(p):
    """Normalise a path: resolve '.' and '..' segments and percent-normalise
    each remaining segment."""
    l = p.split(u'/')
    i = 0
    # A path whose first segment is non-empty has no leading '/'; skip
    # dot-segment resolution entirely by starting i at the end.
    if l and l[0]:
        i = len(l)
    while i < len(l):
        c = l[i]
        if (c == '.'):
            # '.' disappears; at the end it leaves an empty final segment.
            if i < len(l) - 1:
                del l[i]
            else:
                l[i] = ''
        elif (c == '..'):
            if i < len(l) - 1:
                del l[i]
            else:
                l[i] = ''
            # Also drop the preceding segment, unless we are at the root.
            if i > 1 or (i > 0 and l[0]):
                i -= 1
                del l[i]
        else:
            i += 1
    # A completely collapsed absolute path is just '/'.
    if l == ['']:
        l = ['', '']
    return u'/'.join([_qnu(c, PCHAR) for c in l])
# From RFC 2396bis, with added end-of-string marker
uriRe = re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$')

def _canonical(s):
    """Return the canonical form of URI reference *s* per RFC 3986, or None
    for a relative reference that cannot be normalised.  Raises BadUri when
    *s* does not parse at all."""
    m = uriRe.match(s)
    if not(m):
        raise BadUri()
    # Check for a relative URI
    if m.group(2) is None:
        scheme = None
    else:
        scheme = m.group(2).lower()
    if m.group(4) is None:
        # No authority component present.
        authority = None
        p = m.group(5)
        # Don't try to normalise URI references with relative paths
        if scheme is None and not p.startswith('/'):
            return None
        if scheme == 'mailto':
            # XXX From RFC 2368, mailto equivalence needs to be subtler than this
            i = p.find('@')
            if i > 0:
                j = p.find('?')
                if j < 0:
                    j = len(p)
                # NOTE(review): p[i + 1:] runs to end-of-string and p[j:] is
                # appended as well -- looks like it should be p[i + 1:j];
                # confirm against upstream before changing.
                p = _qnu(p[:i]) + '@' + _qnu(p[i + 1:].lower()) + _qnu(p[j:])
            path = p
        else:
            if scheme is None or p.startswith('/'):
                path = _normPath(p)
            else:
                path = _qnu(p, PCHAR + '/')
    else:
        a = m.group(4)
        p = m.group(5)
        # Normalise the authority, dropping the scheme's default port.
        if scheme in default_port:
            a = _normAuth(a, default_port[scheme])
        else:
            a = _normAuth(a, None)
        authority = a
        path = _normPath(p)
    query = _qnu(m.group(7), PCHAR + "/?")
    fragment = _qnu(m.group(9), PCHAR + "/?")
    # Reassemble the normalised components.
    s = u''
    if scheme != None:
        s += scheme + ':'
    if authority != None:
        s += '//' + authority
    s += path
    if query != None:
        s += '?' + query
    if fragment != None:
        s += '#' + fragment
    return s
class Uri:
    """A Uri wraps a string and performs equality testing according to the
    rules for URI equivalence."""
    def __init__(self,s):
        self.s = s
        # Canonical form, used for all equivalence comparisons.
        self.n = _canonical(s)
    def __str__(self):
        return self.s
    def __repr__(self):
        return repr(self.s)
    def __eq__(self, a):
        return self.n == a.n
    def __ne__(self, a):
        # Fixed: Python 2 does not derive != from __eq__, so without this
        # 'u1 != u2' compared object identity and could disagree with ==.
        return not self.__eq__(a)
    def __hash__(self):
        # Keep hashing consistent with equality: equivalent URIs hash equal.
        return hash(self.n)
def canonicalForm(u):
    """Give the canonical form for a URI, so char-by-char comparisons become
    valid tests for equivalence."""
    try:
        return _canonical(u)
    except (BadUri, UnicodeError):
        # Unparseable or badly-encoded references have no canonical form.
        return None
| Python |
"""$Id: compatibility.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from logging import *
def _must(event):
    # MUST-level violations are logged as Error events.
    return isinstance(event, Error)
def _should(event):
    # SHOULD-level violations are logged as Warning events.
    return isinstance(event, Warning)
def _may(event):
    # MAY-level violations are logged as Info events.
    return isinstance(event, Info)
def A(events):
    """Compatibility level A: MUST violations only."""
    return [e for e in events if _must(e)]
def AA(events):
    """Compatibility level AA: MUST and SHOULD violations."""
    return [e for e in events if _must(e) or _should(e)]
def AAA(events):
    """Compatibility level AAA: MUST, SHOULD and MAY violations."""
    return [e for e in events if _must(e) or _should(e) or _may(e)]
def AAAA(events):
    """Compatibility level AAAA: every event, unfiltered."""
    return events
def analyze(events, rawdata):
    """Guess whether *rawdata* is an HTML document rather than a feed.

    Returns 'html' when the leading text looks like HTML, or when the
    validator reported an unknown (x)html root element; otherwise None.
    """
    head = rawdata[0:512].strip().upper()
    if head.startswith(('<HTML', '<!DOCTYPE HTML')):
        return 'html'
    for ev in events:
        if not isinstance(ev, UndefinedElement):
            continue
        if ev.params['parent'] != 'root':
            continue
        if ev.params['element'].lower() in ('html', 'xhtml:html'):
            return "html"
    return None
| Python |
"""$Id: root.py 1049 2009-05-06 02:00:03Z rothfuss $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1049 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
rss11_namespace='http://purl.org/net/rss1.1#'
purl1_namespace='http://purl.org/rss/1.0/'
soap_namespace='http://feeds.archive.org/validator/'
pie_namespace='http://purl.org/atom/ns#'
atom_namespace='http://www.w3.org/2005/Atom'
opensearch_namespace='http://a9.com/-/spec/opensearch/1.1/'
xrds_namespace='xri://$xrds'
kml20_namespace='http://earth.google.com/kml/2.0'
kml21_namespace='http://earth.google.com/kml/2.1'
kml22_namespace='http://www.opengis.net/kml/2.2'
#
# Main document.
# Supports rss, rdf, pie, kml, and ffkar
#
class root(validatorBase):
    """Dispatcher for the document (root) element.

    Identifies the feed format from the root element's name and namespace
    (rss, RDF, Atom feed/entry, RSS 1.1 Channel, KML, OPML, OpenSearch,
    XRDS, APP service/categories, SOAP-wrapped requests) and hands off to
    the matching validator class.
    """
    def __init__(self, parent, base):
        validatorBase.__init__(self)
        self.parent = parent
        self.dispatcher = parent
        self.name = "root"
        self.xmlBase = base
        self.xmlLang = None
    def startElementNS(self, name, qname, attrs):
        # <rss> must be in no namespace at all.
        if name=='rss':
            if qname:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
        # Atom <feed>/<entry>: verify the namespace, discourage prefixes.
        if name=='feed' or name=='entry':
            if self.namespace.has_key('atom'):
                from logging import AvoidNamespacePrefix
                self.log(AvoidNamespacePrefix({'prefix':'atom'}))
            if self.namespace.has_key('xhtml'):
                from logging import AvoidNamespacePrefix
                self.log(AvoidNamespacePrefix({'prefix':'xhtml'}))
            if qname==pie_namespace:
                from logging import ObsoleteNamespace
                self.log(ObsoleteNamespace({"element":"feed"}))
                self.dispatcher.defaultNamespaces.append(pie_namespace)
                from logging import TYPE_ATOM
                self.setFeedType(TYPE_ATOM)
            elif not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
            else:
                if name=='feed':
                    from logging import TYPE_ATOM
                    self.setFeedType(TYPE_ATOM)
                else:
                    from logging import TYPE_ATOM_ENTRY
                    self.setFeedType(TYPE_ATOM_ENTRY)
                self.dispatcher.defaultNamespaces.append(atom_namespace)
                # Fixed: replaced the obsolete '<>' operator (removed in
                # Python 3) with '!=', matching the rest of this file.
                if qname != atom_namespace:
                    from logging import InvalidNamespace
                    self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
                    self.dispatcher.defaultNamespaces.append(qname)
        # RSS 1.1 root element.
        if name=='Channel':
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
            elif qname != rss11_namespace :
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
            else:
                self.dispatcher.defaultNamespaces.append(qname)
                from logging import TYPE_RSS1
                self.setFeedType(TYPE_RSS1)
        # KML: accept 2.0/2.1/2.2 namespaces, defaulting sensibly.
        if name=='kml':
            from logging import TYPE_KML20, TYPE_KML21, TYPE_KML22
            self.dispatcher.defaultNamespaces.append(qname)
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = kml20_namespace
                feedType = TYPE_KML20
            elif qname == kml20_namespace:
                feedType = TYPE_KML20
            elif qname == kml21_namespace:
                feedType = TYPE_KML21
            elif qname == kml22_namespace:
                feedType = TYPE_KML22
            elif qname != kml20_namespace and qname != kml21_namespace and qname != kml22_namespace:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                qname = kml22_namespace
                feedType = TYPE_KML22
            self.setFeedType(feedType)
        if name=='OpenSearchDescription':
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = opensearch_namespace
            elif qname != opensearch_namespace:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
                qname = opensearch_namespace
        if name=='XRDS':
            from logging import TYPE_XRD
            self.setFeedType(TYPE_XRD)
            if not qname:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":"root", "element":name}))
                qname = xrds_namespace
            elif qname != xrds_namespace:
                from logging import InvalidNamespace
                self.log(InvalidNamespace({"element":name, "namespace":qname}))
                self.dispatcher.defaultNamespaces.append(qname)
                qname = xrds_namespace
        validatorBase.startElementNS(self, name, qname, attrs)
    def unknown_starttag(self, name, qname, attrs):
        # Unrecognised root element: distinguish obsolete Atom drafts from
        # namespace mistakes and genuinely unknown elements, then consume.
        from logging import ObsoleteNamespace,InvalidNamespace,UndefinedElement
        if qname in ['http://example.com/newformat#','http://purl.org/atom/ns#']:
            self.log(ObsoleteNamespace({"element":name, "namespace":qname}))
        elif name=='feed':
            self.log(InvalidNamespace({"element":name, "namespace":qname}))
        else:
            self.log(UndefinedElement({"parent":"root", "element":name}))
        from validators import any
        return any(self, name, qname, attrs)
    def do_rss(self):
        from rss import rss
        return rss()
    def do_feed(self):
        from feed import feed
        # Obsolete Atom 0.3 feeds are swallowed, not validated.
        if pie_namespace in self.dispatcher.defaultNamespaces:
            from validators import eater
            return eater()
        return feed()
    def do_entry(self):
        from entry import entry
        return entry()
    def do_app_categories(self):
        from logging import TYPE_APP_CATEGORIES
        self.setFeedType(TYPE_APP_CATEGORIES)
        from categories import categories
        return categories()
    def do_app_service(self):
        from logging import TYPE_APP_SERVICE
        self.setFeedType(TYPE_APP_SERVICE)
        from service import service
        return service()
    def do_kml(self):
        from kml import kml
        return kml()
    def do_opml(self):
        from opml import opml
        return opml()
    def do_outlineDocument(self):
        # Pre-1.0 OPML root element; validate as OPML but flag it.
        from logging import ObsoleteVersion
        self.log(ObsoleteVersion({"element":"outlineDocument"}))
        from opml import opml
        return opml()
    def do_opensearch_OpenSearchDescription(self):
        import opensearch
        self.dispatcher.defaultNamespaces.append(opensearch_namespace)
        from logging import TYPE_OPENSEARCH
        self.setFeedType(TYPE_OPENSEARCH)
        return opensearch.OpenSearchDescription()
    def do_xrds_XRDS(self):
        from xrd import xrds
        return xrds()
    def do_rdf_RDF(self):
        from rdf import rdf
        self.dispatcher.defaultNamespaces.append(purl1_namespace)
        return rdf()
    def do_Channel(self):
        from channel import rss10Channel
        return rss10Channel()
    def do_soap_Envelope(self):
        # SOAP wrappers: recurse with a fresh root dispatcher.
        return root(self, self.xmlBase)
    def do_soap_Body(self):
        self.dispatcher.defaultNamespaces.append(soap_namespace)
        return root(self, self.xmlBase)
    def do_request(self):
        return root(self, self.xmlBase)
    def do_xhtml_html(self):
        # An XHTML page is not a feed; log and consume it.
        from logging import UndefinedElement
        self.log(UndefinedElement({"parent":"root", "element":"xhtml:html"}))
        from validators import eater
        return eater()
| Python |
"""$Id: item.py 1019 2008-06-03 05:13:16Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1019 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_item
from extension import *
#
# item element.
#
class item(validatorBase, extension_item, itunes_item):
    """Validates an RSS <item>, including common extension elements."""
    def validate(self):
        # RSS requires at least one of title or description.
        if (not "title" in self.children) and (not "description" in self.children):
            self.log(ItemMustContainTitleOrDescription({}))
        if not "guid" in self.children:
            if self.getFeedType() == TYPE_RSS2:
                # Walk up to the <rss> root to read the declared version.
                rss = self.parent.parent
                while rss and rss.name!='rss': rss=rss.parent
                # NOTE(review): assumes an <rss> ancestor exists whenever the
                # feed type is RSS2; rss would be None otherwise -- confirm.
                if rss.version.startswith("2."):
                    self.log(MissingGuid({"parent":self.name, "element":"guid"}))
        if "slash_comments" in self.children:
            if "lastBuildDate" not in self.parent.children and self.getFeedType()==TYPE_RSS2:
                self.log(SlashDate({}))
        if self.itunes: itunes_item.validate(self)
    def do_link(self):
        return rfc2396_full(), noduplicates()
    def do_title(self):
        return nonhtml(), nonblank(), noduplicates()
    def do_description(self):
        # RSS 0.91 descriptions must be plain text; later versions allow HTML.
        if self.getFeedType() == TYPE_RSS2:
            rss = self.parent.parent
            while rss and rss.name!='rss': rss=rss.parent
            if rss.version == "0.91":
                return nonhtml(), noduplicates()
        return safeHtml(), noduplicates()
    def do_content_encoded(self):
        if self.getFeedType() == TYPE_RSS2:
            if not 'description' in self.children:
                self.log(NeedDescriptionBeforeContent({}))
        return safeHtml(), noduplicates()
    def do_content_items(self):
        return ContentItems(), noduplicates()
    def do_xhtml_body(self):
        # xhtml:body duplicates description semantics in RSS 2.0.
        if self.getFeedType() == TYPE_RSS2:
            self.log(DuplicateDescriptionSemantics({"element":"xhtml:body"}))
        return htmlEater().setElement('xhtml:body',{},self)
    def do_atom_id(self):
        if "guid" in self.children:
            self.log(DuplicateItemSemantics({"core":"guid", "ext":"atom:id"}))
        return rfc2396_full(), noduplicates(), unique('atom_id',self.parent)
    def do_atom_link(self):
        from link import link
        return link()
    def do_atom_title(self):
        from content import content
        return content(), noduplicates()
    def do_atom_summary(self):
        from content import textConstruct
        return textConstruct(), noduplicates()
    def do_atom_author(self):
        from author import author
        return author(), noduplicates()
    def do_atom_contributor(self):
        from author import author
        return author()
    def do_atom_content(self):
        from content import content
        return content()
    def do_atom_published(self):
        if "published" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"atom:published"}))
        return rfc3339(), noduplicates()
    def do_atom_updated(self):
        return rfc3339(), noduplicates()
    def do_dc_creator(self):
        # Only flag top-level (non-namespaced-child) duplicates.
        if self.child.find('.')<0 and "author" in self.children:
            self.log(DuplicateItemSemantics({"core":"author", "ext":"dc:creator"}))
        return text() # duplicates allowed
    def do_dc_subject(self):
        if self.child.find('.')<0 and "category" in self.children:
            self.log(DuplicateItemSemantics({"core":"category", "ext":"dc:subject"}))
        return text() # duplicates allowed
    def do_dc_date(self):
        if self.child.find('.')<0 and "pubDate" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"dc:date"}))
        return w3cdtf()
    def do_cc_license(self):
        if "creativeCommons_license" in self.children:
            self.log(DuplicateItemSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
        return eater()
    def do_creativeCommons_license(self):
        if "cc_license" in self.children:
            self.log(DuplicateItemSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
        return rfc2396_full()
class rss20Item(item, extension_rss20_item):
    """RSS 2.0 <item>: adds the 2.0-only child elements and cross-checks
    against their Dublin Core / Atom equivalents."""
    def do_comments(self):
        return rfc2396_full(), noduplicates()
    def do_enclosure(self):
        return enclosure(), noduplicates(DuplicateEnclosure)
    def do_pubDate(self):
        if "dc_date" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"dc:date"}))
        if "atom_published" in self.children:
            self.log(DuplicateItemSemantics({"core":"pubDate", "ext":"atom:published"}))
        return rfc822(), noduplicates()
    def do_author(self):
        if "dc_creator" in self.children:
            self.log(DuplicateItemSemantics({"core":"author", "ext":"dc:creator"}))
        return email_with_name(), noduplicates()
    def do_category(self):
        if "dc_subject" in self.children:
            self.log(DuplicateItemSemantics({"core":"category", "ext":"dc:subject"}))
        return category(), nonblank()
    def do_guid(self):
        if "atom_id" in self.children:
            self.log(DuplicateItemSemantics({"core":"guid", "ext":"atom:id"}))
        # guids must also be unique across the whole channel.
        return guid(), noduplicates(), unique('guid',self.parent)
    def do_source(self):
        if "dc_source" in self.children:
            self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
        return source(), noduplicates()
class rss10Item(item, extension_rss10_item):
    """RSS 1.0 <item>: link and title are mandatory, and each item's
    rdf:about must be unique within the document."""
    def validate(self):
        if not "link" in self.children:
            self.log(MissingElement({"parent":self.name, "element":"link"}))
        if not "title" in self.children:
            self.log(MissingElement({"parent":self.name, "element":"title"}))
    def getExpectedAttrNames(self):
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
    def do_rdfs_label(self):
        return text()
    def do_rdfs_comment(self):
        return text()
    def prevalidate(self):
        if self.attrs.has_key((rdfNS,"about")):
            about = self.attrs[(rdfNS,"about")]
            # Track every rdf:about seen so far on the dispatcher so
            # duplicates can be reported document-wide.
            if not "abouts" in self.dispatcher.__dict__:
                self.dispatcher.__dict__["abouts"] = []
            if about in self.dispatcher.__dict__["abouts"]:
                self.log(DuplicateValue({"parent":self.name, "element":"rdf:about", "value":about}))
            else:
                self.dispatcher.__dict__["abouts"].append(about)
#
# items element.
#
class items(validatorBase):
    """Validates <items>: rdf:Seq in RSS 1.0, bare <item> in RSS 1.1."""
    from root import rss11_namespace as rss11_ns
    def getExpectedAttrNames(self):
        return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
    def do_item(self):
        # Bare <item> children are only defined by RSS 1.1.
        if self.rss11_ns not in self.dispatcher.defaultNamespaces:
            self.log(UndefinedElement({"element":"item","parent":"items"}))
        return rss10Item()
    def do_rdf_Seq(self):
        # rdf:Seq is the RSS 1.0 form; RSS 1.1 dropped it.
        if self.rss11_ns in self.dispatcher.defaultNamespaces:
            self.log(UndefinedElement({"element":"rdf:Seq","parent":"items"}))
        return rdfSeq()
class rdfSeq(validatorBase):
    """An rdf:Seq inside <items>; contains only rdf:li children."""
    def do_rdf_li(self):
        return rdfLi()
class rdfLi(validatorBase):
    """An rdf:li entry; accepts a resource attribute, namespaced or not."""
    def getExpectedAttrNames(self):
        return [(None,u'resource'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
class category(nonhtml):
    """RSS <category>: plain text with an optional domain attribute."""
    def getExpectedAttrNames(self):
        return [(None, u'domain')]
class source(nonhtml):
    """RSS <source>: text content plus a required url attribute."""
    def getExpectedAttrNames(self):
        return [(None, u'url')]
    def prevalidate(self):
        # url is mandatory and must be a full RFC 2396 URI.
        self.validate_required_attribute((None,'url'), rfc2396_full)
        return text.prevalidate(self)
class enclosure(validatorBase):
    """Validates <enclosure url=... length=... type=...> on an RSS item."""
    from validators import mime_re
    def getExpectedAttrNames(self):
        return [(None, u'url'), (None, u'length'), (None, u'type')]
    def prevalidate(self):
        # length: required non-negative integer; "0" is the documented way
        # to say "size unknown" (so -1 gets its own hint message).
        try:
            if int(self.attrs.getValue((None, 'length'))) < 0:
                if int(self.attrs.getValue((None, 'length'))) == -1:
                    self.log(UseZeroForUnknown({"parent":self.name, "element":'length'}))
                else:
                    self.log(InvalidNonNegativeInteger({"parent":self.name, "element":'length'}))
            else:
                self.log(ValidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        except KeyError:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        except ValueError:
            self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'length'}))
        # type: required, must look like a MIME media type.
        try:
            if not self.mime_re.match(self.attrs.getValue((None, 'type'))):
                self.log(InvalidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
            else:
                self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
        except KeyError:
            self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
        # url: required http URL; also hand it to the parent item so other
        # checks (e.g. itunes) can see the enclosure.
        self.validate_required_attribute((None,'url'), httpURL)
        if self.attrs.has_key((None,u"url")):
            if hasattr(self.parent,'setEnclosure'):
                self.parent.setEnclosure(self.attrs.getValue((None, 'url')))
        return validatorBase.prevalidate(self)
class guid(rfc2396_full, noduplicates):
    """Validates <guid>: a permalink URI by default; isPermaLink='false'
    relaxes that to any sufficiently unique string."""
    def getExpectedAttrNames(self):
        return [(None, u'isPermaLink')]
    def validate(self):
        # Absent isPermaLink defaults to true.
        isPermalink = 1
        try:
            isPermalinkStr = self.attrs.getValue((None, 'isPermaLink'))
            if isPermalinkStr not in ('true', 'false'):
                self.log(InvalidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
            else:
                self.log(ValidBooleanAttribute({"parent":self.parent.name, "element":self.name, "attr":"isPermaLink"}))
            isPermalink = (isPermalinkStr == 'true')
        except KeyError:
            pass
        if isPermalink:
            if not(rfc2396.validate(self, InvalidHttpGUID, ValidHttpGUID)):
                return 0
            else:
                lu = self.value.lower()
                # tag: and urn:uuid: are valid URIs but not dereferenceable,
                # so they cannot serve as permalinks.
                if lu.startswith("tag:") or lu.startswith("urn:uuid:"):
                    self.log(InvalidPermalink({"parent":self.parent.name, "element":self.name}))
                    return 0
                else:
                    return 1
        elif len(self.value)<9 and self.value.isdigit():
            # Short, all-digit guids are likely to collide across feeds.
            self.log(NotSufficientlyUnique({"parent":self.parent.name, "element":self.name, "value":self.value}))
            return noduplicates.validate(self)
        else:
            self.log(ValidHttpGUID({"parent":self.parent.name, "element":self.name}))
            return noduplicates.validate(self)
class ContentItems(validatorBase):
    """mod_content <content:items>: wraps a single rdf:Bag."""
    def do_rdf_Bag(self):
        return ContentBag(), noduplicates()
class ContentBag(validatorBase):
    """The rdf:Bag inside content:items; holds rdf:li children."""
    def do_rdf_li(self):
        return ContentLi()
class ContentLi(validatorBase):
    """An rdf:li wrapping one content:item."""
    def do_content_item(self):
        return ContentItem()
class ContentItem(validatorBase):
    """A content:item: format/encoding resource URIs plus an rdf:value."""
    def do_content_format(self):
        return rdfResourceURI(), noduplicates()
    def do_content_encoding(self):
        return rdfResourceURI(), noduplicates()
    def do_rdf_value(self):
        return text(), noduplicates()
| Python |
from base import validatorBase
from category import category
from validators import yesno
from logging import ConflictingCatAttr, ConflictingCatChildren
class categories(validatorBase):
    """Validates an APP <app:categories> element/document."""
    def getExpectedAttrNames(self):
        return [(None,u'scheme'),(None,u'fixed'),(None,u'href')]
    def prevalidate(self):
        self.validate_optional_attribute((None,'fixed'), yesno)
        # An out-of-line categories document (href) must not also carry
        # the inline-only attributes.
        if self.attrs.has_key((None,'href')):
            if self.attrs.has_key((None,'fixed')):
                self.log(ConflictingCatAttr({'attr':'fixed'}))
            if self.attrs.has_key((None,'scheme')):
                self.log(ConflictingCatAttr({'attr':'scheme'}))
    def validate(self):
        # ...nor any inline child categories.
        if self.attrs.has_key((None,'href')) and self.children:
            self.log(ConflictingCatChildren({}))
    def do_atom_category(self):
        return category()
| Python |
from base import validatorBase
from validators import *
class xrds(validatorBase):
    """Root xrds:XRDS element of a Yadis/XRDS document."""
    def do_xrd_XRD(self):
        return xrd()
class xrd(validatorBase):
    """xrd:XRD descriptor; contains service endpoint declarations."""
    def do_xrd_Service(self):
        return service()
class service(validatorBase):
    """xrd:Service endpoint: Type/URI children plus an optional priority."""
    def getExpectedAttrNames(self):
        return [(None,'priority')]
    def prevalidate(self):
        # priority, when present, must be a non-negative integer
        self.validate_optional_attribute((None,'priority'), nonNegativeInteger)
    def do_xrd_Type(self):
        return xrdtype()
    def do_xrd_URI(self):
        return xrdtype()
    def do_openid_Delegate(self):
        return delegate()
# Element-content validators: xrd:Type, xrd:URI and openid:Delegate values
# must all be valid IRIs, so each aliases the RFC 3987 validator.
xrdtype = rfc3987
URI = rfc3987
delegate = rfc3987
| Python |
"""$Id: rdf.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import rdfAbout, noduplicates, text, eater
from root import rss11_namespace as rss11_ns
from extension import extension_everywhere
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# rdf:RDF element. The valid children include "channel", "item", "textinput", "image"
#
class rdf(validatorBase,object):
    """Root rdf:RDF element of an RSS 1.0 (or RSS 0.90) feed.

    Valid children include channel, item, textinput and image; RSS 1.0
    flavours of item/textinput/image additionally require rdf:about.
    """

    def do_rss090_channel(self):
        from channel import channel
        # Seeing an unqualified channel marks the whole feed as RSS 0.90.
        self.dispatcher.defaultNamespaces.append(
            "http://my.netscape.com/rdf/simple/0.9/")
        return channel(), noduplicates()

    def do_channel(self):
        from channel import rss10Channel
        return rdfAbout(), rss10Channel(), noduplicates()

    def _is_090(self):
        return "http://my.netscape.com/rdf/simple/0.9/" in \
            self.dispatcher.defaultNamespaces

    def _withAbout(self, validator):
        # RSS 0.90 elements carry no rdf:about; RSS 1.0 ones must.
        return validator if self._is_090() else (validator, rdfAbout())

    def do_item(self):
        from item import rss10Item
        return self._withAbout(rss10Item())

    def do_textinput(self):
        from textInput import textInput
        return self._withAbout(textInput())

    def do_image(self):
        return self._withAbout(rss10Image())

    def do_cc_License(self):
        return eater()

    def do_taxo_topic(self):
        return eater()

    def do_rdf_Description(self):
        return eater()

    def prevalidate(self):
        self.setFeedType(TYPE_RSS1)

    def validate(self):
        # One channel (of either flavour) is mandatory.
        if "channel" not in self.children and \
           "rss090_channel" not in self.children:
            self.log(MissingElement({"parent":self.name.replace('_',':'),
                                     "element":"channel"}))
from validators import rfc2396_full
class rss10Image(validatorBase, extension_everywhere):
    """Validates an RSS 1.0 image element and its children."""

    def validate(self):
        # Each required child is paired with the event logged when absent.
        required = (("title", MissingTitle),
                    ("link", MissingLink),
                    ("url", MissingElement))
        for child, missing in required:
            if child not in self.children:
                self.log(missing({"parent":self.name, "element":child}))

    def do_title(self):
        from image import title
        return title(), noduplicates()

    def do_link(self):
        return rfc2396_full(), noduplicates()

    def do_url(self):
        return rfc2396_full(), noduplicates()

    def do_dc_creator(self):
        return text()

    def do_dc_subject(self):
        # dc:subject may legitimately repeat, so no duplicate check.
        return text()

    def do_dc_date(self):
        from validators import w3cdtf
        return w3cdtf(), noduplicates()

    def do_cc_license(self):
        return eater()
#
# This class performs RSS 1.x specific validations on extensions.
#
class rdfExtension(validatorBase):
    """Validates extension elements in an RSS 1.x feed.

    Enforces RDF rules on the subtree: no RSS 1.1 elements/attributes,
    globally unique rdf:about values, and no mixed content -- unless the
    subtree is (or is inside) rdf:parseType="Literal", in which case its
    content is opaque and everything is accepted verbatim.
    """
    def __init__(self, qname, literal=False):
        validatorBase.__init__(self)
        # qname: namespace URI of this element; literal: True when inside
        # an rdf:parseType="Literal" subtree (checks are then skipped).
        self.qname=qname
        self.literal=literal
    def textOK(self):
        # Character data is always acceptable here; overrides base check.
        pass
    def setElement(self, name, attrs, parent):
        validatorBase.setElement(self, name, attrs, parent)
        # An explicit parseType="Literal" turns off RDF validation below.
        if attrs.has_key((rdfNS,"parseType")):
            if attrs[(rdfNS,"parseType")] == "Literal": self.literal=True
        if not self.literal:
            # ensure no rss11 children
            if self.qname==rss11_ns:
                from logging import UndefinedElement
                self.log(UndefinedElement({"parent":parent.name, "element":name}))
            # no duplicate rdf:abouts
            # The list of seen values is stashed on the dispatcher so it is
            # shared across the whole document.
            if attrs.has_key((rdfNS,"about")):
                about = attrs[(rdfNS,"about")]
                if not "abouts" in self.dispatcher.__dict__:
                    self.dispatcher.__dict__["abouts"] = []
                if about in self.dispatcher.__dict__["abouts"]:
                    self.log(DuplicateValue(
                        {"parent":parent.name, "element":"rdf:about", "value":about}))
                else:
                    self.dispatcher.__dict__["abouts"].append(about)
    def getExpectedAttrNames(self):
        # no rss11 attributes
        if self.literal or not self.attrs: return self.attrs.keys()
        return [(ns,n) for ns,n in self.attrs.keys() if ns!=rss11_ns]
    def validate(self):
        # rdflib 2.0.5 does not catch mixed content errors
        if self.value.strip() and self.children and not self.literal:
            self.log(InvalidRDF({"message":"mixed content"}))
    def startElementNS(self, name, qname, attrs):
        # ensure element is "namespace well formed"
        if name.find(':') != -1:
            from logging import MissingNamespace
            self.log(MissingNamespace({"parent":self.name, "element":name}))
        # ensure all attribute namespaces are properly defined
        for (namespace,attr) in attrs.keys():
            if ':' in attr and not namespace:
                from logging import MissingNamespace
                self.log(MissingNamespace({"parent":self.name, "element":attr}))
        # eat children
        # Recurse with the same literal flag so a Literal subtree stays opaque.
        self.children.append((qname,name))
        self.push(rdfExtension(qname, self.literal), name, attrs)
    def characters(self, string):
        if not self.literal: validatorBase.characters(self, string)
| Python |
"""$Id: iso639codes.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
# Maps ISO 639-1 (two-letter) and ISO 639-2 (three-letter) language codes to
# English language names.  This is a single dict literal, so when a code is
# listed more than once the LAST occurrence wins; several codes deliberately
# appear under alternate names.  (A stray duplicate 'fry' entry with the
# misspelled value 'Wester Frisian' used to override the correct
# 'Frisian, Western' -- it has been removed.)
isoLang = \
    {'aa': 'Afar',
     'ab': 'Abkhazian',
     'ae': 'Avestan',
     'af': 'Afrikaans',
     'ak': 'Akan',
     'am': 'Amharic',
     'an': 'Aragonese',
     'ar': 'Arabic',
     'as': 'Assamese',
     'av': 'Avaric',
     'ay': 'Aymara',
     'az': 'Azerbaijani',
     'ba': 'Bashkir',
     'be': 'Byelorussian',
     'bg': 'Bulgarian',
     'bh': 'Bihari',
     'bi': 'Bislama',
     'bm': 'Bambara',
     'bn': 'Bengali;Bangla',
     'bo': 'Tibetan',
     'br': 'Breton',
     'bs': 'Bosnian',
     'ca': 'Catalan',
     'ce': 'Chechen',
     'ch': 'Chamorro',
     'co': 'Corsican',
     'cr': 'Cree',
     'cs': 'Czech',
     'cu': 'Church Slavic',
     'cv': 'Chuvash',
     'cy': 'Welsh',
     'da': 'Danish',
     'de': 'German',
     'dv': 'Divehi',
     'dz': 'Dzongkha',
     'ee': 'Ewe',
     'el': 'Greek',
     'en': 'English',
     'eo': 'Esperanto',
     'es': 'Spanish',
     'et': 'Estonian',
     'eu': 'Basque',
     'fa': 'Persian (Farsi)',
     'ff': 'Fulah',
     'fi': 'Finnish',
     'fj': 'Fiji',
     'fo': 'Faroese',
     'fr': 'French',
     'fy': 'Frisian, Western',
     'ga': 'Irish',
     'gd': 'Scots Gaelic',
     'gl': 'Galician',
     'gn': 'Guarani',
     'gu': 'Gujarati',
     'gv': 'Manx',
     'ha': 'Hausa',
     'he': 'Hebrew',
     'hi': 'Hindi',
     'ho': 'Hiri Motu',
     'hr': 'Croatian',
     'ht': 'Haitian',
     'hu': 'Hungarian',
     'hy': 'Armenian',
     'hz': 'Herero',
     'ia': 'Interlingua',
     'id': 'Indonesian',
     'ie': 'Interlingue',
     'ig': 'Igbo',
     'ii': 'Sichuan Yi',
     'ik': 'Inupiak',
     'io': 'Ido',
     'is': 'Icelandic',
     'it': 'Italian',
     'iu': 'Inuktitut',
     'ja': 'Japanese',
     'jv': 'Javanese',
     'ka': 'Georgian',
     'kg': 'Kongo',
     'ki': 'Kikuyu; Gikuyu',
     'kj': 'Kuanyama; Kwanyama',
     'kk': 'Kazakh',
     'kl': 'Greenlandic',
     'km': 'Cambodian',
     'kn': 'Kannada',
     'ko': 'Korean',
     'kr': 'Kanuri',
     'ks': 'Kashmiri',
     'ku': 'Kurdish',
     'kv': 'Komi',
     'kw': 'Cornish',
     'ky': 'Kirghiz',
     'la': 'Latin',
     'lb': 'Letzeburgesch; Luxembourgish',
     'lg': 'Ganda',
     'li': 'Limburgan; Limburger, Limburgish',
     'ln': 'Lingala',
     'lo': 'Lao',
     'lt': 'Lithuanian',
     'lu': 'Luba-Katanga',
     'lv': 'Latvian',
     'mg': 'Malagasy',
     'mh': 'Marshallese',
     'mi': 'Maori',
     'mk': 'Macedonian',
     'ml': 'Malayalam',
     'mn': 'Mongolian',
     'mo': 'Moldavian',
     'mr': 'Marathi',
     'ms': 'Malay',
     'mt': 'Maltese',
     'my': 'Burmese',
     'na': 'Nauru',
     'nb': 'Norwegian Bokmal',
     'nd': 'Ndebele, North',
     'ne': 'Nepali',
     'ng': 'Ndonga',
     'nl': 'Dutch',
     'nn': 'Norwegian Nynorsk',
     'no': 'Norwegian',
     'nr': 'Ndebele, South',
     'nv': 'Navaho; Navajo',
     'ny': 'Chewa; Chichewa; Nyanha',
     'oc': 'Occitan',
     'oj': 'Ojibwa',
     'om': 'Afan (Oromo)',
     'or': 'Oriya',
     'os': 'Ossetian; Ossetic',
     'pa': 'Punjabi',
     'pi': 'Pali',
     'pl': 'Polish',
     'ps': 'Pushto',
     'pt': 'Portuguese',
     'qu': 'Quechua',
     'rm': 'Rhaeto-Romance',
     'rn': 'Kurundi',
     'ro': 'Romanian',
     'ru': 'Russian',
     'rw': 'Kinyarwanda',
     'sa': 'Sanskrit',
     'sc': 'Sardinian',
     'sd': 'Sindhi',
     'se': 'Northern Sami',
     'sg': 'Sangho',
     'sh': 'Serbo-Croatian',
     'si': 'Singhalese',
     'sk': 'Slovak',
     'sl': 'Slovenian',
     'sm': 'Samoan',
     'sn': 'Shona',
     'so': 'Somali',
     'sq': 'Albanian',
     'sr': 'Serbian',
     'ss': 'Swati',
     'st': 'Sotho, Southern',
     'su': 'Sundanese',
     'sv': 'Swedish',
     'sw': 'Swahili',
     'ta': 'Tamil',
     'te': 'Telugu',
     'tg': 'Tajik',
     'th': 'Thai',
     'ti': 'Tigrinya',
     'tk': 'Turkmen',
     'tl': 'Tagalog',
     'tn': 'Tswana',
     'to': 'Tonga',
     'tr': 'Turkish',
     'ts': 'Tsonga',
     'tt': 'Tatar',
     'tw': 'Twi',
     'ty': 'Tahitian',
     'ug': 'Uigur',
     'uk': 'Ukrainian',
     'ur': 'Urdu',
     'uz': 'Uzbek',
     've': 'Venda',
     'vi': 'Vietnamese',
     'vo': 'Volapuk',
     'wa': 'Walloon',
     'wo': 'Wolof',
     'xh': 'Xhosa',
     'yi': 'Yiddish',
     'yo': 'Yoruba',
     'za': 'Zhuang',
     'zh': 'Chinese',
     'zu': 'Zulu',
     'x' : 'a user-defined language',
     'xx': 'a user-defined language',
     'abk': 'Abkhazian',
     'ace': 'Achinese',
     'ach': 'Acoli',
     'ada': 'Adangme',
     'ady': 'Adygei',
     'ady': 'Adyghe',
     'aar': 'Afar',
     'afh': 'Afrihili',
     'afr': 'Afrikaans',
     'afa': 'Afro-Asiatic (Other)',
     'ain': 'Ainu',
     'aka': 'Akan',
     'akk': 'Akkadian',
     'alb': 'Albanian',
     'sqi': 'Albanian',
     'gws': 'Alemanic',
     'ale': 'Aleut',
     'alg': 'Algonquian languages',
     'tut': 'Altaic (Other)',
     'amh': 'Amharic',
     'anp': 'Angika',
     'apa': 'Apache languages',
     'ara': 'Arabic',
     'arg': 'Aragonese',
     'arc': 'Aramaic',
     'arp': 'Arapaho',
     'arn': 'Araucanian',
     'arw': 'Arawak',
     'arm': 'Armenian',
     'hye': 'Armenian',
     'rup': 'Aromanian',
     'art': 'Artificial (Other)',
     'asm': 'Assamese',
     'ast': 'Asturian',
     'ath': 'Athapascan languages',
     'aus': 'Australian languages',
     'map': 'Austronesian (Other)',
     'ava': 'Avaric',
     'ave': 'Avestan',
     'awa': 'Awadhi',
     'aym': 'Aymara',
     'aze': 'Azerbaijani',
     'ast': 'Bable',
     'ban': 'Balinese',
     'bat': 'Baltic (Other)',
     'bal': 'Baluchi',
     'bam': 'Bambara',
     'bai': 'Bamileke languages',
     'bad': 'Banda',
     'bnt': 'Bantu (Other)',
     'bas': 'Basa',
     'bak': 'Bashkir',
     'baq': 'Basque',
     'eus': 'Basque',
     'btk': 'Batak (Indonesia)',
     'bej': 'Beja',
     'bel': 'Belarusian',
     'bem': 'Bemba',
     'ben': 'Bengali',
     'ber': 'Berber (Other)',
     'bho': 'Bhojpuri',
     'bih': 'Bihari',
     'bik': 'Bikol',
     'byn': 'Bilin',
     'bin': 'Bini',
     'bis': 'Bislama',
     'byn': 'Blin',
     'nob': 'Bokmal, Norwegian',
     'bos': 'Bosnian',
     'bra': 'Braj',
     'bre': 'Breton',
     'bug': 'Buginese',
     'bul': 'Bulgarian',
     'bua': 'Buriat',
     'bur': 'Burmese',
     'mya': 'Burmese',
     'cad': 'Caddo',
     'car': 'Carib',
     'spa': 'Castilian',
     'cat': 'Catalan',
     'cau': 'Caucasian (Other)',
     'ceb': 'Cebuano',
     'cel': 'Celtic (Other)',
     'cai': 'Central American Indian (Other)',
     'chg': 'Chagatai',
     'cmc': 'Chamic languages',
     'cha': 'Chamorro',
     'che': 'Chechen',
     'chr': 'Cherokee',
     'nya': 'Chewa',
     'chy': 'Cheyenne',
     'chb': 'Chibcha',
     'nya': 'Chichewa',
     'chi': 'Chinese',
     'zho': 'Chinese',
     'chn': 'Chinook jargon',
     'chp': 'Chipewyan',
     'cho': 'Choctaw',
     'zha': 'Chuang',
     'chu': 'Church Slavic; Church Slavonic; Old Church Slavonic; Old Church Slavic; Old Bulgarian',
     'chk': 'Chuukese',
     'chv': 'Chuvash',
     'nwc': 'Classical Nepal Bhasa; Classical Newari; Old Newari',
     'cop': 'Coptic',
     'cor': 'Cornish',
     'cos': 'Corsican',
     'cre': 'Cree',
     'mus': 'Creek',
     'crp': 'Creoles and pidgins(Other)',
     'cpe': 'Creoles and pidgins, English-based (Other)',
     'cpf': 'Creoles and pidgins, French-based (Other)',
     'cpp': 'Creoles and pidgins, Portuguese-based (Other)',
     'crh': 'Crimean Tatar; Crimean Turkish',
     'scr': 'Croatian',
     'hrv': 'Croatian',
     'cus': 'Cushitic (Other)',
     'cze': 'Czech',
     'ces': 'Czech',
     'dak': 'Dakota',
     'dan': 'Danish',
     'dar': 'Dargwa',
     'day': 'Dayak',
     'del': 'Delaware',
     'din': 'Dinka',
     'div': 'Divehi',
     'doi': 'Dogri',
     'dgr': 'Dogrib',
     'dra': 'Dravidian (Other)',
     'dua': 'Duala',
     'dut': 'Dutch',
     'nld': 'Dutch',
     'dum': 'Dutch, Middle (ca. 1050-1350)',
     'dyu': 'Dyula',
     'dzo': 'Dzongkha',
     'efi': 'Efik',
     'egy': 'Egyptian (Ancient)',
     'eka': 'Ekajuk',
     'elx': 'Elamite',
     'eng': 'English',
     'enm': 'English, Middle (1100-1500)',
     'ang': 'English, Old (ca.450-1100)',
     'myv': 'Erzya',
     'epo': 'Esperanto',
     'est': 'Estonian',
     'ewe': 'Ewe',
     'ewo': 'Ewondo',
     'fan': 'Fang',
     'fat': 'Fanti',
     'fao': 'Faroese',
     'fij': 'Fijian',
     'fil': 'Filipino; Pilipino',
     'fin': 'Finnish',
     'fiu': 'Finno-Ugrian (Other)',
     'fon': 'Fon',
     'fre': 'French',
     'fra': 'French',
     'frm': 'French, Middle (ca.1400-1600)',
     'fro': 'French, Old (842-ca.1400)',
     'frs': 'Frisian, Eastern',
     'fry': 'Frisian, Western',
     'fur': 'Friulian',
     'ful': 'Fulah',
     'gaa': 'Ga',
     'gla': 'Gaelic',
     'glg': 'Gallegan',
     'lug': 'Ganda',
     'gay': 'Gayo',
     'gba': 'Gbaya',
     'gez': 'Geez',
     'geo': 'Georgian',
     'kat': 'Georgian',
     'ger': 'German',
     'deu': 'German',
     'nds': 'German, Low',
     'gmh': 'German, Middle High (ca.1050-1500)',
     'goh': 'German, Old High (ca.750-1050)',
     'gem': 'Germanic (Other)',
     'kik': 'Gikuyu',
     'gil': 'Gilbertese',
     'gon': 'Gondi',
     'gor': 'Gorontalo',
     'got': 'Gothic',
     'grb': 'Grebo',
     'grc': 'Greek, Ancient (to 1453)',
     'gre': 'Greek, Modern (1453-)',
     'ell': 'Greek, Modern (1453-)',
     'kal': 'Greenlandic; Kalaallisut',
     'grn': 'Guarani',
     'guj': 'Gujarati',
     'gwi': 'Gwich\'in',
     'hai': 'Haida',
     'hat': 'Haitian',
     'hau': 'Hausa',
     'haw': 'Hawaiian',
     'heb': 'Hebrew',
     'her': 'Herero',
     'hil': 'Hiligaynon',
     'him': 'Himachali',
     'hin': 'Hindi',
     'hmo': 'Hiri Motu',
     'hit': 'Hittite',
     'hmn': 'Hmong',
     'hun': 'Hungarian',
     'hup': 'Hupa',
     'iba': 'Iban',
     'ice': 'Icelandic',
     'isl': 'Icelandic',
     'ido': 'Ido',
     'ibo': 'Igbo',
     'ijo': 'Ijo',
     'ilo': 'Iloko',
     'smn': 'Inari Sami',
     'inc': 'Indic (Other)',
     'ine': 'Indo-European (Other)',
     'ind': 'Indonesian',
     'inh': 'Ingush',
     'ina': 'Interlingua (International Auxiliary Language Association)',
     'ile': 'Interlingue',
     'iku': 'Inuktitut',
     'ipk': 'Inupiaq',
     'ira': 'Iranian (Other)',
     'gle': 'Irish',
     'mga': 'Irish, Middle (900-1200)',
     'sga': 'Irish, Old (to 900)',
     'iro': 'Iroquoian languages',
     'ita': 'Italian',
     'jpn': 'Japanese',
     'jav': 'Javanese',
     'jrb': 'Judeo-Arabic',
     'jpr': 'Judeo-Persian',
     'kbd': 'Kabardian',
     'kab': 'Kabyle',
     'kac': 'Kachin',
     'kal': 'Kalaallisut',
     'xal': 'Kalmyk',
     'kam': 'Kamba',
     'kan': 'Kannada',
     'kau': 'Kanuri',
     'krc': 'Karachay-Balkar',
     'kaa': 'Kara-Kalpak',
     'krl': 'Karelian',
     'kar': 'Karen',
     'kas': 'Kashmiri',
     'csb': 'Kashubian',
     'kaw': 'Kawi',
     'kaz': 'Kazakh',
     'kha': 'Khasi',
     'khm': 'Khmer',
     'khi': 'Khoisan (Other)',
     'kho': 'Khotanese',
     'kik': 'Kikuyu',
     'kmb': 'Kimbundu',
     'kin': 'Kinyarwanda',
     'kir': 'Kirghiz',
     'tlh': 'Klingon; tlhIngan-Hol',
     'kom': 'Komi',
     'kon': 'Kongo',
     'kok': 'Konkani',
     'kor': 'Korean',
     'kos': 'Kosraean',
     'kpe': 'Kpelle',
     'kro': 'Kru',
     'kua': 'Kuanyama',
     'kum': 'Kumyk',
     'kur': 'Kurdish',
     'kru': 'Kurukh',
     'kut': 'Kutenai',
     'kua': 'Kwanyama',
     'lad': 'Ladino',
     'lah': 'Lahnda',
     'lam': 'Lamba',
     'lao': 'Lao',
     'lat': 'Latin',
     'lav': 'Latvian',
     'ltz': 'Letzeburgesch',
     'lez': 'Lezghian',
     'lim': 'Limburgan',
     'lin': 'Lingala',
     'lit': 'Lithuanian',
     'jbo': 'Lojban',
     'nds': 'Low German',
     'dsb': 'Lower Sorbian',
     'loz': 'Lozi',
     'lub': 'Luba-Katanga',
     'lua': 'Luba-Lulua',
     'lui': 'Luiseno',
     'smj': 'Lule Sami',
     'lun': 'Lunda',
     'luo': 'Luo (Kenya and Tanzania)',
     'lus': 'Lushai',
     'ltz': 'Luxembourgish',
     'mac': 'Macedonian',
     'mkd': 'Macedonian',
     'mad': 'Madurese',
     'mag': 'Magahi',
     'mai': 'Maithili',
     'mak': 'Makasar',
     'mlg': 'Malagasy',
     'may': 'Malay',
     'msa': 'Malay',
     'mal': 'Malayalam',
     'mlt': 'Maltese',
     'mnc': 'Manchu',
     'mdr': 'Mandar',
     'man': 'Mandingo',
     'mni': 'Manipuri',
     'mno': 'Manobo languages',
     'glv': 'Manx',
     'mao': 'Maori',
     'mri': 'Maori',
     'mar': 'Marathi',
     'chm': 'Mari',
     'mah': 'Marshallese',
     'mwr': 'Marwari',
     'mas': 'Masai',
     'myn': 'Mayan languages',
     'men': 'Mende',
     'mic': 'Micmac',
     'min': 'Minangkabau',
     'mwl': 'Mirandese',
     'mis': 'Miscellaneous languages',
     'moh': 'Mohawk',
     'mdf': 'Moksha',
     'mol': 'Moldavian',
     'mkh': 'Mon-Khmer (Other)',
     'lol': 'Mongo',
     'mon': 'Mongolian',
     'mos': 'Mossi',
     'mul': 'Multiple languages',
     'mun': 'Munda languages',
     'nah': 'Nahuatl',
     'nau': 'Nauru',
     'nav': 'Navaho; Navajo',
     'nde': 'Ndebele, North',
     'nbl': 'Ndebele, South',
     'ndo': 'Ndonga',
     'nap': 'Neapolitan',
     'nep': 'Nepali',
     'new': 'Newari',
     'nia': 'Nias',
     'nic': 'Niger-Kordofanian (Other)',
     'ssa': 'Nilo-Saharan (Other)',
     'niu': 'Niuean',
     'nog': 'Nogai',
     'non': 'Norse, Old',
     'nai': 'North American Indian (Other)',
     'frr': 'Northern Frisian',
     'sme': 'Northern Sami',
     'nso': 'Northern Sotho; Pedi; Sepedi',
     'nde': 'North Ndebele',
     'nor': 'Norwegian',
     'nob': 'Norwegian Bokmal',
     'nno': 'Norwegian Nynorsk',
     'nub': 'Nubian languages',
     'nym': 'Nyamwezi',
     'nya': 'Nyanja',
     'nyn': 'Nyankole',
     'nno': 'Nynorsk, Norwegian',
     'nyo': 'Nyoro',
     'nzi': 'Nzima',
     'oci': 'Occitan (post 1500)',
     'oji': 'Ojibwa',
     'ori': 'Oriya',
     'orm': 'Oromo',
     'osa': 'Osage',
     'oss': 'Ossetian; Ossetic',
     'oto': 'Otomian languages',
     'pal': 'Pahlavi',
     'pau': 'Palauan',
     'pli': 'Pali',
     'pam': 'Pampanga',
     'pag': 'Pangasinan',
     'pan': 'Panjabi',
     'pap': 'Papiamento',
     'paa': 'Papuan (Other)',
     'per': 'Persian',
     'fas': 'Persian',
     'peo': 'Persian, Old (ca.600-400)',
     'phi': 'Philippine (Other)',
     'phn': 'Phoenician',
     'pon': 'Pohnpeian',
     'pol': 'Polish',
     'por': 'Portuguese',
     'pra': 'Prakrit languages',
     'oci': 'Provencal',
     'pro': 'Provencal, Old (to 1500)',
     'pan': 'Punjabi',
     'pus': 'Pushto',
     'que': 'Quechua',
     'roh': 'Raeto-Romance',
     'raj': 'Rajasthani',
     'rap': 'Rapanui',
     'rar': 'Rarotongan',
     'qaa': 'Reserved for local use',
     'qtz': 'Reserved for local use',
     'roa': 'Romance (Other)',
     'rum': 'Romanian',
     'ron': 'Romanian',
     'rom': 'Romany',
     'run': 'Rundi',
     'rus': 'Russian',
     'sal': 'Salishan languages',
     'sam': 'Samaritan Aramaic',
     'smi': 'Sami languages (Other)',
     'smo': 'Samoan',
     'sad': 'Sandawe',
     'sag': 'Sango',
     'san': 'Sanskrit',
     'sat': 'Santali',
     'srd': 'Sardinian',
     'sas': 'Sasak',
     'nds': 'Saxon, Low',
     'sco': 'Scots',
     'gla': 'Scottish Gaelic',
     'sel': 'Selkup',
     'sem': 'Semitic (Other)',
     'nso': 'Sepedi; Northern Sotho; Pedi',
     'scc': 'Serbian',
     'srp': 'Serbian',
     'srr': 'Serer',
     'shn': 'Shan',
     'sna': 'Shona',
     'iii': 'Sichuan Yi',
     'scn': 'Sicilian',
     'sid': 'Sidamo',
     'sgn': 'Sign languages',
     'bla': 'Siksika',
     'snd': 'Sindhi',
     'sin': 'Sinhalese',
     'sit': 'Sino-Tibetan (Other)',
     'sio': 'Siouan languages',
     'sms': 'Skolt Sami',
     'den': 'Slave (Athapascan)',
     'sla': 'Slavic (Other)',
     'slo': 'Slovak',
     'slk': 'Slovak',
     'slv': 'Slovenian',
     'sog': 'Sogdian',
     'som': 'Somali',
     'son': 'Songhai',
     'snk': 'Soninke',
     'wen': 'Sorbian languages',
     'nso': 'Sotho, Northern',
     'sot': 'Sotho, Southern',
     'sai': 'South American Indian (Other)',
     'alt': 'Southern Altai',
     'sma': 'Southern Sami',
     'nbl': 'South Ndebele',
     'spa': 'Spanish',
     'srn': 'Sranan Tongo',
     'suk': 'Sukuma',
     'sux': 'Sumerian',
     'sun': 'Sundanese',
     'sus': 'Susu',
     'swa': 'Swahili',
     'ssw': 'Swati',
     'swe': 'Swedish',
     'gsw': 'Swiss German; Alemanic',
     'syr': 'Syriac',
     'tgl': 'Tagalog',
     'tah': 'Tahitian',
     'tai': 'Tai (Other)',
     'tgk': 'Tajik',
     'tmh': 'Tamashek',
     'tam': 'Tamil',
     'tat': 'Tatar',
     'tel': 'Telugu',
     'ter': 'Tereno',
     'tet': 'Tetum',
     'tha': 'Thai',
     'tib': 'Tibetan',
     'bod': 'Tibetan',
     'tig': 'Tigre',
     'tir': 'Tigrinya',
     'tem': 'Timne',
     'tiv': 'Tiv',
     'tlh': 'tlhIngan-Hol; Klingon',
     'tli': 'Tlingit',
     'tpi': 'Tok Pisin',
     'tkl': 'Tokelau',
     'tog': 'Tonga (Nyasa)',
     'ton': 'Tonga (Tonga Islands)',
     'tsi': 'Tsimshian',
     'tso': 'Tsonga',
     'tsn': 'Tswana',
     'tum': 'Tumbuka',
     'tup': 'Tupi languages',
     'tur': 'Turkish',
     'ota': 'Turkish, Ottoman (1500-1928)',
     'tuk': 'Turkmen',
     'tvl': 'Tuvalu',
     'tyv': 'Tuvinian',
     'twi': 'Twi',
     'udm': 'Udmurt',
     'uga': 'Ugaritic',
     'uig': 'Uighur',
     'ukr': 'Ukrainian',
     'umb': 'Umbundu',
     'und': 'Undetermined',
     'hsb': 'Upper Sorbian',
     'urd': 'Urdu',
     'uzb': 'Uzbek',
     'vai': 'Vai',
     'cat': 'Valencian',
     'ven': 'Venda',
     'vie': 'Vietnamese',
     'vol': 'Volapuk',
     'vot': 'Votic',
     'wak': 'Wakashan languages',
     'wal': 'Walamo',
     'wln': 'Walloon',
     'war': 'Waray',
     'was': 'Washo',
     'wel': 'Welsh',
     'cym': 'Welsh',
     'wol': 'Wolof',
     'xho': 'Xhosa',
     'sah': 'Yakut',
     'yao': 'Yao',
     'yap': 'Yapese',
     'yid': 'Yiddish',
     'yor': 'Yoruba',
     'ypk': 'Yupik languages',
     'znd': 'Zande',
     'zap': 'Zapotec',
     'zen': 'Zenaga',
     'zha': 'Zhuang',
     'zul': 'Zulu',
     'zun': 'Zuni' }
| Python |
from base import validatorBase
from validators import *
from logging import InvalidSseType, InvalidNSS, MissingElement, MissingByAndWhenAttrs
import re
class Sharing(validatorBase):
    """Validates an sx:sharing element (Simple Sharing Extensions)."""
    def getExpectedAttrNames(self):
        return [ (None, u'expires'), (None, u'since'), (None, u'until') ]

    def prevalidate(self):
        since_present = self.attrs.has_key((None,'since'))
        until_present = self.attrs.has_key((None,'until'))
        # since/until form a pair: each becomes mandatory when the other
        # is present.
        if until_present:
            self.validate_required_attribute((None,'since'), rfc3339)
        else:
            self.validate_optional_attribute((None,'since'), rfc3339)
        if since_present:
            self.validate_required_attribute((None,'until'), rfc3339)
        else:
            self.validate_optional_attribute((None,'until'), rfc3339)
        self.validate_optional_attribute((None,'expires'), rfc3339)
        if since_present and until_present:
            # NOTE(review): this is a lexicographic comparison of the raw
            # RFC 3339 strings; presumably both share a timezone offset --
            # confirm.
            if self.attrs[(None,'since')] > self.attrs[(None,'until')]:
                self.log(SinceAfterUntil({}))

    def do_sx_related(self):
        return Related()
class Sync(validatorBase):
    """Validates an sx:sync element attached to an entry/item."""
    def getExpectedAttrNames(self):
        return [ (None, u'deleted'), (None, u'noconflicts'),
                 (None, u'id'), (None, u'updates') ]

    def prevalidate(self):
        # deleted/noconflicts are optional strict booleans.
        for flag in ('deleted', 'noconflicts'):
            self.validate_optional_attribute((None, flag), truefalsestrict)
        # id must be present, unique across the feed, and a valid URN NSS.
        self.validate_required_attribute((None,'id'), unique('id',self.parent.parent))
        self.validate_optional_attribute((None,'id'), rfc2141_nss)
        self.validate_required_attribute((None,'updates'), UINT31)

    def validate(self):
        if 'sx_history' not in self.children:
            self.log(MissingElement({'parent':self.name, 'element':'sx:history'}))

    def do_sx_history(self):
        return History()

    def do_sx_conflicts(self):
        return Conflicts()
class Related(validatorBase):
    """Validates an sx:related element (a link to a related feed)."""
    def getExpectedAttrNames(self):
        return [ (None, u'link'), (None, u'title'), (None, u'type') ]

    def prevalidate(self):
        self.validate_required_attribute((None,'link'), rfc2396_full)
        # title is optional but, when present, must be plain non-blank text.
        for check in (nonhtml, nonblank):
            self.validate_optional_attribute((None,'title'), check)
        self.validate_required_attribute((None,'type'), FeedType)
class History(validatorBase):
    """Validates an sx:history element recording one update of an item."""
    def getExpectedAttrNames(self):
        return [ (None, u'by'), (None, u'sequence'), (None, u'when') ]

    def prevalidate(self):
        # by is optional but, when present, must be plain non-blank text
        # forming a valid URN NSS.
        for check in (nonhtml, nonblank, rfc2141_nss):
            self.validate_optional_attribute((None,'by'), check)
        self.validate_required_attribute((None,'sequence'), UINT31)
        self.validate_optional_attribute((None,'when'), rfc3339)
        has_when = self.attrs.has_key((None,'when'))
        has_by = self.attrs.has_key((None,'by'))
        # At least one of by/when is strongly recommended; warn on a missing
        # partner, and flag the element when both are absent.
        if has_when and not has_by:
            self.log(MissingRecommendedAttribute({"attr":"by"}))
        elif has_by and not has_when:
            self.log(MissingRecommendedAttribute({"attr":"when"}))
        elif not has_by and not has_when:
            self.log(MissingByAndWhenAttrs({}))
class FeedType(enumeration):
    """Restricts sx:related/@type to the two values SSE allows."""
    error = InvalidSseType
    valuelist = ['complete', 'aggregated']
class rfc2141_nss(text):
    """Validates an RFC 2141 URN namespace-specific string (NSS).

    Logs InvalidNSS when the value contains characters outside the NSS
    grammar (trans chars plus %-encoded hex pairs).
    """
    # Compiled once at class-creation time instead of on every validate()
    # call; raw string avoids the doubled backslashes of the old literal.
    _NSS_RE = re.compile(r"^([0-9a-zA-Z()+,\-\.:=@;$_!*'/?#]|%[0-9a-fA-F][0-9a-fA-F])+$")
    def validate(self):
        if not self._NSS_RE.match(self.value):
            self.log(InvalidNSS({"element":self.name,"parent":self.parent.name}))
class Conflicts(validatorBase):
    """sx:conflicts container holding conflicting entry/item copies."""
    def do_entry(self):
        from entry import entry
        return entry()
    def do_item(self):
        from item import item
        return item()
| Python |
"""$Id: logging.py 1059 2009-11-12 22:08:04Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1059 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
# feed types
# Numeric identifiers for every feed/document type the validator recognizes;
# used as indexes into the display-name and badge tables below.
TYPE_UNKNOWN = 0
TYPE_RSS1 = 1
TYPE_RSS2 = 2
TYPE_ATOM = 3
TYPE_ATOM_ENTRY = 4
TYPE_APP_CATEGORIES = 5
TYPE_APP_SERVICE = 6
TYPE_XRD = 7
TYPE_OPENSEARCH = 8
TYPE_OPML = 9
TYPE_KML20 = 10
TYPE_KML21 = 11
TYPE_KML22 = 12
# Human-readable display name for each feed-type code above.
FEEDTYPEDISPLAY = {0:"(unknown type)", 1:"RSS", 2:"RSS", 3:"Atom 1.0", 4:"Atom 1.0", 5:"Atom Publishing Protocol Category", 6:"Atom Publishing Protocol Service", 7:"XRD", 8:"OpenSearch", 9:"OPML", 10:"KML 2.0", 11:"KML 2.1", 12:"KML 2.2"}
# "Valid feed" badge image filename for each feed-type code (empty when unknown).
VALIDFEEDGRAPHIC = {0:"", 1:"valid-rss.png", 2:"valid-rss-rogers.png", 3:"valid-atom.png", 4:"valid-atom.png", 5:"valid-atom.png", 6:"valid-atom.png", 7:"valid-xrd.png", 8:"valid-opensearch.png", 9:"valid-opml.gif", 10:"valid-kml.png", 11:"valid-kml.png", 12:"valid-kml.png"}
#
# logging support
#
class LoggedEvent:
    """Base class for everything the validator can log.

    params is a dict of message parameters (element, parent, value, ...)
    substituted into the rendered message text.
    """
    def __init__(self, params):
        self.params = params
class Info(LoggedEvent): pass
class Message(LoggedEvent): pass
# NOTE: Warning shadows the Python builtin of the same name in this module.
class Warning(Message): pass
class Error(Message): pass
class ValidationFailure(Error):
    """Wraps a fatal event that aborted validation entirely."""
    def __init__(self, event):
        LoggedEvent.__init__(self, {})
        self.event = event
###################### error ######################
# Events in this section are reported as errors: the document is invalid.
# Each class name keys the message text looked up at render time.
class SAXError(Error): pass
class WPBlankLine(SAXError): pass
# NOTE: shadows the Python builtin UnicodeError within this module.
class UnicodeError(Error): pass
class MissingNamespace(SAXError): pass
class NotInANamespace(MissingNamespace): pass
class UseOfExtensionAttr(Warning): pass
class UndefinedNamedEntity(SAXError): pass
class InvalidRSSVersion(Error): pass
class UndefinedElement(Error): pass
class NoBlink(UndefinedElement): pass
class NoThrWhen(UndefinedElement): pass
class MissingAttribute(Error): pass
class UnexpectedAttribute(Error): pass
class DuplicateElement(Error): pass
class NotEnoughHoursInTheDay(Error): pass
class EightDaysAWeek(Error): pass
class InvalidValue(Error): pass
class InvalidContact(InvalidValue): pass
class UnknownHost(Warning): pass
class InvalidAddrSpec(InvalidContact): pass
class InvalidLink(InvalidValue): pass
class UriNotIri(InvalidLink): pass
class InvalidIRI(InvalidLink): pass
class InvalidFullLink(InvalidLink): pass
class InvalidUriChar(InvalidLink): pass
class InvalidISO8601Date(InvalidValue): pass
class InvalidISO8601DateTime(InvalidValue): pass
class InvalidW3CDTFDate(InvalidISO8601Date): pass
class InvalidRFC2822Date(InvalidValue): pass
class IncorrectDOW(InvalidRFC2822Date): pass
class InvalidRFC3339Date(InvalidValue): pass
class InvalidURIAttribute(InvalidLink): pass
class InvalidURLAttribute(InvalidURIAttribute): pass
class InvalidIntegerAttribute(InvalidValue): pass
class InvalidBooleanAttribute(InvalidValue): pass
class InvalidMIMEAttribute(InvalidValue): pass
class InvalidInteger(InvalidValue): pass
class InvalidPercentage(InvalidValue): pass
class InvalidNonNegativeInteger(InvalidInteger): pass
class InvalidPositiveInteger(InvalidInteger): pass
class InvalidAlphanum(Error): pass
class InvalidWidth(InvalidValue): pass
class InvalidHeight(InvalidValue): pass
class InvalidHour(InvalidValue): pass
class InvalidDay(InvalidValue): pass
class InvalidHttpGUID(InvalidValue): pass
class InvalidLanguage(InvalidValue): pass
class InvalidUpdatePeriod(InvalidValue): pass
class InvalidItunesCategory(InvalidValue): pass
class ObsoleteItunesCategory(Warning): pass
class InvalidYesNo(InvalidValue): pass
class InvalidYesNoClean(InvalidValue): pass
class InvalidDuration(InvalidValue): pass
class TooLong(InvalidValue): pass
class InvalidKeywords(Warning): pass
class InvalidTextType(InvalidValue): pass
class InvalidCommaSeparatedIntegers(InvalidValue): pass
class UndeterminableVocabulary(Warning): pass
class InvalidFormComponentName(InvalidValue): pass
class InvalidAccessRestrictionRel(InvalidValue): pass
class NotURLEncoded(InvalidValue): pass
class InvalidLocalRole(InvalidValue): pass
class InvalidEncoding(InvalidValue): pass
class InvalidSyndicationRight(InvalidValue): pass
class InvalidLocalParameter(InvalidValue): pass
class MissingElement(Error): pass
class MissingDescription(MissingElement): pass
class MissingLink(MissingElement): pass
class MissingTitle(MissingElement): pass
class ItemMustContainTitleOrDescription(MissingElement): pass
class MissingXhtmlDiv(MissingElement): pass
class MissingContentOrAlternate(MissingElement): pass
class FatalSecurityRisk(Error): pass
class ContainsSystemEntity(Info): pass
class DuplicateValue(InvalidValue): pass
class InvalidDoctype(Error): pass
class BadXmlVersion(Error): pass
class DuplicateAtomLink(Error): pass
class MissingHref(MissingAttribute): pass
class AtomLinkNotEmpty(Warning): pass
class UnregisteredAtomLinkRel(Warning): pass
class HttpError(Error): pass
# NOTE: shadows the Python builtin IOError within this module.
class IOError(Error): pass
class UnknownEncoding(Error): pass
class UnexpectedText(Error): pass
class UnexpectedWhitespace(Error): pass
class ValidatorLimit(Error): pass
class HttpProtocolError(Error): pass
class InvalidRDF(Error): pass
class InvalidLatitude(Error): pass
class InvalidLongitude(Error): pass
class MisplacedMetadata(Error): pass
class InvalidPermalink(Error): pass
class InvalidCreditRole(Error): pass
class InvalidMediaTextType(Error): pass
class InvalidMediaHash(Error): pass
class InvalidMediaRating(Error): pass
class InvalidNPTTime(Error): pass
class InvalidMediaRestriction(Error): pass
class InvalidMediaRestrictionRel(Error): pass
class InvalidMediaRestrictionType(Error): pass
class InvalidMediaMedium(Error): pass
class InvalidMediaExpression(Error): pass
class DeprecatedMediaAdult(Warning): pass
class MediaGroupWithoutAlternatives(Error): pass
class InvalidSseType(Error): pass
class InvalidNSS(Error): pass
class IntegerOverflow(Error): pass
class SinceAfterUntil(Error): pass
class MissingByAndWhenAttrs(Error): pass
###################### warning ######################
# Events in this section are reported as warnings: the document validates
# but could be improved.  (A redundant second definition of
# InvalidSyndicationRight, identical to the one in the error section,
# has been removed.)
class DuplicateSemantics(Warning): pass
class DuplicateItemSemantics(DuplicateSemantics): pass
class DuplicateDescriptionSemantics(DuplicateSemantics): pass
class ImageLinkDoesntMatch(Warning): pass
class ImageUrlFormat(Warning): pass
class ContainsRelRef(Warning): pass
class ReservedPrefix(Warning): pass
class MediaRssNamespace(Error): pass
class NotSufficientlyUnique(Warning): pass
class ImplausibleDate(Warning): pass
class ProblematicalRFC822Date(Warning): pass
class SecurityRisk(Warning): pass
class SecurityRiskAttr(SecurityRisk): pass
class DangerousStyleAttr(SecurityRiskAttr): pass
class BadCharacters(Warning): pass
class ObscureEncoding(Warning): pass
class UnexpectedContentType(Warning): pass
class EncodingMismatch(Warning): pass
class NonSpecificMediaType(Warning): pass
class NonCanonicalURI(Warning): pass
class SameDocumentReference(Warning): pass
class ContainsEmail(Warning): pass
class ContainsHTML(Warning): pass
class ContainsUndeclaredHTML(ContainsHTML): pass
class MissingSelf(Warning): pass
class SelfDoesntMatchLocation(Warning): pass
class RelativeSelf(Warning): pass
class MissingSourceElement(Warning): pass
class MissingTypeAttr(Warning): pass
class DuplicateIds(Error): pass
class DuplicateEntries(Warning): pass
class DuplicateUpdated(Warning): pass
class NotBlank(Warning): pass
class AttrNotBlank(Warning): pass
class MissingSummary(Error): pass
class MissingTextualContent(Warning): pass
class NotUTF8(Warning): pass
class MissingItunesElement(Warning): pass
class MissingItunesEmail(Warning): pass
class UnsupportedItunesFormat(Warning): pass
class SelfNotAtom(Warning): pass
class DuplicateEnclosure(Warning): pass
class MissingGuid(Warning): pass
class ObsoleteWikiNamespace(Warning): pass
class CommentRSS(Warning): pass
class ShouldIncludeExample(Warning): pass
class InvalidAdultContent(Warning): pass
class UndeclaredPrefix(InvalidValue): pass
class MisplacedXHTMLContent(Warning): pass
class SchemeNotIANARegistered(Warning): pass
class AvoidNamespacePrefix(Warning): pass
class UnknownNamespace(Warning): pass
class MissingRecommendedAttribute(Warning): pass
class QuestionableUsage(Warning): pass
###################### info ######################
# Informational events: best-practice suggestions, not validity problems.
class BestPractices(Info): pass
class MissingRecommendedElement(BestPractices): pass
class MissingDCLanguage(MissingRecommendedElement): pass
class NonstdPrefix(BestPractices): pass
class NonstdEncoding(BestPractices): pass
class MissingEncoding(BestPractices): pass
class TempRedirect(Info): pass
class TextXml(Info): pass
class Uncompressed(Info): pass
## Atom-specific errors
class ObsoleteVersion(Warning): pass
class ObsoleteNamespace(Error): pass
class ConflictingCatAttr(Error): pass
class ConflictingCatChildren(Error): pass
class InvalidMediaRange(Error): pass
class UndefinedParam(Warning): pass
class InvalidURI(InvalidValue) : pass
class InvalidURN(InvalidValue): pass
class InvalidUUID(InvalidValue): pass
class InvalidTAG(InvalidValue): pass
class InvalidContentMode(InvalidValue) : pass
class InvalidMIMEType(InvalidMediaRange) : pass
class InvalidNamespace(Error): pass
class NotEscaped(InvalidValue): pass
class NotBase64(InvalidValue): pass
class NotInline(Warning): pass # this one can never be sure...
class NotHtml(Warning): pass
class HtmlFragment(Warning): pass
# Feed-paging / archiving (RFC 5005) events.
class FeedHistoryRelInEntry(Warning): pass
class FeedRelInCompleteFeed(Error): pass
class CurrentNotSelfInCompleteFeed(Error): pass
class LinkPastEnd(Error): pass
class MissingCurrentInArchive(Warning): pass
class ArchiveIncomplete(Warning): pass
############## non-errors (logging successes) ###################
# Success events: emitted when a value passes a check.  Each class is only a
# message identifier; the hierarchy (ValidURI, ValidDate, ...) lets handlers
# match whole families of successes at once.
class Success(LoggedEvent): pass
class ValidValue(Success): pass
class ValidCloud(Success): pass
class ValidURI(ValidValue): pass
class ValidHttpGUID(ValidURI): pass
class ValidURLAttribute(ValidURI): pass
class ValidURN(ValidValue): pass
class ValidTAG(ValidValue): pass
class ValidTitle(ValidValue): pass
class ValidDate(ValidValue): pass
class ValidW3CDTFDate(ValidDate): pass
class ValidRFC2822Date(ValidDate): pass
class ValidAttributeValue(ValidValue): pass
class ValidBooleanAttribute(ValidAttributeValue): pass
class ValidLanguage(ValidValue): pass
class ValidHeight(ValidValue): pass
class ValidWidth(ValidValue): pass
# NOTE: a second, identical "class ValidTitle(ValidValue): pass" used to sit
# here; it silently re-bound the name already declared above and has been
# removed.
class ValidContact(ValidValue): pass
class ValidIntegerAttribute(ValidValue): pass
class ValidMIMEAttribute(ValidValue): pass
class ValidDay(ValidValue): pass
class ValidHour(ValidValue): pass
class ValidInteger(ValidValue): pass
class ValidPercentage(ValidValue): pass
class ValidUpdatePeriod(ValidValue): pass
class ValidContentMode(ValidValue): pass
class ValidElement(ValidValue): pass
class ValidCopyright(ValidValue): pass
class ValidGeneratorName(ValidValue): pass
class OptionalValueMissing(ValidValue): pass
class ValidDoctype(ValidValue): pass
# NOTE(review): DeprecatedDTD derives from Error yet sits in the success
# section -- looks deliberate (it is raised while checking doctypes) but odd;
# confirm before relocating.
class DeprecatedDTD(Error): pass
class ValidHtml(ValidValue): pass
class ValidAtomLinkRel(ValidValue): pass
class ValidLatitude(ValidValue): pass
class ValidLongitude(ValidValue): pass
class ValidNPTTime(ValidValue): pass
###################### opml ######################
# Events raised while validating OPML outlines.
class InvalidOPMLVersion(Error):
  pass
class MissingXmlURL(Warning):
  pass
class InvalidOutlineVersion(Warning):
  pass
class InvalidOutlineType(Warning):
  pass
class InvalidExpansionState(Error):
  pass
class InvalidTrueFalse(InvalidValue):
  pass
class MissingOutlineType(Warning):
  pass
class MissingTitleAttr(Warning):
  pass
class MissingUrlAttr(Warning):
  pass
###################### gbase ######################
# Events raised while validating Google Base extension values.
class InvalidCountryCode(InvalidValue):
  pass
class InvalidCurrencyUnit(InvalidValue):
  pass
class InvalidFloat(InvalidValue):
  pass
class InvalidFloatUnit(InvalidValue):
  pass
class InvalidFullLocation(InvalidValue):
  pass
class InvalidGender(InvalidValue):
  pass
class InvalidIntUnit(InvalidValue):
  pass
class InvalidLabel(InvalidValue):
  pass
class InvalidLocation(InvalidValue):
  pass
class InvalidMaritalStatus(InvalidValue):
  pass
class InvalidPaymentMethod(InvalidValue):
  pass
class InvalidPriceType(InvalidValue):
  pass
class InvalidRatingType(InvalidValue):
  pass
class InvalidReviewerType(InvalidValue):
  pass
class InvalidSalaryType(InvalidValue):
  pass
class InvalidServiceType(InvalidValue):
  pass
class InvalidYear(InvalidValue):
  pass
class TooMany(DuplicateElement):
  pass
###################### georss ######################
# Events raised while validating GeoRSS coordinates.
class InvalidCoord(InvalidValue):
  pass
class InvalidCoordList(InvalidValue):
  pass
class CoordComma(Warning):
  pass
###################### meta ######################
class InvalidMetaName(InvalidValue):
  pass
class InvalidMetaContent(InvalidValue):
  pass
###################### kml ######################
# Events raised while validating KML geometry and style values.
class Deprecated(Warning):
  pass
class DeprecatedRootHref(Warning):
  pass
class InvalidAltitudeMode(InvalidValue):
  pass
class InvalidAngle(InvalidValue):
  pass
class InvalidColor(InvalidValue):
  pass
class InvalidColorMode(InvalidValue):
  pass
class InvalidItemIconState(InvalidValue):
  pass
class InvalidListItemType(InvalidValue):
  pass
class InvalidKmlCoordList(InvalidValue):
  pass
class InvalidKmlLatitude(InvalidValue):
  pass
class InvalidKmlLongitude(InvalidValue):
  pass
class InvalidKmlMediaType(Warning):
  pass
class InvalidKmlUnits(InvalidValue):
  pass
class InvalidRefreshMode(InvalidValue):
  pass
class InvalidSchemaFieldType(InvalidValue):
  pass
class InvalidStyleState(InvalidValue):
  pass
class InvalidViewRefreshMode(InvalidValue):
  pass
class InvalidZeroOne(InvalidValue):
  pass
class MissingId(Warning):
  pass
class ValidAngle(ValidValue):
  pass
###################### RSS 2.0 Profile ######################
# Events raised when a feed deviates from the RSS 2.0 Profile guidelines.
class RSS20Profile(Warning):
  pass
class CharacterData(ContainsHTML):
  pass
class EmailFormat(RSS20Profile):
  pass
class MissingRealName(EmailFormat):
  pass
class MisplacedItem(RSS20Profile):
  pass
class ImageTitleDoesntMatch(RSS20Profile):
  pass
class AvoidTextInput(RSS20Profile):
  pass
class NeedDescriptionBeforeContent(RSS20Profile):
  pass
class SlashDate(RSS20Profile):
  pass
class UseZeroForMidnight(RSS20Profile):
  pass
class MissingAtomSelfLink(MissingSelf):
  pass
class UseZeroForUnknown(InvalidNonNegativeInteger):
  pass
# | Python |
"""$Id: itunes.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from validators import *
class itunes:
  """Validators shared by the channel- and item-level iTunes extension tags.

  Each do_itunes_* method returns the validator(s) to apply to the
  corresponding itunes:* child element.
  """
  def do_itunes_author(self):
    """itunes:author -- free text, at most 255 characters, at most once."""
    return lengthLimitedText(255), noduplicates()
  def do_itunes_block(self):
    """itunes:block -- restricted to yes/no/clean, at most once."""
    return yesnoclean(), noduplicates()
  def do_itunes_explicit(self):
    """itunes:explicit -- restricted to yes/no/clean, at most once."""
    return yesnoclean(), noduplicates()
  def do_itunes_image(self):
    """itunes:image -- href attribute must be an http URL, at most once."""
    return image(), noduplicates()
  def do_itunes_keywords(self):
    """itunes:keywords -- at most 255 characters, run through the
    keywords() check, at most once."""
    return lengthLimitedText(255), keywords(), noduplicates()
  def do_itunes_subtitle(self):
    """itunes:subtitle -- free text, at most 255 characters, at most once."""
    return lengthLimitedText(255), noduplicates()
  def do_itunes_summary(self):
    """itunes:summary -- free text, at most 4000 characters, at most once."""
    return lengthLimitedText(4000), noduplicates()
class itunes_channel(itunes):
  """Channel-level iTunes checks: required elements, encoding, and the
  channel-only itunes:* tags (owner, category, new-feed-url, ...)."""
  # This is the validator's own logging module (event classes), not the
  # standard library's logging package.
  from logging import MissingItunesElement
  def validate(self):
    """Warn about channel children that iTunes podcasts should carry."""
    # a language may come from a <language> child or from xml:lang
    if not 'language' in self.children and not self.xmlLang:
      self.log(MissingItunesElement({"parent":self.name, "element":'language'}))
    if not 'itunes_category' in self.children:
      self.log(MissingItunesElement({"parent":self.name, "element":'itunes:category'}))
    if not 'itunes_explicit' in self.children:
      self.log(MissingItunesElement({"parent":self.name, "element":'itunes:explicit'}))
    if not 'itunes_owner' in self.children:
      self.log(MissingItunesEmail({"parent":self.name, "element":'itunes:email'}))
  def setItunes(self, value):
    """Mark this channel as an iTunes feed; runs one-time checks on the
    first True value."""
    if value and not self.itunes:
      # iTunes feeds are expected to be UTF-8 encoded
      if self.dispatcher.encoding.lower() not in ['utf-8','utf8']:
        from logging import NotUTF8
        self.log(NotUTF8({"parent":self.parent.name, "element":self.name}))
      # NOTE(review): for Atom, validate() is run here only once entries have
      # been seen -- presumably because itunes:* may appear after entries;
      # confirm against the dispatch order.
      if self.getFeedType() == TYPE_ATOM and 'entry' in self.children:
        self.validate()
      self.itunes |= value  # latches to True; never reset back to False
  def do_itunes_owner(self):
    return owner(), noduplicates()
  def do_itunes_category(self):
    return category()
  def do_itunes_pubDate(self):
    return rfc822(), noduplicates()
  def do_itunes_new_feed_url(self):
    # Dispatch normalizes element names, so this also catches misspellings
    # such as itunes:new_feed_url; only the hyphenated raw name
    # 'new-feed-url' is defined.
    if self.child != 'itunes_new-feed-url':
      self.log(UndefinedElement({"parent":self.name.replace("_",":"), "element":self.child}))
    return rfc2396_full(), noduplicates()
class itunes_item(itunes):
  """Item-level iTunes checks; warns when an enclosure's file extension is
  not one of the formats iTunes plays."""
  supported_formats = ['m4a', 'mp3', 'mov', 'mp4', 'm4v', 'pdf']
  def validate(self):
    pass
  def setItunes(self, value):
    """Mark this item (and its parent channel) as iTunes content; re-check
    any enclosures that were seen before the itunes flag was set."""
    if value and not self.itunes:
      self.parent.setItunes(True)
      self.itunes = value
      if hasattr(self, 'enclosures'):
        save, self.enclosures = self.enclosures, []
        for enclosure in save:
          self.setEnclosure(enclosure)
  def setEnclosure(self, url):
    """Record an enclosure URL; for iTunes items, warn on unsupported
    media formats."""
    if self.itunes:
      # http://www.apple.com/itunes/podcasts/techspecs.html#_Toc526931678
      # Strip any fragment/query string before taking the extension, and
      # compare case-insensitively: "Episode.MP3?session=1" is still an mp3.
      ext = url.split('#')[0].split('?')[0].split('.')[-1]
      if ext.lower() not in itunes_item.supported_formats:
        from logging import UnsupportedItunesFormat
        self.log(UnsupportedItunesFormat({"parent":self.parent.name, "element":self.name, "extension":ext}))
    if not hasattr(self, 'enclosures'): self.enclosures = []
    self.enclosures.append(url)
  def do_itunes_duration(self):
    return duration(), noduplicates()
class owner(validatorBase):
  """Validates <itunes:owner>: an itunes:email child is mandatory."""
  def validate(self):
    if "itunes_email" not in self.children:
      self.log(MissingElement({"parent":self.name.replace("_",":"),
        "element":"itunes:email"}))
  def do_itunes_email(self):
    """itunes:email -- checked as an e-mail address, at most once."""
    return email(), noduplicates()
  def do_itunes_name(self):
    """itunes:name -- free text, at most 255 characters, at most once."""
    return lengthLimitedText(255), noduplicates()
class subcategory(validatorBase):
  """Validates the text= attribute of an itunes category element against a
  list of current names and a list of obsolete (pre-taxonomy-change) names."""
  def __init__(self, newlist, oldlist):
    validatorBase.__init__(self)
    self.newlist = newlist  # currently valid category names
    self.oldlist = oldlist  # previously valid, now obsolete, names
    self.text = None        # the text attribute value, once seen
  def getExpectedAttrNames(self):
    return [(None, u'text')]
  def prevalidate(self):
    # getValue raises KeyError when the attribute is absent.  Keep the try
    # body minimal: previously it also spanned the logging calls below, so a
    # stray KeyError raised there would have been misreported as a missing
    # text attribute.
    try:
      self.text=self.attrs.getValue((None, "text"))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name.replace("_",":"),
        "element":self.name.replace("_",":"),
        "attr":"text"}))
      return
    if self.text in self.newlist:
      return
    if self.text in self.oldlist:
      self.log(ObsoleteItunesCategory({"parent":self.parent.name.replace("_",":"),
        "element":self.name.replace("_",":"),
        "text":self.text}))
    else:
      self.log(InvalidItunesCategory({"parent":self.parent.name.replace("_",":"),
        "element":self.name.replace("_",":"),
        "text":self.text}))
class image(validatorBase):
  """Validates <itunes:image/>, which carries its URL in an href attribute."""
  def getExpectedAttrNames(self):
    # href is the only recognized attribute
    return [(None, u'href')]
  def prevalidate(self):
    # href is mandatory and must be an http URL
    self.validate_required_attribute((None,'href'), httpURL)
class category(subcategory):
  """Top-level <itunes:category>; nested categories are matched against the
  subcategory lists of the chosen top-level category."""
  def __init__(self):
    subcategory.__init__(self, valid_itunes_categories.keys(),
                         old_itunes_categories.keys())
  def do_itunes_category(self):
    # With no recognized parent category there is nothing meaningful to
    # check nested categories against, so swallow them.
    if not self.text:
      return eater()
    return subcategory(valid_itunes_categories.get(self.text,[]),
                       old_itunes_categories.get(self.text,[]))
# Current iTunes podcast category taxonomy: maps each top-level category name
# to the list of its subcategory names (an empty list means the category has
# no subcategories).
valid_itunes_categories = {
  "Arts": [
    "Design",
    "Fashion & Beauty",
    "Food",
    "Literature",
    "Performing Arts",
    "Visual Arts"],

  "Business": [
    "Business News",
    "Careers",
    "Investing",
    "Management & Marketing",
    "Shopping"],

  "Comedy": [],

  "Education": [
    "Education Technology",
    "Higher Education",
    "K-12",
    "Language Courses",
    "Training"],

  "Games & Hobbies": [
    "Automotive",
    "Aviation",
    "Hobbies",
    "Other Games",
    "Video Games"],

  "Government & Organizations": [
    "Local",
    "National",
    "Non-Profit",
    "Regional"],

  "Health": [
    "Alternative Health",
    "Fitness & Nutrition",
    "Self-Help",
    "Sexuality"],

  "Kids & Family": [],

  "Music": [],

  "News & Politics": [],

  "Religion & Spirituality": [
    "Buddhism",
    "Christianity",
    "Hinduism",
    "Islam",
    "Judaism",
    "Other",
    "Spirituality"],

  "Science & Medicine": [
    "Medicine",
    "Natural Sciences",
    "Social Sciences"],

  "Society & Culture": [
    "History",
    "Personal Journals",
    "Philosophy",
    "Places & Travel"],

  "Sports & Recreation": [
    "Amateur",
    "College & High School",
    "Outdoor",
    "Professional"],

  "Technology": [
    "Gadgets",
    "Tech News",
    "Podcasting",
    "Software How-To"],

  "TV & Film": [],
  }
# Obsolete (pre-2005 taxonomy change) iTunes category names, kept so that
# feeds still using them get an "obsolete" warning rather than "invalid".
# Same shape as valid_itunes_categories: top-level name -> subcategory list.
old_itunes_categories = {
  "Arts & Entertainment": [
    "Architecture",
    "Books",
    "Design",
    "Entertainment",
    "Games",
    "Performing Arts",
    "Photography",
    "Poetry",
    "Science Fiction"],

  "Audio Blogs": [],

  "Business": [
    "Careers",
    "Finance",
    "Investing",
    "Management",
    "Marketing"],

  "Comedy": [],

  "Education": [
    "Higher Education",
    "K-12"],

  "Family": [],

  "Food": [],

  "Health": [
    "Diet & Nutrition",
    "Fitness",
    "Relationships",
    "Self-Help",
    "Sexuality"],

  "International": [
    "Australian",
    "Belgian",
    "Brazilian",
    "Canadian",
    "Chinese",
    "Dutch",
    "French",
    "German",
    "Hebrew",
    "Italian",
    "Japanese",
    "Norwegian",
    "Polish",
    "Portuguese",
    "Spanish",
    "Swedish"],

  "Movies & Television": [],

  "Music": [],

  "News": [],

  "Politics": [],

  "Public Radio": [],

  "Religion & Spirituality": [
    "Buddhism",
    "Christianity",
    "Islam",
    "Judaism",
    "New Age",
    "Philosophy",
    "Spirituality"],

  "Science": [],

  "Sports": [],

  "Talk Radio": [],

  "Technology": [
    "Computers",
    "Developers",
    "Gadgets",
    "Information Technology",
    "News",
    "Operating Systems",
    "Podcasting",
    "Smart Phones",
    "Text/Speech"],

  "Transportation": [
    "Automotive",
    "Aviation",
    "Bicycles",
    "Commuting"],

  "Travel": []
  }
class yesnoclean(text):
  """Restricts an element's value to 'yes', 'no', or 'clean'
  (case-insensitive)."""
  def normalizeWhitespace(self):
    # override the inherited normalization with a no-op: the raw value is
    # reported verbatim, only the membership test below is case-folded
    pass
  def validate(self):
    if self.value.lower() not in ('yes', 'no', 'clean'):
      self.log(InvalidYesNoClean({"parent":self.parent.name, "element":self.name,"value":self.value}))
# | Python |
# Subsets and Splits
# SQL Console for ajibawa-2023/Python-Code-Large
# Provides a useful breakdown of language distribution in the training data,
# showing which languages have the most samples and helping identify potential
# imbalances across different language groups.