code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# http://msdn.microsoft.com/XML/rss/sle/default.aspx
from base import validatorBase
from validators import eater, text
class sort(validatorBase):
  """cf:sort element (Microsoft Simple List Extensions)."""
  def getExpectedAttrNames(self):
    # All attributes live in no namespace.
    return [(None, attr) for attr in
            (u'data-type', u'default', u'element', u'label', u'ns')]
class group(validatorBase):
  """cf:group element (Microsoft Simple List Extensions)."""
  def getExpectedAttrNames(self):
    # All attributes live in no namespace.
    return [(None, attr) for attr in (u'element', u'label', u'ns')]
class listinfo(validatorBase):
  """cf:listinfo container: delegates cf:sort and cf:group children."""
  def do_cf_group(self):
    return group()
  def do_cf_sort(self):
    return sort()
class treatAs(text): pass
| Python |
"""$Id: rss.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import noduplicates
#
# Rss element. The only valid child element is "channel"
#
class rss(validatorBase):
  """Validator for the top-level <rss> element.

  Sets the feed type, checks the version attribute against the known
  RSS 0.9x/2.0 family, and requires a single <channel> child.
  """
  def do_channel(self):
    from channel import rss20Channel
    return rss20Channel(), noduplicates()
  def do_access_restriction(self):
    from extension import access_restriction
    return access_restriction(), noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'version')]
  def prevalidate(self):
    self.setFeedType(TYPE_RSS2) # could be anything in the 0.9x family, don't really care
    # Default assumed version; overridden below when the attribute is present.
    self.version = "2.0"
    if (None,'version') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    elif [e for e in self.dispatcher.loggedEvents if e.__class__==ValidDoctype]:
      # A DOCTYPE was seen earlier: only RSS 0.91 legitimately carries one.
      self.version = self.attrs[(None,'version')]
      # fix: use '!=' instead of the deprecated Python 2-only '<>' operator
      if self.attrs[(None,'version')] != '0.91':
        self.log(InvalidDoctype({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    else:
      self.version = self.attrs[(None,'version')]
      if self.version not in ['0.91', '0.92', '2.0']:
        self.log(InvalidRSSVersion({"parent":self.parent.name, "element":self.name, "value":self.version}))
  def validate(self):
    # <channel> is the only mandatory child of <rss>.
    if not "channel" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"channel"}))
| Python |
"""$Id: textInput.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from validators import *
from extension import extension_everywhere
#
# textInput element.
#
class textInput(validatorBase, extension_everywhere):
  """RSS textInput element: title, description, name and link are required."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def validate(self):
    # Each missing required child logs its own event class.
    required = (("title", MissingTitle),
                ("link", MissingLink),
                ("description", MissingDescription),
                ("name", MissingElement))
    for child, missing in required:
      if child not in self.children:
        self.log(missing({"parent": self.name, "element": child}))
  # Child-element validators.
  def do_title(self):
    return nonhtml(), noduplicates()
  def do_description(self):
    return text(), noduplicates()
  def do_name(self):
    return formname(), noduplicates()
  def do_link(self):
    return rfc2396_full(), noduplicates()
  # Dublin Core extensions; dc:creator and dc:subject may repeat.
  def do_dc_creator(self):
    return text()
  def do_dc_subject(self):
    return text()
  def do_dc_date(self):
    return w3cdtf(), noduplicates()
| Python |
"""$Id: link.py 1060 2010-01-23 01:29:14Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1060 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# Atom link element
#
# fix: the original base list named nonblank twice, which raises
# "TypeError: duplicate base class" the moment the class is defined.
class link(nonblank,xmlbase,iso639,nonhtml,nonNegativeInteger,rfc3339):
  """Atom <link> element validator.

  Checks rel against the IANA link-relations registry, type as a MIME
  type, href as an IRI (and, for rel="self", against the document's
  known locations), plus the threading extension attributes
  (thr:count, thr:when, thr:updated).  The element itself must be empty.
  """
  validRelations = [
    # http://www.iana.org/assignments/link-relations.html
    'alternate', # RFC4287
    'current', # RFC5005
    'describedby', # http://www.w3.org/TR/powder-dr/#assoc-linking
    'edit', # RFC-ietf-atompub-protocol-17.txt
    'edit-media', # RFC-ietf-atompub-protocol-17.txt
    'enclosure', # RFC4287
    'first', # RFC5005
    'hub', # http://pubsubhubbub.googlecode.com/
    'last', # RFC5005
    'license', # RFC4946
    'next', # RFC5005
    'next-archive', # RFC5005
    'payment', # Kinberg
    'prev-archive', # RFC5005
    'previous', # RFC5005
    'related', # RFC4287
    'replies', # RFC4685
    'self', # RFC4287
    'service', # Snell
    'up', # Slater
    'via' # RFC4287
  ]
  # Relations defined by RFC 5005 (feed paging/archiving); only valid on feeds.
  rfc5005 = [
    'current', # RFC5005
    'first', # RFC5005
    'last', # RFC5005
    'next', # RFC5005
    'next-archive', # RFC5005
    'prev-archive', # RFC5005
    'previous', # RFC5005
  ]
  def getExpectedAttrNames(self):
    return [(None, u'type'), (None, u'title'), (None, u'rel'),
            (None, u'href'), (None, u'length'), (None, u'hreflang'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'type'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
            (u'http://purl.org/syndication/thread/1.0', u'count'),
            (u'http://purl.org/syndication/thread/1.0', u'when'),
            (u'http://purl.org/syndication/thread/1.0', u'updated')]
  def validate(self):
    # NOTE: self.name and self.value are reused as scratch space so the
    # mixin validators (nonblank, nonNegativeInteger, iso639, ...) report
    # against the attribute currently being checked.
    self.type = ""
    self.rel = "alternate"
    self.href = ""
    self.hreflang = ""
    self.title = ""
    if self.attrs.has_key((None, "rel")):
      self.value = self.rel = self.attrs.getValue((None, "rel"))
      # A full IANA registry URI is accepted as equivalent to its short name.
      if self.rel.startswith('http://www.iana.org/assignments/relation/'):
        self.rel=self.rel[len('http://www.iana.org/assignments/relation/'):]
      if self.rel in self.validRelations:
        self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      elif rfc2396_full.rfc2396_re.match(self.rel.encode('idna')):
        # Any absolute URI is also a legal (extension) relation.
        self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      else:
        self.log(UnregisteredAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "rel"})
    if self.rel in self.rfc5005 and self.parent.name == 'entry':
      self.log(FeedHistoryRelInEntry({"rel":self.rel}))
    if self.attrs.has_key((None, "type")):
      self.value = self.type = self.attrs.getValue((None, "type"))
      if not mime_re.match(self.type):
        self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
      elif self.rel == "self" and self.type not in ["application/atom+xml", "application/rss+xml", "application/rdf+xml"]:
        self.log(SelfNotAtom({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
      else:
        self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    if self.attrs.has_key((None, "title")):
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name, "attr":"title"}))
      self.value = self.title = self.attrs.getValue((None, "title"))
      nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "title"})
      nonhtml.validate(self)
    if self.attrs.has_key((None, "length")):
      self.name = 'length'
      self.value = self.attrs.getValue((None, "length"))
      nonNegativeInteger.validate(self)
      nonblank.validate(self)
    if self.attrs.has_key((None, "hreflang")):
      self.name = 'hreflang'
      self.value = self.hreflang = self.attrs.getValue((None, "hreflang"))
      iso639.validate(self)
    if self.attrs.has_key((None, "href")):
      self.name = 'href'
      self.value = self.href = self.attrs.getValue((None, "href"))
      xmlbase.validate(self, extraParams={"attr": "href"})
      if self.rel == "self" and self.parent.name in ["feed","channel"]:
        # detect relative self values: walk up the element tree looking for
        # any xml:base that would make this href absolute
        from urlparse import urlparse
        from xml.dom import XML_NAMESPACE
        absolute = urlparse(self.href)[1]
        element = self
        while not absolute and element and hasattr(element,'attrs'):
          pattrs = element.attrs
          if pattrs and pattrs.has_key((XML_NAMESPACE, u'base')):
            absolute=urlparse(pattrs.getValue((XML_NAMESPACE, u'base')))[1]
          element = element.parent
        if not absolute:
          self.log(RelativeSelf({"value":self.href}))
        from urlparse import urljoin
        if urljoin(self.xmlBase,self.value) not in self.dispatcher.selfURIs:
          # Only complain if the self link differs from the document
          # location (ignoring fragments).
          if urljoin(self.xmlBase,self.value).split('#')[0] != self.xmlBase.split('#')[0]:
            from uri import Uri
            if self.value.startswith('http://feeds.feedburner.com/'):
              if self.value.endswith('?format=xml'):
                self.value = self.value.split('?')[0]
            value = Uri(self.value)
            # for/else: logs only when no known document base matched.
            for docbase in self.dispatcher.selfURIs:
              if value == Uri(docbase): break
              # don't complain when validating feedburner's xml view
              if docbase.startswith('http://feeds.feedburner.com/'):
                if docbase.endswith('?format=xml'):
                  if value == Uri(docbase.split('?')[0]): break
            else:
              self.log(SelfDoesntMatchLocation({"parent":self.parent.name, "element":self.name}))
        self.dispatcher.selfURIs.append(urljoin(self.xmlBase,self.value))
    else:
      self.log(MissingHref({"parent":self.parent.name, "element":self.name, "attr":"href"}))
    # Threading extension (RFC 4685) attributes are only meaningful on
    # rel="replies" links (thr:when was dropped from the final spec).
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'count')):
      if self.rel != "replies":
        self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:count"}))
      self.value = self.attrs.getValue((u'http://purl.org/syndication/thread/1.0', u'count'))
      self.name="thr:count"
      nonNegativeInteger.validate(self)
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'when')):
      self.log(NoThrWhen({"parent":self.parent.name, "element":self.name, "attribute":"thr:when"}))
    if self.attrs.has_key((u'http://purl.org/syndication/thread/1.0', u'updated')):
      if self.rel != "replies":
        self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:updated"}))
      self.value = self.attrs.getValue((u'http://purl.org/syndication/thread/1.0', u'updated'))
      self.name="thr:updated"
      rfc3339.validate(self)
  def startElementNS(self, name, qname, attrs):
    # Child elements are not expected; consume them silently.
    self.push(eater(), name, attrs)
  def characters(self, text):
    # atom:link must be empty.
    if text.strip():
      self.log(AtomLinkNotEmpty({"parent":self.parent.name, "element":self.name}))
| Python |
"""$Id: channel.py 1033 2008-11-18 11:35:34Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1033 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import *
from itunes import itunes_channel
from extension import *
#
# channel element.
#
class channel(validatorBase, rfc2396, extension_channel, itunes_channel):
  """Validator for an RSS <channel> element.

  Records title/link/docs values and atom:link children so validate()
  can cross-check them (e.g. RSS 2.0 feeds should carry an atom
  self link).
  """
  def getExpectedAttrNames(self):
    return [(u'urn:atom-extension:indexing', u'index')]
  def prevalidate(self):
    self.validate_optional_attribute((u'urn:atom-extension:indexing', u'index'), yesno)
  def __init__(self):
    # These are filled in by the child-element validators defined below
    # (link, title, docs) and by do_atom_link.
    self.link=None
    self.docs=''
    self.links = []
    self.title=None
    validatorBase.__init__(self)
  def validate(self):
    # Required children.
    if not "description" in self.children:
      self.log(MissingDescription({"parent":self.name,"element":"description"}))
    if not "link" in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
    if not "title" in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    # A language should come from <language>, dc:language, or xml:lang.
    if not "dc_language" in self.children and not "language" in self.children:
      if not self.xmlLang:
        self.log(MissingDCLanguage({"parent":self.name, "element":"language"}))
    # Children that may appear at most once.
    if self.children.count("image") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"image"}))
    if self.children.count("textInput") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
    if self.children.count("skipHours") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
    if self.children.count("skipDays") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
    # RSS 1.0 channels carry an rdf:about URI.
    if self.attrs.has_key((rdfNS,"about")):
      self.value = self.attrs.getValue((rdfNS, "about"))
      rfc2396.validate(self, extraParams={"attr": "rdf:about"})
      if not "items" in self.children:
        self.log(MissingElement({"parent":self.name, "element":"items"}))
    if self.parent.name == 'rss' and self.parent.version == '2.0':
      # for/else: logged only when no atom:link rel="self" was found.
      for link in self.links:
        if link.rel=='self': break
      else:
        self.log(MissingAtomSelfLink({}))
    if self.itunes: itunes_channel.validate(self)
    # don't warn about use of extension attributes for rss-board compliant feeds
    if self.docs == 'http://www.rssboard.org/rss-specification':
      self.dispatcher.loggedEvents = [event for
        event in self.dispatcher.loggedEvents
        if not isinstance(event,UseOfExtensionAttr)]
  def metadata(self):
    # Hook for subclasses (see rss20Channel): called whenever a channel
    # metadata element is encountered.  No-op here.
    pass
  def do_image(self):
    self.metadata()
    from image import image
    return image(), noduplicates()
  def do_textInput(self):
    self.metadata()
    from textInput import textInput
    return textInput(), noduplicates()
  def do_textinput(self):
    self.metadata()
    if not self.attrs.has_key((rdfNS,"about")):
      # optimize for RSS 2.0.  If it is not valid RDF, assume that it is
      # a simple misspelling (in other words, the error message will be
      # less than helpful on RSS 1.0 feeds.
      self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
    return eater(), noduplicates()
  def do_link(self):
    self.metadata()
    return link(), noduplicates()
  def do_title(self):
    self.metadata()
    return title(), noduplicates(), nonblank()
  def do_description(self):
    self.metadata()
    return nonhtml(), noduplicates()
  def do_blink(self):
    return blink(), noduplicates()
  # Atom elements embedded in an RSS channel.
  def do_atom_author(self):
    from author import author
    return author()
  def do_atom_category(self):
    from category import category
    return category()
  def do_atom_contributor(self):
    from author import author
    return author()
  def do_atom_generator(self):
    from generator import generator
    return generator(), nonblank(), noduplicates()
  def do_atom_id(self):
    return rfc2396_full(), noduplicates()
  def do_atom_icon(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_link(self):
    self.metadata()
    from link import link
    # Keep a reference so validate() can look for rel="self".
    self.links.append(link())
    return self.links[-1]
  def do_atom_logo(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_subtitle(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_updated(self):
    return rfc3339(), noduplicates()
  # Dublin Core elements that duplicate core RSS semantics are allowed,
  # but a warning is logged when both forms appear.
  def do_dc_creator(self):
    if "managingEditor" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return text() # duplicates allowed
  def do_dc_subject(self):
    if "category" in self.children:
      self.log(DuplicateSemantics({"core":"category", "ext":"dc:subject"}))
    return text() # duplicates allowed
  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return w3cdtf(), noduplicates()
  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()
  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396_full()
class rss20Channel(channel):
  """RSS 2.0 channel: adds version-specific elements and warns when
  <item>s appear before channel metadata."""
  def __init__(self):
    # (line, column) positions of <item> elements seen so far.
    self.itemlocs=[]
    channel.__init__(self)
  def metadata(self):
    # Any <item> recorded before this metadata element is misplaced;
    # log each with an offset relative to the current parser position.
    locator=self.dispatcher.locator
    for line,col in self.itemlocs:
      offset=(line - locator.getLineNumber(), col - locator.getColumnNumber())
      self.log(MisplacedItem({"parent":self.name, "element":"item"}), offset)
    self.itemlocs = []
  def do_textInput(self):
    # textInput is legal but discouraged in RSS 2.0.
    self.log(AvoidTextInput({}))
    return channel.do_textInput(self)
  def do_item(self):
    # Remember where the item started, for metadata() above.
    locator=self.dispatcher.locator
    self.itemlocs.append((locator.getLineNumber(), locator.getColumnNumber()))
    from item import rss20Item
    return rss20Item()
  def do_category(self):
    self.metadata()
    return category()
  def do_cloud(self):
    self.metadata()
    return cloud(), noduplicates()
  do_rating = validatorBase.leaf # TODO test cases?!?
  def do_ttl(self):
    self.metadata()
    return positiveInteger(), nonblank(), noduplicates()
  def do_docs(self):
    self.metadata()
    return docs(), noduplicates()
  def do_generator(self):
    self.metadata()
    if "admin_generatorAgent" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return text(), noduplicates()
  # Core elements with Dublin Core equivalents: warn when both appear.
  def do_pubDate(self):
    self.metadata()
    if "dc_date" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return rfc822(), noduplicates()
  def do_managingEditor(self):
    self.metadata()
    if "dc_creator" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return email_with_name(), noduplicates()
  def do_webMaster(self):
    self.metadata()
    if "dc_publisher" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return email_with_name(), noduplicates()
  def do_language(self):
    self.metadata()
    if "dc_language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_copyright(self):
    self.metadata()
    if "dc_rights" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return nonhtml(), noduplicates()
  def do_lastBuildDate(self):
    self.metadata()
    if "dcterms_modified" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return rfc822(), noduplicates()
  def do_skipHours(self):
    self.metadata()
    from skipHours import skipHours
    return skipHours()
  def do_skipDays(self):
    self.metadata()
    from skipDays import skipDays
    return skipDays()
class rss10Channel(channel):
  """RSS 1.0 channel: handles rdf:about and the <items> table of contents."""
  def getExpectedAttrNames(self):
    # fix: the original listed the rdf:about tuple twice.
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def prevalidate(self):
    if self.attrs.has_key((rdfNS,"about")):
      # Collect every rdf:about seen so other validators can cross-check
      # rdf:resource references against them.
      self.dispatcher.__dict__.setdefault("abouts", []).append(
          self.attrs[(rdfNS,"about")])
  def do_items(self): # this actually should be from the rss1.0 ns
    if not self.attrs.has_key((rdfNS,"about")):
      self.log(MissingAttribute({"parent":self.name, "element":self.name, "attr":"rdf:about"}))
    from item import items
    return items(), noduplicates()
  def do_rdfs_label(self):
    return text()
  def do_rdfs_comment(self):
    return text()
class link(rfc2396_full):
  """Channel <link>: record the value on the parent, then URI-check it."""
  def validate(self):
    # Exposed for cross-checks elsewhere (e.g. image link matching).
    self.parent.link = self.value
    rfc2396_full.validate(self)
class title(nonhtml):
  """Channel <title>: record the value on the parent, then check for markup."""
  def validate(self):
    # Exposed for cross-checks elsewhere (e.g. image title matching).
    self.parent.title = self.value
    nonhtml.validate(self)
class docs(rfc2396_full):
  """Channel <docs>: record the value on the parent, then URI-check it."""
  def validate(self):
    # channel.validate() uses this to detect rss-board compliant feeds.
    self.parent.docs = self.value
    rfc2396_full.validate(self)
class blink(text):
  """<blink> is never acceptable: unconditionally log NoBlink."""
  def validate(self):
    self.log(NoBlink({}))
class category(nonhtml):
  """RSS channel <category>: non-HTML text with an optional domain attribute."""
  def getExpectedAttrNames(self):
    return [(None, u'domain')]
class cloud(validatorBase):
  """RSS <cloud> element: domain, port, path, registerProcedure and
  protocol attributes are all required; port must be a positive integer."""
  def getExpectedAttrNames(self):
    return [(None, u'domain'), (None, u'path'), (None, u'registerProcedure'),
            (None, u'protocol'), (None, u'port')]
  def prevalidate(self):
    present = self.attrs.getNames()
    # domain: presence only.
    if (None, 'domain') not in present:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
    # port: must parse as a positive integer (KeyError = absent).
    try:
      if int(self.attrs.getValue((None, 'port'))) <= 0:
        self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except ValueError:
      self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    # Remaining attributes: presence only.
    for attr in ('path', 'registerProcedure', 'protocol'):
      if (None, attr) not in present:
        self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":attr}))
    ## TODO - is there a list of accepted protocols for this thing?
    return validatorBase.prevalidate(self)
| Python |
"""
$Id: mediaTypes.py 988 2008-03-12 18:22:48Z sa3ruby $
This module deals with valid internet media types for feeds.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
from cgi import parse_header
from logging import *
# Media types that may legitimately carry a feed or a related XML document.
FEED_TYPES = [
  'text/xml', 'application/xml', 'application/rss+xml', 'application/rdf+xml',
  'application/atom+xml', 'text/x-opml', 'application/xrds+xml',
  'application/opensearchdescription+xml', 'application/vnd.google-earth.kml+xml', 'application/vnd.google-earth.kmz',
  'application/atomsvc+xml', 'application/atomcat+xml',
]
# Is the Content-Type correct?
def checkValid(contentType, loggedEvents):
  """Parse a Content-Type header value; log an event when the media type
  is not a recognised feed type.

  Returns a (mediaType, charset) tuple; charset is None when absent."""
  mediaType, params = parse_header(contentType)
  if mediaType.lower() not in FEED_TYPES:
    loggedEvents.append(UnexpectedContentType({"type": "Feeds", "contentType": mediaType}))
  return (mediaType, params.get('charset'))
# Warn about mismatches between media type and feed version
def checkAgainstFeedType(mediaType, feedType, loggedEvents):
  """Warn when the served media type does not match the detected feed type."""
  mtl = mediaType.lower()
  # Specific media type -> (acceptable feed types, mismatch description).
  expectations = {
    'application/x.atom+xml': ([TYPE_ATOM, TYPE_ATOM_ENTRY], 'Non-Atom 1.0 feeds'),
    'application/atom+xml':   ([TYPE_ATOM, TYPE_ATOM_ENTRY], 'Non-Atom 1.0 feeds'),
    'application/atomcat+xml': ([TYPE_APP_CATEGORIES], 'Non-AtomPub Category document'),
    'application/atomsvc+xml': ([TYPE_APP_SERVICE], 'Non-AtomPub Service document'),
    'application/rdf+xml': ([TYPE_RSS1], 'Non-RSS 1.0 feeds'),
    'application/rss+xml': ([TYPE_RSS1, TYPE_RSS2], 'Non-RSS feeds'),
    'text/x-opml': ([TYPE_OPML], 'Non-OPML feeds'),
    'application/opensearchdescription+xml':
        ([TYPE_OPENSEARCH], 'Non-OpenSearchDescription documents'),
    'application/xrds+xml':
        ([TYPE_XRD], 'Non-Extensible Resource Descriptor documents'),
    'application/vnd.google-earth.kml+xml':
        ([TYPE_KML20, TYPE_KML21, TYPE_KML22], 'Non-KML documents'),
  }
  if mtl in expectations:
    allowed, label = expectations[mtl]
    if feedType not in allowed:
      loggedEvents.append(UnexpectedContentType({"type": label, "contentType": mediaType}))
  elif mtl == 'application/earthviewer':
    # Legacy Google Earth type: always wrong for feeds/KML.
    loggedEvents.append(InvalidKmlMediaType({"type": 'Non-KML documents', "contentType": mediaType}))
# warn if a non-specific media type is used without a 'marker'
def contentSniffing(mediaType, rawdata, loggedEvents):
  """Warn if a non-specific media type is used without a recognisable
  'marker' (root element) in the first 512 bytes of the document."""
  if mediaType not in FEED_TYPES: return
  # These types already identify the format; nothing to sniff.
  if mediaType in ('application/atom+xml', 'application/atomcat+xml',
                   'application/atomsvc+xml', 'application/rss+xml',
                   'text/x-opml', 'application/opensearchdescription+xml',
                   'application/xrds+xml',
                   'application/vnd.google-earth.kml+xml'):
    return
  block = rawdata[:512]
  if block.find('<rss') >= 0: return
  if block.find('<feed') >= 0: return
  if block.find('<opml') >= 0: return
  if block.find('<kml') >= 0: return
  if block.find('<OpenSearchDescription') >= 0: return
  # fix: the last find() lacked the '>= 0' comparison, so a missing RSS 1.0
  # namespace (find() == -1, which is truthy) was treated as a match while a
  # match at offset 0 (falsy) was not.
  if (block.find('<rdf:RDF') >= 0 and
      block.find('http://www.w3.org/1999/02/22-rdf-syntax-ns#') >= 0 and
      block.find('http://purl.org/rss/1.0/') >= 0): return
  from logging import NonSpecificMediaType
  loggedEvents.append(NonSpecificMediaType({"contentType": mediaType}))
| Python |
"""$Id: category.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# author element.
#
class category(validatorBase):
  """Atom category element: term is required; scheme and label are optional."""
  def getExpectedAttrNames(self):
    return [(None, attr) for attr in (u'term', u'scheme', u'label')]
  def prevalidate(self):
    # The element must be empty; a sentinel child triggers mixed-content warnings.
    self.children.append(True) # force warnings about "mixed" content
    self.validate_required_attribute((None,'term'), nonblank)
    self.validate_optional_attribute((None,'scheme'), rfc3987_full)
    self.validate_optional_attribute((None,'label'), nonhtml)
| Python |
from validators import *
from logging import *
import re
class OpenSearchDescription(validatorBase):
  """Root element of an OpenSearch description document."""
  def __init__(self):
    # Flipped to 1 by a child Query carrying role="example".
    self.exampleFound = 0
    validatorBase.__init__(self)
  def validate(self):
    name = self.name.replace("opensearch_", '')
    # ShortName, Description and Url are mandatory children.
    for required in ("ShortName", "Description", "Url"):
      if required not in self.children:
        self.log(MissingElement({"parent": name, "element": required}))
    if not self.exampleFound:
      self.log(ShouldIncludeExample({}))
  # Required children.
  def do_ShortName(self):
    return lengthLimitedText(16), noduplicates()
  def do_Description(self):
    return lengthLimitedText(1024), noduplicates()
  def do_Url(self):
    return Url()
  # Optional children.
  def do_Contact(self):
    return addr_spec(), noduplicates()
  def do_Tags(self):
    return lengthLimitedText(256), noduplicates()
  def do_LongName(self):
    return lengthLimitedText(48), noduplicates()
  def do_Image(self):
    return Image()
  def do_Query(self):
    return Query()
  def do_Developer(self):
    return lengthLimitedText(64), noduplicates()
  def do_Attribution(self):
    return lengthLimitedText(256), noduplicates()
  def do_SyndicationRight(self):
    return SyndicationRight(), noduplicates()
  def do_AdultContent(self):
    return AdultContent(), noduplicates()
  def do_Language(self):
    return Language()
  def do_InputEncoding(self):
    return Charset()
  def do_OutputEncoding(self):
    return Charset()
class Url(validatorBase):
  """opensearch:Url — template and type are required attributes."""
  def getExpectedAttrNames(self):
    names = ('template', 'type', 'indexOffset', 'pageOffset')
    return [(None, attr) for attr in names]
  def prevalidate(self):
    self.validate_required_attribute((None,'template'), Template())
    self.validate_required_attribute((None,'type'), MimeType)
    self.validate_optional_attribute((None,'indexOffset'), Integer)
    self.validate_optional_attribute((None,'pageOffset'), Integer)
class Template(rfc2396_full):
  """opensearch URL template: validates {parameter} substitutions, then
  checks the remaining text as a URI."""
  # Matches one template parameter, optionally prefixed ("prefix:name"),
  # optionally marked optional with a trailing '?'.
  tparam = re.compile("{((?:[-a-zA-Z0-9._~]|%[a-fA-F0-9]{2})+:?(?:[-a-zA-Z0-9._~]|%[a-fA-F0-9]{2})*)\??}")
  # Parameter names defined by the OpenSearch core.
  valuelist = ['searchTerms', 'count', 'startIndex', 'startPage', 'language',
               'inputEncoding', 'outputEncoding']
  def validate(self):
    for pname in self.tparam.findall(self.value):
      if ':' in pname:
        # Prefixed parameter: its namespace prefix must be declared.
        prefix = pname.split(':', 1)[0]
        if not self.parent.namespaceFor(prefix):
          self.log(UndeclaredPrefix({'value': prefix}))
      elif pname not in self.valuelist:
        self.log(InvalidLocalParameter({'value': pname}))
    # Strip the braces so the template also passes URI validation.
    self.value = self.tparam.sub(r'\1', self.value)
    rfc2396_full.validate(self)
class Image(rfc2396_full):
  """opensearch:Image — URI content with required height/width/type attributes."""
  def getExpectedAttrNames(self):
    return [(None, 'height'), (None, 'width'), (None, 'type')]
  def prevalidate(self):
    self.validate_required_attribute((None,'height'), nonNegativeInteger)
    self.validate_required_attribute((None,'width'), nonNegativeInteger)
    self.validate_required_attribute((None,'type'), MimeType)
class Query(validatorBase):
  """opensearch:Query — role is required; everything else is optional."""
  def getExpectedAttrNames(self):
    return [(None, attr) for attr in ('role', 'title', 'totalResults',
        'searchTerms', 'count', 'startIndex', 'startPage', 'language',
        'inputEncoding', 'outputEncoding', 'parameter')]
  def prevalidate(self):
    self.validate_required_attribute((None,'role'), QueryRole)
    # (attribute, validator) pairs, in check order; title is validated
    # twice (length limit, then markup check).
    optional = (('title', lengthLimitedText(256)),
                ('title', nonhtml),
                ('totalResults', nonNegativeInteger),
                ('searchTerms', UrlEncoded),
                ('count', nonNegativeInteger),
                ('startIndex', Integer),
                ('startPage', Integer),
                ('language', iso639),
                ('inputEncoding', Charset),
                ('outputEncoding', Charset))
    for attr, rule in optional:
      self.validate_optional_attribute((None, attr), rule)
    # Record example queries on the parent description document.
    if self.attrs.has_key((None,"role")) and \
       self.attrs.getValue((None,"role")) == "example":
      self.parent.exampleFound = 1
class QueryRole(enumeration):
  """Query role attribute: either a core role name or a prefixed extension."""
  error = InvalidLocalRole
  valuelist = ['request', 'example', 'related', 'correction', 'subset',
               'superset']
  def validate(self):
    if ':' in self.value:
      # Extension role: only the namespace prefix needs to be declared.
      prefix = self.value.split(':', 1)[0]
      if not self.parent.namespaceFor(prefix):
        self.log(UndeclaredPrefix({'value': prefix}))
    else:
      enumeration.validate(self)
class UrlEncoded(validatorBase):
  # Verifies each whitespace-separated token is already URL-encoded:
  # quote(unquote(v)) must round-trip back to the original value.
  def validate(self):
    from urllib import quote, unquote
    import re
    for value in self.value.split():
      # Python 2: encode unicode to UTF-8 bytes before quoting.
      if type(value) == unicode: value = value.encode('utf-8')
      # Upper-case percent escapes so the round-trip comparison is
      # insensitive to hex-digit case.
      value = re.sub('%\w\w', lambda x: x.group(0).upper(), value)
      if value != quote(unquote(value)):
        self.log(NotURLEncoded({}))
        break
class SyndicationRight(enumeration):
  """SyndicationRight enumeration, compared case-insensitively."""
  error = InvalidSyndicationRight
  valuelist = ['open','limited','private','closed']
  def validate(self):
    # Lower-case before the standard enumeration membership check.
    self.value = self.value.lower()
    enumeration.validate(self)
class AdultContent(enumeration):
  # Boolean-ish flag; only these literal spellings are accepted.
  error = InvalidAdultContent
  valuelist = ['false', 'FALSE', '0', 'no', 'NO',
               'true', 'TRUE', '1', 'yes', 'YES']
class Language(iso639):
  """Language tag; '*' is a wildcard that skips ISO 639 validation."""
  def validate(self):
    if self.value == '*':
      return
    iso639.validate(self)
| Python |
#!/usr/bin/python
"""
$Id$
This module deals with detecting XML encodings, using both BOMs and
explicit declarations.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import codecs
import re
from logging import ObscureEncoding, NonstdEncoding
import logging
class FailingCodec:
  """Stand-in for a missing codec: usable wherever a decoder is expected,
  but any attempt to decode raises UnicodeError naming the encoding."""
  def __init__(self, name):
    # Encoding name, reported in the failure message.
    self.name = name
  def fail(self, txt, errors='strict'):
    raise UnicodeError('No codec available for %s in this installation of FeedValidator'
                       % self.name)
# Don't die if the codec can't be found, but return
# a decoder that will fail on use
def getdecoder(codec):
  """Return a decoder function for *codec*; when the codec is unavailable,
  return a callable that raises UnicodeError on first use instead of
  failing here."""
  try:
    return codecs.getdecoder(codec)
  except LookupError:
    # fix: was a bare 'except:'. codecs.getdecoder signals an unknown
    # encoding with LookupError; anything else is a real bug (or
    # KeyboardInterrupt/SystemExit) that should not be masked.
    return FailingCodec(codec).fail
# These are generic decoders that are only used
# to decode the XML declaration, from which we can read
# the real encoding
# One decoder per candidate transfer encoding; each reads just far enough
# to expose the XML declaration text.
_decUTF32BE = getdecoder('UTF-32BE')
_decUTF32LE = getdecoder('UTF-32LE')
_decUTF16BE = getdecoder('UTF-16BE')
_decUTF16LE = getdecoder('UTF-16LE')
_decEBCDIC = getdecoder('IBM037') # EBCDIC
_decACE = getdecoder('ISO-8859-1') # An ASCII-compatible encoding
# Given a character index into a string, calculate its 1-based row and column
def _position(txt, idx):
row = txt.count('\n', 0, idx) + 1
ln = txt.rfind('\n', 0, idx) + 1
column = 0
for c in txt[ln:idx]:
if c == '\t':
column = (column // 8 + 1) * 8
else:
column += 1
column += 1
return (row, column)
def _normaliseNewlines(txt):
return txt.replace('\r\n', '\n').replace('\r', '\n')
def _logEvent(loggedEvents, e, pos=None):
if pos:
e.params['line'], e.params['column'] = pos
loggedEvents.append(e)
# Return the encoding from the declaration, or 'None'
# Return None if the 'permitted' list is passed in and the encoding
# isn't found in it. This is so that, e.g., a 4-byte-character XML file
# that claims to be US-ASCII will fail now.
def _decodeDeclaration(sig, dec, permitted, loggedEvents):
  # sig: raw bytes after any BOM; dec: decoder for the BOM-implied family;
  # permitted: upper-case encoding names compatible with that family.
  sig = _normaliseNewlines(dec(sig)[0])
  eo = _encodingFromDecl(sig)
  if not(eo):
    # For these encodings a declaration is mandatory (XML 1.0, 4.3.3).
    _logEvent(loggedEvents,
      logging.UnicodeError({'exception': 'This XML file (apparently ' + permitted[0] + ') requires an encoding declaration'}), (1, 1))
  elif permitted and not(eo[0].upper() in permitted):
    if _hasCodec(eo[0]):
      # see if the codec is an alias of one of the permitted encodings
      codec=codecs.lookup(eo[0])
      for encoding in permitted:
        if _hasCodec(encoding) and codecs.lookup(encoding)[-1]==codec[-1]: break
      else:
        # for/else: no permitted alias matched, so the declaration
        # contradicts the byte pattern the document actually uses.
        _logEvent(loggedEvents,
          logging.UnicodeError({'exception': 'This XML file claims an encoding of ' + eo[0] + ', but looks more like ' + permitted[0]}), eo[1])
    # NOTE(review): when the declared codec is unknown (_hasCodec false),
    # nothing is logged here -- presumably reported later at decode time;
    # verify against decode()'s UnknownEncoding handling.
  return eo
# Return the encoding from the declaration, or 'fallback' if none is
# present. Return None if the 'permitted' list is passed in and
# the encoding isn't found in it
def _decodePostBOMDeclaration(sig, dec, permitted, loggedEvents, fallback=None):
  """Reconcile a BOM with any XML declaration that follows it."""
  decl = _encodingFromDecl(_normaliseNewlines(dec(sig)[0]))
  if decl is None:
    # No declaration at all: the BOM alone determines the encoding.
    return (fallback, None)
  if decl[0].upper() not in permitted:
    _logEvent(loggedEvents,
      logging.UnicodeError({'exception': 'Document starts with ' + permitted[0] + ' BOM marker but has incompatible declaration of ' + decl[0]}), decl[1])
    return None
  return decl
def isStandard(x):
  """ Is this encoding required by the XML 1.0 Specification, 4.3.3? """
  name = x.upper()
  return name == 'UTF-8' or name == 'UTF-16'
def isCommon(x):
  """Is this encoding commonly used, according to
  <http://www.syndic8.com/stats.php?Section=feeds#XMLEncodings>
  (as of 2004-03-27)?"""
  if isStandard(x):
    return True
  common = [
    'US-ASCII', 'ISO-8859-1', 'EUC-JP', 'ISO-8859-2', 'ISO-8859-15',
    'ISO-8859-7', 'KOI8-R', 'SHIFT_JIS', 'WINDOWS-1250', 'WINDOWS-1251',
    'WINDOWS-1252', 'WINDOWS-1254', 'WINDOWS-1255', 'WINDOWS-1256',
    # This doesn't seem to be popular, but is the Chinese
    # government's mandatory standard
    'GB18030',
  ]
  return x.upper() in common
# Inspired by xmlproc's autodetect_encoding, but rewritten
def _detect(doc_start, loggedEvents=[], fallback='UTF-8'):
"""This is the logic from appendix F.1 of the XML 1.0 specification.
Pass in the start of a document (>= 256 octets), and receive the encoding to
use, or None if there is a problem with the document."""
sig = doc_start[:4]
# With a BOM. We also check for a declaration, and make sure
# it doesn't contradict (for 4-byte encodings, it's required)
if sig == '\x00\x00\xFE\xFF': # UTF-32 BE
eo = _decodeDeclaration(doc_start[4:], _decUTF32BE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\xFF\xFE\x00\x00': # UTF-32 LE
eo = _decodeDeclaration(doc_start[4:], _decUTF32LE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x00\xFF\xFE' or sig == '\xFE\xFF\x00\x00':
raise UnicodeError('Unable to process UCS-4 with unusual octet ordering')
elif sig[:2] == '\xFE\xFF': # UTF-16 BE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16BE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:2] == '\xFF\xFE': # UTF-16 LE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16LE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:3] == '\xEF\xBB\xBF':
eo = _decodePostBOMDeclaration(doc_start[3:], _decACE, ['UTF-8'], loggedEvents, fallback='UTF-8')
# Without a BOM; we must read the declaration
elif sig == '\x00\x00\x00\x3C':
eo = _decodeDeclaration(doc_start, _decUTF32BE, ['UTF-32BE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x3C\x00\x00\x00':
eo = _decodeDeclaration(doc_start, _decUTF32LE, ['UTF-32LE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x3C\x00\x3F':
eo = _decodeDeclaration(doc_start, _decUTF16BE, ['UTF-16BE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x00\x3F\x00':
eo = _decodeDeclaration(doc_start, _decUTF16LE, ['UTF-16LE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x3F\x78\x6D':
eo = _encodingFromDecl(_normaliseNewlines(_decACE(doc_start)[0])) or ('UTF-8', None)
elif sig == '\x4C\x6F\xA7\x94':
eo = _decodeDeclaration(doc_start, _decEBCDIC, ['IBM037', 'CP037', 'IBM038', 'EBCDIC-INT'], loggedEvents)
# There's no BOM, and no declaration. It's UTF-8, or mislabelled.
else:
eo = (fallback, None)
return eo
def detect(doc_start, loggedEvents=None, fallback='UTF-8'):
  """Return just the detected encoding name for *doc_start*, or None.

  Thin wrapper over _detect(); see there for the detection rules.
  loggedEvents now defaults to a fresh list per call (the previous
  mutable default argument leaked events across calls).
  """
  if loggedEvents is None:
    loggedEvents = []
  eo = _detect(doc_start, loggedEvents, fallback)
  if eo:
    return eo[0]
  else:
    return None
_encRe = re.compile(r'<\?xml\s+version\s*=\s*(?:"[-a-zA-Z0-9_.:]+"|\'[-a-zA-Z0-9_.:]+\')\s+(encoding\s*=\s*(?:"([-A-Za-z0-9._]+)"|\'([-A-Za-z0-9._]+)\'))')
def _encodingFromDecl(x):
m = _encRe.match(x)
if m:
if m.group(2):
return m.group(2), _position(x, m.start(2))
else:
return m.group(3), _position(x, m.start(3))
else:
return None
def removeDeclaration(x):
"""Replace an XML document string's encoding declaration with the
same number of spaces. Some XML parsers don't allow the
encoding to be overridden, and this is a workaround."""
m = _encRe.match(x)
if m:
s = m.start(1)
e = m.end(1)
res = x[:s] + ' ' * (e - s) + x[e:]
else:
res = x
return res
def _hasCodec(enc):
try:
return codecs.lookup(enc) is not None
except:
return False
def decode(mediaType, charset, bs, loggedEvents, fallback=None):
  # Decode byte string bs to Unicode, reconciling three encoding sources:
  # the transport charset (HTTP header), the in-document detection result,
  # and the caller-supplied fallback.  Returns (encodingName, unicodeText);
  # either element may be None when decoding is impossible.
  eo = _detect(bs, loggedEvents, fallback=None)
  # Check declared encodings
  if eo and eo[1] and _hasCodec(eo[0]):
    # eo[1] is a declaration position, so this only fires for an explicit
    # in-document declaration (not a BOM-only or fallback result).
    if not(isCommon(eo[0])):
      _logEvent(loggedEvents, ObscureEncoding({"encoding": eo[0]}), eo[1])
    elif not(isStandard(eo[0])):
      _logEvent(loggedEvents, NonstdEncoding({"encoding": eo[0]}), eo[1])
  if eo:
    encoding = eo[0]
  else:
    encoding = None
  if charset and encoding and charset.lower() != encoding.lower():
    # RFC 3023 requires us to use 'charset', but a number of aggregators
    # ignore this recommendation, so we should warn.
    loggedEvents.append(logging.EncodingMismatch({"charset": charset, "encoding": encoding}))
  if mediaType and mediaType.startswith("text/") and charset is None:
    loggedEvents.append(logging.TextXml({}))
    # RFC 3023 requires text/* to default to US-ASCII. Issue a warning
    # if this occurs, but continue validation using the detected encoding
    try:
      bs.decode("US-ASCII")
    except:
      if not encoding:
        # Detection failed outright; see if the fallback at least decodes.
        try:
          bs.decode(fallback)
          encoding=fallback
        except:
          pass
      if encoding and encoding.lower() != 'us-ascii':
        loggedEvents.append(logging.EncodingMismatch({"charset": "US-ASCII", "encoding": encoding}))
  # The transport-level charset wins when both are present (RFC 3023).
  enc = charset or encoding
  if enc is None:
    loggedEvents.append(logging.MissingEncoding({}))
    enc = fallback
  elif not(_hasCodec(enc)):
    if eo:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}), eo[1])
    else:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}))
    enc = fallback
  if enc is None:
    return enc, None
  dec = getdecoder(enc)
  try:
    return enc, dec(bs)[0]
  except UnicodeError, ue:
    # Decode failed: salvage what we can with replacement characters so
    # validation can continue, and report where the first bad byte was.
    salvage = dec(bs, 'replace')[0]
    if 'start' in ue.__dict__:
      # XXX 'start' is in bytes, not characters. This is wrong for multibyte
      # encodings
      pos = _position(salvage, ue.start)
    else:
      pos = None
    _logEvent(loggedEvents, logging.UnicodeError({"exception":ue}), pos)
    return enc, salvage
# Module-level UTF-8 encoder, looked up once.
_encUTF8 = codecs.getencoder('UTF-8')
def asUTF8(x):
  """Accept a Unicode string and return a UTF-8 encoded string, with
  its encoding declaration removed, suitable for parsing."""
  # unicode() is a no-op for unicode input (Python 2); the declaration is
  # blanked out so downstream parsers honour the encoding we chose.
  x = removeDeclaration(unicode(x))
  return _encUTF8(x)[0]
if __name__ == '__main__':
  # CLI smoke test: print the detected encoding of each file argument,
  # or dump the logged events when detection fails.
  from sys import argv
  from os.path import isfile
  for x in argv[1:]:
    if isfile(x):
      f = open(x, 'r')
      # 1024 bytes is comfortably more than the >=256 _detect() asks for.
      l = f.read(1024)
      log = []
      eo = detect(l, log)
      if eo:
        print x,eo
      else:
        print repr(log)
| Python |
"""$Id: generator.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# Atom generator element
#
class generator(nonhtml,rfc2396):
  # Validates atom:generator: human-readable text content plus optional
  # uri/version attributes.  'url' is the pre-1.0 draft attribute name
  # and is still checked for legacy feeds.
  def getExpectedAttrNames(self):
    return [(None, u'uri'), (None, u'version')]
  def prevalidate(self):
    # Legacy 'url' attribute (older Atom drafts): validate as a URI.
    if self.attrs.has_key((None, "url")):
      self.value = self.attrs.getValue((None, "url"))
      rfc2396.validate(self, extraParams={"attr": "url"})
    # Atom 1.0 'uri' attribute: log InvalidURIAttribute on failure.
    if self.attrs.has_key((None, "uri")):
      self.value = self.attrs.getValue((None, "uri"))
      rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "uri"})
    # Reset so the element's own text content is validated separately.
    self.value=''
| Python |
"""$Id: skipHours.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipHours element
#
class skipHours(validatorBase):
  """Container for rss skipHours: collects validated <hour> children."""
  def __init__(self):
    self.hours = []
    validatorBase.__init__(self)
  def validate(self):
    if self.children.count("hour") == 0:
      self.log(MissingElement({"parent":self.name, "element":"hour"}))
    if len(self.children) > 24:
      self.log(NotEnoughHoursInTheDay({}))
  def do_hour(self):
    return hour()
class hour(text):
  # Validates a single <hour> value: an integer 0-23, unique in skipHours.
  def validate(self):
    try:
      h = int(self.value)
      # 0 and 24 both denote midnight, so each counts as a duplicate of
      # the other.
      if h in self.parent.hours or (h in [0,24] and 24-h in self.parent.hours):
        self.log(DuplicateValue({"parent":self.parent.name, "element":self.name, "value":self.value}))
      if (h < 0) or (h > 23):
        # Out of range: reuse the ValueError path below, where the literal
        # '24' gets a friendlier "use zero" message.
        raise ValueError
      else:
        self.parent.hours.append(h)
        self.log(ValidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
    except ValueError:
      if self.value == '24':
        self.log(UseZeroForMidnight({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(InvalidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
| Python |
"""$Id: opml.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from extension import extension_everywhere
import re
#
# Outline Processor Markup Language element.
#
class opml(validatorBase, extension_everywhere):
  # Root <opml> element: checks the version attribute and requires one
  # <head> and one <body> child.
  versionList = ['1.0', '1.1', '2.0']
  def validate(self):
    self.setFeedType(TYPE_OPML)
    if (None,'version') in self.attrs.getNames():
      if self.attrs[(None,'version')] not in opml.versionList:
        self.log(InvalidOPMLVersion({"parent":self.parent.name, "element":self.name, "value":self.attrs[(None,'version')]}))
    elif self.name != 'outlineDocument':
      # 'outlineDocument' was the pre-OPML draft root and carried no version.
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
    if 'head' not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"head"}))
    if 'body' not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"body"}))
  def getExpectedAttrNames(self):
    return [(None, u'version')]
  def do_head(self):
    return opmlHead()
  def do_body(self):
    return opmlBody()
class opmlHead(validatorBase, extension_everywhere):
  # Validates <head> children; all are optional and each may appear once.
  def do_title(self):
    return safeHtml(), noduplicates()
  def do_dateCreated(self):
    # RFC 822 date, e.g. 'Mon, 01 Jan 2007 00:00:00 GMT'
    return rfc822(), noduplicates()
  def do_dateModified(self):
    return rfc822(), noduplicates()
  def do_ownerName(self):
    return safeHtml(), noduplicates()
  def do_ownerEmail(self):
    return email(), noduplicates()
  def do_ownerId(self):
    return httpURL(), noduplicates()
  def do_expansionState(self):
    # comma-separated list of expanded outline line numbers
    return commaSeparatedLines(), noduplicates()
  def do_vertScrollState(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowTop(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowLeft(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowBottom(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_windowRight(self):
    return positiveInteger(), nonblank(), noduplicates()
class commaSeparatedLines(text):
  """Validates expansionState: an optionally empty, comma-separated
  list of line numbers."""
  linenumbers_re=re.compile('^(\d+(,\s*\d+)*)?$')
  def validate(self):
    if self.linenumbers_re.match(self.value) is None:
      self.log(InvalidExpansionState({"parent":self.parent.name, "element":self.name, "value":self.value}))
class opmlBody(validatorBase, extension_everywhere):
  """Validates <body>: must contain at least one <outline>."""
  def validate(self):
    if self.children.count('outline') == 0:
      self.log(MissingElement({"parent":self.name, "element":"outline"}))
  def do_outline(self):
    return opmlOutline()
class opmlOutline(validatorBase, extension_everywhere):
  # Validates an <outline> node, including nested outlines.
  versionList = ['RSS', 'RSS1', 'RSS2', 'scriptingNews']
  def getExpectedAttrNames(self):
    return [
      (None, u'category'),
      (None, u'created'),
      (None, u'description'),
      (None, u'htmlUrl'),
      (None, u'isBreakpoint'),
      (None, u'isComment'),
      (None, u'language'),
      (None, u'text'),
      (None, u'title'),
      (None, u'type'),
      (None, u'url'),
      (None, u'version'),
      (None, u'xmlUrl'),
    ]
  def validate(self):
    # 'text' is the only attribute required on every outline.
    if not (None,'text') in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"text"}))
    if (None,'type') in self.attrs.getNames():
      if self.attrs[(None,'type')].lower() == 'rss':
        # Subscription-list entries must point at a feed and carry a title.
        if not (None,'xmlUrl') in self.attrs.getNames():
          self.log(MissingXmlURL({"parent":self.parent.name, "element":self.name}))
        if not (None,'title') in self.attrs.getNames():
          self.log(MissingTitleAttr({"parent":self.parent.name, "element":self.name}))
      elif self.attrs[(None,'type')].lower() == 'link':
        if not (None,'url') in self.attrs.getNames():
          self.log(MissingUrlAttr({"parent":self.parent.name, "element":self.name}))
      else:
        self.log(InvalidOutlineType({"parent":self.parent.name, "element":self.name, "value":self.attrs[(None,'type')]}))
    if (None,'version') in self.attrs.getNames():
      if self.attrs[(None,'version')] not in opmlOutline.versionList:
        self.log(InvalidOutlineVersion({"parent":self.parent.name, "element":self.name, "value":self.attrs[(None,'version')]}))
    if len(self.attrs)>1 and not (None,u'type') in self.attrs.getNames():
      # Feed-ish attributes without type= suggest a forgotten type="rss".
      for name in u'description htmlUrl language title version xmlUrl'.split():
        if (None, name) in self.attrs.getNames():
          self.log(MissingOutlineType({"parent":self.parent.name, "element":self.name}))
          break
    self.validate_optional_attribute((None,'created'), rfc822)
    self.validate_optional_attribute((None,'description'), safeHtml)
    self.validate_optional_attribute((None,'htmlUrl'), rfc2396_full)
    self.validate_optional_attribute((None,'isBreakpoint'), truefalse)
    self.validate_optional_attribute((None,'isComment'), truefalse)
    self.validate_optional_attribute((None,'language'), iso639)
    self.validate_optional_attribute((None,'title'), safeHtml)
    self.validate_optional_attribute((None,'text'), safeHtml)
    self.validate_optional_attribute((None,'url'), rfc2396_full)
  def characters(self, string):
    # Outlines hold their data in attributes; non-whitespace text content
    # is unexpected and logged once.
    if not self.value:
      if string.strip():
        self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))
      self.value = string
  def do_outline(self):
    return opmlOutline()
| Python |
"""$Id: skipDays.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipDays element
#
class skipDays(validatorBase):
def __init__(self):
self.days = []
validatorBase.__init__(self)
def validate(self):
if "day" not in self.children:
self.log(MissingElement({"parent":self.name, "element":"day"}))
if len(self.children) > 7:
self.log(EightDaysAWeek({}))
def do_day(self):
return day()
class day(text):
def validate(self):
if self.value not in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'):
self.log(InvalidDay({"parent":self.parent.name, "element":self.name, "value":self.value}))
elif self.value in self.parent.days:
self.log(DuplicateValue({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.parent.days.append(self.value)
self.log(ValidDay({"parent":self.parent.name, "element":self.name, "value":self.value}))
| Python |
"""$Id: text_plain.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for plain text output"""
from base import BaseFormatter
import feedvalidator
class Formatter(BaseFormatter):
  """Renders each logged event as a single plain-text line."""
  def format(self, event):
    prefix = self.getLineAndColumn(event)
    message = self.getMessage(event)
    suffix = self.getCount(event)
    return '%s %s%s' % (prefix, message, suffix)
| Python |
"""$Id: text_html.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for HTML text output"""
from base import BaseFormatter
import feedvalidator
from xml.sax.saxutils import escape
from feedvalidator.logging import Message, Info, Warning, Error
from config import DOCSURL
def escapeAndMark(x):
  """HTML-escape *x*, highlight illegal characters, and harden spaces.

  C1-range characters (0x80-0x9F) and U+FFFD (the replacement character)
  are rendered as a hex escape (or '?') inside a highlighted span.
  Spaces in the source text are converted to &nbsp; *before* the span
  markup is added; the previous version converted them last, which
  corrupted the inserted '<span class="badOctet">' tags themselves
  whenever a bad octet was present.  Building via a list also avoids the
  old quadratic string slicing.
  """
  html = escape(x).replace(" ","&nbsp;")
  out = []
  for ch in html:
    c = ord(ch)
    if 0x80 <= c <= 0x9F or c == 0xfffd:
      if c == 0xfffd:
        e = '?'
      else:
        e = '\\x%02x' % (c)
      out.append('<span class="badOctet">%s</span>' % e)
    else:
      out.append(ch)
  return ''.join(out)
class Formatter(BaseFormatter):
  # Renders logged events as HTML <li> entries containing the message, a
  # highlighted source fragment with a caret marker, and a help link.
  FRAGMENTLEN = 80  # max width of the displayed source-code fragment
  def __init__(self, events, rawdata):
    BaseFormatter.__init__(self, events)
    self.rawdata = rawdata  # raw feed text, used to show code fragments
  def getRootClass(self, aClass):
    # Walk up the first-base chain to the class directly below
    # Message/LoggedEvent; that root names the help-doc category.
    base = aClass.__bases__[0]
    if base == Message: return aClass
    if base.__name__.split('.')[-1] == 'LoggedEvent':
      return aClass
    else:
      return self.getRootClass(base)
  def getHelpURL(self, event):
    # Shape: DOCSURL/<rootclass>/<MessageClass>, e.g. .../error/MissingElement
    rootClass = self.getRootClass(event.__class__).__name__
    rootClass = rootClass.split('.')[-1]
    rootClass = rootClass.lower()
#    messageClass = self.getMessageClass(event).__name__.split('.')[-1]
    messageClass = event.__class__.__name__.split('.')[-1]
    return DOCSURL + '/' + rootClass + '/' + messageClass
  def mostSeriousClass(self):
    # Highest severity present in the event list: Error > Warning > Info.
    ms=0
    for event in self.data:
      level = -1
      if isinstance(event,Info): level = 1
      if isinstance(event,Warning): level = 2
      if isinstance(event,Error): level = 3
      ms = max(ms, level)
    return [None, Info, Warning, Error][ms]
  def header(self):
    return '<ul>'
  def footer(self):
    return '</ul>'
  def format(self, event):
    if event.params.has_key('line'):
      line = event.params['line']
      if line >= len(self.rawdata.split('\n')):
        # For some odd reason, UnicodeErrors tend to trigger a bug
        # in the SAX parser that misrepresents the current line number.
        # We try to capture the last known good line number/column as
        # we go along, and now it's time to fall back to that.
        line = event.params['line'] = event.params.get('backupline',0)
        column = event.params['column'] = event.params.get('backupcolumn',0)
      column = event.params['column']
      codeFragment = self.rawdata.split('\n')[line-1]
      markerColumn = column
      # Trim long lines around the error column so the fragment fits in
      # FRAGMENTLEN, keeping the caret column consistent.
      if column > self.FRAGMENTLEN:
        codeFragment = '... ' + codeFragment[column-(self.FRAGMENTLEN/2):]
        markerColumn = 5 + (self.FRAGMENTLEN/2)
      if len(codeFragment) > self.FRAGMENTLEN:
        codeFragment = codeFragment[:(self.FRAGMENTLEN-4)] + ' ...'
    else:
      # Event carries no position: no fragment, no anchor, no caret.
      codeFragment = ''
      line = None
      markerColumn = None
    html = escapeAndMark(codeFragment)
    rc = u'<li><p>'
    if line:
      rc += u'''<a href="#l%s">''' % line
      rc += u'''%s</a>, ''' % self.getLine(event)
      rc += u'''%s: ''' % self.getColumn(event)
    if 'value' in event.params:
      rc += u'''<span class="message">%s: <code>%s</code></span>''' % (escape(self.getMessage(event)), escape(unicode(event.params['value'])))
    else:
      rc += u'''<span class="message">%s</span>''' % escape(self.getMessage(event))
    rc += u'''%s ''' % self.getCount(event)
    rc += u'''[<a title="more information about this error" href="%s.html">help</a>]</p>''' % self.getHelpURL(event)
    rc += u'''<blockquote><pre>''' + html + '''<br />'''
    if markerColumn:
      rc += u' ' * markerColumn
      rc += u'''<span class="marker">^</span>'''
    rc += u'</pre></blockquote></li>'
    return rc
| Python |
"""$Id: application_test.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for testing that all output messages are defined properly"""
from base import BaseFormatter
import feedvalidator
import os
# Message catalogue is selected via the LANGUAGE environment variable,
# defaulting to English ('en').
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
class Formatter(BaseFormatter):
  """Test formatter: resolves each event's message template, or None.

  Used to verify that every logged event class has a message defined in
  the active language catalogue.
  """
  def getMessage(self, event):
    # Breadth-first search of the event's class hierarchy for a template.
    pending = [event.__class__]
    while pending:
      cls = pending.pop(0)
      if lang.messages.has_key(cls):
        return lang.messages[cls] % event.params
      pending.extend(cls.__bases__)
    return None
  def format(self, event):
    """returns the formatted representation of a single event"""
    return self.getMessage(event)
| Python |
"""$Id: text_xml.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for xml output"""
from base import BaseFormatter
from feedvalidator.logging import *
import feedvalidator
def xmlEncode(value):
  """Escape &, <, >, and both quote characters for use in XML text."""
  # '&' must be replaced first so earlier substitutions are not
  # double-escaped.
  replacements = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                  ('"', '&quot;'), ("'", '&apos;'))
  for raw, entity in replacements:
    value = value.replace(raw, entity)
  return value
class Formatter(BaseFormatter):
  """Renders each logged event as an XML fragment keyed by severity.

  Output shape: <level><key>value</key>...</level>, with the well-known
  keys (msgcount, text, column, line, type, level) emitted first.
  """
  def format(self, event):
    params = event.params
    params['type'] = event.__class__.__name__
    params['text'] = self.getMessage(event)
    # determine the level of severity
    level = 'unknown'
    if isinstance(event,Info): level = 'info'
    if isinstance(event,Warning): level = 'warning'
    if isinstance(event,Error): level = 'error'
    params['level'] = level
    # organize fixed elements into a known order
    order = params.keys()
    order.sort()
    for key in ['msgcount', 'text', 'column', 'line', 'type', 'level']:
      if key in order:
        order.remove(key)
        order.insert(0,key)
    # output the elements
    result = "<%s>\n" % level
    for key in order:
      value = xmlEncode(str(params[key]))
      # Publish backup positions under their real element names.  The old
      # code assigned a misspelled variable ('pubkey') and then emitted the
      # raw key anyway, so <backupline>/<backupcolumn> leaked into output.
      pub_key = key
      if key == "backupcolumn":
        pub_key = "column"
      elif key == "backupline":
        pub_key = "line"
      result = result + ("  <%s>%s</%s>\n" % (pub_key, value, pub_key))
    result = result + "</%s>\n" % level
    return result
| Python |
"""$Id: base.py 1055 2009-05-19 15:12:42Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1055 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Base class for output classes"""
from UserList import UserList
import os
# Message catalogue is selected via the LANGUAGE environment variable,
# defaulting to English ('en').
LANGUAGE = os.environ.get('LANGUAGE', 'en')
lang = __import__('feedvalidator.i18n.%s' % LANGUAGE, globals(), locals(), LANGUAGE)
from feedvalidator.logging import Info, Warning, Error
class BaseFormatter(UserList):
  """Base class for output formatters.

  Holds the list of logged events (via UserList) and maps each event to a
  localized message string; subclasses override format().  Replaces the
  deprecated backquote-repr and dict.has_key forms, and narrows the bare
  'except' in getMessage to Exception.
  """
  def __getitem__(self, i):
    return self.format(self.data[i])
  def getErrors(self):
    return [self.format(msg) for msg in self.data if isinstance(msg,Error)]
  def getWarnings(self):
    return [self.format(msg) for msg in self.data if isinstance(msg,Warning)]
  def getLine(self, event):
    # '' when the event carries no position information.
    if 'line' not in event.params: return ''
    return lang.line % event.params
  def getColumn(self, event):
    if 'column' not in event.params: return ''
    return lang.column % event.params
  def getLineAndColumn(self, event):
    line = self.getLine(event)
    if not line: return ''
    column = self.getColumn(event)
    return '%s, %s:' % (line, column)
  def getCount(self, event):
    # Occurrence-count suffix; empty for single occurrences.
    if 'msgcount' not in event.params: return ''
    count = int(event.params['msgcount'])
    if count <= 1: return ''
    return lang.occurances % event.params
  def getMessageClass(self, event):
    # Breadth-first search for the nearest ancestor class with a message.
    classes = [event.__class__]
    while len(classes):
      if classes[0] in lang.messages:
        return classes[0]
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def getMessage(self, event):
    classes = [event.__class__]
    while len(classes):
      if classes[0] in lang.messages:
        try:
          return lang.messages[classes[0]] % event.params
        except Exception:
          # Template/parameter mismatch: show the raw template and params
          # rather than crashing the formatter.
          return lang.messages[classes[0]] + ' % ' + repr(event.params)
      classes = classes + list(classes[0].__bases__)
      del classes[0]
    return "Undefined message: %s[%s]" % (event.__class__, event.params)
  def format(self, event):
    """returns the formatted representation of a single event"""
    # repr() replaces the deprecated backquote syntax.
    return repr(event)
| Python |
"""$Id: __init__.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__all__ = ['base', 'text_plain', 'text_html']
| Python |
#!/usr/bin/python
"""
$Id: xmlEncoding.py 988 2008-03-12 18:22:48Z sa3ruby $
This module deals with detecting XML encodings, using both BOMs and
explicit declarations.
"""
__author__ = "Joseph Walton <http://www.kafsemo.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2004 Joseph Walton"
import codecs
import re
from logging import ObscureEncoding, NonstdEncoding
import logging
class FailingCodec:
  """Stand-in for a codec missing from this Python installation.

  fail() is signature-compatible with a real decoder but always raises,
  so the problem only surfaces when decoding is actually attempted.
  """
  def __init__(self, name):
    self.name = name
  def fail(self, txt, errors='strict'):
    message = 'No codec available for %s in this installation of FeedValidator' % self.name
    raise UnicodeError(message)
# Don't die if the codec can't be found, but return
# a decoder that will fail on use
def getdecoder(codec):
  """Return a decoder function for *codec*.

  If the codec is unknown, return a stub that raises UnicodeError on
  first use, so detection can proceed and the failure is reported at
  decode time rather than at import time.
  """
  try:
    return codecs.getdecoder(codec)
  except LookupError:
    # Only codec-lookup failures are expected here; the previous bare
    # 'except' also swallowed unrelated errors (even KeyboardInterrupt).
    return FailingCodec(codec).fail
# These are generic decoders that are only used
# to decode the XML declaration, from which we can read
# the real encoding.
# getdecoder() never raises for a missing codec (it hands back a stub
# that fails on use), so these module-level lookups are safe at import.
_decUTF32BE = getdecoder('UTF-32BE')
_decUTF32LE = getdecoder('UTF-32LE')
_decUTF16BE = getdecoder('UTF-16BE')
_decUTF16LE = getdecoder('UTF-16LE')
_decEBCDIC = getdecoder('IBM037') # EBCDIC
_decACE = getdecoder('ISO-8859-1') # An ASCII-compatible encoding
# Given a character index into a string, calculate its 1-based row and column
def _position(txt, idx):
row = txt.count('\n', 0, idx) + 1
ln = txt.rfind('\n', 0, idx) + 1
column = 0
for c in txt[ln:idx]:
if c == '\t':
column = (column // 8 + 1) * 8
else:
column += 1
column += 1
return (row, column)
def _normaliseNewlines(txt):
return txt.replace('\r\n', '\n').replace('\r', '\n')
def _logEvent(loggedEvents, e, pos=None):
if pos:
e.params['line'], e.params['column'] = pos
loggedEvents.append(e)
# Return the encoding from the declaration, or 'None'
# Return None if the 'permitted' list is passed in and the encoding
# isn't found in it. This is so that, e.g., a 4-byte-character XML file
# that claims to be US-ASCII will fail now.
def _decodeDeclaration(sig, dec, permitted, loggedEvents):
  # sig: raw bytes after any BOM; dec: decoder for the BOM-implied family;
  # permitted: upper-case encoding names compatible with that family.
  sig = _normaliseNewlines(dec(sig)[0])
  eo = _encodingFromDecl(sig)
  if not(eo):
    # For these encodings a declaration is mandatory (XML 1.0, 4.3.3).
    _logEvent(loggedEvents,
      logging.UnicodeError({'exception': 'This XML file (apparently ' + permitted[0] + ') requires an encoding declaration'}), (1, 1))
  elif permitted and not(eo[0].upper() in permitted):
    if _hasCodec(eo[0]):
      # see if the codec is an alias of one of the permitted encodings
      codec=codecs.lookup(eo[0])
      for encoding in permitted:
        if _hasCodec(encoding) and codecs.lookup(encoding)[-1]==codec[-1]: break
      else:
        # for/else: no permitted alias matched, so the declaration
        # contradicts the byte pattern the document actually uses.
        _logEvent(loggedEvents,
          logging.UnicodeError({'exception': 'This XML file claims an encoding of ' + eo[0] + ', but looks more like ' + permitted[0]}), eo[1])
    # NOTE(review): when the declared codec is unknown (_hasCodec false),
    # nothing is logged here -- presumably reported later at decode time;
    # verify against decode()'s UnknownEncoding handling.
  return eo
# Return the encoding from the declaration, or 'fallback' if none is
# present. Return None if the 'permitted' list is passed in and
# the encoding isn't found in it
def _decodePostBOMDeclaration(sig, dec, permitted, loggedEvents, fallback=None):
  """Reconcile a BOM with any XML declaration that follows it."""
  decl = _encodingFromDecl(_normaliseNewlines(dec(sig)[0]))
  if decl is None:
    # No declaration at all: the BOM alone determines the encoding.
    return (fallback, None)
  if decl[0].upper() not in permitted:
    _logEvent(loggedEvents,
      logging.UnicodeError({'exception': 'Document starts with ' + permitted[0] + ' BOM marker but has incompatible declaration of ' + decl[0]}), decl[1])
    return None
  return decl
def isStandard(x):
  """ Is this encoding required by the XML 1.0 Specification, 4.3.3? """
  name = x.upper()
  return name == 'UTF-8' or name == 'UTF-16'
def isCommon(x):
  """Is this encoding commonly used, according to
  <http://www.syndic8.com/stats.php?Section=feeds#XMLEncodings>
  (as of 2004-03-27)?"""
  if isStandard(x):
    return True
  common = [
    'US-ASCII', 'ISO-8859-1', 'EUC-JP', 'ISO-8859-2', 'ISO-8859-15',
    'ISO-8859-7', 'KOI8-R', 'SHIFT_JIS', 'WINDOWS-1250', 'WINDOWS-1251',
    'WINDOWS-1252', 'WINDOWS-1254', 'WINDOWS-1255', 'WINDOWS-1256',
    # This doesn't seem to be popular, but is the Chinese
    # government's mandatory standard
    'GB18030',
  ]
  return x.upper() in common
# Inspired by xmlproc's autodetect_encoding, but rewritten
def _detect(doc_start, loggedEvents=[], fallback='UTF-8'):
"""This is the logic from appendix F.1 of the XML 1.0 specification.
Pass in the start of a document (>= 256 octets), and receive the encoding to
use, or None if there is a problem with the document."""
sig = doc_start[:4]
# With a BOM. We also check for a declaration, and make sure
# it doesn't contradict (for 4-byte encodings, it's required)
if sig == '\x00\x00\xFE\xFF': # UTF-32 BE
eo = _decodeDeclaration(doc_start[4:], _decUTF32BE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\xFF\xFE\x00\x00': # UTF-32 LE
eo = _decodeDeclaration(doc_start[4:], _decUTF32LE, ['UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x00\xFF\xFE' or sig == '\xFE\xFF\x00\x00':
raise UnicodeError('Unable to process UCS-4 with unusual octet ordering')
elif sig[:2] == '\xFE\xFF': # UTF-16 BE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16BE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:2] == '\xFF\xFE': # UTF-16 LE
eo = _decodePostBOMDeclaration(doc_start[2:], _decUTF16LE, ['UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents, fallback='UTF-16')
elif sig[:3] == '\xEF\xBB\xBF':
eo = _decodePostBOMDeclaration(doc_start[3:], _decACE, ['UTF-8'], loggedEvents, fallback='UTF-8')
# Without a BOM; we must read the declaration
elif sig == '\x00\x00\x00\x3C':
eo = _decodeDeclaration(doc_start, _decUTF32BE, ['UTF-32BE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x3C\x00\x00\x00':
eo = _decodeDeclaration(doc_start, _decUTF32LE, ['UTF-32LE', 'UTF-32', 'ISO-10646-UCS-4', 'CSUCS4', 'UCS-4'], loggedEvents)
elif sig == '\x00\x3C\x00\x3F':
eo = _decodeDeclaration(doc_start, _decUTF16BE, ['UTF-16BE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x00\x3F\x00':
eo = _decodeDeclaration(doc_start, _decUTF16LE, ['UTF-16LE', 'UTF-16', 'ISO-10646-UCS-2', 'CSUNICODE', 'UCS-2'], loggedEvents)
elif sig == '\x3C\x3F\x78\x6D':
eo = _encodingFromDecl(_normaliseNewlines(_decACE(doc_start)[0])) or ('UTF-8', None)
elif sig == '\x4C\x6F\xA7\x94':
eo = _decodeDeclaration(doc_start, _decEBCDIC, ['IBM037', 'CP037', 'IBM038', 'EBCDIC-INT'], loggedEvents)
# There's no BOM, and no declaration. It's UTF-8, or mislabelled.
else:
eo = (fallback, None)
return eo
def detect(doc_start, loggedEvents=None, fallback='UTF-8'):
  """Return just the detected encoding name for *doc_start*, or None.

  Thin wrapper over _detect(); see there for the detection rules.
  loggedEvents now defaults to a fresh list per call (the previous
  mutable default argument leaked events across calls).
  """
  if loggedEvents is None:
    loggedEvents = []
  eo = _detect(doc_start, loggedEvents, fallback)
  if eo:
    return eo[0]
  else:
    return None
_encRe = re.compile(r'<\?xml\s+version\s*=\s*(?:"[-a-zA-Z0-9_.:]+"|\'[-a-zA-Z0-9_.:]+\')\s+(encoding\s*=\s*(?:"([-A-Za-z0-9._]+)"|\'([-A-Za-z0-9._]+)\'))')
def _encodingFromDecl(x):
m = _encRe.match(x)
if m:
if m.group(2):
return m.group(2), _position(x, m.start(2))
else:
return m.group(3), _position(x, m.start(3))
else:
return None
def removeDeclaration(x):
"""Replace an XML document string's encoding declaration with the
same number of spaces. Some XML parsers don't allow the
encoding to be overridden, and this is a workaround."""
m = _encRe.match(x)
if m:
s = m.start(1)
e = m.end(1)
res = x[:s] + ' ' * (e - s) + x[e:]
else:
res = x
return res
def _hasCodec(enc):
try:
return codecs.lookup(enc) is not None
except:
return False
def decode(mediaType, charset, bs, loggedEvents, fallback=None):
  # Decode the byte string bs to Unicode, reconciling the encoding
  # declared inside the document with the charset from the transport
  # (HTTP) headers.  Returns (encoding-name, text); text is None when
  # no usable encoding could be determined, or a best-effort 'replace'
  # decode when the bytes are malformed for the chosen encoding.
  eo = _detect(bs, loggedEvents, fallback=None)
  # Check declared encodings
  if eo and eo[1] and _hasCodec(eo[0]):
    # warn (not error) about unusual but decodable declared encodings
    if not(isCommon(eo[0])):
      _logEvent(loggedEvents, ObscureEncoding({"encoding": eo[0]}), eo[1])
    elif not(isStandard(eo[0])):
      _logEvent(loggedEvents, NonstdEncoding({"encoding": eo[0]}), eo[1])
  if eo:
    encoding = eo[0]
  else:
    encoding = None
  if charset and encoding and charset.lower() != encoding.lower():
    # RFC 3023 requires us to use 'charset', but a number of aggregators
    # ignore this recommendation, so we should warn.
    loggedEvents.append(logging.EncodingMismatch({"charset": charset, "encoding": encoding}))
  if mediaType and mediaType.startswith("text/") and charset is None:
    loggedEvents.append(logging.TextXml({}))
    # RFC 3023 requires text/* to default to US-ASCII. Issue a warning
    # if this occurs, but continue validation using the detected encoding
    try:
      bs.decode("US-ASCII")
    except:
      # document is not pure ASCII, so the implied default is wrong
      if not encoding:
        try:
          bs.decode(fallback)
          encoding=fallback
        except:
          pass
      if encoding and encoding.lower() != 'us-ascii':
        loggedEvents.append(logging.EncodingMismatch({"charset": "US-ASCII", "encoding": encoding}))
  # transport charset wins over the in-document declaration (RFC 3023)
  enc = charset or encoding
  if enc is None:
    loggedEvents.append(logging.MissingEncoding({}))
    enc = fallback
  elif not(_hasCodec(enc)):
    # position information is only available when detection succeeded
    if eo:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}), eo[1])
    else:
      _logEvent(loggedEvents, logging.UnknownEncoding({'encoding': enc}))
    enc = fallback
  if enc is None:
    return enc, None
  dec = getdecoder(enc)
  try:
    return enc, dec(bs)[0]
  except UnicodeError, ue:
    # decode again with replacement characters so validation can continue
    salvage = dec(bs, 'replace')[0]
    if 'start' in ue.__dict__:
      # XXX 'start' is in bytes, not characters. This is wrong for multibyte
      # encodings
      pos = _position(salvage, ue.start)
    else:
      pos = None
    _logEvent(loggedEvents, logging.UnicodeError({"exception":ue}), pos)
    return enc, salvage
# Module-level UTF-8 encoder, looked up once.
_encUTF8 = codecs.getencoder('UTF-8')
def asUTF8(x):
  """Accept a Unicode string and return a UTF-8 encoded string, with
  its encoding declaration removed, suitable for parsing."""
  stripped = removeDeclaration(unicode(x))
  encoded, _length = _encUTF8(stripped)
  return encoded
if __name__ == '__main__':
from sys import argv
from os.path import isfile
for x in argv[1:]:
if isfile(x):
f = open(x, 'r')
l = f.read(1024)
log = []
eo = detect(l, log)
if eo:
print x,eo
else:
print repr(log)
| Python |
"""$Id: entry.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_item
from extension import extension_entry
#
# pie/echo entry element.
#
class entry(validatorBase, extension_entry, itunes_item):
  # Validator for an Atom entry: checks required children (title, author,
  # id, updated), the content/summary interplay, and uniqueness of
  # alternate links per (type, hreflang).
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
  def prevalidate(self):
    # collected while children are parsed; examined in validate()
    self.links=[]
    self.content=None
  def validate(self):
    if not 'title' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"title"}))
    if not 'author' in self.children and not 'author' in self.parent.children:
      # an entry-level author is only required when the feed has none
      self.log(MissingElement({"parent":self.name, "element":"author"}))
    if not 'id' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"id"}))
    if not 'updated' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"updated"}))
    if self.content:
      if not 'summary' in self.children:
        # out-of-line content (src attribute) requires a summary
        if self.content.attrs.has_key((None,"src")):
          self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
        # so do non-text, non-XML media types
        ctype = self.content.type
        if ctype.find('/') > -1 and not (
            ctype.endswith('+xml') or ctype.endswith('/xml') or
            ctype.startswith('text/')):
          self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
    else:
      if not 'summary' in self.children:
        self.log(MissingTextualContent({"parent":self.parent.name, "element":self.name}))
      # without content there must be at least one alternate link
      for link in self.links:
        if link.rel == 'alternate': break
      else:
        self.log(MissingContentOrAlternate({"parent":self.parent.name, "element":self.name}))
    # can only have one alternate per type
    types={}
    for link in self.links:
      if not link.rel=='alternate': continue
      if not link.type in types: types[link.type]=[]
      if link.hreflang in types[link.type]:
        self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
      else:
        types[link.type] += [link.hreflang]
    if self.itunes: itunes_item.validate(self)
  def do_author(self):
    from author import author
    return author()
  def do_category(self):
    from category import category
    return category()
  def do_content(self):
    from content import content
    # remembered so validate() can apply the summary rules above
    self.content=content()
    return self.content, noduplicates()
  def do_contributor(self):
    from author import author
    return author()
  def do_id(self):
    return canonicaluri(), nows(), noduplicates(), unique('id',self.parent,DuplicateEntries)
  def do_link(self):
    from link import link
    self.links += [link()]
    return self.links[-1]
  def do_published(self):
    return rfc3339(), nows(), noduplicates()
  def do_source(self):
    return source(), noduplicates()
  def do_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_summary(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_updated(self):
    return rfc3339(), nows(), noduplicates(), unique('updated',self.parent,DuplicateUpdated)
  def do_app_edited(self):
    return rfc3339(), nows(), noduplicates()
  def do_app_control(self):
    return app_control(), noduplicates()
class app_control(validatorBase):
  """Atom Publishing Protocol app:control container."""
  def do_app_draft(self):
    # app:draft carries a yes/no value and may appear at most once
    return yesno(), noduplicates()
from feed import feed
class source(feed):
  """Atom entry source element: feed-level metadata without entries."""
  def missingElement(self, params):
    # source reports a source-specific variant of MissingElement
    self.log(MissingSourceElement(params))
  def validate(self):
    # only feed-level metadata applies here
    self.validate_metadata()
  def do_author(self):
    # an author in the source satisfies the parent entry's requirement
    parent_children = self.parent.children
    if 'author' not in parent_children:
      parent_children.append('author')
    return feed.do_author(self)
  def do_entry(self):
    # entries are not permitted inside a source element
    self.log(UndefinedElement({"parent":self.name, "element":"entry"}))
    return eater()
| Python |
"""$Id: feed.py 1040 2009-02-15 20:01:32Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1040 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_channel
from extension import extension_feed
#
# Atom root element
#
class feed(validatorBase, extension_feed, itunes_channel):
  # Validator for an Atom feed: required metadata (title/id/updated),
  # RFC 5005 (archived/complete feed) link rules, self-link presence,
  # and alternate-link uniqueness per (type, rel, hreflang).
  def getExpectedAttrNames(self):
    return [(u'urn:atom-extension:indexing', u'index')]
  def prevalidate(self):
    # collected while children are parsed; examined in validate_metadata()
    self.links = []
    self.validate_optional_attribute((u'urn:atom-extension:indexing', u'index'), yesno)
  def missingElement(self, params):
    # report the error at the feed's start tag rather than the current
    # parse position (offset is [line delta, column delta])
    offset = [self.line - self.dispatcher.locator.getLineNumber(),
              self.col - self.dispatcher.locator.getColumnNumber()]
    self.log(MissingElement(params), offset)
  def validate_metadata(self):
    if not 'title' in self.children:
      self.missingElement({"parent":self.name, "element":"title"})
    if not 'id' in self.children:
      self.missingElement({"parent":self.name, "element":"id"})
    if not 'updated' in self.children:
      self.missingElement({"parent":self.name, "element":"updated"})
    # complete feeds can only have current=self and no other links
    if 'fh_complete' in self.children:
      for link in self.links:
        if link.rel in link.rfc5005:
          if link.rel == "current":
            if link.href not in self.dispatcher.selfURIs:
              self.log(CurrentNotSelfInCompleteFeed({"rel":link.rel}))
          else:
            self.log(FeedRelInCompleteFeed({"rel":link.rel}))
    # ensure that there is a link rel="self"
    if self.name != 'source':
      for link in self.links:
        if link.rel=='self': break
      else:
        # same offset trick as missingElement: point at the start tag
        offset = [self.line - self.dispatcher.locator.getLineNumber(),
                  self.col - self.dispatcher.locator.getColumnNumber()]
        self.log(MissingSelf({"parent":self.parent.name, "element":self.name}), offset)
    types={}
    archive=False
    current=False
    for link in self.links:
      if link.rel == 'current': current = True
      if link.rel in ['prev-archive', 'next-archive']: archive = True
      # attempts to link past the end of the list
      if link.rel == 'first' and link.href in self.dispatcher.selfURIs:
        for link2 in self.links:
          if link2.rel == 'previous':
            self.log(LinkPastEnd({"self":link.rel, "rel":link2.rel}))
      if link.rel == 'last' and link.href in self.dispatcher.selfURIs:
        for link2 in self.links:
          if link2.rel == 'next':
            self.log(LinkPastEnd({"self":link.rel, "rel":link2.rel}))
      # can only have one alternate per type
      if not link.rel=='alternate': continue
      if not link.type in types: types[link.type]={}
      if link.rel in types[link.type]:
        if link.hreflang in types[link.type][link.rel]:
          self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
        else:
          types[link.type][link.rel] += [link.hreflang]
      else:
        types[link.type][link.rel] = [link.hreflang]
    if 'fh_archive' in self.children:
      # archives should either have links or be marked complete
      if not archive and 'fh_complete' not in self.children:
        self.log(ArchiveIncomplete({}))
      # archives should have current links
      if not current and ('fh_complete' not in self.children):
        self.log(MissingCurrentInArchive({}))
    if self.itunes: itunes_channel.validate(self)
  def metadata(self):
    # feed metadata must precede the first entry
    if 'entry' in self.children:
      self.log(MisplacedMetadata({"parent":self.name, "element":self.child}))
  def validate(self):
    # collapse a flood of DuplicateEntries into a single DuplicateIds
    # when (nearly) every entry shares one id
    entries = self.children.count('entry')
    dups = 0
    for event in self.dispatcher.loggedEvents:
      if isinstance(event,DuplicateEntries):
        dups += event.params.get('msgcount',1)
    if entries > 9 and entries == dups + 1:
      self.log(DuplicateIds({}))
      self.dispatcher.loggedEvents = [event
        for event in self.dispatcher.loggedEvents
        if not isinstance(event,DuplicateEntries)]
    if not 'entry' in self.children:
      # otherwise metadata was already validated when the first entry began
      self.validate_metadata()
  def do_author(self):
    self.metadata()
    from author import author
    return author()
  def do_category(self):
    self.metadata()
    from category import category
    return category()
  def do_contributor(self):
    self.metadata()
    from author import author
    return author()
  def do_generator(self):
    self.metadata()
    from generator import generator
    return generator(), nonblank(), noduplicates()
  def do_id(self):
    self.metadata()
    return canonicaluri(), nows(), noduplicates()
  def do_icon(self):
    self.metadata()
    return nonblank(), nows(), rfc2396(), noduplicates()
  def do_link(self):
    self.metadata()
    from link import link
    self.links.append(link())
    return self.links[-1]
  def do_logo(self):
    self.metadata()
    return nonblank(), nows(), rfc2396(), noduplicates()
  def do_title(self):
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_subtitle(self):
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_rights(self):
    self.metadata()
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_updated(self):
    self.metadata()
    return rfc3339(), nows(), noduplicates()
  def do_entry(self):
    # metadata is validated once, when the first entry is encountered
    if not 'entry' in self.children:
      self.validate_metadata()
    from entry import entry
    return entry()
  def do_app_collection(self):
    from service import collection
    return collection(), noduplicates()
| Python |
"""$Id: kml.py 1057 2009-07-21 21:54:14Z sa3ruby $"""
__author__ = "Gregor J. Rothfuss <http://greg.abstrakt.ch/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1057 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
import re
# This code tries to mimic the structure of the canonical KML XSD as much as possible.
# The KML XSD is at http://code.google.com/apis/kml/schema/kml21.xsd
# FeatureType from the XSD schema
#
class FeatureType(validatorBase):
  # Children shared by KML Feature elements (name, visibility, styles,
  # time primitives, Atom authorship, ...); mixed into concrete features.
  # Each do_<name> method maps the XML child element <name> to its
  # validator (reflection-based dispatch).
  def do_name(self):
    return text(),noduplicates()
  def do_visibility(self):
    return zeroone(),noduplicates()
  def do_open(self):
    return zeroone(),noduplicates()
  def do_address(self):
    return nonhtml(),noduplicates()
  def do_phoneNumber(self):
    return text(),noduplicates() # todo: implement full check from http://www.koders.com/perl/fid426DF448FE99166A1AD0162538E583A0FA956EEA.aspx
  def do_Snippet(self):
    return Snippet(), noduplicates()
  def do_description(self):
    return text(), noduplicates()
  def do_LookAt(self):
    return LookAt(),noduplicates()
  # TimePrimitive
  def do_TimeStamp(self):
    return TimeStamp(),noduplicates()
  def do_TimeSpan(self):
    return TimeSpan(),noduplicates()
  # /TimePrimitive
  def do_styleUrl(self):
    return text(), noduplicates()
  # StyleSelector
  def do_Style(self):
    return Style()
  def do_StyleMap(self):
    return StyleMap()
  # /StyleSelector
  # 2.0 only
  def do_View(self):
    return View(),noduplicates()
  # /2.0 only
  def do_Region(self):
    return Region(), noduplicates()
  def do_Metadata(self):
    return Metadata()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_author(self):
    from author import author
    return author()
# OverlayType from the XSD schema
#
class OverlayType(validatorBase):
  # Children shared by KML overlay elements (color, draw order, icon).
  def do_color(self):
    return color(),noduplicates()
  def do_drawOrder(self):
    return Integer(),noduplicates()
  def do_Icon(self):
    return Icon(), noduplicates()
# ColorStyleType from the XSD schema
#
class ColorStyleType(validatorBase):
  # Children shared by style elements that carry color/colorMode.
  def do_color(self):
    return color(),noduplicates()
  def do_colorMode(self):
    return colorMode(),noduplicates()
#
# Container from the XSD schema
#
class Container(validatorBase):
  # Features that may hold nested Documents and Folders.
  def do_Document(self):
    return Document()
  def do_Folder(self):
    return Folder()
#
# Feature from the XSD schema
#
class Feature(validatorBase):
  # Elements that may contain Placemark children.
  def do_Placemark(self):
    return Placemark()
#
# Geometry from the XSD schema
#
class Geometry(Feature):
  # Dispatch for the concrete KML geometry elements.
  # TODO these should all be noduplicates(), but because they can appear
  # inside MultiGeometry, they are not.
  def do_Model(self):
    return Model()
  def do_LineString(self):
    return LineString()
  def do_LinearRing(self):
    return LinearRing()
  def do_Point(self):
    return Point()
  def do_Polygon(self):
    return Polygon()
  def do_MultiGeometry(self):
    return MultiGeometry()
#
# GeometryElements from the XSD schema
#
class GeometryElements(Geometry):
  # Children common to concrete geometries (extrude/tessellate/altitudeMode).
  def do_extrude(self):
    return zeroone(),noduplicates()
  def do_tessellate(self):
    return zeroone(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
#
# LinkType from the XSD schema
#
class LinkType(validatorBase):
  # Children shared by Link and the deprecated Url element.
  def do_href(self):
    return text(),noduplicates()
  def do_refreshMode(self):
    return refreshMode(),noduplicates()
  def do_viewRefreshMode(self):
    return viewRefreshMode(),noduplicates()
  def do_viewRefreshTime(self):
    return Float(), noduplicates()
  def do_viewBoundScale(self):
    return Float(), noduplicates()
  def do_refreshVisibility(self):
    return refreshVisibility(),noduplicates()
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_viewFormat(self):
    return text(),noduplicates()
  def do_httpQuery(self):
    return text(),noduplicates()
#
# LookAtType from the XSD schema
#
class LookAtType(Feature):
  # Camera position/orientation children shared by LookAt and View.
  def do_longitude(self):
    return longitude(),noduplicates()
  def do_latitude(self):
    return latitude(),noduplicates()
  def do_altitude(self):
    return FloatWithNegative(),noduplicates()
  def do_range(self):
    return Float(),noduplicates()
  def do_tilt(self):
    # NOTE(review): tilt reuses the latitude validator (-90..90 range)
    # rather than a dedicated one — confirm this matches the KML spec.
    return latitude(),noduplicates()
  def do_heading(self):
    return angle360(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
#
# KML element.
#
class kml(validatorBase, Container, Feature):
  # Root <kml> element: accepts top-level features, overlays and
  # network links, plus Atom authorship elements.
  from logging import TYPE_KML20, TYPE_KML21, TYPE_KML22
  def do_NetworkLink(self):
    return NetworkLink()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_ScreenOverlay(self):
    return ScreenOverlay()
  def do_NetworkLinkControl(self):
    return NetworkLinkControl()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_author(self):
    from author import author
    return author()
class NetworkLinkControl(validatorBase):
  # <NetworkLinkControl>: server-side control data for network links.
  def do_minRefreshPeriod(self):
    return Float(),noduplicates()
  def do_linkName(self):
    return text(),noduplicates()
  def do_linkDescription(self):
    return text(),noduplicates()
  def do_cookie(self):
    return text(),noduplicates()
  def do_message(self):
    return text(), noduplicates()
  def do_linkSnippet(self):
    return Snippet(), noduplicates()
  def do_expires(self):
    return w3cdtf(),noduplicates()
  def do_Update(self):
    return Update(),noduplicates()
  def do_LookAt(self):
    return LookAt(),noduplicates()
  def do_View(self):
    return View(),noduplicates()
class Update(validatorBase):
  # <Update>: requires a targetHref; the Change/Update/Delete children
  # are accepted but their contents are not yet validated.
  def validate(self):
    if not "targetHref" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"targetHref"}))
  def do_targetHref(self):
    return text(),noduplicates()
  # todo: child validation
  def do_Change(self):
    return noduplicates()
  # todo: child validation
  # NOTE(review): the KML Update element's children are Create/Change/
  # Delete; do_Update here looks like it was meant to be do_Create — confirm.
  def do_Update(self):
    return noduplicates()
  # todo: child validation
  def do_Delete(self):
    return noduplicates()
class NetworkLink(validatorBase, FeatureType, Feature):
  # <NetworkLink>: must reference remote content via Link (or legacy Url).
  def validate(self):
    if not "Link" in self.children and not "Url" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Link"}))
  def do_targetHref(self):
    # NOTE(review): targetHref returning an Update() validator looks
    # suspicious (cf. Update.do_targetHref, which returns text) — confirm.
    return Update(),noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_flyToView(self):
    return zeroone(),noduplicates()
  def do_Link(self):
    return Link(),noduplicates()
  def do_Url(self):
    return Url(),noduplicates()
class Document(validatorBase, FeatureType, Container, Feature):
  # <Document>: container that may also hold overlays, network links
  # and a custom-data Schema.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_ScreenOverlay(self):
    return ScreenOverlay()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_NetworkLink(self):
    return NetworkLink()
  def do_Schema(self):
    return Schema(), noduplicates()
class Schema(validatorBase):
  # <Schema>: custom-data schema; all field kinds share one validator.
  def getExpectedAttrNames(self):
    return [(None, u'name'), (None, u'parent')]
  def do_SimpleField(self):
    return SchemaField()
  def do_SimpleArrayField(self):
    return SchemaField()
  def do_ObjField(self):
    return SchemaField()
  def do_ObjArrayField(self):
    return SchemaField()
class SchemaField(validatorBase):
  # Schema field declaration: name and an enumerated type are required.
  def getExpectedAttrNames(self):
    return [
      (None, u'name'),
      (None, u'type'),
    ]
  def validate(self):
    self.validate_required_attribute((None,'name'), text)
    self.validate_required_attribute((None,'type'), SchemaFieldType)
class Placemark(validatorBase, FeatureType, Geometry):
  # <Placemark>: a feature with geometry; ids should be present and
  # unique within the parent.
  def prevalidate(self):
    if not self.attrs.has_key((None,"id")):
      self.log(MissingId({"parent":self.name, "element":"id"}))
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_GeometryCollection(self):
    return GeometryCollection()
class MultiGeometry(Geometry):
  # TODO: check for either geometry or multigeometry in feature, but not both?
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class ScreenOverlay(validatorBase, FeatureType, OverlayType):
  # <ScreenOverlay>: overlay pinned to screen coordinates.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_geomColor(self):
    return geomColor(),noduplicates()
  def do_overlayXY(self):
    return overlayxy(), noduplicates()
  def do_screenXY(self):
    return overlayxy(), noduplicates()
  def do_rotationXY(self):
    return overlayxy(), noduplicates()
  def do_size(self):
    return overlayxy(), noduplicates()
class GroundOverlay(validatorBase, FeatureType, OverlayType):
  # <GroundOverlay>: ground-draped overlay; a LatLonBox extent is required.
  def validate(self):
    if not "LatLonBox" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"LatLonBox"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_altitude(self):
    return FloatWithNegative(),noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
  def do_geomColor(self):
    return geomColor(),noduplicates()
  def do_LatLonBox(self):
    return LatLonBox(), noduplicates()
class overlayxy(validatorBase):
  # Shared validator for x/y anchor-point elements (overlayXY, screenXY,
  # rotationXY, size, hotSpot): all four attributes are required.
  def getExpectedAttrNames(self):
    return [
      (None, u'x'),
      (None, u'y'),
      (None, u'xunits'),
      (None, u'yunits'),
    ]
  def validate(self):
    self.validate_required_attribute((None,'x'), FloatWithNegative)
    self.validate_required_attribute((None,'y'), FloatWithNegative)
    self.validate_required_attribute((None,'xunits'), kmlunits)
    self.validate_required_attribute((None,'yunits'), kmlunits)
class Region(validatorBase):
  # <Region>: requires a LatLonAltBox; Lod is optional.
  def validate(self):
    if not "LatLonAltBox" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"LatLonAltBox"}))
  def do_LatLonAltBox(self):
    return LatLonAltBox(), noduplicates()
  def do_Lod(self):
    return Lod(), noduplicates()
class LatLonBox(validatorBase):
  # <LatLonBox>: geographic extent; all four edges are required.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def validate(self):
    if not "north" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"north"}))
    if not "south" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"south"}))
    if not "east" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"east"}))
    if not "west" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"west"}))
  def do_north(self):
    return latitude(),noduplicates()
  def do_south(self):
    return latitude(),noduplicates()
  def do_east(self):
    return longitude(),noduplicates()
  def do_west(self):
    return longitude(),noduplicates()
  def do_rotation(self):
    return longitude(),noduplicates()
class LatLonAltBox(validatorBase, LatLonBox):
  # <LatLonAltBox>: LatLonBox plus optional altitude bounds.
  def do_minAltitude(self):
    return Float(),noduplicates()
  def do_maxAltitude(self):
    return Float(), noduplicates()
  def do_altitudeMode(self):
    return altitudeMode(),noduplicates()
class Lod(validatorBase):
  # <Lod>: level-of-detail pixel thresholds and fade extents.
  def do_minLodPixels(self):
    return Float(),noduplicates()
  def do_maxLodPixels(self):
    return Float(),noduplicates()
  def do_minFadeExtent(self):
    return Float(),noduplicates()
  def do_maxFadeExtent(self):
    return Float(),noduplicates()
class Metadata(validatorBase):
  # TODO do smarter validation here
  def validate(self):
    return noduplicates()
class Snippet(text):
  # <Snippet>: short plain-text description with an optional maxLines attr.
  def validate(self):
    return nonhtml(),noduplicates()
  def getExpectedAttrNames(self):
    return [(None, u'maxLines')]
class Folder(validatorBase, FeatureType, Container, Feature):
  # <Folder>: feature container; may also hold overlays and network links.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_NetworkLink(self):
    return NetworkLink()
  def do_GroundOverlay(self):
    return GroundOverlay()
  def do_ScreenOverlay(self):
    return ScreenOverlay()
class LookAt(validatorBase, LookAtType):
  # <LookAt>: concrete camera element; children come from LookAtType.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class StyleMap(validatorBase):
  # <StyleMap>: requires at least one Pair (key -> styleUrl mapping).
  def validate(self):
    if not "Pair" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Pair"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_Pair(self):
    return Pair()
class Style(validatorBase):
  # <Style>: grouped sub-styles; id must be unique within the parent.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_LineStyle(self):
    return LineStyle(), noduplicates()
  def do_PolyStyle(self):
    return PolyStyle(), noduplicates()
  def do_IconStyle(self):
    return IconStyle(), noduplicates()
  def do_ListStyle(self):
    return ListStyle(), noduplicates()
  def do_LabelStyle(self):
    return LabelStyle(), noduplicates()
  def do_BalloonStyle(self):
    return BalloonStyle(), noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
  def do_labelColor(self):
    return labelColor(),noduplicates()
class IconStyle(validatorBase, ColorStyleType):
  # <IconStyle>: icon appearance (scale, heading, hotSpot, Icon).
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_heading(self):
    return angle360(),noduplicates()
  def do_Icon(self):
    return Icon(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
  def do_hotSpot(self):
    return overlayxy(), noduplicates()
class Icon(validatorBase):
  # <Icon>: image resource reference; href is required.
  def validate(self):
    if not 'href' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"href"}))
  def do_href(self):
    # if not self.getFeedType() == TYPE_KML20 and self.startswith('root://'):
    #   self.log(DeprecatedRootHref())
    return text(),noduplicates() # would be url, but has these weird root://
  def do_x(self):
    return noiconoffset()
  def do_y(self):
    return noiconoffset()
  def do_w(self):
    return noiconoffset()
  def do_h(self):
    return noiconoffset()
  def do_refreshInterval(self):
    return Float(), noduplicates()
  def do_refreshMode(self):
    return refreshMode(), noduplicates()
  def do_viewRefreshMode(self):
    return viewRefreshMode(), noduplicates()
  def do_viewRefreshTime(self):
    return Float(), noduplicates()
  def do_viewBoundScale(self):
    return Float(), noduplicates()
class BalloonStyle(validatorBase):
  # <BalloonStyle>: balloon colors and text template.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_textColor(self):
    return color(),noduplicates()
  def do_bgColor(self):
    return color(),noduplicates()
  def do_color(self):
    return color(),noduplicates()
  def do_text(self):
    return text(),noduplicates()
class ListStyle(validatorBase):
  # <ListStyle>: list-view appearance of a feature.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_bgColor(self):
    return color(),noduplicates()
  def do_ItemIcon(self):
    return ItemIcon()
  def do_listItemType(self):
    return listItemType(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
class ItemIcon(validatorBase):
  # <ItemIcon>: list-view icon; href required, state restricted to enum.
  def validate(self):
    if not 'href' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"href"}))
  def do_href(self):
    return text(),noduplicates()
  def do_state(self):
    return itemIconState(),noduplicates()
class LabelStyle(validatorBase, ColorStyleType):
  # <LabelStyle>: label color/scale.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_labelColor(self):
    return labelColor(),noduplicates()
  def do_scale(self):
    return Float(),noduplicates()
class LineStyle(validatorBase, ColorStyleType):
  # <LineStyle>: line color/width.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_width(self):
    return Float(),noduplicates()
class PolyStyle(validatorBase, ColorStyleType):
  # <PolyStyle>: polygon fill/outline flags.
  def prevalidate(self):
    self.validate_optional_attribute((None,'id'), unique('id',self.parent))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_fill(self):
    return zeroone(), noduplicates()
  def do_outline(self):
    return zeroone(), noduplicates()
class Link(validatorBase, LinkType):
  # <Link>: concrete link element; children come from LinkType.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class Pair(validatorBase):
  # <Pair>: StyleMap entry; both key and styleUrl are required.
  def validate(self):
    if not 'key' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"key"}))
    if not 'styleUrl' in self.children:
      self.log(MissingElement({"parent":self.name, "element":"styleUrl"}))
  def do_key(self):
    return styleState(),noduplicates()
  def do_styleUrl(self):
    return text(),noduplicates()
class Point(validatorBase, GeometryElements):
  # <Point>: a coordinates child is required.
  def validate(self):
    if not "coordinates" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_coordinates(self):
    return coordinates()
class Model(validatorBase):
  # <Model>: 3D model reference; a Link to the model resource is required.
  # TODO seems to me that Location and Orientation ought to be required?
  def validate(self):
    if not "Link" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"Link"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_altitudeMode(self):
    return altitudeMode(), noduplicates()
  def do_Location(self):
    return Location(), noduplicates()
  def do_Orientation(self):
    return Orientation(), noduplicates()
  def do_Scale(self):
    return Scale(), noduplicates()
  def do_Link(self):
    return Link(), noduplicates()
  def do_ResourceMap(self):
    return ResourceMap(), noduplicates()
class ResourceMap(validatorBase):
  # <ResourceMap>: texture aliasing for a Model.
  def do_Alias(self):
    return Alias()
class Alias(validatorBase):
  # <Alias>: maps a model-internal texture path to an external one.
  def do_targetHref(self):
    return text(),noduplicates()
  def do_sourceHref(self):
    return text(),noduplicates()
class Location(validatorBase):
  # <Location>: Model placement; all three coordinates are required.
  # TODO they are loosely defined in the schema, but 0,0,0 makes no sense.
  def validate(self):
    if not "longitude" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"longitude"}))
    if not "latitude" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"latitude"}))
    if not "altitude" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"altitude"}))
  def do_longitude(self):
    return longitude(), noduplicates()
  def do_latitude(self):
    return latitude(), noduplicates()
  def do_altitude(self):
    return FloatWithNegative(), noduplicates()
class Scale(validatorBase):
  # <Scale>: per-axis scale factors for a Model.
  def do_x(self):
    return Float(), noduplicates()
  def do_y(self):
    return Float(), noduplicates()
  def do_z(self):
    return Float(), noduplicates()
class Orientation(validatorBase):
  # <Orientation>: Model rotation (heading/tilt/roll).
  def do_heading(self):
    return angle360(), noduplicates()
  def do_tilt(self):
    return angle360(), noduplicates()
  def do_roll(self):
    return angle360(), noduplicates()
class Polygon(validatorBase, GeometryElements):
  # <Polygon>: an outer boundary is required; inner boundaries may repeat.
  def validate(self):
    if not "outerBoundaryIs" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"outerBoundaryIs"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_outerBoundaryIs(self):
    return boundary(), noduplicates()
  def do_innerBoundaryIs(self):
    return boundary()
class boundary(validatorBase):
  # Polygon boundary wrapper: must contain a LinearRing.
  def validate(self):
    if not "LinearRing" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"LinearRing"}))
  def do_LinearRing(self):
    return LinearRing()
class LineString(validatorBase, GeometryElements):
  # <LineString>: a coordinates child is required.
  def validate(self):
    if not "coordinates" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_coordinates(self):
    return coordinates(), noduplicates()
class LinearRing(validatorBase, GeometryElements):
  # <LinearRing>: closed line; a coordinates child is required.
  def validate(self):
    if not "coordinates" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"coordinates"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_coordinates(self):
    return coordinates(), noduplicates()
class TimeSpan(validatorBase):
  # <TimeSpan>: optional begin/end timestamps.
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_begin(self):
    return w3cdtf(),noduplicates()
  def do_end(self):
    return w3cdtf(),noduplicates()
class TimeStamp(validatorBase):
  # <TimeStamp>: a single required 'when' timestamp.
  def validate(self):
    if not "when" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"when"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
  def do_when(self):
    return unbounded_w3cdtf(),noduplicates()
class kmlunits(enumeration):
  # Allowed units for overlayXY/screenXY anchor attributes.
  error = InvalidKmlUnits
  valuelist = [
    "fraction", "pixels", "insetPixels"
  ]
class colorMode(enumeration):
  # Allowed values for the colorMode child of color styles.
  error = InvalidColorMode
  valuelist = [
    "normal", "random"
  ]
class refreshMode(enumeration):
  # Allowed values for a link's refreshMode.
  error = InvalidRefreshMode
  valuelist = [
    "onChange", "onInterval", "onExpire"
  ]
class viewRefreshMode(enumeration):
  # Allowed values for a link's viewRefreshMode.
  error = InvalidViewRefreshMode
  valuelist = [
    "never", "onRequest", "onStop", "onRegion"
  ]
class styleState(enumeration):
  # Allowed values for a StyleMap Pair key.
  error = InvalidStyleState
  valuelist = [
    "normal", "highlight"
  ]
class listItemType(enumeration):
  # Allowed values for ListStyle's listItemType.
  error = InvalidListItemType
  valuelist = [
    "radioFolder", "check", "checkHideChildren", "checkOffOnly"
  ]
class itemIconState(enumeration):
  # Allowed values for ItemIcon's state (single or space-combined).
  error = InvalidItemIconState
  valuelist = [
    "open", "closed", "error", "fetching0", "fetching1", "fetching2",
    "open error", "closed error", "fetching0 error", "fetching1 error",
    "fetching2 error"
  ]
class altitudeMode(enumeration):
  # Allowed values for altitudeMode on geometries and overlays.
  error = InvalidAltitudeMode
  valuelist = [
    "clampToGround", "relativeToGround", "absolute"
  ]
class SchemaFieldType(enumeration):
  # Allowed values for a Schema field's type attribute.
  error = InvalidSchemaFieldType
  valuelist = [
    "string", "int", "uint", "short", "ushort", "float", "double","bool"
  ]
#
# Deprecated in 2.0
#
class antialias(validatorBase):
  # KML 1.x <antialias>: dropped in 2.0 with no replacement.
  def prevalidate(self):
    self.log(Deprecated({"element":self.name, "replacement":"none"}))
  def validate(self):
    # NOTE(review): returning fresh validators from validate() does not
    # check self.value; cf. zeroone.validate which inspects the value.
    return zeroone(),noduplicates()
class View(validatorBase, LookAtType):
  # KML 2.0 <View>: superseded by <LookAt>.
  def prevalidate(self):
    self.log(Deprecated({"element":self.name, "replacement":"LookAt"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
#
# Deprecated in 2.1
#
class labelColor(text):
  """Deprecated (post-KML 2.0) label colour: eight hexadecimal digits."""
  def prevalidate(self):
    # Only deprecated after KML 2.0.
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"LabelStyle"}))
  def validate(self):
    # Fix: anchor the pattern at both ends so trailing garbage after the
    # eight hex digits is rejected, matching the `color` validator below.
    if not re.match("^([a-f]|[A-F]|[0-9]){8}$",self.value):
      return self.log(InvalidColor({'value':self.value}))
class geomColor(text):
  """Deprecated (post-KML 2.0) geometry colour: eight hexadecimal digits."""
  def prevalidate(self):
    # Only deprecated after KML 2.0.
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"color"}))
  def validate(self):
    # Fix: anchor the pattern at both ends so trailing garbage after the
    # eight hex digits is rejected, matching the `color` validator below.
    if not re.match("^([a-f]|[A-F]|[0-9]){8}$",self.value):
      return self.log(InvalidColor({'value':self.value}))
class geomScale(text):
  # Deprecated (post-KML 2.0) geometry scale factor.
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"scale"}))
  def validate(self):
    # NOTE(review): returns a Float validator instance; the return value of
    # validate() appears unused by the framework — confirm intent.
    return Float()
class GeometryCollection(validatorBase, Geometry):
  # Deprecated (post-KML 2.0) container; Geometry mixin supplies children.
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"MultiGeometry"}))
  def getExpectedAttrNames(self):
    return [(None, u'id')]
class Url(validatorBase, LinkType):
  # Deprecated (post-KML 2.0) alias of Link; LinkType supplies children.
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Link"}))
class refreshVisibility(validatorBase):
  """Deprecated (post-KML 2.0) boolean element."""
  def prevalidate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Update"}))
  def validate(self):
    # Fix: instantiate zeroone — the bare class was returned before, which
    # is inconsistent with the other deprecated elements (e.g. antialias).
    return zeroone(), noduplicates()
# In theory, the spec also supports things like .2 if unit is fractions. ugh.
class noiconoffset(text):
  # Deprecated (post-KML 2.0) icon offset value.
  def validate(self):
    if not self.getFeedType() == TYPE_KML20:
      self.log(Deprecated({"element":self.name, "replacement":"Icon"}))
    # NOTE(review): return value of validate() appears unused by the framework.
    return Integer(), noduplicates()
#
# Validators
#
class zeroone(text):
  """Boolean element whose only legal values are the strings '0' and '1'."""
  def normalizeWhitespace(self):
    # Deliberately keep the raw text: surrounding whitespace should be
    # reported as invalid rather than silently trimmed.
    pass
  def validate(self):
    if self.value.lower() not in ('0', '1'):
      self.log(InvalidZeroOne({"parent":self.parent.name, "element":self.name,"value":self.value}))
class color(text):
  """KML colour value: exactly eight hexadecimal digits."""
  def validate(self):
    if re.match("^([a-f]|[A-F]|[0-9]){8}$",self.value):
      return
    return self.log(InvalidColor({'value':self.value}))
class coordinates(text):
  """KML coordinate list: whitespace-separated lon,lat[,alt] tuples.

  Each longitude must lie in [-180, 180] and each latitude in [-90, 90].
  """
  def validate(self):
    values = self.value.strip().split()
    for value in values:
      # ensure that commas are only used to separate lat and long (and alt)
      if not re.match('^[-+.0-9]+,[-+.0-9]+(,[-+.0-9]+)?$',
                      value.strip()):
        return self.log(InvalidKmlCoordList({'value':self.value}))
      # Now validate individual coordinates
      point = value.split(',')
      # First coordinate is longitude
      lon = point[0].strip()
      try:
        lon = float(lon)
        if lon > 180 or lon < -180:
          raise ValueError
        self.log(ValidLongitude({"parent":self.parent.name, "element":self.name, "value":lon}))
      except ValueError:
        # Fix: the original referenced `lon` here even when float() itself
        # failed, which raised NameError; log the raw text instead.
        self.log(InvalidKmlLongitude({"parent":self.parent.name, "element":self.name, "value":lon}))
      # Second coordinate is latitude
      lat = point[1].strip()
      try:
        lat = float(lat)
        if lat > 90 or lat < -90:
          raise ValueError
        self.log(ValidLatitude({"parent":self.parent.name, "element":self.name, "value":lat}))
      except ValueError:
        # Fix: same unbound-name bug as longitude above.
        self.log(InvalidKmlLatitude({"parent":self.parent.name, "element":self.name, "value":lat}))
      # Third coordinate value (altitude) has to be float, if present
      if len(point) == 3:
        if not re.match('\d+\.?\d*$', point[2]):
          self.log(InvalidFloat({"attr":self.name, "value":point[2]}))
class angle360(text):
  """Angle in degrees; must parse as a float within [-360, 360]."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      degrees = float(self.value)
    except ValueError:
      self.log(InvalidAngle(params))
      return
    if -360 <= degrees <= 360:
      self.log(ValidAngle(params))
    else:
      self.log(InvalidAngle(params))
class FloatWithNegative(text):
  """Decimal number that may carry a leading minus sign."""
  def validate(self, name=None):
    if re.match('-?\d+\.?\d*$', self.value):
      return
    self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
| Python |
"""$Id: validators.py 1058 2009-08-22 09:35:18Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1058 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
import re, time, datetime
from uri import canonicalForm, urljoin
from rfc822 import AddressList, parsedate, parsedate_tz, mktime_tz
# RDF syntax namespace URI, used by the RSS 1.0 validation paths below.
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
def implausible_822(value):
  """Return True when an RFC 822 date string is implausible: unparseable,
  before 1990, or further in the future than the available granularity
  (a day without timezone support, ten minutes with it).

  Fix: the original opened with ``value[0] < 1990`` — comparing a character
  to an int — which under Python 2 cross-type ordering was always False
  (dead code), so it has been dropped.  The fallback path also compared the
  raw *string* to a struct_time (always False); it now compares the parsed
  time tuple, and guards against parsedate() returning None.
  """
  try:
    from rfc822 import parsedate_tz, mktime_tz
  except:
    # no time zone functions available, granularity is a day
    pvalue = parsedate(value)
    if pvalue is None:
      # unparseable: definitely problematic
      return True
    return pvalue > time.gmtime(time.time()+86400) or pvalue[0] < 1990
  try:
    pvalue = parsedate_tz(value)
    zvalue = mktime_tz(pvalue)
  except:
    # outside of range of what parsedate supports: definitely problematic
    return True
  # when time zone functions are available, granularity is ten minutes
  return zvalue > time.time()+600 or pvalue[0] < 1990
def implausible_8601(value):
  """Return True when an ISO 8601 date string is implausible: before 1990
  or further in the future than the available granularity allows."""
  # Cheap lexical check first: ISO 8601 strings sort chronologically.
  if value < '1990-01-01':
    return True
  try:
    import xml.utils.iso8601
  except:
    # no time zone functions available, granularity is a day
    tomorrow = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time()+86400))
    return value > tomorrow
  try:
    parsed = xml.utils.iso8601.parse(value)
  except:
    # outside of range of what parse supports: definitely problematic
    return True
  # when time zone functions are available, granularity is ten minutes
  return parsed > time.time() + 600
#
# Valid mime type
#
# Matches type/subtype with optional ;param=value pairs (quoted or token),
# anchored at the end only (re.match anchors the start).
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+(\s*;\s*[^\s()<>,;:\\"/[\]?=]+=("(\\"|[^"])*"|[^\s()<>,;:\\"/[\]?=]+))*$')
#
# Extensibility hook: logic varies based on type of feed
#
def any(self, name, qname, attrs):
  # Extensibility hook bound as a method on validator classes.
  # NOTE: intentionally shadows the builtin `any` at module level.
  if self.getFeedType() == TYPE_RSS1:
    from rdf import rdfExtension
    return rdfExtension(qname)
  return eater()
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
  """Consumes an element and all of its descendants without validating
  their content.  Useful to prevent cascading of errors; still reports
  bad characters and namespace problems encountered along the way."""
  def getExpectedAttrNames(self):
    # accept whatever attributes are present on the eaten element
    return self.attrs.getNames()
  def characters(self, string):
    # C1 control characters and U+FFFD are flagged even while eating
    for c in string:
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
  def startElementNS(self, name, qname, attrs):
    # RSS 2.0 arbitrary restriction on extensions
    feedtype=self.getFeedType()
    if (not qname) and feedtype and (feedtype==TYPE_RSS2) and self.name.find('_')>=0:
      from logging import NotInANamespace
      self.log(NotInANamespace({"parent":self.name, "element":name, "namespace":'""'}))
    # ensure element is "namespace well formed"
    if name.find(':') != -1:
      from logging import MissingNamespace
      self.log(MissingNamespace({"parent":self.name, "element":name}))
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
      # attribute values get the same bad-character scan as text content
      for c in attrs.get((namespace,attr)):
        if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
          from validators import BadCharacters
          self.log(BadCharacters({"parent":name, "element":attr}))
    # eat children
    self.push(self.__class__(), name, attrs)
from HTMLParser import HTMLParser, HTMLParseError
class HTMLValidator(HTMLParser):
  """Parses an HTML fragment and logs events against the owning validator
  element: NotHtml for unknown tags, SecurityRisk/SecurityRiskAttr for
  tags and attributes outside the whitelists, DangerousStyleAttr for
  suspect CSS, and BadCharacters for C1/U+FFFD character references."""
  # every tag recognised as HTML at all; anything else means "not HTML"
  htmltags = [
    "a", "abbr", "acronym", "address", "applet", "area", "article", "aside",
    "audio", "b", "base", "basefont", "bdo", "big", "blockquote", "body",
    "br", "button", "canvas", "caption", "center", "cite", "code", "col",
    "colgroup", "command", "datagrid", "datalist", "dd", "del", "details",
    "dialog", "dir", "div", "dfn", "dl", "dt", "em", "event-source",
    "fieldset", "figure", "font", "footer", "form", "frame", "frameset",
    "h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hr", "html", "i",
    "iframe", "img", "input", "ins", "isindex", "kbd", "label", "legend",
    "li", "link", "m", "map", "menu", "meta", "meter", "nav", "noframes",
    "noscript", "object", "ol", "output", "optgroup", "option", "p", "param",
    "pre", "progress", "q", "s", "samp", "script", "section", "select",
    "small", "source", "span", "strike", "strong", "style", "sub", "sup",
    "table", "tbody", "td", "textarea", "tfoot", "th", "thead", "time",
    "title", "tr", "tt", "u", "ul", "var", "xmp", "plaintext", "embed",
    "comment", "listing", "video", "wbr"]
  # HTML tags considered safe; known tags outside this list are flagged
  # as a security risk
  acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
      'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
      'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
      'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
      'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer',
      'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
      'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map',
      'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup',
      'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
      'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub',
      'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead',
      'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript', 'wbr']
  # attributes considered safe; anything else is flagged
  acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'autoplay', 'autocomplete', 'autofocus', 'axis',
      'background', 'balance', 'bgcolor', 'bgproperties', 'border',
      'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
      'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
      'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
      'colspan', 'compact', 'contenteditable', 'coords', 'data', 'datafld',
      'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir',
      'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
      'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
      'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
      'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
      'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
      'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
      'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
      'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
      'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
      'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
      'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
      'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
      'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
      'xml:lang', 'xmlns']
  # CSS property names allowed inside style="" attributes
  acceptable_css_properties = ['azimuth', 'background', 'background-color',
      'border', 'border-bottom', 'border-bottom-color', 'border-bottom-style',
      'border-bottom-width', 'border-collapse', 'border-color', 'border-left',
      'border-left-color', 'border-left-style', 'border-left-width',
      'border-right', 'border-right-color', 'border-right-style',
      'border-right-width', 'border-spacing', 'border-style', 'border-top',
      'border-top-color', 'border-top-style', 'border-top-width', 'border-width',
      'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float',
      'font', 'font-family', 'font-size', 'font-style', 'font-variant',
      'font-weight', 'height', 'letter-spacing', 'line-height',
      'list-style-type', 'margin', 'margin-bottom', 'margin-left',
      'margin-right', 'margin-top', 'overflow', 'padding', 'padding-bottom',
      'padding-left', 'padding-right', 'padding-top', 'pause', 'pause-after',
      'pause-before', 'pitch', 'pitch-range', 'richness', 'speak',
      'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate',
      'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi',
      'vertical-align', 'voice-family', 'volume', 'white-space', 'width']
  # survey of common keywords found in feeds
  acceptable_css_keywords = ['aqua', 'auto', 'black', 'block', 'blue', 'bold',
      'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted',
      'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime',
      'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
      'transparent', 'underline', 'white', 'yellow']
  # colours and dimensioned values accepted inside the shorthand properties
  valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
    '\d?\.?\d?\d(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
  mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
      'merror', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
      'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
      'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none', 'semantics']
  mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
      'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
      'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
      'maxsize', 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign',
      'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
      'xlink:type', 'xmlns', 'xmlns:xlink']
  # svgtiny - foreignObject + linearGradient + radialGradient + stop - image
  svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
      'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
      'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
      'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
      'svg', 'switch', 'text', 'title', 'tspan', 'use']
  # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
  svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
      'arabic-form', 'ascent', 'attributeName', 'attributeType',
      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
      'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
      'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
      'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
      'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
      'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
      'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
      'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
      'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
      'min', 'name', 'offset', 'opacity', 'orient', 'origin',
      'overline-position', 'overline-thickness', 'panose-1', 'path',
      'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
      'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
      'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
      'stroke-width', 'style', 'systemLanguage', 'target', 'text-anchor', 'to',
      'transform', 'type', 'u1', 'u2', 'underline-position',
      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href',
      'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base',
      'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2',
      'zoomAndPan']
  def log(self,msg):
    # Translate the parser's position (relative to the fragment) into an
    # offset from the owning element's location in the document.
    offset = [self.element.line + self.getpos()[0] - 1 -
      self.element.dispatcher.locator.getLineNumber(),
      -self.element.dispatcher.locator.getColumnNumber()]
    self.element.log(msg, offset)
  def __init__(self,value,element):
    # `element` is the validator element whose content `value` is being checked.
    self.element=element
    # stack of currently open tags, used to suppress warnings inside
    # embed/object and to match end tags
    self.stack = []
    self.valid = True
    HTMLParser.__init__(self)
    # <?import> processing instructions are an IE-specific attack vector
    if value.lower().find('<?import ') >= 0:
      self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":"?import"}))
    try:
      self.feed(value)
      self.close()
      if self.valid:
        self.log(ValidHtml({"parent":self.element.parent.name, "element":self.element.name}))
    except HTMLParseError, msg:
      element = self.element
      # re-anchor the parse-error position onto the document locator
      offset = [element.line - element.dispatcher.locator.getLineNumber(),
        - element.dispatcher.locator.getColumnNumber()]
      match = re.search(', at line (\d+), column (\d+)',str(msg))
      if match: offset[0] += int(match.group(1))-1
      element.log(NotHtml({"parent":element.parent.name, "element":element.name, "message":"Invalid HTML", "value": str(msg)}),offset)
  def handle_starttag(self, tag, attributes):
    # unknown tag => not HTML at all; known-but-unsafe => security risk
    if tag.lower() not in self.htmltags:
      self.log(NotHtml({"parent":self.element.parent.name, "element":self.element.name,"value":tag, "message": "Non-html tag"}))
      self.valid = False
    elif tag.lower() not in HTMLValidator.acceptable_elements:
      # content nested inside embed/object is the plugin's business
      if not 'embed' in self.stack and not 'object' in self.stack:
        self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":tag}))
    else:
      for (name,value) in attributes:
        if name.lower() == 'style':
          for evil in checkStyle(value):
            self.log(DangerousStyleAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":"style", "value":evil}))
        elif name.lower() not in self.acceptable_attributes:
          self.log(SecurityRiskAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":name}))
    self.stack.append(tag)
  def handle_endtag(self, tag):
    # pop back to the matching start tag, discarding unclosed children
    if tag in self.stack:
      while self.stack[-1] != tag: self.stack.pop()
      self.stack.pop()
  def handle_charref(self, name):
    # NOTE(review): only lowercase 'x' hex references are recognised here;
    # an uppercase '&#X41;' would hit int() with a non-digit — confirm.
    if name.startswith('x'):
      value = int(name[1:],16)
    else:
      value = int(name)
    # C1 control range and the replacement character are reported
    if 0x80 <= value <= 0x9F or value == 0xfffd:
      self.log(BadCharacters({"parent":self.element.parent.name,
        "element":self.element.name, "value":"&#" + name + ";"}))
#
# Scrub CSS properties for potentially evil intent
#
def checkStyle(style):
  """Return the suspect property names/keywords found in a CSS style
  attribute, or [style] itself when the whole string looks malformed.
  An empty list means the style is acceptable."""
  # Whole-string sanity checks: character repertoire, then the shape of
  # `prop: value;` declarations.
  if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
    return [style]
  if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style):
    return [style]
  flagged = []
  for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style.lower()):
    if prop not in HTMLValidator.acceptable_css_properties:
      # property itself is not whitelisted
      if prop not in flagged:
        flagged.append(prop)
    elif prop.split('-')[0] in ['background','border','margin','padding']:
      # shorthand properties: each keyword must be a known word or a
      # recognisable colour/dimension value
      for keyword in value.split():
        ok = keyword in HTMLValidator.acceptable_css_keywords or \
             HTMLValidator.valid_css_values.match(keyword)
        if not ok and keyword not in flagged:
          flagged.append(keyword)
  return flagged
#
# This class simply consumes html events.  Identifies unsafe events
#
class htmlEater(validatorBase):
  """Consumes embedded (X)HTML events, logging unsafe elements, attributes
  and style values against the same whitelists as HTMLValidator."""
  def getExpectedAttrNames(self):
    # accept any attributes that are present (implicitly None otherwise)
    if self.attrs and len(self.attrs):
      return self.attrs.getNames()
  def textOK(self): pass
  def startElementNS(self, name, qname, attrs):
    for attr in attrs.getNames():
      # only un-namespaced attributes are screened here
      if attr[0]==None:
        if attr[1].lower() == 'style':
          for value in checkStyle(attrs.get(attr)):
            self.log(DangerousStyleAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1], "value":value}))
        elif attr[1].lower() not in HTMLValidator.acceptable_attributes:
          self.log(SecurityRiskAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1]}))
    # NOTE(review): pushes self.name rather than the child's name — confirm
    # whether this is intentional before changing.
    self.push(htmlEater(), self.name, attrs)
    if name.lower() not in HTMLValidator.acceptable_elements:
      self.log(SecurityRisk({"parent":self.parent.name, "element":self.name, "tag":name}))
  def endElementNS(self,name,qname):
    pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
class text(validatorBase):
  """Element expected to contain only text — no child elements — with a
  special carve-out for RSS 1.0 RDF constructs."""
  def textOK(self): pass
  def getExpectedAttrNames(self):
    # RSS 1.0 literals may carry the standard RDF attributes
    if self.getFeedType() == TYPE_RSS1:
      return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType'),
              (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'datatype'),
              (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
    else:
      return []
  def startElementNS(self, name, qname, attrs):
    if self.getFeedType() == TYPE_RSS1:
      # mixing text and elements is only legal with rdf:parseType="Literal"
      if self.value.strip() or self.children:
        if self.attrs.get((u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')) != 'Literal':
          self.log(InvalidRDF({"message":"mixed content"}))
      # NOTE(review): here `qname` carries the namespace URI (see push calls)
      if name=="div" and qname=="http://www.w3.org/1999/xhtml":
        from content import diveater
        self.push(diveater(), name, attrs)
      else:
        from rdf import rdfExtension
        self.push(rdfExtension(qname), name, attrs)
    else:
      # any child element is unexpected: report and eat it
      from base import namespaces
      ns = namespaces.get(qname, '')
      if name.find(':') != -1:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":name}))
      else:
        self.log(UndefinedElement({"parent":self.name, "element":name}))
      self.push(eater(), name, attrs)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
  """Flags an element that appears more than once among its parent's
  children.  `message` is the event class logged on a duplicate."""
  def __init__(self, message=DuplicateElement):
    # keep the original order: record the message before base init
    self.message = message
    validatorBase.__init__(self)
  def startElementNS(self, name, qname, attrs):
    pass  # child elements are irrelevant here
  def characters(self, string):
    pass  # text content is irrelevant here
  def prevalidate(self):
    if self.name in self.parent.children:
      self.log(self.message({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addr-spec
#
class addr_spec(text):
  """Validates an e-mail addr-spec: strict TLD-aware pattern first, then a
  lenient pattern backed by a live DNS lookup of the host part."""
  domains = """
  AC AD AE AERO AF AG AI AL AM AN AO AQ AR ARPA AS ASIA AT AU AW AX AZ BA BB
  BD BE BF BG BH BI BIZ BJ BM BN BO BR BS BT BV BW BY BZ CA CAT CC CD CF CG
  CH CI CK CL CM CN CO COM COOP CR CU CV CX CY CZ DE DJ DK DM DO DZ EC EDU
  EE EG ER ES ET EU FI FJ FK FM FO FR GA GB GD GE GF GG GH GI GL GM GN GOV
  GP GQ GR GS GT GU GW GY HK HM HN HR HT HU ID IE IL IM IN INFO INT IO IQ IR
  IS IT JE JM JO JOBS JP KE KG KH KI KM KN KP KR KW KY KZ LA LB LC LI LK LR
  LS LT LU LV LY MA MC MD ME MG MH MIL MK ML MM MN MO MOBI MP MQ MR MS MT MU
  MUSEUM MV MW MX MY MZ NA NAME NC NE NET NF NG NI NL NO NP NR NU NZ OM ORG
  PA PE PF PG PH PK PL PM PN PR PRO PS PT PW PY QA RE RO RS RU RW SA SB SC
  SD SE SG SH SI SJ SK SL SM SN SO SR ST SU SV SY SZ TC TD TEL TF TG TH TJ
  TK TL TM TN TO TP TR TRAVEL TT TV TW TZ UA UG UK UM US UY UZ VA VC VE VG
  VI VN VU WF WS XN--0ZWM56D XN--11B5BS3A9AJ6G XN--80AKHBYKNJ4F
  XN--9T4B11YI5A XN--DEBA0AD XN--G6W251D XN--HGBK6AJ7F53BBA
  XN--HLCJ6AYA9ESC7A XN--JXALPDLP XN--KGBECHTV XN--ZCKZAH YE YT YU ZA ZM ZW
  """ # http://data.iana.org/TLD/tlds-alpha-by-domain.txt
  # host part: dotted quad or dotted labels ending in a known TLD
  domain_re = '''(([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([A-Z0-9\-]+\.)+))(%s|[0-9]{1,3})''' % '|'.join(domains.strip().split())
  email_re = re.compile("([A-Z0-9_\-\+\.\']+)@" + domain_re + "$", re.I)
  # fallback pattern for addresses with hosts outside the TLD snapshot
  simple_email_re = re.compile('^[\w._%+-]+@[A-Za-z][\w.-]+$')
  message = InvalidAddrSpec
  def validate(self, value=None):
    if not value: value=self.value
    if not self.email_re.match(value):
      if not self.simple_email_re.match(value):
        self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        # lenient match: confirm the host actually resolves.
        # NOTE(review): this performs a live DNS lookup at validation time.
        try:
          import socket
          socket.gethostbyname(value.split('@')[-1])
        except:
          self.log(UnknownHost({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      self.log(ValidContact({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso639 language code
#
def iso639_validate(log, value, element, parent):
  """Log ValidLanguage/InvalidLanguage for an ISO 639 code, which may
  carry a subtag after a hyphen (e.g. en-US)."""
  import iso639codes
  # Only the primary subtag (before the first '-') is checked.
  lang = value.split('-', 1)[0]
  if iso639codes.isoLang.has_key(unicode.lower(unicode(lang))):
    log(ValidLanguage({"parent":parent, "element":element}))
  else:
    log(InvalidLanguage({"parent":parent, "element":element, "value":value}))
class iso639(text):
  # Text element holding an ISO 639 language code.
  def validate(self):
    iso639_validate(self.log, self.value, self.name, self.parent.name)
#
# Encoding charset
#
class Charset(text):
  """Character-encoding name; must be resolvable by Python's codec registry."""
  def validate(self):
    import codecs
    try:
      codecs.lookup(self.value)
    except LookupError:
      # Fix: was a bare except, which also swallowed unrelated errors
      # (including KeyboardInterrupt).  codecs.lookup raises LookupError
      # for unknown encoding names.
      self.log(InvalidEncoding({'value': self.value}))
#
# Mime type
#
class MimeType(text):
  """Media type of the form type/subtype with optional parameters."""
  def validate(self):
    if mime_re.match(self.value):
      return
    self.log(InvalidMIMEType({'attr':'type'}))
class MediaRange(MimeType):
  # Comma-separated list of media types; q= parameters are not defined here.
  def validate(self):
    if not self.value.strip(): return
    original_value = self.value
    # NOTE(review): self.value is rebound per component so MimeType.validate
    # sees one type at a time, and is left holding the last component.
    for value in original_value.split(','):
      self.value = value.strip()
      if value.find(';q=')>=0:
        self.log(UndefinedParam({'param':'q'}))
      MimeType.validate(self)
#
# iso8601 dateTime
#
class unbounded_iso8601(text):
iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
"(Z|([+-]\d\d:\d\d))?)?)?)?$")
message = InvalidISO8601DateTime
def validate(self):
if not self.iso8601_re.match(self.value):
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
work=self.value.split('T')
date=work[0].split('-')
year=int(date[0])
if len(date)>1:
month=int(date[1])
try:
if len(date)>2: datetime.date(year,month,int(date[2]))
except ValueError, e:
return self.log(self.message({"parent":self.parent.name, "element":self.name, "value":str(e)}))
if len(work) > 1:
time=work[1].split('Z')[0].split('+')[0].split('-')[0]
time=time.split(':')
if int(time[0])>23:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>1 and int(time[1])>60:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>2 and float(time[2])>60.0:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
self.log(ValidW3CDTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
return 1
class iso8601(unbounded_iso8601):
  """Bounded ISO 8601 date-time: syntax check plus a plausibility
  (not-too-far-in-the-future) check."""
  # bounded=0 subclasses skip only the plausibility check
  bounded = 1
  def validate(self):
    # Fix: the original wrote `self.bounded and unbounded_iso8601.validate`,
    # so unbounded subclasses (e.g. unbounded_w3cdtf, used for KML <when>)
    # skipped syntax validation entirely.  Always validate; gate only the
    # plausibility check on `bounded`.
    if unbounded_iso8601.validate(self) and self.bounded:
      if implausible_8601(self.value):
        self.log(ImplausibleDate({"parent":self.parent.name,
          "element":self.name, "value":self.value}))
    return 1
class w3cdtf(iso8601):
  # The same as in iso8601, except a timezone is not optional when
  # a time is present
  iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
                          "(Z|([+-]\d\d:\d\d)))?)?)?$")
  message = InvalidW3CDTFDate
class unbounded_w3cdtf(w3cdtf):
  # W3CDTF without the plausibility check (future dates allowed).
  bounded = 0
class rfc3339(iso8601):
  # The same as in iso8601, except that the only thing that is optional
  # is the seconds
  iso8601_re = re.compile("^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d*)?" +
                          "(Z|([+-]\d\d:\d\d))$")
  message = InvalidRFC3339Date
class iso8601_date(iso8601):
  # Full date-time validation, then require the date-only YYYY-MM-DD form.
  date_re = re.compile("^\d\d\d\d-\d\d-\d\d$")
  def validate(self):
    if iso8601.validate(self):
      if not self.date_re.search(self.value):
        self.log(InvalidISO8601Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
iana_schemes = [ # http://www.iana.org/assignments/uri-schemes.html
  # Snapshot of the IANA URI-scheme registry; schemes outside this list
  # trigger SchemeNotIANARegistered in rfc2396.validate.
  "ftp", "http", "gopher", "mailto", "news", "nntp", "telnet", "wais",
  "file", "prospero", "z39.50s", "z39.50r", "cid", "mid", "vemmi",
  "service", "imap", "nfs", "acap", "rtsp", "tip", "pop", "data", "dav",
  "opaquelocktoken", "sip", "sips", "tel", "fax", "modem", "ldap",
  "https", "soap.beep", "soap.beeps", "xmlrpc.beep", "xmlrpc.beeps",
  "urn", "go", "h323", "ipp", "tftp", "mupdate", "pres", "im", "mtqp",
  "iris.beep", "dict", "snmp", "crid", "tag", "dns", "info",
  "aaa", "aaas", "cap", "iax", "icap", "iris", "iris.xpc", "iris.xpcs",
  "iris.lwz", "msrp", "msrps", "shttp", "thismessage", "tv", "xmpp"
  ]
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
  """URI per RFC 2396, with scheme-specific handling for tag:, urn:
  (including urn:uuid:) and http/ftp, plus an IANA scheme-registry check."""
  rfc2396_re = re.compile("([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
    "(\\[[0-9A-Fa-f:]+\\])?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]*$")
  urn_re = re.compile(r"^[Uu][Rr][Nn]:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
  uuid_re = re.compile(r"^[Uu][Rr][Nn]:[Uu][Uu][Ii][Dd]:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
  tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*(#[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*)?$")
  urichars_re=re.compile("[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]")
  def _logparams(self, extraParams):
    # Standard payload for every event logged by validate(), merged with
    # any caller-supplied extras.  (Factored out of nine repetitions.)
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    params.update(extraParams)
    return params
  def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
    """Validate self.value, log the outcome, and return 1 on success.

    extraParams is never mutated, so the shared {} default is safe.
    """
    success = 0
    scheme=self.value.split(':')[0].lower()
    if scheme=='tag':
      if self.tag_re.match(self.value):
        success = 1
        self.log(ValidTAG(self._logparams(extraParams)))
      else:
        self.log(InvalidTAG(self._logparams(extraParams)))
    elif scheme=="urn":
      if self.value.lower().startswith('urn:uuid:') and not \
        self.uuid_re.match(self.value):
        self.log(InvalidUUID(self._logparams(extraParams)))
      elif self.urn_re.match(self.value):
        success = 1
        self.log(ValidURN(self._logparams(extraParams)))
      else:
        self.log(InvalidURN(self._logparams(extraParams)))
    elif not self.rfc2396_re.match(self.value):
      logparams = self._logparams(extraParams)
      # pinpoint the first offending ASCII character, if any
      for c in self.value:
        if ord(c)<128 and not rfc2396.urichars_re.match(c):
          logparams['char'] = repr(str(c))
          logparams['value'] = self.value
          self.log(InvalidUriChar(logparams))
          break
      else:
        # if the IDNA-encoded form would match, the only problem is
        # non-ASCII characters: report UriNotIri instead
        try:
          if self.rfc2396_re.match(self.value.encode('idna')):
            errorClass=UriNotIri
        except:
          pass
        self.log(errorClass(logparams))
    elif scheme in ['http','ftp']:
      if not re.match('^\w+://[^/].*',self.value):
        self.log(errorClass(self._logparams(extraParams)))
      else:
        success = 1
    elif self.value.find(':')>=0 and scheme.isalpha() and scheme not in iana_schemes:
      self.log(SchemeNotIANARegistered({"parent":self.parent.name, "element":self.name, "value":scheme}))
    else:
      success = 1
    if success:
      self.log(successClass(self._logparams(extraParams)))
    return success
#
# rfc3987 iri
#
class rfc3987(rfc2396):
  # IRI: convert the host to its IDNA (punycode) form, then validate as URI.
  def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
    try:
      if self.value: self.value = self.value.encode('idna')
    except:
      pass # apparently '.' produces label too long
    return rfc2396.validate(self, errorClass, successClass, extraParams)
class rfc2396_full(rfc2396):
  # Like rfc2396, but the scheme is mandatory (fully-qualified URIs only).
  rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
    "[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
  def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
    return rfc2396.validate(self, errorClass, successClass, extraParams)
#
# URI reference resolvable relative to xml:base
#
class xmlbase(rfc3987):
  # IRI resolved relative to xml:base; warns when the resolved value is a
  # same-document reference relative to a differing document base.
  def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
    if rfc3987.validate(self, errorClass, successClass, extraParams):
      if self.dispatcher.xmlBase != self.xmlBase:
        # compare canonical forms with any fragment stripped
        docbase=canonicalForm(self.dispatcher.xmlBase).split('#')[0]
        elembase=canonicalForm(self.xmlBase).split('#')[0]
        value=canonicalForm(urljoin(elembase,self.value)).split('#')[0]
        if (value==elembase) and (elembase.encode('idna')!=docbase):
          self.log(SameDocumentReference({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
  """RFC 822/2822 date-time.  Strict RFC 2822 dates get day-of-week and
  plausibility checks; otherwise falls back to a lenient RFC 822 pattern
  (with comment/backslash stripping) and logs a 'problematical' warning."""
  # lenient, lowercase-matched RFC 822 form (2-digit years, odd zones)
  rfc822_re = re.compile("(((mon)|(tue)|(wed)|(thu)|(fri)|(sat)|(sun))\s*,\s*)?" +
    "\d\d?\s+((jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|" +
    "(nov)|(dec))\s+\d\d(\d\d)?\s+\d\d:\d\d(:\d\d)?\s+(([+-]\d\d\d\d)|" +
    "(ut)|(gmt)|(est)|(edt)|(cst)|(cdt)|(mst)|(mdt)|(pst)|(pdt)|[a-ik-z])?$",
    re.UNICODE)
  # strict RFC 2822 form (case-sensitive, 4-digit year)
  rfc2822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), )?" +
    "\d\d? ((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
    "(Nov)|(Dec)) \d\d\d\d \d\d:\d\d(:\d\d)? (([+-]?\d\d[03]0)|" +
    "(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|Z)$")
  def validate(self):
    if self.rfc2822_re.match(self.value):
      import calendar
      value = parsedate(self.value)
      try:
        if value[0] > 1900:
          # verify the stated day-of-week against the actual date
          dow = datetime.date(*value[:3]).strftime("%a")
          if self.value.find(',')>0 and dow.lower() != self.value[:3].lower():
            self.log(IncorrectDOW({"parent":self.parent.name, "element":self.name, "value":self.value[:3]}))
            return
      except ValueError, e:
        # impossible calendar date (e.g. Feb 30)
        self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":str(e)}))
        return
      if implausible_822(self.value):
        self.log(ImplausibleDate({"parent":self.parent.name,
          "element":self.name, "value":self.value}))
      else:
        self.log(ValidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
    else:
      # strip backslash escapes, then iteratively remove (comments)
      value1,value2 = '', self.value
      value2 = re.sub(r'[\\](.)','',value2)
      while value1!=value2: value1,value2=value2,re.sub('\([^(]*?\)',' ',value2)
      if not self.rfc822_re.match(value2.strip().lower()):
        self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
      else:
        self.log(ProblematicalRFC822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# Decode html entityrefs
#
from htmlentitydefs import name2codepoint
def decodehtml(data):
  """Decode HTML character references (&#160;, &#xA0;) and named entity
  references (&nbsp;) in data, returning a unicode string.  Unrecognized
  references are left exactly as written.

  Fix: the previous pattern captured the reference without its '#', so a
  hexadecimal reference like &#x41; was mis-read as the unknown entity
  'x41' and re-emitted without the '#', and a malformed bare &65; was
  wrongly decoded as a character reference.
  """
  chunks=re.split('&(#?\w+);',data)
  for i in range(1,len(chunks),2):
    ref=chunks[i]
    if ref.startswith('#x') or ref.startswith('#X'):
      # hexadecimal character reference, e.g. &#xA0;
      try:
        chunks[i]=unichr(int(ref[2:],16))
      except ValueError:
        chunks[i]='&' + ref + ';'
    elif ref.startswith('#'):
      # decimal character reference, e.g. &#160;
      if ref[1:].isdigit():
        chunks[i]=unichr(int(ref[1:]))
      else:
        chunks[i]='&' + ref + ';'
    elif ref in name2codepoint:
      # named entity, e.g. &nbsp;
      chunks[i]=unichr(name2codepoint[ref])
    else:
      chunks[i]='&' + ref + ';'
  return u"".join(map(unicode,chunks))
#
# Scan HTML for relative URLs
#
class absUrlMixin:
  """Mixin that scans HTML content for <a href> and <img src> references,
  logging ContainsRelRef for each relative URL and InvalidUriChar for
  references containing illegal ASCII URI characters."""
  anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
  img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
  # An absolute reference begins with a scheme ("http:", "mailto:", ...).
  absref_re = re.compile("\w+:")
  def validateAbsUrl(self,value):
    refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
    # findall yields one 3-tuple per match (double-quoted / single-quoted /
    # unquoted alternatives); `a or b` folding picks the non-empty group.
    for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
      ref = decodehtml(ref).strip()
      if not self.absref_re.match(ref):
        for c in ref:
          # Only ASCII characters are checked; non-ASCII is handled elsewhere.
          if ord(c)<128 and not rfc2396.urichars_re.match(c):
            # print "Invalid character:", ref
            # self.log(InvalidUriChar({'value':repr(str(c))}))
            self.log(InvalidUriChar({'value':ref, 'char':repr(str(c))}))
            break
        else:
          # No illegal character found: it is simply a relative reference.
          self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name, "value": ref}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
  """Mixin that runs content through HTMLValidator, which logs any
  'devious' (potentially unsafe) markup it finds."""
  def validateSafe(self,value):
    # HTMLValidator reports problems via our log(); no return value needed.
    HTMLValidator(value, self)
class safeHtml(text, safeHtmlMixin, absUrlMixin):
  """Content that may contain HTML: checked both for unsafe markup and
  for relative URLs."""
  def prevalidate(self):
    self.children.append(True) # force warnings about "mixed" content
  def validate(self):
    self.validateSafe(self.value)
    self.validateAbsUrl(self.value)
#
# Elements for which email addresses are discouraged
#
class nonemail(text):
  """Flags element content containing an angle-bracketed e-mail address."""
  # Reuse addr_spec's address pattern wrapped in <...> (its trailing '$'
  # is stripped before embedding).
  email_re = re.compile("<" + addr_spec.email_re.pattern[:-1] + ">", re.I)
  def validate(self):
    found = self.email_re.search(self.value)
    if found is not None:
      self.log(ContainsEmail({"element":self.name, "parent":self.parent.name}))
#
# Elements for which html is discouraged, also checks for relative URLs
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
  """Content that should be plain text: warns when it looks like HTML."""
  htmlEndTag_re = re.compile("</(\w+)>")
  htmlEntity_re = re.compile("&(#?\w+)")
  def start(self):
    # Record the starting line both on the instance and on the class; the
    # class attribute acts as a fallback for instances whose start() was
    # never called (see validate()).
    nonhtml.startline = self.__dict__['startline'] = self.line
  def prevalidate(self):
    self.start()
    self.children.append(True) # force warnings about "mixed" content
  def validate(self, message=ContainsHTML):
    # A closing tag for a known HTML element is a strong signal.
    tags = [t for t in self.htmlEndTag_re.findall(self.value) if t.lower() in HTMLValidator.htmltags]
    if tags:
      self.log(message({"parent":self.parent.name, "element":self.name, "value":tags[0]}))
    # experimental RSS-Profile support
    elif self.htmlEntity_re.search(self.value):
      for value in self.htmlEntity_re.findall(self.value):
        from htmlentitydefs import name2codepoint
        if value in name2codepoint or value == 'apos' or not value.isalpha():
          if not hasattr(self,'startline'): self.startline=self.line
          # Only warn when the entity arrived as escaped character data
          # (rssCharData records CDATA usage per line).
          lines = self.dispatcher.rssCharData[self.startline-1:self.line]
          if not [chardata for chardata in lines if chardata]:
            self.log(message({"parent":self.parent.name, "element":self.name, "value":'&'+value+';'}))
    # experimental RSS-Profile support
    # looks-like-markup heuristics: &#x..., numeric refs, &ent, </tag, <a...
    elif self.getFeedType() == TYPE_RSS2:
      if re.search('&#[x0-9]|<[/a-zA-Z]', self.value):
        lines = self.dispatcher.rssCharData[self.startline-1:self.line]
        if not [chardata for chardata in lines if chardata]:
          # Walk up to the enclosing rss element; only RSS 2.x gets the hint.
          rss = self.parent.parent
          while rss and rss.name!='rss': rss=rss.parent
          if rss.version.startswith("2."):
            self.log(CharacterData({}))
#
# valid e-mail addresses
#
class email(addr_spec,nonhtml):
  """Validates an e-mail address, also warning about embedded HTML."""
  message = InvalidContact
  def validate(self):
    candidate = self.value
    parsed = AddressList(candidate)
    if len(parsed) == 1:
      # A single RFC 2822 mailbox: validate just its addr-spec part.
      candidate = parsed[0][1]
    nonhtml.validate(self)
    addr_spec.validate(self, candidate)
class email_with_name(email):
  """E-mail address optionally carrying a real name, e.g. 'a@b.com (Name)'.
  Warns when an address lacks the real-name part or uses bare spaces."""
  def validate(self):
    if self.value.startswith('mailto:'):
      # Strip the URI scheme and percent-decoding before inspection.
      from urllib import unquote
      self.value = unquote(self.value.split(':',1)[1])
    if self.value.find('@') <= 0:
      email.validate(self)
      return
    if self.value.endswith(")"):
      # Ends with "(Real Name)": fall through to plain address validation.
      email.validate(self)
      return
    if self.value.find(' ') > 0:
      self.log(EmailFormat({}))
    else:
      self.log(MissingRealName({}))
class nonNegativeInteger(text):
  """Accepts any integer greater than or equal to zero."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      if int(self.value) < 0:
        raise ValueError
    except ValueError:
      self.log(InvalidNonNegativeInteger(params))
    else:
      self.log(ValidInteger(params))
class positiveInteger(text):
  """Accepts integers greater than zero; subclasses may cap the range by
  overriding `max` (0 means unbounded)."""
  max = 0
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      number = int(self.value)
    except ValueError:
      self.log(InvalidPositiveInteger(params))
      return
    if number <= 0:
      self.log(InvalidPositiveInteger(params))
    elif self.max and number > self.max:
      self.log(IntegerOverflow(params))
    else:
      self.log(ValidInteger(params))
class UINT31(positiveInteger):
  # Positive integers that fit in a signed 32-bit int: at most 2**31 - 1.
  max = 2147483647
class Integer(text):
  """Accepts any integer; empty content is silently tolerated."""
  def validate(self):
    if self.value == '': return
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      int(self.value)
    except ValueError:
      self.log(InvalidInteger(params))
    else:
      self.log(ValidInteger(params))
class Float(text):
  """Accepts an unsigned decimal number: digits with an optional fraction."""
  def validate(self, name=None):
    if re.match('\d+\.?\d*$', self.value) is None:
      self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
class alphanumeric(text):
  """Accepts ASCII letters and digits, optionally padded with whitespace."""
  def validate(self):
    if re.match('^\s*[A-Za-z0-9]+\s*$', self.value) is None:
      self.log(InvalidAlphanum({"attr":self.name, "value":self.value}))
class percentType(text):
  """Accepts a percentage: a float between 0.0 and 100.0 inclusive."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      ratio = float(self.value)
      if ratio < 0.0 or ratio > 100.0:
        raise ValueError
    except ValueError:
      self.log(InvalidPercentage(params))
    else:
      self.log(ValidPercentage(params))
class latitude(text):
  """Accepts a latitude between -90 and 90 degrees."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      degrees = float(self.value)
      if degrees > 90 or degrees < -90:
        raise ValueError
    except ValueError:
      self.log(InvalidLatitude(params))
    else:
      self.log(ValidLatitude(params))
class longitude(text):
  """Accepts a longitude between -180 and 180 degrees."""
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    try:
      degrees = float(self.value)
      if degrees > 180 or degrees < -180:
        raise ValueError
    except ValueError:
      self.log(InvalidLongitude(params))
    else:
      self.log(ValidLongitude(params))
class httpURL(text):
  """Accepts an absolute http:// URL with a syntactically valid host."""
  http_re = re.compile("http://" + addr_spec.domain_re + '(?::\d+)?' + '(/|$)', re.IGNORECASE)
  def validate(self):
    params = {"parent":self.parent.name, "element":self.name, "value":self.value}
    # Both the http-specific shape and the generic RFC 2396 grammar must hold.
    acceptable = (self.http_re.match(self.value) and
                  rfc2396_full.rfc2396_re.match(self.value))
    if acceptable:
      self.log(ValidURLAttribute(params))
    else:
      self.log(InvalidURLAttribute(params))
class rdfResourceURI(rfc2396):
  """Element whose rdf:resource attribute must be a valid URI reference."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
            (u'http://purl.org/dc/elements/1.1/', u'title')]
  def validate(self):
    if (rdfNS, 'resource') in self.attrs.getNames():
      self.value=self.attrs.getValue((rdfNS, 'resource'))
      rfc2396.validate(self)
    elif self.getFeedType() == TYPE_RSS1:
      # rdf:resource is mandatory in RSS 1.0 feeds.
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
  """Element that must carry an rdf:about attribute, validated as a URI."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def startElementNS(self, name, qname, attrs):
    # Child elements are ignored here; only the attribute matters.
    pass
  def validate(self):
    if (rdfNS, 'about') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
    else:
      # Run the attribute value through a throwaway rfc2396 validator.
      test=rfc2396().setElement(self.name, self.attrs, self)
      test.value=self.attrs.getValue((rdfNS, 'about'))
      test.validate()
class nonblank(text):
  """Flags empty element content."""
  def validate(self, errorClass=NotBlank, extraParams={}):
    if self.value:
      return
    params = {"parent":self.parent.name, "element":self.name}
    params.update(extraParams)
    self.log(errorClass(params))
class nows(text):
  """Flags leading or trailing whitespace in element content (at most once)."""
  def __init__(self):
    self.ok = 1  # set to 0 after the first warning to avoid duplicates
    text.__init__(self)
  def characters(self, string):
    text.characters(self, string)
    if self.ok and (self.value != self.value.strip()):
      self.log(UnexpectedWhitespace({"parent":self.parent.name, "element":self.name}))
      self.ok = 0
class unique(nonblank):
  """Nonblank value that must also be unique within `scope`.
  Previously-seen values are accumulated on the scope object in an
  attribute named `<name>s`."""
  def __init__(self, name, scope, message=DuplicateValue):
    self.scope_name=name
    self.scope=scope
    self.message=message
    nonblank.__init__(self)
    # Lazily create the shared list of seen values on the scope object.
    if not name+'s' in self.scope.__dict__:
      self.scope.__dict__[name+'s']=[]
  def validate(self):
    nonblank.validate(self)
    list=self.scope.__dict__[self.scope_name+'s']
    if self.value in list:
      self.log(self.message({"parent":self.parent.name, "element":self.name,"value":self.value}))
    elif self.value:
      list.append(self.value)
class rfc3987_full(xmlbase):
  """Full (absolute) IRI; xml:base handling is inherited from xmlbase."""
  rfc2396_re = rfc2396_full.rfc2396_re
  def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
    return rfc2396.validate(self, errorClass, successClass, extraParams)
class canonicaluri(rfc3987_full):
  """Full IRI that must additionally already be in canonical form."""
  def validate(self):
    prestrip = self.value
    self.value = self.value.strip()
    if rfc3987_full.validate(self):
      c = canonicalForm(self.value)
      # Compare against the unstripped original: surrounding whitespace
      # alone already makes the URI non-canonical.
      if c is None or c != prestrip:
        self.log(NonCanonicalURI({"parent":self.parent.name,"element":self.name,"uri":prestrip, "curi":c or 'N/A'}))
class yesno(text):
  """Accepts exactly 'yes' or 'no' (case-sensitive)."""
  def normalizeWhitespace(self):
    # Keep the raw value: surrounding whitespace is significant here.
    pass
  def validate(self):
    if self.value not in ('yes', 'no'):
      self.log(InvalidYesNo({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalse(text):
  """Accepts 'true' or 'false' in any letter case."""
  def normalizeWhitespace(self):
    # Keep the raw value: surrounding whitespace is significant here.
    pass
  def validate(self):
    if self.value.lower() not in ('true', 'false'):
      self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalsestrict(text):
  """Accepts exactly 'true' or 'false' (case-sensitive)."""
  def normalizeWhitespace(self):
    # Keep the raw value: surrounding whitespace is significant here.
    pass
  def validate(self):
    if self.value not in ('true', 'false'):
      self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class duration(text):
  """Accepts H, H:MM or H:MM:SS durations (minutes and seconds 00-59)."""
  duration_re = re.compile("\d+(:[0-5][0-9](:[0-5][0-9])?)?$")
  def validate(self):
    if self.duration_re.match(self.value) is None:
      self.log(InvalidDuration({"parent":self.parent.name, "element":self.name, "value":self.value}))
class lengthLimitedText(nonhtml):
  """nonhtml content additionally limited to `max` characters."""
  def __init__(self, max):
    self.max = max
    text.__init__(self)
  def validate(self):
    length = len(self.value)
    if length > self.max:
      self.log(TooLong({"parent":self.parent.name, "element":self.name,
        "len": length, "max": self.max}))
    nonhtml.validate(self)
class keywords(text):
  """Warns when keywords appear space-separated instead of comma-separated."""
  def validate(self):
    if ' ' in self.value and ',' not in self.value:
      self.log(InvalidKeywords({"parent":self.parent.name, "element":self.name}))
class commaSeparatedIntegers(text):
  """Accepts a comma-separated list of unsigned integers."""
  def validate(self):
    if re.match("^\d+(,\s*\d+)*$", self.value) is None:
      self.log(InvalidCommaSeparatedIntegers({"parent":self.parent.name,
        "element":self.name}))
class formname(text):
  """Validates an HTML form control name: a letter followed by letters,
  digits, ':', '.' or '_' (the HTML 4 NAME token rules).

  Fix: the character class was written [a-zA-z]; the A-z range also spans
  '[', '\\', ']', '^', '_' and '`', so names starting with those characters
  were silently accepted.
  """
  def validate(self):
    # NOTE(review): the pattern is unanchored at the end, so only the
    # leading characters are checked -- confirm that is intentional.
    if not re.match("^[a-zA-Z][a-zA-Z0-9:._]*", self.value):
      self.log(InvalidFormComponentName({"parent":self.parent.name,
        "element":self.name, "value":self.value}))
class enumeration(text):
  """Validates content against a fixed list of allowed values.
  Subclasses supply `error` (the event class to log) and `valuelist`."""
  def validate(self):
    if self.value not in self.valuelist:
      # Rebuild "prefix:local" from the mangled element name "prefix_local".
      self.log(self.error({"parent":self.parent.name, "element":self.name,
                           "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class caseinsensitive_enumeration(enumeration):
  # Lowercase the value before delegating so the comparison against
  # valuelist is effectively case-insensitive.
  def validate(self):
    self.value=self.value.lower()
    enumeration.validate(self)
class iso3166(enumeration):
  """Two-letter ISO 3166 country codes.
  NOTE(review): this list appears to predate later ISO 3166 updates
  (e.g. AX, ME, RS are absent) -- confirm whether newer codes should
  be accepted."""
  error = InvalidCountryCode
  valuelist = [
    "AD", "AE", "AF", "AG", "AI", "AM", "AN", "AO", "AQ", "AR", "AS", "AT",
    "AU", "AW", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ",
    "BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC",
    "CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU",
    "CV", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE",
    "EG", "EH", "ER", "ES", "ET", "FI", "FJ", "FK", "FM", "FO", "FR", "GA",
    "GB", "GD", "GE", "GF", "GH", "GI", "GL", "GM", "GN", "GP", "GQ", "GR",
    "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID",
    "IE", "IL", "IN", "IO", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", "KE",
    "KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB",
    "LC", "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD",
    "MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT",
    "MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI",
    "NL", "NO", "NP", "NR", "NU", "NZ", "OM", "PA", "PE", "PF", "PG", "PH",
    "PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", "QA", "RE", "RO",
    "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK",
    "SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ", "TC", "TD", "TF",
    "TG", "TH", "TJ", "TK", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ",
    "UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN",
    "VU", "WF", "WS", "YE", "YT", "ZA", "ZM", "ZW"]
class iso4217(enumeration):
  """Three-letter ISO 4217 currency codes.
  NOTE(review): the list is a snapshot and includes some since-retired
  codes (e.g. AZM, TRL) -- confirm against the intended revision."""
  error = InvalidCurrencyUnit
  valuelist = [
    "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZM",
    "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV",
    "BRL", "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF",
    "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CSD", "CUP", "CVE",
    "CYP", "CZK", "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ETB",
    "EUR", "FJD", "FKP", "GBP", "GEL", "GHC", "GIP", "GMD", "GNF", "GTQ",
    "GWP", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR",
    "IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF",
    "KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL",
    "LTL", "LVL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP",
    "MRO", "MTL", "MUR", "MWK", "MXN", "MXV", "MYR", "MZM", "NAD", "NGN",
    "NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR",
    "PLN", "PYG", "QAR", "ROL", "RON", "RUB", "RWF", "SAR", "SBD", "SCR",
    "SDD", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "STD",
    "SVC", "SYP", "SZL", "THB", "TJS", "TMM", "TND", "TOP", "TRL", "TRY",
    "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "USS", "UYU", "UZS",
    "VEB", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC",
    "XBD", "XCD", "XDR", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XTS",
    "XXX", "YER", "ZAR", "ZMK", "ZWD"]
| Python |
#$Id
####
# Copyright 2000,2001 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
"""Timeout Socket
This module enables a timeout mechanism on all TCP connections. It
does this by inserting a shim into the socket module. After this module
has been imported, all socket creation goes through this shim. As a
result, every TCP connection will support a timeout.
The beauty of this method is that it immediately and transparently
enables the entire python library to support timeouts on TCP sockets.
As an example, if you wanted SMTP connections to have a 20 second
timeout:
import timeoutsocket
import smtplib
timeoutsocket.setDefaultSocketTimeout(20)
The timeout applies to the socket functions that normally block on
execution: read, write, connect, and accept. If any of these
operations exceeds the specified timeout, the exception Timeout
will be raised.
The default timeout value is set to None. As a result, importing
this module does not change the default behavior of a socket. The
timeout mechanism only activates when the timeout has been set to
a numeric value. (This behavior mimics the behavior of the
select.select() function.)
This module implements two classes: TimeoutSocket and TimeoutFile.
The TimeoutSocket class defines a socket-like object that attempts to
avoid the condition where a socket may block indefinitely. The
TimeoutSocket class raises a Timeout exception whenever the
current operation delays too long.
The TimeoutFile class defines a file-like object that uses the TimeoutSocket
class. When the makefile() method of TimeoutSocket is called, it returns
an instance of a TimeoutFile.
Each of these objects adds two methods to manage the timeout value:
get_timeout() --> returns the timeout of the socket or file
set_timeout() --> sets the timeout of the socket or file
As an example, one might use the timeout feature to create httplib
connections that will timeout after 30 seconds:
import timeoutsocket
import httplib
H = httplib.HTTP("www.python.org")
H.sock.set_timeout(30)
Note: When used in this manner, the connect() routine may still
block because it happens before the timeout is set. To avoid
this, use the 'timeoutsocket.setDefaultSocketTimeout()' function.
Good Luck!
"""
__version__ = "$Revision: 511 $"
__author__ = "Timothy O'Malley <timo@alum.mit.edu>"
#
# Imports
#
import select, string
import socket
# Capture the pristine socket implementation.  If this module was already
# imported once, socket.socket has been replaced by our shim and the
# original is stashed in socket._no_timeoutsocket (see bottom of file).
if not hasattr(socket, "_no_timeoutsocket"):
    _socket = socket.socket
else:
    _socket = socket._no_timeoutsocket
#
# Set up constants to test for Connected and Blocking operations.
# We delete 'os' and 'errno' to keep our namespace clean(er).
# Thanks to Alex Martelli and G. Li for the Windows error codes.
#
import os
if os.name == "nt":
    # Winsock error codes; presumably 10022/10056 are WSAEINVAL/WSAEISCONN
    # and 10035 is WSAEWOULDBLOCK -- verify against the Winsock reference.
    _IsConnected = ( 10022, 10056 )
    _ConnectBusy = ( 10035, )
    _AcceptBusy = ( 10035, )
else:
    import errno
    _IsConnected = ( errno.EISCONN, )
    _ConnectBusy = ( errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK )
    _AcceptBusy = ( errno.EAGAIN, errno.EWOULDBLOCK )
    del errno
del os
#
# Default timeout value for ALL TimeoutSockets
#
# Timeout (in seconds) applied to every newly created TimeoutSocket;
# None keeps ordinary blocking-socket behavior.
_DefaultTimeout = None

def setDefaultSocketTimeout(timeout):
    """Set the timeout used for sockets created from now on."""
    global _DefaultTimeout
    _DefaultTimeout = timeout

def getDefaultSocketTimeout():
    """Return the timeout currently applied to new sockets."""
    return _DefaultTimeout
#
# Exceptions for socket errors and timeouts
#
# Re-export socket.error so callers can catch both failure kinds from here.
Error = socket.error
class Timeout(Exception):
    """Raised when a read/write/connect/accept exceeds its timeout."""
    pass
#
# Factory function
#
from socket import AF_INET, SOCK_STREAM
def timeoutsocket(family=AF_INET, type=SOCK_STREAM, proto=None):
    """Drop-in replacement for socket.socket().
    AF_INET/SOCK_STREAM sockets are wrapped in a TimeoutSocket using the
    module-wide default timeout; anything else is returned unwrapped."""
    if family != AF_INET or type != SOCK_STREAM:
        if proto:
            return _socket(family, type, proto)
        else:
            return _socket(family, type)
    return TimeoutSocket( _socket(family, type), _DefaultTimeout )
# end timeoutsocket
#
# The TimeoutSocket class definition
#
class TimeoutSocket:
    """TimeoutSocket object
    Implements a socket-like object that raises Timeout whenever
    an operation takes too long.
    The definition of 'too long' can be changed using the
    set_timeout() method.
    """

    # Number of TimeoutFile wrappers sharing this socket (see makefile/close).
    _copies = 0
    # Mirrors the last setblocking() value; 1 = blocking (the default).
    _blocking = 1

    def __init__(self, sock, timeout):
        self._sock     = sock
        self._timeout  = timeout
    # end __init__

    def __getattr__(self, key):
        # Delegate everything we don't override to the wrapped socket.
        return getattr(self._sock, key)
    # end __getattr__

    def get_timeout(self):
        return self._timeout
    # end get_timeout

    def set_timeout(self, timeout=None):
        self._timeout = timeout
    # end set_timeout

    def setblocking(self, blocking):
        self._blocking = blocking
        return self._sock.setblocking(blocking)
    # end setblocking

    def connect_ex(self, addr):
        # socket.connect_ex compatible: return an error code, don't raise.
        errcode = 0
        try:
            self.connect(addr)
        except Error, why:
            errcode = why[0]
        return errcode
    # end connect_ex
        
    def connect(self, addr, port=None, dumbhack=None):
        # In case we were called as connect(host, port)
        if port != None: addr = (addr, port)

        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking

        # First, make a non-blocking call to connect
        try:
            sock.setblocking(0)
            sock.connect(addr)
            sock.setblocking(blocking)
            return
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)
            
            # If we are not blocking, re-raise
            if not blocking:
                raise
            
            # If we are already connected, then return success.
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if dumbhack and errcode in _IsConnected:
                return
            elif errcode not in _ConnectBusy:
                raise
            
        # Now, wait for the connect to happen
        # ONLY if dumbhack indicates this is pass number one.
        #   If select raises an error, we pass it on.
        #   Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([], [sock], [], timeout)
            if w:
                # Writable means the connect completed (or failed); recurse
                # once with dumbhack=1 to collect the result without waiting.
                return self.connect(addr, dumbhack=1)

        # If we get here, then we should raise Timeout
        raise Timeout("Attempted connect to %s timed out." % str(addr) )
    # end connect

    def accept(self, dumbhack=None):
        # Shortcuts
        sock = self._sock
        timeout = self._timeout
        blocking = self._blocking

        # First, make a non-blocking call to accept
        #  If we get a valid result, then convert the
        #  accept'ed socket into a TimeoutSocket.
        # Be careful about the blocking mode of ourselves.
        try:
            sock.setblocking(0)
            newsock, addr = sock.accept()
            sock.setblocking(blocking)
            timeoutnewsock = self.__class__(newsock, timeout)
            timeoutnewsock.setblocking(blocking)
            return (timeoutnewsock, addr)
        except Error, why:
            # Set the socket's blocking mode back
            sock.setblocking(blocking)

            # If we are not supposed to block, then re-raise
            if not blocking:
                raise
            
            # If we got a genuine error, re-raise it.
            errcode = why[0]
            if errcode not in _AcceptBusy:
                raise
            
        # Now, wait for the accept to happen
        # ONLY if dumbhack indicates this is pass number one.
        #   If select raises an error, we pass it on.
        #   Is this the right behavior?
        if not dumbhack:
            r,w,e = select.select([sock], [], [], timeout)
            if r:
                # Readable means a connection is pending; recurse once.
                return self.accept(dumbhack=1)

        # If we get here, then we should raise Timeout
        raise Timeout("Attempted accept timed out.")
    # end accept

    def send(self, data, flags=0):
        sock = self._sock
        if self._blocking:
            # Wait (up to the timeout) for the socket to become writable.
            r,w,e = select.select([],[sock],[], self._timeout)
            if not w:
                raise Timeout("Send timed out")
        return sock.send(data, flags)
    # end send

    def recv(self, bufsize, flags=0):
        sock = self._sock
        if self._blocking:
            # Wait (up to the timeout) for data to arrive.
            r,w,e = select.select([sock], [], [], self._timeout)
            if not r:
                raise Timeout("Recv timed out")
        return sock.recv(bufsize, flags)
    # end recv

    def makefile(self, flags="r", bufsize=-1):
        # Each TimeoutFile holds a reference; close() only really closes
        # the socket once every copy has been closed (see close()).
        self._copies = self._copies +1
        return TimeoutFile(self, flags, bufsize)
    # end makefile

    def close(self):
        if self._copies <= 0:
            self._sock.close()
        else:
            self._copies = self._copies -1
    # end close

# end TimeoutSocket
class TimeoutFile:
    """TimeoutFile object
    Implements a file-like object on top of TimeoutSocket.
    """
    
    def __init__(self, sock, mode="r", bufsize=4096):
        self._sock          = sock
        self._bufsize       = 4096       # Default.  Create a new attr.
        if bufsize > 0:
            self._bufsize   = bufsize

        # The input queue lives on the socket itself, so every TimeoutFile
        # wrapping the same socket shares one read buffer.
        if not hasattr(sock, "_inqueue"): self._sock._inqueue = ""

    # end __init__

    def __getattr__(self, key):
        # Delegate everything else (send, recv, close bookkeeping, ...)
        # to the underlying TimeoutSocket.
        return getattr(self._sock, key)
    # end __getattr__

    def close(self):
        self._sock.close()
        self._sock = None
    # end close
    
    def write(self, data):
        self.send(data)
    # end write

    def read(self, size=-1):
        """Read up to `size` bytes (everything until EOF if size < 0)."""
        _sock = self._sock
        _bufsize = self._bufsize
        while 1:
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf
        data = _sock._inqueue
        _sock._inqueue = ""
        if size > 0 and datalen > size:
            # Keep the surplus queued for the next read.
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end read

    def readline(self, size=-1):
        """Read one line, up to `size` bytes if size >= 0."""
        _sock = self._sock
        _bufsize = self._bufsize
        while 1:
            idx = string.find(_sock._inqueue, "\n")
            if idx >= 0:
                break
            datalen = len(_sock._inqueue)
            if datalen >= size >= 0:
                break
            bufsize = _bufsize
            if size > 0:
                bufsize = min(bufsize, size - datalen )
            buf = self.recv(bufsize)
            if not buf:
                break
            _sock._inqueue = _sock._inqueue + buf

        data = _sock._inqueue
        _sock._inqueue = ""
        if idx >= 0:
            # Include the newline; queue whatever follows it.
            idx = idx + 1
            _sock._inqueue = data[idx:]
            data = data[:idx]
        elif size > 0 and datalen > size:
            _sock._inqueue = data[size:]
            data = data[:size]
        return data
    # end readline

    def readlines(self, sizehint=-1):
        # sizehint is accepted for file-object compatibility but ignored.
        result = []
        data = self.read()
        while data:
            idx = string.find(data, "\n")
            if idx >= 0:
                idx = idx + 1
                result.append( data[:idx] )
                data = data[idx:]
            else:
                result.append( data )
                data = ""
        return result
    # end readlines

    def flush(self):  pass

# end TimeoutFile
#
# Silently replace the socket() builtin function with
# our timeoutsocket() definition.
#
# Install the shim exactly once: stash the original constructor in
# socket._no_timeoutsocket, then replace socket.socket with our factory.
if not hasattr(socket, "_no_timeoutsocket"):
    socket._no_timeoutsocket = socket.socket
    socket.socket = timeoutsocket
del socket
# Rebind this module's `socket` name to the factory, so
# `timeoutsocket.socket(...)` also works like `socket.socket(...)`.
socket = timeoutsocket
# Finis
| Python |
"""$Id: base.py 1049 2009-05-06 02:00:03Z rothfuss $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1049 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
from logging import NonCanonicalURI, NotUTF8
import re
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
# Map of known namespace URI -> preferred (reserved) prefix.  Consulted
# when checking prefix bindings and when classifying unexpected attributes.
namespaces = {
  "http://www.bloglines.com/about/specs/fac-1.0":      "access",
  "http://webns.net/mvcb/":                            "admin",
  "http://purl.org/rss/1.0/modules/aggregation/":      "ag",
  "http://purl.org/rss/1.0/modules/annotate/":         "annotate",
  "http://www.w3.org/2007/app":                        "app",
  "http://media.tangent.org/rss/1.0/":                 "audio",
  "http://backend.userland.com/blogChannelModule":     "blogChannel",
  "http://web.resource.org/cc/":                       "cc",
  "http://www.microsoft.com/schemas/rss/core/2005":    "cf",
  "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
  "http://purl.org/rss/1.0/modules/company":           "company",
  "http://purl.org/rss/1.0/modules/content/":          "content",
  "http://conversationsnetwork.org/rssNamespace-1.0/": "conversationsNetwork",
  "http://my.theinfo.org/changed/1.0/rss/":            "cp",
  "http://purl.org/dc/elements/1.1/":                  "dc",
  "http://purl.org/dc/terms/":                         "dcterms",
  "http://purl.org/rss/1.0/modules/email/":            "email",
  "http://purl.org/rss/1.0/modules/event/":            "ev",
  "http://purl.org/syndication/history/1.0":           "fh",
  "http://www.w3.org/2003/01/geo/wgs84_pos#":          "geo",
  "http://geourl.org/rss/module/":                     "geourl",
  "http://www.georss.org/georss":                      "georss",
  "http://www.opengis.net/gml":                        "gml",
  "http://postneo.com/icbm":                           "icbm",
  "http://purl.org/rss/1.0/modules/image/":            "image",
  "urn:atom-extension:indexing":                       "indexing",
  "http://www.itunes.com/dtds/podcast-1.0.dtd":        "itunes",
  "http://rssnamespace.org/feedburner/ext/1.0":        "feedburner",
  "http://xmlns.com/foaf/0.1/":                        "foaf",
  "http://purl.org/rss/1.0/modules/link/":             "l",
  "http://search.yahoo.com/mrss/":                     "media",
  "http://www.w3.org/1998/Math/MathML":                "mathml",
  "http://a9.com/-/spec/opensearchrss/1.0/":           "opensearch10",
  "http://a9.com/-/spec/opensearch/1.1/":              "opensearch",
  "http://www.opml.org/spec2":                         "opml",
  "http://www.w3.org/1999/02/22-rdf-syntax-ns#":       "rdf",
  "http://www.w3.org/2000/01/rdf-schema#":             "rdfs",
  "http://purl.org/rss/1.0/modules/reference/":        "ref",
  "http://purl.org/rss/1.0/modules/richequiv/":        "reqv",
  "http://purl.org/rss/1.0/modules/rss091#":           "rss091",
  "http://purl.org/rss/1.0/modules/search/":           "search",
  "http://purl.org/rss/1.0/modules/slash/":            "slash",
  "http://purl.org/rss/1.0/modules/servicestatus/":    "ss",
  "http://hacks.benhammersley.com/rss/streaming/":     "str",
  "http://purl.org/rss/1.0/modules/subscription/":     "sub",
  "http://feedsync.org/2007/feedsync":                 "sx",
  "http://www.w3.org/2000/svg":                        "svg",
  "http://purl.org/rss/1.0/modules/syndication/":      "sy",
  "http://purl.org/rss/1.0/modules/taxonomy/":         "taxo",
  "http://purl.org/rss/1.0/modules/threading/":        "thr",
  "http://purl.org/syndication/thread/1.0":            "thr",
  "http://madskills.com/public/xml/rss/module/trackback/": "trackback",
  "http://wellformedweb.org/CommentAPI/":              "wfw",
  "http://purl.org/rss/1.0/modules/wiki/":             "wiki",
  "http://www.usemod.com/cgi-bin/mb.pl?ModWiki":       "wiki",
  "http://schemas.xmlsoap.org/soap/envelope/":         "soap",
  "http://www.w3.org/2005/Atom":                       "atom",
  "http://www.w3.org/1999/xhtml":                      "xhtml",
  "http://my.netscape.com/rdf/simple/0.9/":            "rss090",
  "http://purl.org/rss/1.0/":                          "rss1",
  "http://purl.org/net/rss1.1#":                       "rss11",
  "http://base.google.com/ns/1.0":                     "g",
  "http://www.w3.org/XML/1998/namespace":              "xml",
  "http://openid.net/xmlns/1.0":                       "openid",
  "http://earth.google.com/kml/2.0":                   "kml20",
  "http://earth.google.com/kml/2.1":                   "kml21",
  "http://www.opengis.net/kml/2.2":                    "kml22",
  "http://www.w3.org/1999/xlink":                      "xlink",
  "xri://$xrd*($v*2.0)":                               "xrd",
  "xri://$xrds":                                       "xrds",
}
def near_miss(ns):
  """Normalize a namespace URI for fuzzy comparison: keep everything up
  to (and including) the last word character -- stripping a trailing '/'
  or '#' -- and lowercase the result.  A URI with no word characters is
  returned unchanged.

  Fix: the bare `except:` (which also swallowed KeyboardInterrupt and
  SystemExit) is narrowed to the AttributeError raised by `.group()`
  when re.match finds nothing.
  """
  try:
    return re.match(r".*\w", ns).group().lower()
  except AttributeError:
    # re.match returned None: no word character anywhere in ns.
    return ns
# Prefix lookup keyed by the normalized (near_miss'ed) form of each URI,
# so close variants of a known namespace still resolve to its prefix.
nearly_namespaces = dict([(near_miss(u),p) for u,p in namespaces.items()])

# Attributes from the xml: namespace that are permitted on any element.
stdattrs = [(u'http://www.w3.org/XML/1998/namespace', u'base'),
            (u'http://www.w3.org/XML/1998/namespace', u'id'),
            (u'http://www.w3.org/XML/1998/namespace', u'lang'),
            (u'http://www.w3.org/XML/1998/namespace', u'space')]
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
  """Receives all SAX events and delegates them to a pushdown stack of the
  real content handlers (see the comment above)."""
  # When set, presumably each distinct event class is logged at most once
  # per run -- confirm against log()'s implementation.
  firstOccurrenceOnly = 0
  def __init__(self, base, selfURIs, encoding):
    """base: the document's base URI; selfURIs: presumably the URIs under
    which this feed is reachable (for self-reference checks) -- verify
    against callers; encoding: the declared character encoding."""
    from root import root
    ContentHandler.__init__(self)
    self.lastKnownLine = 1
    self.lastKnownColumn = 0
    self.loggedEvents = []
    self.feedType = 0
    try:
      # IDNA-encode the base for host comparisons; fall back to the raw
      # value for bases that cannot be IDNA-encoded.
      self.xmlBase = base.encode('idna')
    except:
      self.xmlBase = base
    self.selfURIs = selfURIs
    self.encoding = encoding
    # Stack of lists of active handlers; SAX events go to the top list.
    self.handler_stack=[[root(self, base)]]
    self.defaultNamespaces = []
    # experimental RSS-Profile support
    self.rssCharData = []
  def setDocumentLocator(self, locator):
    # Keep the locator so line/column positions can be attached to events.
    self.locator = locator
    ContentHandler.setDocumentLocator(self, self.locator)
  def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
    # See the class attribute of the same name for its meaning.
    self.firstOccurrenceOnly = firstOccurrenceOnly
  def startPrefixMapping(self, prefix, uri):
    """Record a namespace binding on all active handlers and warn about
    suspicious bindings: whitespace in the URI, reserved prefixes bound to
    foreign URIs, obsolete or unknown namespaces, discouraged prefixes."""
    for handler in iter(self.handler_stack[-1]):
      handler.namespace[prefix] = uri
    if uri and len(uri.split())>1:
      # A namespace URI containing whitespace cannot be valid.
      from xml.sax import SAXException
      self.error(SAXException('Invalid Namespace: %s' % uri))
    if prefix in namespaces.values():
      # This prefix is reserved for a well-known namespace: complain if it
      # is bound to some other URI.
      if not namespaces.get(uri,'') == prefix and prefix:
        from logging import ReservedPrefix, MediaRssNamespace
        preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
        if uri == 'http://search.yahoo.com/mrss':
          self.log(MediaRssNamespace({'prefix':prefix, 'ns':preferredURI}))
        else:
          self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
      elif prefix=='wiki' and uri.find('usemod')>=0:
        from logging import ObsoleteWikiNamespace
        self.log(ObsoleteWikiNamespace({'preferred':namespaces[uri], 'ns':uri}))
      elif prefix in ['atom','xhtml']:
        from logging import TYPE_ATOM, AvoidNamespacePrefix
        if self.getFeedType() == TYPE_ATOM:
          self.log(AvoidNamespacePrefix({'prefix':prefix}))
    elif namespaces.has_key(uri):
      # A known namespace bound to a non-standard prefix.
      if not namespaces[uri] == prefix and prefix:
        from logging import NonstdPrefix
        self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))
        if namespaces[uri] in ['atom', 'xhtml']:
          from logging import TYPE_UNKNOWN, TYPE_ATOM, AvoidNamespacePrefix
          if self.getFeedType() in [TYPE_ATOM,TYPE_UNKNOWN]:
            self.log(AvoidNamespacePrefix({'prefix':prefix}))
    elif uri == 'http://search.yahoo.com/mrss':
      # Common misspelling of the Media RSS namespace (missing slash).
      from logging import MediaRssNamespace
      uri = 'http://search.yahoo.com/mrss/'
      self.log(MediaRssNamespace({'prefix':prefix, 'ns':uri}))
    else:
      # Completely unknown namespace: only mention it when the URI itself
      # is well-formed (otherwise rfc3987 already logs an error).
      from validators import rfc3987
      rule=rfc3987()
      rule.setElement('xmlns:'+str(prefix), {}, self.handler_stack[-1][0])
      rule.value=uri
      if not uri or rule.validate():
        from logging import UnknownNamespace
        self.log(UnknownNamespace({'namespace':uri}))
  def namespaceFor(self, prefix):
    # Prefix resolution is tracked per-handler (see startPrefixMapping);
    # the dispatcher itself deliberately resolves nothing.
    return None
  def startElementNS(self, name, qname, attrs):
    """SAX callback for an element start: remember the parse position,
    forward the event to every active handler, then report attributes
    that no handler declared as expected."""
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    # 'name' arrives as a (namespaceURI, localname) tuple; split it
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.startElementNS(name, qname, attrs)
    if len(attrs):
      present = attrs.getNames()
      # start with everything that is not a standard attribute...
      unexpected = filter(lambda x: x not in stdattrs, present)
      for handler in iter(self.handler_stack[-1]):
        ean = handler.getExpectedAttrNames()
        # ...and let each handler whitelist the names it expects
        if ean: unexpected = filter(lambda x: x not in ean, unexpected)
      for u in unexpected:
        if u[0] and near_miss(u[0]) not in nearly_namespaces:
          feedtype=self.getFeedType()
          # namespaced extension attributes on RSS 2.0 core elements are
          # only warned about as extension usage, not as unexpected
          if (not qname) and feedtype and (feedtype==TYPE_RSS2):
            from logging import UseOfExtensionAttr
            self.log(UseOfExtensionAttr({"attribute":u, "element":name}))
            continue
          from logging import UnexpectedAttribute
          if not u[0]: u=u[1]
          self.log(UnexpectedAttribute({"parent":name, "attribute":u, "element":name}))
  def resolveEntity(self, publicId, systemId):
    """SAX entity-resolution hook: recognize the two sanctioned RSS 0.91
    DTD identifiers, flag any other external entity, and always return
    an empty stream so nothing external is actually fetched."""
    if not publicId and not systemId:
      import cStringIO
      return cStringIO.StringIO()
    # best effort: run the deferred XML (DTD) validator exactly once and
    # never let a failure in it abort the parse
    try:
      def log(exception):
        from logging import SAXError
        self.log(SAXError({'exception':str(exception)}))
      if self.xmlvalidator:
        self.xmlvalidator(log)
      self.xmlvalidator=0
    except:
      pass
    if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
        systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
      # historical Netscape DTD: valid, but deprecated
      from logging import ValidDoctype, DeprecatedDTD
      self.log(ValidDoctype({}))
      self.log(DeprecatedDTD({}))
    elif (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
        systemId=='http://www.rssboard.org/rss-0.91.dtd'):
      from logging import ValidDoctype
      self.log(ValidDoctype({}))
    else:
      # any other external entity reference is reported
      from logging import ContainsSystemEntity
      self.lastKnownLine = self.locator.getLineNumber()
      self.lastKnownColumn = self.locator.getColumnNumber()
      self.log(ContainsSystemEntity({}))
    from StringIO import StringIO
    return StringIO()
def skippedEntity(self, name):
from logging import ValidDoctype
if [e for e in self.loggedEvents if e.__class__ == ValidDoctype]:
from htmlentitydefs import name2codepoint
if name in name2codepoint: return
from logging import UndefinedNamedEntity
self.log(UndefinedNamedEntity({'value':name}))
def characters(self, string):
self.lastKnownLine = self.locator.getLineNumber()
self.lastKnownColumn = self.locator.getColumnNumber()
for handler in iter(self.handler_stack[-1]):
handler.characters(string)
def endElementNS(self, name, qname):
self.lastKnownLine = self.locator.getLineNumber()
self.lastKnownColumn = self.locator.getColumnNumber()
qname, name = name
for handler in iter(self.handler_stack[-1]):
handler.endElementNS(name, qname)
del self.handler_stack[-1]
def push(self, handlers, name, attrs, parent):
if hasattr(handlers,'__iter__'):
for handler in iter(handlers):
handler.setElement(name, attrs, parent)
handler.value=""
handler.prevalidate()
else:
handlers.setElement(name, attrs, parent)
handlers.value=""
handlers.prevalidate()
handlers = [handlers]
self.handler_stack.append(handlers)
  def log(self, event, offset=(0,0)):
    """Record a validation event, collapsing duplicates (when enabled)
    and stamping the event with the current parse position."""
    def findDuplicate(self, event):
      # an event is a duplicate when one of the same class was already
      # logged with identical params (the free-form 'value' is ignored)
      duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
      if duplicates and (event.__class__ in [NonCanonicalURI]):
        return duplicates[0]
      for dup in duplicates:
        for k, v in event.params.items():
          if k != 'value':
            if not k in dup.params or dup.params[k] != v: break
        else:
          return dup
    if event.params.has_key('element') and event.params['element']:
      # normalize internal element names for display:
      # 'prefix_local' -> 'prefix:local', and
      # (xml-namespace-uri, local) tuples -> 'xml:local'
      if not isinstance(event.params['element'],tuple):
        event.params['element']=':'.join(event.params['element'].split('_', 1))
      elif event.params['element'][0]==u'http://www.w3.org/XML/1998/namespace':
        event.params['element'] = 'xml:' + event.params['element'][-1]
    if self.firstOccurrenceOnly:
      dup = findDuplicate(self, event)
      if dup:
        dup.params['msgcount'] = dup.params['msgcount'] + 1
        return
    event.params['msgcount'] = 1
    try:
      line = self.locator.getLineNumber() + offset[0]
      backupline = self.lastKnownLine
      column = (self.locator.getColumnNumber() or 0) + offset[1]
      backupcolumn = self.lastKnownColumn
    except AttributeError:
      # no locator available (e.g. event logged outside a parse)
      line = backupline = column = backupcolumn = 1
    event.params['line'] = line
    event.params['backupline'] = backupline
    event.params['column'] = column
    event.params['backupcolumn'] = backupcolumn
    self.loggedEvents.append(event)
  def error(self, exception):
    """SAX error callback: record the problem as a SAXError event, then
    re-raise to abort the parse.  Warnings and fatal errors are handled
    identically (see the aliases below)."""
    from logging import SAXError
    self.log(SAXError({'exception':str(exception)}))
    raise exception
  # treat all three SAX severity levels the same way
  fatalError=error
  warning=error
def getFeedType(self):
return self.feedType
def setFeedType(self, feedType):
self.feedType = feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handing such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
from logging import TYPE_RSS2
class validatorBase(ContentHandler):
  """Common base class for element handlers.

  Tracks the element name, attributes, accumulated character data
  (self.value), the names of child elements seen so far, and the
  inherited xml:base / xml:lang context.  Subclasses declare
  do_<name>() methods that return the handler(s) for each supported
  child element, and may override prevalidate()/validate() hooks.
  """

  def __init__(self):
    ContentHandler.__init__(self)
    self.value = ""        # accumulated character data
    self.attrs = None      # attributes of this element
    self.children = []     # names of child elements processed so far
    self.isValid = 1       # cleared by log(); drives ValidElement reporting
    self.name = None       # internal (prefix_local) element name
    self.itunes = False
    self.namespace = {}    # prefix -> uri mappings declared on this element
  def setElement(self, name, attrs, parent):
    """Bind this handler to an element: record name/attrs/parent, note
    the parse position, and resolve the inherited xml:base/xml:lang."""
    self.name = name
    self.attrs = attrs
    self.parent = parent
    self.dispatcher = parent.dispatcher
    self.line = self.dispatcher.locator.getLineNumber()
    self.col = self.dispatcher.locator.getColumnNumber()
    self.xmlLang = parent.xmlLang
    if attrs and attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'base')):
      # validate the declared xml:base, then resolve it against the parent's
      self.xmlBase=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'base'))
      from validators import rfc3987
      self.validate_attribute((u'http://www.w3.org/XML/1998/namespace',u'base'),
        rfc3987)
      from urlparse import urljoin
      self.xmlBase = urljoin(parent.xmlBase, self.xmlBase)
    else:
      self.xmlBase = parent.xmlBase
    return self
  def simplename(self, name):
    """Convert a (namespaceURI, localname) pair to a display name."""
    if not name[0]: return name[1]
    return namespaces.get(name[0], name[0]) + ":" + name[1]
  def namespaceFor(self, prefix):
    """Resolve a prefix against this element's declarations, walking up
    the parent chain if it is not declared here."""
    if self.namespace.has_key(prefix):
      return self.namespace[prefix]
    elif self.parent:
      return self.parent.namespaceFor(prefix)
    else:
      return None
  def validate_attribute(self, name, rule):
    """Run a validator (class or instance) over one attribute value."""
    if not isinstance(rule,validatorBase): rule = rule()
    if isinstance(name,str): name = (None,name)
    rule.setElement(self.simplename(name), {}, self)
    rule.value=self.attrs.getValue(name)
    rule.validate()
  def validate_required_attribute(self, name, rule):
    """Validate an attribute, logging MissingAttribute if absent."""
    if self.attrs and self.attrs.has_key(name):
      self.validate_attribute(name, rule)
    else:
      from logging import MissingAttribute
      self.log(MissingAttribute({"attr": self.simplename(name)}))
  def validate_optional_attribute(self, name, rule):
    """Validate an attribute only if it is present."""
    if self.attrs and self.attrs.has_key(name):
      self.validate_attribute(name, rule)
  def getExpectedAttrNames(self):
    # default: no expected attributes (implicitly returns None)
    None
  def unknown_starttag(self, name, qname, attrs):
    """Fallback handler factory for elements in foreign namespaces."""
    from validators import any
    return any(self, name, qname, attrs)
  def startElementNS(self, name, qname, attrs):
    """Dispatch a child element to the matching do_<name>() method,
    normalizing known namespaces to prefixed internal names and
    degrading gracefully for unknown/misplaced elements."""
    if attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'lang')):
      self.xmlLang=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'lang'))
    if self.xmlLang:
      from validators import iso639_validate
      iso639_validate(self.log, self.xmlLang, "xml:lang", name)
    from validators import eater
    feedtype=self.getFeedType()
    if (not qname) and feedtype and (feedtype!=TYPE_RSS2):
      # no-namespace children are only defined for the RSS 2.x family
      from logging import UndeterminableVocabulary
      self.log(UndeterminableVocabulary({"parent":self.name, "element":name, "namespace":'""'}))
      qname="null"
    if qname in self.dispatcher.defaultNamespaces: qname=None
    # map recognized (including slightly misspelled) namespaces to the
    # canonical prefix and fold it into the internal element name
    nm_qname = near_miss(qname)
    if nearly_namespaces.has_key(nm_qname):
      prefix = nearly_namespaces[nm_qname]
      qname, name = None, prefix + "_" + name
      if prefix == 'itunes' and not self.itunes and not self.parent.itunes:
        if hasattr(self, 'setItunes'): self.setItunes(True)
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    if qname=='http://purl.org/atom/ns#':
      from logging import ObsoleteNamespace
      self.log(ObsoleteNamespace({"element":"feed"}))
    # attribute values containing C1 controls or U+FFFD are suspect
    for key, string in attrs.items():
      for c in string:
        if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
          from validators import BadCharacters
          self.log(BadCharacters({"parent":name, "element":key[-1]}))
    if qname:
      # element in an unrecognized namespace
      handler = self.unknown_starttag(name, qname, attrs)
      name="unknown_"+name
      self.child=name
    else:
      try:
        self.child=name
        if name.startswith('dc_'):
          # handle "Qualified" Dublin Core
          handler = getattr(self, "do_" + name.replace("-","_").split('.')[0])()
        else:
          handler = getattr(self, "do_" + name.replace("-","_"))()
      except AttributeError:
        if name.find(':') != -1:
          from logging import MissingNamespace
          self.log(MissingNamespace({"parent":self.name, "element":name}))
          handler = eater()
        elif name.startswith('xhtml_'):
          from logging import MisplacedXHTMLContent
          self.log(MisplacedXHTMLContent({"parent": ':'.join(self.name.split("_",1)), "element":name}))
          handler = eater()
        else:
          try:
            from extension import Questionable
            # requalify the name with the default namespace
            qname = name
            from logging import TYPE_APP_CATEGORIES, TYPE_APP_SERVICE
            if self.getFeedType() in [TYPE_APP_CATEGORIES, TYPE_APP_SERVICE]:
              if qname.startswith('app_'): qname=qname[4:]
            if name.find('_')<0 and self.name.find('_')>=0:
              if 'http://www.w3.org/2005/Atom' in self.dispatcher.defaultNamespaces:
                qname='atom_'+qname
            # is this element questionable?
            handler = getattr(Questionable(), "do_" + qname.replace("-","_"))()
            from logging import QuestionableUsage
            self.log(QuestionableUsage({"parent": ':'.join(self.name.split("_",1)), "element":qname}))
          except AttributeError:
            from logging import UndefinedElement
            self.log(UndefinedElement({"parent": ':'.join(self.name.split("_",1)), "element":name}))
            handler = eater()
    self.push(handler, name, attrs)
    # MAP - always append name, even if already exists (we need this to
    # check for too many hour elements in skipHours, and it doesn't
    # hurt anything else)
    self.children.append(self.child)
  def normalizeWhitespace(self):
    # overridden by subclasses that must preserve surrounding whitespace
    self.value = self.value.strip()
  def endElementNS(self, name, qname):
    """Finish this element: trim text, run validate(), and report the
    element as valid when no events were logged against it."""
    self.normalizeWhitespace()
    self.validate()
    if self.isValid and self.name:
      from validators import ValidElement
      self.log(ValidElement({"parent":self.parent.name, "element":name}))
  def textOK(self):
    # default: character data inside this element is unexpected
    from validators import UnexpectedText
    self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))
  def characters(self, string):
    """Accumulate character data, flagging double-encoded UTF-8 and
    Windows-1252 (C1) characters with their in-node position."""
    if string.strip(): self.textOK()
    line=column=0
    pc=' '
    for c in string:
      # latin characters double encoded as utf-8
      if 0x80 <= ord(c) <= 0xBF:
        if 0xC2 <= ord(pc) <= 0xC3:
          try:
            # only a BadCharacters event if the round-trip succeeds,
            # i.e. the text really decodes as UTF-8 a second time
            string.encode('iso-8859-1').decode('utf-8')
            from validators import BadCharacters
            self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,max(1,column-1)))
          except:
            pass
      pc = c
      # win1252
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,column))
      column=column+1
      if ord(c) in (10,13):
        column=0
        line=line+1
    self.value = self.value + string
  def log(self, event, offset=(0,0)):
    """Attribute the event to this element, forward it to the
    dispatcher, and mark this element invalid."""
    if not event.params.has_key('element'):
      event.params['element'] = self.name
    self.dispatcher.log(event, offset)
    self.isValid = 0
  def setFeedType(self, feedType):
    # feed type is stored centrally on the dispatcher
    self.dispatcher.setFeedType(feedType)
  def getFeedType(self):
    return self.dispatcher.getFeedType()
  def push(self, handler, name, value):
    # delegate stack management to the dispatcher, naming self as parent
    self.dispatcher.push(handler, name, value, self)
  def leaf(self):
    """Default handler for leaf elements: plain text."""
    from validators import text
    return text()
  def prevalidate(self):
    # hook: runs before any child/character events
    pass
  def validate(self):
    # hook: runs at element end
    pass
| Python |
"""$Id: content.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase, namespaces
from validators import *
from logging import *
#
# item element.
#
class textConstruct(validatorBase,rfc2396,nonhtml):
  """Handler for Atom text constructs and atom:content: validates the
  type/src attributes and checks the payload against the declared
  media type (text, html, xhtml, or a full MIME type)."""
  from validators import mime_re
  import re
  def getExpectedAttrNames(self):
    return [(None, u'type'),(None, u'src')]
  def normalizeWhitespace(self):
    # content must be preserved verbatim; do not strip
    pass
  def maptype(self):
    # for plain text constructs a full MIME type is not allowed;
    # atom:content overrides this (see class content below)
    if self.type.find('/') > -1:
      self.log(InvalidTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
  def prevalidate(self):
    """Derive self.type from the attributes and validate type/src."""
    nonhtml.start(self)
    if self.attrs.has_key((None,"src")):
      # out-of-line content: no default type
      self.type=''
    else:
      self.type='text'
      if self.getFeedType() == TYPE_RSS2 and self.name != 'atom_summary':
        self.log(DuplicateDescriptionSemantics({"element":self.name}))
    if self.attrs.has_key((None,"type")):
      self.type=self.attrs.getValue((None,"type"))
      if not self.type:
        self.log(AttrNotBlank({"parent":self.parent.name, "element":self.name, "attr":"type"}))
      self.maptype()
    if self.attrs.has_key((None,"src")):
      self.children.append(True) # force warnings about "mixed" content
      # temporarily validate the src URI through self.value
      self.value=self.attrs.getValue((None,"src"))
      rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "src"})
      self.value=""
      if not self.attrs.has_key((None,"type")):
        self.log(MissingTypeAttr({"parent":self.parent.name, "element":self.name, "attr":"type"}))
    if self.type in ['text','html','xhtml'] and not self.attrs.has_key((None,"src")):
      pass
    elif self.type and not self.mime_re.match(self.type):
      self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    else:
      self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    if not self.xmlLang:
      self.log(MissingDCLanguage({"parent":self.name, "element":"xml:lang"}))
  def validate(self):
    """Check the accumulated payload against the declared type."""
    if self.type in ['text','xhtml']:
      if self.type=='xhtml':
        nonhtml.validate(self, NotInline)
      else:
        nonhtml.validate(self, ContainsUndeclaredHTML)
    else:
      # non-text, non-XML media types are base64 encoded
      if self.type.find('/') > -1 and not (
          self.type.endswith('+xml') or self.type.endswith('/xml') or
          self.type.startswith('text/')):
        import base64
        try:
          self.value=base64.decodestring(self.value)
          if self.type.endswith('/html'): self.type='html'
        except:
          self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
      if self.type=='html' or self.type.endswith("/html"):
        self.validateSafe(self.value)
        if self.type.endswith("/html"):
          # a full */html document should contain an <html> root
          if self.value.find("<html")<0 and not self.attrs.has_key((None,"src")):
            self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
      else:
        nonhtml.validate(self, ContainsUndeclaredHTML)
    if not self.value and len(self.children)==0 and not self.attrs.has_key((None,"src")):
      self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
  def textOK(self):
    # text is only unexpected once child elements have appeared
    if self.children: validatorBase.textOK(self)
  def characters(self, string):
    """Accumulate text, flagging C1/U+FFFD characters and xhtml content
    that starts with text instead of the required div wrapper."""
    for c in string:
      if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
    if (self.type=='xhtml') and string.strip() and not self.value.strip():
      self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
    validatorBase.characters(self,string)
  def startElementNS(self, name, qname, attrs):
    """Child elements are only defined for xhtml/XML media types;
    route xhtml:div through diveater, everything else through eater."""
    if (self.type<>'xhtml') and not (
        self.type.endswith('+xml') or self.type.endswith('/xml')):
      self.log(UndefinedElement({"parent":self.name, "element":name}))
    if self.type=="xhtml":
      # type="xhtml" requires a single child div in the XHTML namespace
      if name<>'div' and not self.value.strip():
        self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
      elif qname not in ["http://www.w3.org/1999/xhtml"]:
        self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace", "value": qname}))
    if self.type=="application/xhtml+xml":
      if name<>'html':
        self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
      elif qname not in ["http://www.w3.org/1999/xhtml"]:
        self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace", "value":qname}))
    if self.attrs.has_key((None,"mode")):
      # mode="escaped" was Atom 0.3; markup here means it is not escaped
      if self.attrs.getValue((None,"mode")) == 'escaped':
        self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
    if name=="div" and qname=="http://www.w3.org/1999/xhtml":
      handler=diveater()
    else:
      handler=eater()
    self.children.append(handler)
    self.push(handler, name, attrs)
# treat xhtml:div as part of the content for purposes of detecting escaped html
# treat xhtml:div as part of the content for purposes of detecting escaped html
class diveater(eater):
  """Consumes an xhtml:div while checking contents against the XHTML,
  SVG and MathML element/attribute whitelists; text inside a div with
  no other mixed content is folded back into the parent's value."""
  def __init__(self):
    eater.__init__(self)
    self.mixed = False   # set once any child element is encountered
  def textOK(self):
    # character data inside the div is always acceptable
    pass
  def characters(self, string):
    validatorBase.characters(self, string)
  def startElementNS(self, name, qname, attrs):
    """Whitelist-check a child element by its namespace."""
    if not qname:
      self.log(MissingNamespace({"parent":"xhtml:div", "element":name}))
    elif qname == 'http://www.w3.org/1999/xhtml':
      if name not in HTMLValidator.htmltags:
        self.log(NotHtml({'message':'Non-XHTML element', 'value':name}))
      elif name not in HTMLValidator.acceptable_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.acceptable_attributes:
          if attr == 'style':
            # style is allowed only when every property passes checkStyle
            for value in checkStyle(attrs.get((ns,attr))):
              self.log(DangerousStyleAttr({"attr":attr, "value":value}))
          else:
            self.log(SecurityRiskAttr({'attr':attr}))
    elif qname == 'http://www.w3.org/2000/svg':
      if name not in HTMLValidator.svg_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.svg_attributes:
          self.log(SecurityRiskAttr({'attr':attr}))
    elif qname == 'http://www.w3.org/1998/Math/MathML':
      if name not in HTMLValidator.mathml_elements:
        self.log(SecurityRisk({'tag':name}))
      for ns,attr in attrs.getNames():
        if not ns and attr not in HTMLValidator.mathml_attributes:
          self.log(SecurityRiskAttr({'attr':attr}))
    elif namespaces.has_key(qname):
      # element from a known feed namespace inside xhtml content
      if self.name != 'metadata':
        self.log(UndefinedElement({"parent": self.name, "element":namespaces[qname] + ":" + name}))
      self.push(eater(), name, attrs)
      return
    self.mixed = True
    eater.startElementNS(self, name, qname, attrs)
  def validate(self):
    # a div containing only text contributes that text to the parent,
    # so escaped-HTML detection can run on the combined value
    if not self.mixed: self.parent.value += self.value
class content(textConstruct):
  """atom:content — behaves like any other text construct, except that
  a full MIME type is permitted while 'multipart/alternative' is
  explicitly rejected (composite types cannot be carried inline)."""
  def maptype(self):
    disallowed = 'multipart/alternative'
    if self.type == disallowed:
      self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
| Python |
"""$Id: extension.py 1061 2010-01-25 17:50:01Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net>, Mark Pilgrim <http://diveintomark.org/> and Phil Ringnalda <http://philringnalda.com>"
__version__ = "$Revision: 1061 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby, Mark Pilgrim and Phil Ringnalda"
from validators import *
from logging import *
########################################################################
# Extensions that are valid everywhere #
########################################################################
class extension_everywhere:
  """Mixin declaring extension elements that are valid anywhere in a
  feed: Dublin Core (dc:), DC Terms (dcterms:), RDF, geo/GeoRSS/ICBM
  location, and OPML ownership elements.  Each do_<prefix>_<name>()
  returns the validator(s) for that child element; adding
  noduplicates() makes repeated occurrences an error."""
  def do_dc_title(self):
    return text(), noduplicates()
  def do_dc_description(self):
    return text(), noduplicates()
  def do_dc_publisher(self):
    if "webMaster" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return text() # duplicates allowed
  def do_dc_contributor(self):
    return text() # duplicates allowed
  def do_dc_type(self):
    return text(), noduplicates()
  def do_dc_format(self):
    return text(), noduplicates()
  def do_dc_identifier(self):
    return text()
  def do_dc_source(self):
    if "source" in self.children:
      self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
    return text(), noduplicates()
  def do_dc_language(self):
    if "language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_dc_relation(self):
    return text(), # duplicates allowed
  def do_dc_coverage(self):
    return text(), # duplicates allowed
  def do_dc_rights(self):
    if "copyright" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return nonhtml(), noduplicates()
  def do_dcterms_alternative(self):
    return text() #duplicates allowed
  def do_dcterms_abstract(self):
    return text(), noduplicates()
  def do_dcterms_tableOfContents(self):
    return rdfResourceURI(), noduplicates()
  def do_dcterms_created(self):
    return w3cdtf(), noduplicates()
  def do_dcterms_valid(self):
    return eater()
  def do_dcterms_available(self):
    return eater()
  def do_dcterms_issued(self):
    return w3cdtf(), noduplicates()
  def do_dcterms_modified(self):
    if "lastBuildDate" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return w3cdtf(), noduplicates()
  def do_dcterms_dateAccepted(self):
    return text(), noduplicates()
  def do_dcterms_dateCopyrighted(self):
    return text(), noduplicates()
  def do_dcterms_dateSubmitted(self):
    return text(), noduplicates()
  def do_dcterms_extent(self):
    return positiveInteger(), nonblank(), noduplicates()
#  def do_dcterms_medium(self):
#    spec defines it as something that should never be used
#    undefined element'll do for now
  def do_dcterms_isVersionOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasVersion(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isReplacedBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_replaces(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isRequiredBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_requires(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isPartOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasPart(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isReferencedBy(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_references(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_isFormatOf(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_hasFormat(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_conformsTo(self):
    return rdfResourceURI() # duplicates allowed
  def do_dcterms_spatial(self):
    return eater()
  def do_dcterms_temporal(self):
    return eater()
  def do_dcterms_audience(self):
    return text()
  def do_dcterms_mediator(self):
    return text(), noduplicates()
  # added to DMCI, but no XML mapping has been defined
  def do_dcterms_accessRights(self):
    return eater()
  def do_dcterms_accrualMethod(self):
    return eater()
  def do_dcterms_accrualPeriodicity(self):
    return eater()
  def do_dcterms_accrualPolicy(self):
    return eater()
  def do_dcterms_bibliographicCitation(self):
    return eater()
  def do_dcterms_educationLevel(self):
    return eater()
  def do_dcterms_instructionalMethod(self):
    return eater()
  def do_dcterms_license(self):
    return eater()
  def do_dcterms_provenance(self):
    return eater()
  def do_dcterms_rightsHolder(self):
    return eater()
  # embedded RDF is accepted but not validated further
  def do_rdf_RDF(self):
    return eater()
  def do_rdf_type(self):
    return eater()
  def do_rdf_Description(self):
    return eater()
  def do_rdfs_seeAlso(self):
    return rdfResourceURI() # duplicates allowed
  # geo (W3C basic geo), geourl, GeoRSS and ICBM location elements
  def do_geo_Point(self):
    return geo_point()
  def do_geo_lat(self):
    return latitude()
  def do_geo_long(self):
    return longitude()
  def do_geo_alt(self):
    return decimal()
  def do_geourl_latitude(self):
    return latitude()
  def do_geourl_longitude(self):
    return longitude()
  def do_georss_where(self):
    return georss_where()
  def do_georss_point(self):
    return gml_pos()
  def do_georss_line(self):
    return gml_posList()
  def do_georss_polygon(self):
    return gml_posList()
  def do_georss_featuretypetag(self):
    return text()
  def do_georss_relationshiptag(self):
    return text()
  def do_georss_featurename(self):
    return text()
  def do_georss_elev(self):
    return decimal()
  def do_georss_floor(self):
    return Integer()
  def do_georss_radius(self):
    return Float()
  def do_icbm_latitude(self):
    return latitude()
  def do_icbm_longitude(self):
    return longitude()
  # OPML ownership/metadata elements
  def do_opml_dateCreated(self):
    return rfc822(), noduplicates()
  def do_opml_dateModified(self):
    return rfc822(), noduplicates()
  def do_opml_ownerName(self):
    return safeHtml(), noduplicates()
  def do_opml_ownerEmail(self):
    return email(), noduplicates()
  def do_opml_ownerId(self):
    return httpURL(), noduplicates()
########################################################################
# Extensions that are valid at either the channel or item levels #
########################################################################
from media import media_elements, media_content, media_group
class extension_channel_item(extension_everywhere, media_elements):
  """Mixin declaring extension elements valid at both the channel and
  the item level (everything from extension_everywhere plus Media RSS
  elements, taxo:topics and l:link)."""
  def do_taxo_topics(self):
    # contents of taxo:topics are accepted without further validation
    return eater()
  def do_l_link(self):
    return l_link()
########################################################################
# Extensions that are valid at only at the item level #
########################################################################
class extension_item(extension_channel_item):
  """Mixin declaring extension elements valid only at the item/entry
  level: annotation, aggregation (ag:), event (ev:), FeedBurner, FOAF,
  Slash, threading (thr:), well-formed-web (wfw:), wiki, Google Base
  (g:), Media RSS, SSE and conversationsNetwork elements.  Each
  do_<prefix>_<name>() returns the validator(s) for that element."""
  def do_annotate_reference(self):
    return rdfResourceURI(), noduplicates()
  # aggregation module
  def do_ag_source(self):
    return text(), noduplicates()
  def do_ag_sourceURL(self):
    return rfc2396_full(), noduplicates()
  def do_ag_timestamp(self):
    return iso8601(), noduplicates()
  # event module
  def do_ev_startdate(self):
    return unbounded_iso8601(), noduplicates()
  def do_ev_enddate(self):
    return unbounded_iso8601(), noduplicates()
  def do_ev_location(self):
    return eater()
  def do_ev_organizer(self):
    return eater()
  def do_ev_type(self):
    return text(), noduplicates()
  # FeedBurner elements
  def do_feedburner_awareness(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_origEnclosureLink(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_origLink(self):
    return rfc2396_full(), noduplicates()
  def do_foaf_maker(self):
    return eater()
  def do_foaf_primaryTopic(self):
    return eater()
  # Slash module
  def do_slash_comments(self):
    return nonNegativeInteger(), noduplicates()
  def do_slash_section(self):
    return text()
  def do_slash_department(self):
    return text()
  def do_slash_hit_parade(self):
    return commaSeparatedIntegers(), noduplicates()
  # threading extension
  def do_thr_children(self):
    # thr:children is only defined for RSS 1.0 feeds
    if self.getFeedType() != TYPE_RSS1:
      self.log(UndefinedElement({'parent':self.name,"element":"thr:children"}))
    return eater()
  def do_thr_total(self):
    return nonNegativeInteger(), noduplicates()
  def do_thr_in_reply_to(self):
    return in_reply_to()
  # well-formed web comment API
  def do_wfw_comment(self):
    return rfc2396_full(), noduplicates()
  def do_wfw_commentRss(self):
    return rfc2396_full(), noduplicates()
  def do_wfw_commentRSS(self):
    # the capitalization 'commentRSS' is flagged; 'commentRss' preferred
    self.log(CommentRSS({"parent":self.parent.name, "element":self.name}))
    return rfc2396_full(), noduplicates()
  # wiki module
  def do_wiki_diff(self):
    return text()
  def do_wiki_history(self):
    return text()
  def do_wiki_importance(self):
    return text()
  def do_wiki_status(self):
    return text()
  def do_wiki_version(self):
    return text()
  # Google Base (g:) elements
  def do_g_actor(self):
    return nonhtml(), noduplicates()
  def do_g_age(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_agent(self):
    return nonhtml(), noduplicates()
  def do_g_area(self):
    return nonhtml(), noduplicates() # intUnit
  def do_g_apparel_type(self):
    return nonhtml(), noduplicates()
  def do_g_artist(self):
    return nonhtml(), noduplicates()
  def do_g_author(self):
    return nonhtml(), noduplicates()
  def do_g_bathrooms(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_bedrooms(self):
    return nonNegativeInteger(), noduplicates()
  def do_g_brand(self):
    return nonhtml(), noduplicates()
  def do_g_calories(self):
    return g_float(), noduplicates()
  def do_g_cholesterol(self):
    return g_float(), noduplicates()
  def do_g_color(self):
    return nonhtml(), noduplicates()
  def do_g_cooking_time(self):
    return g_float(), noduplicates()
  def do_g_condition(self):
    return nonhtml(), noduplicates()
  def do_g_course(self):
    return nonhtml(), noduplicates()
  def do_g_course_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_course_number(self):
    return nonhtml(), noduplicates()
  def do_g_course_times(self):
    return nonhtml(), noduplicates()
  def do_g_cuisine(self):
    return nonhtml(), noduplicates()
  def do_g_currency(self):
    return iso4217(), noduplicates()
  def do_g_delivery_notes(self):
    return nonhtml(), noduplicates()
  def do_g_delivery_radius(self):
    return floatUnit(), noduplicates()
  def do_g_education(self):
    return nonhtml(), noduplicates()
  def do_g_employer(self):
    return nonhtml(), noduplicates()
  def do_g_ethnicity(self):
    return nonhtml(), noduplicates()
  def do_g_event_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_expiration_date(self):
    return iso8601_date(), noduplicates()
  def do_g_expiration_date_time(self):
    return iso8601(), noduplicates()
  def do_g_fiber(self):
    return g_float(), noduplicates()
  def do_g_from_location(self):
    return g_locationType(), noduplicates()
  def do_g_gender(self):
    return g_genderEnumeration(), noduplicates()
  def do_g_hoa_dues(self):
    return g_float(), noduplicates()
  def do_g_format(self):
    return nonhtml(), noduplicates()
  def do_g_id(self):
    return nonhtml(), noduplicates()
  def do_g_image_link(self):
    return rfc2396_full(), maxten()
  def do_g_immigration_status(self):
    return nonhtml(), noduplicates()
  def do_g_interested_in(self):
    return nonhtml(), noduplicates()
  def do_g_isbn(self):
    return nonhtml(), noduplicates()
  def do_g_job_function(self):
    return nonhtml(), noduplicates()
  def do_g_job_industry(self):
    return nonhtml(), noduplicates()
  def do_g_job_type(self):
    return nonhtml(), noduplicates()
  def do_g_label(self):
    return g_labelType(), maxten()
  def do_g_listing_type(self):
    return truefalse(), noduplicates()
  def do_g_location(self):
    return g_full_locationType(), noduplicates()
  def do_g_main_ingredient(self):
    return nonhtml(), noduplicates()
  def do_g_make(self):
    return nonhtml(), noduplicates()
  def do_g_manufacturer(self):
    return nonhtml(), noduplicates()
  def do_g_manufacturer_id(self):
    return nonhtml(), noduplicates()
  def do_g_marital_status(self):
    return g_maritalStatusEnumeration(), noduplicates()
  def do_g_meal_type(self):
    return nonhtml(), noduplicates()
  def do_g_megapixels(self):
    return floatUnit(), noduplicates()
  def do_g_memory(self):
    return floatUnit(), noduplicates()
  def do_g_mileage(self):
    return g_intUnit(), noduplicates()
  def do_g_model(self):
    return nonhtml(), noduplicates()
  def do_g_model_number(self):
    return nonhtml(), noduplicates()
  def do_g_name_of_item_being_reviewed(self):
    return nonhtml(), noduplicates()
  def do_g_news_source(self):
    return nonhtml(), noduplicates()
  def do_g_occupation(self):
    return nonhtml(), noduplicates()
  def do_g_payment_notes(self):
    return nonhtml(), noduplicates()
  def do_g_pages(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_g_payment_accepted(self):
    return g_paymentMethodEnumeration()
  def do_g_pickup(self):
    return truefalse(), noduplicates()
  def do_g_preparation_time(self):
    return floatUnit(), noduplicates()
  def do_g_price(self):
    return floatUnit(), noduplicates()
  def do_g_price_type(self):
    return g_priceTypeEnumeration(), noduplicates()
  def do_g_processor_speed(self):
    return floatUnit(), noduplicates()
  def do_g_product_type(self):
    return nonhtml(), noduplicates()
  def do_g_property_type(self):
    return nonhtml(), noduplicates()
  def do_g_protein(self):
    return floatUnit(), noduplicates()
  def do_g_publication_name(self):
    return nonhtml(), noduplicates()
  def do_g_publication_volume(self):
    return nonhtml(), noduplicates()
  def do_g_publish_date(self):
    return iso8601_date(), noduplicates()
  def do_g_quantity(self):
    return nonNegativeInteger(), nonblank(), noduplicates()
  def do_g_rating(self):
    return g_ratingTypeEnumeration(), noduplicates()
  def do_g_review_type(self):
    return nonhtml(), noduplicates()
  def do_g_reviewer_type(self):
    return g_reviewerTypeEnumeration(), noduplicates()
  def do_g_salary(self):
    return g_float(), noduplicates()
  def do_g_salary_type(self):
    return g_salaryTypeEnumeration(), noduplicates()
  def do_g_saturated_fat(self):
    return g_float(), noduplicates()
  def do_g_school_district(self):
    return nonhtml(), noduplicates()
  def do_g_service_type(self):
    return nonhtml(), noduplicates()
  def do_g_servings(self):
    return g_float(), noduplicates()
  def do_g_sexual_orientation(self):
    return nonhtml(), noduplicates()
  def do_g_size(self):
    return nonhtml(), noduplicates() # TODO: expressed in either two or three dimensions.
  def do_g_shipping(self):
    return g_shipping(), noduplicates()
  def do_g_sodium(self):
    return g_float(), noduplicates()
  def do_g_subject(self):
    return nonhtml(), noduplicates()
  def do_g_subject_area(self):
    return nonhtml(), noduplicates()
  def do_g_tax_percent(self):
    return percentType(), noduplicates()
  def do_g_tax_region(self):
    return nonhtml(), noduplicates()
  def do_g_to_location(self):
    return g_locationType(), noduplicates()
  def do_g_total_carbs(self):
    return g_float(), noduplicates()
  def do_g_total_fat(self):
    return g_float(), noduplicates()
  def do_g_travel_date_range(self):
    return g_dateTimeRange(), noduplicates()
  def do_g_university(self):
    return nonhtml(), noduplicates()
  def do_g_upc(self):
    return nonhtml(), noduplicates()
  def do_g_url_of_item_being_reviewed(self):
    return rfc2396_full(), noduplicates()
  def do_g_vehicle_type(self):
    return nonhtml(), noduplicates()
  def do_g_vin(self):
    return nonhtml(), noduplicates()
  def do_g_weight(self):
    return floatUnit(), noduplicates()
  def do_g_year(self):
    return g_year(), noduplicates()
  # Media RSS
  def do_media_group(self):
    return media_group()
  def do_media_content(self):
    return media_content()
  # Simple Sharing Extensions
  def do_sx_sync(self):
    import sse
    return sse.Sync()
  # conversationsNetwork elements
  def do_conversationsNetwork_introMilliseconds(self):
    return nonNegativeInteger(), noduplicates()
  def do_conversationsNetwork_image(self):
    return httpURL(), noduplicates()
  def do_conversationsNetwork_ratingAverage(self):
    return Float(), noduplicates()
  def do_conversationsNetwork_ratingCount(self):
    return nonNegativeInteger(), noduplicates()
  def do_conversationsNetwork_ratingIndividual(self):
    return positiveInteger(), noduplicates()
  def do_conversationsNetwork_ratingTimestamp(self):
    return iso8601(), noduplicates()
class heisen_uri(rfc3987, rfc2396_full):
  # URI whose strictness depends on the feed type: Atom feeds get full
  # RFC 3987 (IRI) validation; for other feed types only a warning is
  # logged when the value is not an absolute RFC 2396 URI reference.
  def validate(self):
    if self.getFeedType() == TYPE_ATOM:
      rfc3987.validate(self)
    elif not rfc2396_full.rfc2396_re.match(self.value):
      self.log(ContainsRelRef({'parent':self.parent.name}))
class feedFlare(nonhtml):
  # feedburner:feedFlare requires both href and src attributes, each of
  # which must be a valid URI (strictness varies by feed type; see heisen_uri).
  def getExpectedAttrNames(self):
    return [(None,u'href'),(None,u'src')]
  def prevalidate(self):
    self.validate_required_attribute((None,'href'), heisen_uri)
    self.validate_required_attribute((None,'src'), heisen_uri)
    return text.prevalidate(self)
class feedInfo(validatorBase):
  # feedburner:info requires a uri attribute, validated as an IRI.
  def getExpectedAttrNames(self):
    return [(None,u'uri')]
  def prevalidate(self):
    self.validate_required_attribute((None,'uri'), rfc3987)
class xmlView(validatorBase):
  # requires an href attribute that must be an absolute RFC 2396 URI
  def getExpectedAttrNames(self):
    return [(None,u'href')]
  def prevalidate(self):
    self.validate_required_attribute((None,'href'), rfc2396_full)
class georss_where(validatorBase):
  # georss:where may contain any of the four supported GML geometries.
  def do_gml_Point(self):
    return gml_point()
  def do_gml_LineString(self):
    return gml_line()
  def do_gml_Polygon(self):
    return gml_polygon()
  def do_gml_Envelope(self):
    return gml_envelope()
class geo_srsName(validatorBase):
  # Base for GML geometries: all allow an optional srsName
  # (spatial reference system) attribute.
  def getExpectedAttrNames(self):
    return [(None, u'srsName')]
class gml_point(geo_srsName):
  # gml:Point holds a single gml:pos coordinate pair.
  def do_gml_pos(self):
    return gml_pos()
class geo_point(validatorBase):
  """Validates a W3C geo point, which must contain both geo:lat and geo:long."""
  def do_geo_lat(self):
    return latitude()
  def do_geo_long(self):
    return longitude()
  def validate(self):
    # Both coordinate children are mandatory; report each one that is absent.
    parent_label = self.name.replace('_', ':')
    for child_key, element_label in (("geo_lat", "geo:lat"),
                                     ("geo_long", "geo:long")):
      if child_key not in self.children:
        self.log(MissingElement({"parent": parent_label,
                                 "element": element_label}))
class gml_pos(text):
  # A single coordinate pair: two numbers separated by a space (preferred)
  # or a comma (accepted, but logged as a warning).
  def validate(self):
    if not re.match('^[-+]?\d+\.?\d*[ ,][-+]?\d+\.?\d*$', self.value):
      return self.log(InvalidCoord({'value':self.value}))
    if self.value.find(',')>=0:
      self.log(CoordComma({'value':self.value}))
class gml_line(geo_srsName):
  # gml:LineString carries its coordinates in a gml:posList.
  def do_gml_posList(self):
    return gml_posList()
class gml_posList(text):
  # A gml:posList is a whitespace-separated list of numeric coordinates
  # forming complete latitude/longitude pairs.  Commas are tolerated as
  # pair separators, but logged as a warning and normalized to spaces.
  def validate(self):
    if self.value.find(',')>=0:
      # ensure that commas are only used to separate lat and long
      # (fixes: bare `value` raised NameError instead of validating, and the
      # character classes lacked `+` so multi-digit coordinates never matched)
      if not re.match('^[-+.0-9]+[, ][-+.0-9]+( [-+.0-9]+[, ][-+.0-9]+)*$',
          self.value.strip()):
        return self.log(InvalidCoordList({'value':self.value}))
      self.log(CoordComma({'value':self.value}))
      self.value=self.value.replace(',',' ')
    values = self.value.strip().split()
    # need at least two pairs and an even number of values overall
    if len(values)<3 or len(values)%2 == 1:
      return self.log(InvalidCoordList({'value':self.value}))
    for value in values:
      if not re.match('^[-+]?\d+\.?\d*$', value):
        return self.log(InvalidCoordList({'value':value}))
class gml_polygon(geo_srsName):
  # gml:Polygon -> gml:exterior -> gml:LinearRing -> gml:posList
  def do_gml_exterior(self):
    return gml_exterior()
class gml_exterior(validatorBase):
  def do_gml_LinearRing(self):
    return gml_linearRing()
class gml_linearRing(geo_srsName):
  def do_gml_posList(self):
    return gml_posList()
class gml_envelope(geo_srsName):
  # gml:Envelope is a bounding box given by its lower and upper corners.
  def do_gml_lowerCorner(self):
    return gml_pos()
  def do_gml_upperCorner(self):
    return gml_pos()
class access_restriction(enumeration):
  # access:restriction must carry a relationship attribute whose value is
  # either "allow" or "deny" (checked by the enumeration base class).
  error = InvalidAccessRestrictionRel
  valuelist = ["allow", "deny"]
  def getExpectedAttrNames(self):
    return [(None, u'relationship')]
  def prevalidate(self):
    self.children.append(True) # force warnings about "mixed" content
    if not self.attrs.has_key((None,"relationship")):
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"relationship"}))
    else:
      # enumeration validation operates on self.value, so point it at the attribute
      self.value=self.attrs.getValue((None,"relationship"))
########################################################################
# Extensions that are valid at only at the RSS 2.0 item level #
########################################################################
class extension_rss20_item(extension_item):
  # RSS 2.0 items carry trackback URIs as element content (plain URIs),
  # unlike the RSS 1.0 variant which uses rdf:resource attributes.
  def do_trackback_ping(self):
    return rfc2396_full(), noduplicates()
  def do_trackback_about(self):
    return rfc2396_full()
  # The dcterms elements below are accepted but not further validated.
  def do_dcterms_accessRights(self):
    return eater()
  def do_dcterms_accrualMethod(self):
    return eater()
  def do_dcterms_accrualPeriodicity(self):
    return eater()
  def do_dcterms_accrualPolicy(self):
    return eater()
  def do_dcterms_bibliographicCitation(self):
    return eater()
  def do_dcterms_educationLevel(self):
    return eater()
  def do_dcterms_instructionalMethod(self):
    return eater()
  def do_dcterms_license(self):
    return eater()
  def do_dcterms_provenance(self):
    return eater()
  def do_dcterms_rightsHolder(self):
    return eater()
########################################################################
# Extensions that are valid at only at the RSS 1.0 item level #
########################################################################
class extension_rss10_item(extension_item):
  # RSS 1.0 (RDF) items carry trackback URIs as rdf:resource attributes
  # and may use the link module's l:permalink element.
  def do_trackback_ping(self):
    return rdfResourceURI(), noduplicates()
  def do_trackback_about(self):
    return rdfResourceURI()
  def do_l_permalink(self):
    return l_permalink()
class l_permalink(rdfResourceURI, MimeType):
  # l:permalink is an rdf:resource URI with an optional l:type attribute
  # that, when present, must be a valid MIME type.
  lNS = u'http://purl.org/rss/1.0/modules/link/'
  def getExpectedAttrNames(self):
    return rdfResourceURI.getExpectedAttrNames(self) + [(self.lNS, u'type')]
  def validate(self):
    if (self.lNS, 'type') in self.attrs.getNames():
      # MimeType.validate() operates on self.value, so point it at the
      # attribute value before running the URI validation below
      self.value=self.attrs.getValue((self.lNS, 'type'))
      MimeType.validate(self)
    return rdfResourceURI.validate(self)
class l_link(rdfResourceURI, MimeType):
  # l:link: rel is required; lang, title and type are optional.  A type
  # value containing ':' is treated as a URI rather than a MIME type.
  lNS = u'http://purl.org/rss/1.0/modules/link/'
  def getExpectedAttrNames(self):
    return rdfResourceURI.getExpectedAttrNames(self) + [
      (self.lNS, u'lang'), (self.lNS, u'rel'),
      (self.lNS, u'type'), (self.lNS, u'title')
      ]
  def prevalidate(self):
    self.validate_optional_attribute((self.lNS,'lang'), iso639)
    self.validate_required_attribute((self.lNS,'rel'), rfc2396_full)
    self.validate_optional_attribute((self.lNS,'title'), nonhtml)
    if self.attrs.has_key((self.lNS, "type")):
      if self.attrs.getValue((self.lNS, "type")).find(':') < 0:
        self.validate_optional_attribute((self.lNS,'type'), MimeType)
      else:
        self.validate_optional_attribute((self.lNS,'type'), rfc2396_full)
########################################################################
# Extensions that are valid at only at the Atom entry level #
########################################################################
class extension_entry(extension_item):
  # Extension elements allowed on an Atom entry; the dc: elements mirror
  # core Atom constructs (noted inline).
  def do_dc_creator(self): # atom:creator
    return text() # duplicates allowed
  def do_dc_subject(self): # atom:category
    return text() # duplicates allowed
  def do_dc_date(self): # atom:published
    return w3cdtf(), noduplicates()
  def do_creativeCommons_license(self):
    return rfc2396_full()
  def do_trackback_ping(self):
    return rfc2396_full(), noduplicates()
  # XXX This should have duplicate semantics with link[@rel='related']
  def do_trackback_about(self):
    return rfc2396_full()
########################################################################
# Extensions that are valid at only at the channel level #
########################################################################
class extension_channel(extension_channel_item):
  # Extension elements allowed at the RSS channel level.
  def do_admin_generatorAgent(self):
    # warn when both generator and admin:generatorAgent are present
    if "generator" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return admin_generatorAgent(), noduplicates()
  def do_admin_errorReportsTo(self):
    return admin_errorReportsTo(), noduplicates()
  def do_blogChannel_blogRoll(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_mySubscriptions(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_blink(self):
    return rfc2396_full(), noduplicates()
  def do_blogChannel_changes(self):
    return rfc2396_full(), noduplicates()
  # syndication module (sy:)
  def do_sy_updatePeriod(self):
    return sy_updatePeriod(), noduplicates()
  def do_sy_updateFrequency(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_sy_updateBase(self):
    return w3cdtf(), noduplicates()
  def do_foaf_maker(self):
    return eater()
  def do_cp_server(self):
    return rdfResourceURI()
  def do_wiki_interwiki(self):
    return text()
  def do_thr_in_reply_to(self):
    return in_reply_to()
  def do_cf_listinfo(self):
    from cf import listinfo
    return listinfo()
  def do_cf_treatAs(self):
    from cf import treatAs
    return treatAs()
  # FeedBurner elements
  def do_feedburner_awareness(self):
    return rfc2396_full(), noduplicates()
  def do_feedburner_browserFriendly(self):
    return nonhtml(), noduplicates()
  def do_feedburner_emailServiceId(self):
    return noduplicates()
  def do_feedburner_feedFlare(self):
    return feedFlare()
  def do_feedburner_info(self):
    return feedInfo()
  def do_feedburner_feedburnerHostname(self):
    return rfc2396_full(), noduplicates()
  # OpenSearch response elements; the opensearch10 aliases reuse the same
  # handlers for the older namespace prefix
  def do_opensearch_totalResults(self):
    return nonNegativeInteger(), noduplicates()
  do_opensearch10_totalResults = do_opensearch_totalResults
  def do_opensearch_startIndex(self):
    return Integer(), noduplicates()
  do_opensearch10_startIndex = do_opensearch_startIndex
  def do_opensearch_itemsPerPage(self):
    return nonNegativeInteger(), noduplicates()
  do_opensearch10_itemsPerPage = do_opensearch_itemsPerPage
  def do_opensearch_Query(self):
    from opensearch import Query
    return Query()
  def do_xhtml_div(self):
    return eater()
  def do_xhtml_meta(self):
    return xhtml_meta()
  def do_sx_sharing(self):
    import sse
    return sse.Sharing()
  # feed history (fh:) flags; accepted with no further validation
  def do_fh_archive(self):
    return validatorBase()
  def do_fh_complete(self):
    return validatorBase()
class xhtml_meta(validatorBase):
  # xhtml:meta in a feed: only name="robots" with a standard robots
  # content value is accepted (both checks are case-insensitive).
  def getExpectedAttrNames(self):
    return [ (None, u'name'), (None, u'content') ]
  def prevalidate(self):
    self.validate_required_attribute((None,'name'), xhtmlMetaEnumeration)
    self.validate_required_attribute((None,'content'), robotsEnumeration)
class xhtmlMetaEnumeration(caseinsensitive_enumeration):
  error = InvalidMetaName
  valuelist = ["robots"]
class robotsEnumeration(caseinsensitive_enumeration):
  error = InvalidMetaContent
  valuelist = [
    "all", "none",
    "index", "index,follow", "index,nofollow",
    "noindex", "noindex,follow", "noindex,nofollow",
    "follow", "follow,index", "follow,noindex",
    "nofollow", "nofollow,index", "nofollow,noindex"]
########################################################################
# Extensions that are valid at only at the Atom feed level #
########################################################################
class extension_feed(extension_channel):
  # Extension elements allowed at the Atom feed level; inherits all
  # channel-level handlers and adds Atom-specific dc: mappings.
  def do_dc_creator(self): # atom:creator
    return text() # duplicates allowed
  def do_dc_subject(self): # atom:category
    return text() # duplicates allowed
  def do_dc_date(self): # atom:updated
    return w3cdtf(), noduplicates()
  def do_creativeCommons_license(self):
    return rfc2396_full()
  def do_access_restriction(self):
    return access_restriction()
########################################################################
# Validators #
########################################################################
# Both admin elements are simple rdf:resource URI carriers.
class admin_generatorAgent(rdfResourceURI): pass
class admin_errorReportsTo(rdfResourceURI): pass
class sy_updatePeriod(text):
  """Checks that sy:updatePeriod holds one of the five allowed period names."""
  def validate(self):
    params = {"parent": self.parent.name, "element": self.name,
              "value": self.value}
    # Log a valid event for a recognized period, an invalid one otherwise.
    if self.value in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
      self.log(ValidUpdatePeriod(params))
    else:
      self.log(InvalidUpdatePeriod(params))
class g_complex_type(validatorBase):
  # Structured g: elements: RSS 1.0 (RDF) feeds may carry rdf:parseType;
  # other feed types allow no attributes at all.
  def getExpectedAttrNames(self):
    if self.getFeedType() == TYPE_RSS1:
      return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
    else:
      return []
class g_shipping(g_complex_type):
  # g:shipping groups a carrier service, destination country and price.
  def do_g_service(self):
    return g_serviceTypeEnumeration(), noduplicates()
  def do_g_country(self):
    return iso3166(), noduplicates()
  def do_g_price(self):
    return floatUnit(), noduplicates()
class g_dateTimeRange(g_complex_type):
  # ISO 8601 start/end pair
  def do_g_start(self):
    return iso8601(), noduplicates()
  def do_g_end(self):
    return iso8601(), noduplicates()
class g_labelType(text):
  # rejects label values containing commas
  def validate(self):
    if self.value.find(',')>=0:
      self.log(InvalidLabel({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_locationType(text):
  # a location must have two or three comma-separated components
  def validate(self):
    if len(self.value.split(',')) not in [2,3]:
      self.log(InvalidLocation({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_full_locationType(text):
  # a full location has exactly five comma-separated, non-blank fields
  def validate(self):
    fields = self.value.split(',')
    if len(fields) != 5 or 0 in [len(f.strip()) for f in fields]:
      self.log(InvalidFullLocation({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
# Closed-vocabulary checks for the corresponding g: elements; the
# enumeration base class compares the element value against valuelist and
# logs the class's error event on mismatch.
class g_genderEnumeration(enumeration):
  error = InvalidGender
  valuelist = ["Male", "M", "Female", "F"]
class g_maritalStatusEnumeration(enumeration):
  error = InvalidMaritalStatus
  valuelist = ["single", "divorced", "separated", "widowed", "married", "in relationship"]
class g_paymentMethodEnumeration(enumeration):
  error = InvalidPaymentMethod
  valuelist = ["Cash", "Check", "Visa", "MasterCard",
    "AmericanExpress", "Discover", "WireTransfer"]
class g_priceTypeEnumeration(enumeration):
  error = InvalidPriceType
  valuelist = ["negotiable", "starting"]
class g_ratingTypeEnumeration(enumeration):
  error = InvalidRatingType
  valuelist = ["1", "2", "3", "4", "5"]
class g_reviewerTypeEnumeration(enumeration):
  error = InvalidReviewerType
  valuelist = ["editorial", "user"]
class g_salaryTypeEnumeration(enumeration):
  error = InvalidSalaryType
  valuelist = ["starting", "negotiable"]
class g_serviceTypeEnumeration(enumeration):
  error = InvalidServiceType
  valuelist = ['FedEx', 'UPS', 'DHL', 'Mail', 'Other', 'Overnight', 'Standard']
class g_float(text):
  # Numeric value, optionally followed by a unit word.
  # NOTE(review): the regex is unanchored at the end (unlike floatUnit),
  # so trailing garbage after a leading match is not rejected -- confirm
  # whether that looseness is intentional.
  def validate(self):
    import re
    if not re.match('\d+\.?\d*\s*\w*', self.value):
      self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class floatUnit(text):
  # Numeric value plus optional unit; fully anchored, so the whole value
  # must match.
  def validate(self):
    import re
    if not re.match('\d+\.?\d*\s*\w*$', self.value):
      self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class decimal(text):
  # Signed decimal number with no unit.
  # NOTE(review): reuses the InvalidFloatUnit event rather than a
  # decimal-specific one -- confirm intentional.
  def validate(self):
    import re
    if not re.match('[-+]?\d+\.?\d*\s*$', self.value):
      self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_year(text):
  # Year must be an integer between 1900 and four years past the current
  # local year.
  def validate(self):
    import time
    try:
      year = int(self.value)
      # deliberate raise so the out-of-range case reuses the except path
      # (also catches the ValueError from int() on non-numeric input)
      if year < 1900 or year > time.localtime()[0]+4: raise InvalidYear
    except:
      self.log(InvalidYear({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_intUnit(text):
  # Leading integer (commas tolerated as digit separators) must be
  # non-negative; unparseable input lands in the same error path.
  def validate(self):
    try:
      if int(self.value.split(' ')[0].replace(',','')) < 0: raise InvalidIntUnit
    except:
      self.log(InvalidIntUnit({"parent":self.parent.name, "element":self.name,
        "attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class maxten(validatorBase):
  # Limits an element to ten occurrences within its parent; logs TooMany
  # when ten same-named siblings are already recorded.
  def textOK(self):
    pass
  def prevalidate(self):
    # NOTE(review): whether this element is already counted in
    # parent.children at this point determines the exact cutoff -- confirm.
    if 10 == len([1 for child in self.parent.children if self.name==child]):
      self.log(TooMany({"parent":self.parent.name, "element":self.name}))
class in_reply_to(canonicaluri, xmlbase):
  # thr:in-reply-to: all four attributes are optional.  href and source
  # are validated via xmlbase, ref via canonicaluri, and type as a MIME
  # type.  self.value/self.name are repointed at each attribute in turn so
  # the inherited validators (which operate on self.value) can be reused.
  def getExpectedAttrNames(self):
    return [(None, u'href'), (None, u'ref'), (None, u'source'), (None, u'type')]
  def validate(self):
    if self.attrs.has_key((None, "href")):
      self.value = self.attrs.getValue((None, "href"))
      self.name = "href"
      xmlbase.validate(self)
    if self.attrs.has_key((None, "ref")):
      self.value = self.attrs.getValue((None, "ref"))
      self.name = "ref"
      canonicaluri.validate(self)
    if self.attrs.has_key((None, "source")):
      self.value = self.attrs.getValue((None, "source"))
      self.name = "source"
      xmlbase.validate(self)
    if self.attrs.has_key((None, "type")):
      self.value = self.attrs.getValue((None, "type"))
      if not mime_re.match(self.value):
        self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
      else:
        self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
########################################################################
# Extensions that you just gotta question #
########################################################################
class Questionable(extension_everywhere):
  # Atom elements appearing outside their normal context: each is handed
  # to its regular Atom validator rather than rejected outright.
  # NOTE(review): class-level mutable attribute (children = []) is shared
  # across instances unless the base class rebinds it per-instance -- confirm.
  children = []
  def do_atom_author(self):
    from author import author
    return author()
  def do_atom_category(self):
    from category import category
    return category()
  def do_atom_content(self):
    from content import content
    return content()
  def do_atom_contributor(self):
    from author import author
    return author()
  def do_atom_generator(self):
    from generator import generator
    return generator()
  def do_atom_icon(self):
    return rfc2396(), noduplicates()
  def do_atom_id(self):
    return canonicaluri(), noduplicates()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_logo(self):
    return rfc2396(), noduplicates()
  def do_atom_published(self):
    return rfc3339(), noduplicates()
  def do_atom_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_subtitle(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_summary(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_updated(self):
    return rfc3339(), noduplicates()
  # Atom Publishing Protocol elements
  def do_app_workspace(self):
    from service import workspace
    return workspace()
  def do_app_collection(self):
    from service import collection
    return collection()
  def do_app_categories(self):
    from categories import categories
    return categories()
| Python |
"""$Id: __init__.py 1027 2008-09-26 14:41:21Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1027 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import socket
# Give all network operations a 10-second timeout.  Python 2.3+ provides
# socket.setdefaulttimeout; older interpreters fall back to the third-party
# timeoutsocket module.  Timeout is the exception type callers catch.
if hasattr(socket, 'setdefaulttimeout'):
  socket.setdefaulttimeout(10)
  Timeout = socket.timeout
else:
  import timeoutsocket
  timeoutsocket.setDefaultSocketTimeout(10)
  Timeout = timeoutsocket.Timeout
import urllib2
import logging
from logging import *
from xml.sax import SAXException
from xml.sax.xmlreader import InputSource
import re
import xmlEncoding
import mediaTypes
from httplib import BadStatusLine
MAXDATALENGTH = 2000000
def _validate(aString, firstOccurrenceOnly, loggedEvents, base, encoding, selfURIs=None, mediaType=None):
  """Validate a feed held in aString; returns the SAXDispatcher validator.

  aString must already be a unicode string.  loggedEvents collected so far
  are merged into the validator; selfURIs lists the URIs the document may
  legitimately claim as its own.
  """
  from xml.sax import make_parser, handler
  from base import SAXDispatcher
  from exceptions import UnicodeError
  from cStringIO import StringIO
  # Special-case a known WordPress bug: a blank line before the XML
  # declaration, detected via the generator element.
  if re.match("^\s+<\?xml",aString) and re.search("<generator.*wordpress.*</generator>",aString):
    lt = aString.find('<'); gt = aString.find('>')
    if lt > 0 and gt > 0 and lt < gt:
      loggedEvents.append(logging.WPBlankLine({'line':1,'column':1}))
      # rearrange so that other errors can be found
      aString = aString[lt:gt+1]+aString[0:lt]+aString[gt+1:]
  # By now, aString should be Unicode
  source = InputSource()
  source.setByteStream(StringIO(xmlEncoding.asUTF8(aString)))
  validator = SAXDispatcher(base, selfURIs or [base], encoding)
  validator.setFirstOccurrenceOnly(firstOccurrenceOnly)
  # Pre-set the feed type for the two APP media types; everything else is
  # detected from the document itself.
  if mediaType == 'application/atomsvc+xml':
    validator.setFeedType(TYPE_APP_SERVICE)
  elif mediaType == 'application/atomcat+xml':
    validator.setFeedType(TYPE_APP_CATEGORIES)
  validator.loggedEvents += loggedEvents
  # experimental RSS-Profile support
  validator.rssCharData = [s.find('&#x')>=0 for s in aString.split('\n')]
  xmlver = re.match("^<\?\s*xml\s+version\s*=\s*['\"]([-a-zA-Z0-9_.:]*)['\"]",aString)
  if xmlver and xmlver.group(1)<>'1.0':
    validator.log(logging.BadXmlVersion({"version":xmlver.group(1)}))
  # Prefer an expat parser configured to tolerate a foreign DTD; fall back
  # to whatever SAX parser is available.
  try:
    from xml.sax.expatreader import ExpatParser
    class fake_dtd_parser(ExpatParser):
      def reset(self):
        ExpatParser.reset(self)
        self._parser.UseForeignDTD(1)
    parser = fake_dtd_parser()
  except:
    parser = make_parser()
  parser.setFeature(handler.feature_namespaces, 1)
  parser.setContentHandler(validator)
  parser.setErrorHandler(validator)
  parser.setEntityResolver(validator)
  if hasattr(parser, '_ns_stack'):
    # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
    # PyXML doesn't have this problem, and it doesn't have _ns_stack either
    parser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
  # Optional DTD validation pass via libxml2 (invoked by the dispatcher);
  # error lines are matched back by a random prefix.
  def xmlvalidate(log):
    import libxml2
    from StringIO import StringIO
    from random import random
    prefix="...%s..." % str(random()).replace('0.','')
    msg=[]
    libxml2.registerErrorHandler(lambda msg,str: msg.append(str), msg)
    input = libxml2.inputBuffer(StringIO(xmlEncoding.asUTF8(aString)))
    reader = input.newTextReader(prefix)
    reader.SetParserProp(libxml2.PARSER_VALIDATE, 1)
    ret = reader.Read()
    while ret == 1: ret = reader.Read()
    msg=''.join(msg)
    for line in msg.splitlines():
      if line.startswith(prefix): log(line.split(':',4)[-1].strip())
  validator.xmlvalidator=xmlvalidate
  try:
    parser.parse(source)
  except SAXException:
    pass
  except UnicodeError:
    import sys
    exctype, value = sys.exc_info()[:2]
    validator.log(logging.UnicodeError({"exception":value}))
  # RSS 1.0 documents get a second, RDF-level parse to catch invalid RDF.
  if validator.getFeedType() == TYPE_RSS1:
    try:
      from rdflib.syntax.parsers.RDFXMLHandler import RDFXMLHandler
      class Handler(RDFXMLHandler):
        ns_prefix_map = {}
        prefix_ns_map = {}
        def add(self, triple): pass
        def __init__(self, dispatcher):
          RDFXMLHandler.__init__(self, self)
          self.dispatcher=dispatcher
        def error(self, message):
          self.dispatcher.log(InvalidRDF({"message": message}))
      source.getByteStream().reset()
      parser.reset()
      parser.setContentHandler(Handler(parser.getContentHandler()))
      parser.setErrorHandler(handler.ErrorHandler())
      parser.parse(source)
    except:
      pass
  return validator
def validateStream(aFile, firstOccurrenceOnly=0, contentType=None, base=""):
  """Validate a feed read from a file-like object.

  Returns a dict with 'feedType' and 'loggedEvents'.  Raises
  ValidationFailure when the stream exceeds MAXDATALENGTH bytes.
  """
  loggedEvents = []
  if contentType:
    (mediaType, charset) = mediaTypes.checkValid(contentType, loggedEvents)
  else:
    (mediaType, charset) = (None, None)
  rawdata = aFile.read(MAXDATALENGTH)
  # one extra byte readable means the feed is over the size limit
  if aFile.read(1):
    raise ValidationFailure(logging.ValidatorLimit({'limit': 'feed length > ' + str(MAXDATALENGTH) + ' bytes'}))
  encoding, rawdata = xmlEncoding.decode(mediaType, charset, rawdata, loggedEvents, fallback='utf-8')
  validator = _validate(rawdata, firstOccurrenceOnly, loggedEvents, base, encoding, mediaType=mediaType)
  if mediaType and validator.feedType:
    mediaTypes.checkAgainstFeedType(mediaType, validator.feedType, validator.loggedEvents)
  return {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
def validateString(aString, firstOccurrenceOnly=0, fallback=None, base=""):
  """Validate a feed held in a string (bytes or unicode).

  Byte strings are decoded first; if decoding fails, aString becomes None
  and only the decoding events are returned.
  """
  loggedEvents = []
  if type(aString) != unicode:
    encoding, aString = xmlEncoding.decode("", None, aString, loggedEvents, fallback)
  else:
    encoding = "utf-8" # setting a sane (?) default
  if aString is not None:
    validator = _validate(aString, firstOccurrenceOnly, loggedEvents, base, encoding)
    return {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
  else:
    return {"loggedEvents": loggedEvents}
def validateURL(url, firstOccurrenceOnly=1, wantRawData=0):
  """validate RSS from URL, returns events list, or (events, rawdata) tuple"""
  loggedEvents = []
  request = urllib2.Request(url)
  request.add_header("Accept-encoding", "gzip, deflate")
  request.add_header("User-Agent", "FeedValidator/1.3")
  usock = None
  try:
    try:
      usock = urllib2.urlopen(request)
      rawdata = usock.read(MAXDATALENGTH)
      # one extra readable byte means the feed exceeds the size limit
      if usock.read(1):
        raise ValidationFailure(logging.ValidatorLimit({'limit': 'feed length > ' + str(MAXDATALENGTH) + ' bytes'}))
      # check for temporary redirects
      if usock.geturl()<>request.get_full_url():
        from httplib import HTTPConnection
        spliturl=url.split('/',3)
        if spliturl[0]=="http:":
          # re-fetch without following redirects: anything but a 301
          # (permanent) is reported as a temporary redirect
          conn=HTTPConnection(spliturl[2])
          conn.request("GET",'/'+spliturl[3].split("#",1)[0])
          resp=conn.getresponse()
          if resp.status<>301:
            loggedEvents.append(TempRedirect({}))
    except BadStatusLine, status:
      raise ValidationFailure(logging.HttpError({'status': status.__class__}))
    except urllib2.HTTPError, status:
      # An HTTP error body may still contain a feed (some servers return
      # feeds with error status codes); heuristically decide whether to
      # validate the body anyway or give up.
      rawdata = status.read()
      if len(rawdata) < 512 or 'content-encoding' in status.headers:
        loggedEvents.append(logging.HttpError({'status': status}))
        usock = status
      else:
        rawdata=re.sub('<!--.*?-->','',rawdata)
        lastline = rawdata.strip().split('\n')[-1].strip()
        if lastline in ['</rss>','</feed>','</rdf:RDF>', '</kml>']:
          loggedEvents.append(logging.HttpError({'status': status}))
          usock = status
        else:
          raise ValidationFailure(logging.HttpError({'status': status}))
    except urllib2.URLError, x:
      raise ValidationFailure(logging.HttpError({'status': x.reason}))
    except Timeout, x:
      raise ValidationFailure(logging.IOError({"message": 'Server timed out', "exception":x}))
    except Exception, x:
      raise ValidationFailure(logging.IOError({"message": x.__class__.__name__,
        "exception":x}))
    # advise compression, then undo whatever encoding the server applied
    if usock.headers.get('content-encoding', None) == None:
      loggedEvents.append(Uncompressed({}))
    if usock.headers.get('content-encoding', None) == 'gzip':
      import gzip, StringIO
      try:
        rawdata = gzip.GzipFile(fileobj=StringIO.StringIO(rawdata)).read()
      except:
        import sys
        exctype, value = sys.exc_info()[:2]
        event=logging.IOError({"message": 'Server response declares Content-Encoding: gzip', "exception":value})
        raise ValidationFailure(event)
    if usock.headers.get('content-encoding', None) == 'deflate':
      import zlib
      try:
        rawdata = zlib.decompress(rawdata, -zlib.MAX_WBITS)
      except:
        import sys
        exctype, value = sys.exc_info()[:2]
        event=logging.IOError({"message": 'Server response declares Content-Encoding: deflate', "exception":value})
        raise ValidationFailure(event)
    # KMZ is a zip archive: extract the contained .kml document
    if usock.headers.get('content-type', None) == 'application/vnd.google-earth.kmz':
      import tempfile, zipfile, os
      try:
        (fd, tempname) = tempfile.mkstemp()
        os.write(fd, rawdata)
        os.close(fd)
        zfd = zipfile.ZipFile(tempname)
        namelist = zfd.namelist()
        for name in namelist:
          if name.endswith('.kml'):
            rawdata = zfd.read(name)
        zfd.close()
        os.unlink(tempname)
      except:
        import sys
        value = sys.exc_info()[:1]
        event=logging.IOError({"message": 'Problem decoding KMZ', "exception":value})
        raise ValidationFailure(event)
    mediaType = None
    charset = None
    # Is the Content-Type correct?
    contentType = usock.headers.get('content-type', None)
    if contentType:
      (mediaType, charset) = mediaTypes.checkValid(contentType, loggedEvents)
    # Check for malformed HTTP headers
    for (h, v) in usock.headers.items():
      if (h.find(' ') >= 0):
        loggedEvents.append(HttpProtocolError({'header': h}))
    selfURIs = [request.get_full_url()]
    baseURI = usock.geturl()
    if not baseURI in selfURIs: selfURIs.append(baseURI)
    # Get baseURI from content-location and/or redirect information
    if usock.headers.get('content-location', None):
      from urlparse import urljoin
      baseURI=urljoin(baseURI,usock.headers.get('content-location', ""))
    elif usock.headers.get('location', None):
      from urlparse import urljoin
      baseURI=urljoin(baseURI,usock.headers.get('location', ""))
    if not baseURI in selfURIs: selfURIs.append(baseURI)
    usock.close()
    usock = None
    mediaTypes.contentSniffing(mediaType, rawdata, loggedEvents)
    encoding, rawdata = xmlEncoding.decode(mediaType, charset, rawdata, loggedEvents, fallback='utf-8')
    if rawdata is None:
      return {'loggedEvents': loggedEvents}
    rawdata = rawdata.replace('\r\n', '\n').replace('\r', '\n') # normalize EOL
    validator = _validate(rawdata, firstOccurrenceOnly, loggedEvents, baseURI, encoding, selfURIs, mediaType=mediaType)
    # Warn about mismatches between media type and feed version
    if mediaType and validator.feedType:
      mediaTypes.checkAgainstFeedType(mediaType, validator.feedType, validator.loggedEvents)
    params = {"feedType":validator.feedType, "loggedEvents":validator.loggedEvents}
    if wantRawData:
      params['rawdata'] = rawdata
    return params
  finally:
    try:
      if usock: usock.close()
    except:
      pass
# Public names of the feedvalidator package.
# NOTE(review): validateStream is defined above but absent from this list --
# confirm whether it should be exported.
__all__ = ['base',
           'channel',
           'compatibility',
           'image',
           'item',
           'logging',
           'rdf',
           'root',
           'rss',
           'skipHours',
           'textInput',
           'util',
           'validators',
           'validateURL',
           'validateString']
| Python |
"""$Id: en.py 1041 2009-02-18 00:19:04Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 1041 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
from feedvalidator.logging import *
# %-style templates used when rendering an event's position and repeat count.
line = "line %(line)s"
column = "column %(column)s"
# NOTE(review): the name is misspelled ("occurances") but is looked up by
# exactly this name from the message formatter — renaming would break it.
occurances = " (%(msgcount)s occurrences)"
# Maps each logging event class to its English message template.
# Templates are %-formatted with the event's parameter dict.
messages = {
  SAXError: "XML parsing error: %(exception)s",
  WPBlankLine: "Blank line before XML declaration",
  NotHtml: "%(message)s",
  UnicodeError: "%(exception)s (maybe a high-bit character?)",
  UndefinedElement: "Undefined %(parent)s element: %(element)s",
  MissingNamespace: "Missing namespace for %(element)s",
  MissingElement: "Missing %(parent)s element: %(element)s",
  MissingRecommendedElement: "%(parent)s should contain a %(element)s element",
  MissingAttribute: "Missing %(element)s attribute: %(attr)s",
  MissingRecommendedAttribute: "Missing recommended %(element)s attribute: %(attr)s",
  UnexpectedAttribute: "Unexpected %(attribute)s attribute on %(element)s element",
  NoBlink: "There is no blink element in RSS; use blogChannel:blink instead",
  NoThrWhen: "thr:when attribute obsolete; use thr:updated instead",
  # Fix: a second NoBlink entry ("There is no thr:when attribute in Atom;
  # use thr:updated instead") duplicated the key above and silently replaced
  # the RSS blink message at runtime; the thr:when guidance is already
  # carried by NoThrWhen, so the duplicate has been removed.
  InvalidWidth: "%(element)s must be between 1 and 144",
  InvalidHeight: "%(element)s must be between 1 and 400",
  InvalidHour: "%(element)s must be an integer between 0 and 23",
  InvalidDay: "%(element)s must be Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday",
  InvalidInteger: "%(element)s must be an integer",
  InvalidNonNegativeInteger: "%(element)s must be a non-negative integer",
  InvalidPositiveInteger: "%(element)s must be a positive integer",
  InvalidAlphanum: "%(element)s must be alphanumeric",
  InvalidLatitude: "%(element)s must be between -90 and 90",
  InvalidLongitude: "%(element)s must be between -180 and 180",
  InvalidCommaSeparatedIntegers: "%(element)s must be comma-separated integers",
  InvalidHttpGUID: "guid must be a full URL, unless isPermaLink attribute is false",
  InvalidUpdatePeriod: "%(element)s must be hourly, daily, weekly, monthly, or yearly",
  NotBlank: "%(element)s should not be blank",
  AttrNotBlank: "The %(attr)s attribute of %(element)s should not be blank",
  DuplicateElement: "%(parent)s contains more than one %(element)s",
  DuplicateSemantics: "A channel should not include both %(core)s and %(ext)s",
  DuplicateItemSemantics: "An item should not include both %(core)s and %(ext)s",
  DuplicateValue: "%(element)s values must not be duplicated within a feed",
  NonstdPrefix: '"%(preferred)s" is the preferred prefix for the namespace "%(ns)s"',
  ReservedPrefix: 'The prefix "%(prefix)s" generally is associated with the namespace "%(ns)s"',
  MediaRssNamespace: 'Missing trailing slash in mediaRSS namespace',
  InvalidContact: "Invalid email address",
  InvalidAddrSpec: "%(element)s must be an email address",
  InvalidLink: "%(element)s must be a valid URI",
  InvalidIRI: "%(element)s must be a valid IRI",
  InvalidFullLink: "%(element)s must be a full and valid URL",
  InvalidUriChar: "Invalid character in a URI",
  InvalidISO8601Date: "%(element)s must be an ISO8601 date",
  InvalidISO8601DateTime: "%(element)s must be an ISO8601 date-time",
  InvalidW3CDTFDate: "%(element)s must be an W3CDTF date",
  InvalidRFC2822Date: "%(element)s must be an RFC-822 date-time",
  IncorrectDOW: "Incorrect day of week",
  InvalidRFC3339Date: "%(element)s must be an RFC-3339 date-time",
  InvalidNPTTime: "%(attr)s must be an NPT-time",
  InvalidLanguage: "%(element)s must be an ISO-639 language code",
  InvalidURIAttribute: "%(attr)s attribute of %(element)s must be a valid URI",
  InvalidURLAttribute: "%(element)s must be a full URL",
  InvalidIntegerAttribute: "%(attr)s attribute of %(element)s must be a positive integer",
  InvalidBooleanAttribute: "%(attr)s attribute of %(element)s must be 'true' or 'false'",
  InvalidMIMEAttribute: "%(attr)s attribute of %(element)s must be a valid MIME type",
  ItemMustContainTitleOrDescription: "item must contain either title or description",
  ContainsHTML: "%(element)s should not contain HTML",
  ContainsEmail: "%(element)s should not include email address",
  ContainsUndeclaredHTML: "%(element)s should not contain HTML unless declared in the type attribute",
  NotEnoughHoursInTheDay: "skipHours can not contain more than 24 hour elements",
  EightDaysAWeek: "skipDays can not contain more than 7 day elements",
  SecurityRisk: "%(element)s should not contain %(tag)s tag",
  SecurityRiskAttr: "%(element)s should not contain %(attr)s attribute",
  ContainsRelRef: "%(element)s should not contain relative URL references",
  ContainsSystemEntity: "Feeds must not contain SYSTEM entities",
  InvalidContentMode: "mode must be 'xml', 'escaped', or 'base64'",
  InvalidMIMEType: "Not a valid MIME type",
  NotEscaped: "%(element)s claims to be escaped, but isn't",
  NotInline: "%(element)s claims to be inline, but may contain html",
  NotBase64: "%(element)s claims to be base64-encoded, but isn't",
  InvalidURN: "%(element)s is not a valid URN",
  InvalidUUID: "%(element)s is not a valid UUID",
  InvalidTAG: "%(element)s is not a valid TAG",
  InvalidURI: "%(element)s is not a valid URI",
  ObsoleteVersion: "This feed is an obsolete version",
  ObsoleteNamespace: "This feed uses an obsolete namespace",
  InvalidNamespace: "%(element)s is in an invalid namespace: %(namespace)s",
  InvalidDoctype: "This feed contains conflicting DOCTYPE and version information",
  DuplicateAtomLink: "Duplicate alternate links with the same type and hreflang",
  MissingHref: "%(element)s must have an href attribute",
  AtomLinkNotEmpty: "%(element)s should not have text (all data is in attributes)",
  BadCharacters: '%(element)s contains bad characters',
  BadXmlVersion: "Incorrect XML Version: %(version)s",
  UnregisteredAtomLinkRel: "Unregistered link relationship",
  HttpError: "Server returned %(status)s",
  IOError: "%(exception)s (%(message)s; misconfigured server?)",
  ObscureEncoding: "Obscure XML character encoding: %(encoding)s",
  NonstdEncoding: "This encoding is not mandated by the XML specification: %(encoding)s",
  UnexpectedContentType: '%(type)s should not be served with the "%(contentType)s" media type',
  EncodingMismatch: 'Your feed appears to be encoded as "%(encoding)s", but your server is reporting "%(charset)s"',
  UnknownEncoding: "Unknown XML character encoding: %(encoding)s",
  NotSufficientlyUnique: "The specified guid is not sufficiently unique",
  MissingEncoding: "No character encoding was specified",
  UnexpectedText: "Unexpected Text",
  ValidatorLimit: "Unable to validate, due to hardcoded resource limits (%(limit)s)",
  TempRedirect: "Temporary redirect",
  TextXml: "Content type of text/xml with no charset",
  Uncompressed: "Response is not compressed",
  HttpProtocolError: 'Response includes bad HTTP header name: "%(header)s"',
  NonCanonicalURI: 'Identifier "%(uri)s" is not in canonical form (the canonical form would be "%(curi)s")',
  InvalidRDF: 'RDF parsing error: %(message)s',
  InvalidDuration: 'Invalid duration',
  InvalidYesNo: '%(element)s must be "yes", "no"',
  InvalidYesNoClean: '%(element)s must be "yes", "no", or "clean"',
  TooLong: 'length of %(len)d exceeds the maximum allowable for %(element)s of %(max)d',
  InvalidItunesCategory: '%(text)s is not one of the predefined iTunes categories or sub-categories',
  ObsoleteItunesCategory: '%(text)s is an obsolete iTunes category or sub-category',
  InvalidKeywords: 'Use commas to separate keywords',
  InvalidTextType: 'type attribute must be "text", "html", or "xhtml"',
  MissingXhtmlDiv: 'Missing xhtml:div element',
  MissingSelf: 'Missing atom:link with rel="self"',
  MissingAtomSelfLink: 'Missing atom:link with rel="self"',
  DuplicateEntries: 'Two entries with the same id',
  DuplicateIds: 'All entries have the same id',
  MisplacedMetadata: '%(element)s must appear before all entries',
  MissingSummary: 'Missing summary',
  MissingTextualContent: 'Missing textual content',
  MissingContentOrAlternate: 'Missing content or alternate link',
  MissingSourceElement: "Missing %(parent)s element: %(element)s",
  MissingTypeAttr: "Missing %(element)s attribute: %(attr)s",
  HtmlFragment: "%(type)s type used for a document fragment",
  DuplicateUpdated: "Two entries with the same value for atom:updated",
  UndefinedNamedEntity: "Undefined named entity",
  ImplausibleDate: "Implausible date",
  UnexpectedWhitespace: "Whitespace not permitted here",
  SameDocumentReference: "Same-document reference",
  SelfDoesntMatchLocation: "Self reference doesn't match document location",
  InvalidOPMLVersion: 'The "version" attribute for the opml element must be 1.0 or 1.1.',
  MissingXmlURL: 'An <outline> element whose type is "rss" must have an "xmlUrl" attribute.',
  InvalidOutlineVersion: 'An <outline> element whose type is "rss" may have a version attribute, whose value must be RSS, RSS1, RSS2, or scriptingNews.',
  InvalidOutlineType: 'The type attribute on an <outline> element should be a known type.',
  InvalidExpansionState: '<expansionState> is a comma-separated list of line numbers.',
  InvalidTrueFalse: '%(element)s must be "true" or "false"',
  MissingOutlineType: 'An <outline> element with more than just a "text" attribute should have a "type" attribute indicating how the other attributes are to be interpreted.',
  MissingTitleAttr: 'Missing outline attribute: title',
  MissingUrlAttr: 'Missing outline attribute: url',
  NotUTF8: 'iTunes elements should only be present in feeds encoded as UTF-8',
  MissingItunesElement: 'Missing recommended iTunes %(parent)s element: %(element)s',
  UnsupportedItunesFormat: 'Format %(extension)s is not supported by iTunes',
  InvalidCountryCode: "Invalid country code: \"%(value)s\"",
  InvalidCurrencyUnit: "Invalid value for %(attr)s",
  InvalidFloat: "Invalid value for %(attr)s",
  InvalidFloatUnit: "Invalid value for %(attr)s",
  InvalidFullLocation: "Invalid value for %(attr)s",
  InvalidGender: "Invalid value for %(attr)s",
  InvalidIntUnit: "Invalid value for %(attr)s",
  InvalidLabel: "Invalid value for %(attr)s",
  InvalidLocation: "Invalid value for %(attr)s",
  InvalidMaritalStatus: "Invalid value for %(attr)s",
  InvalidPaymentMethod: "Invalid value for %(attr)s",
  InvalidPercentage: '%(element)s must be a percentage',
  InvalidPriceType: "Invalid value for %(attr)s",
  InvalidRatingType: "Invalid value for %(attr)s",
  InvalidReviewerType: "Invalid value for %(attr)s",
  InvalidSalaryType: "Invalid value for %(attr)s",
  InvalidServiceType: "Invalid value for %(attr)s",
  InvalidValue: "Invalid value for %(attr)s",
  InvalidYear: "Invalid value for %(attr)s",
  TooMany: "%(parent)s contains more than ten %(element)s elements",
  InvalidPermalink: "guid must be a full URL, unless isPermaLink attribute is false",
  NotInANamespace: "Missing namespace for %(element)s",
  UndeterminableVocabulary:"Missing namespace for %(element)s",
  SelfNotAtom: '"self" link references a non-Atom representation',
  InvalidFormComponentName: 'Invalid form component name',
  ImageLinkDoesntMatch: "Image link doesn't match channel link",
  ImageUrlFormat: "Image not in required format",
  ProblematicalRFC822Date: "Problematical RFC 822 date-time value",
  DuplicateEnclosure: "item contains more than one enclosure",
  MissingItunesEmail: "The recommended <itunes:email> element is missing",
  MissingGuid: "%(parent)s should contain a %(element)s element",
  UriNotIri: "IRI found where URL expected",
  ObsoleteWikiNamespace: "Obsolete Wiki Namespace",
  DuplicateDescriptionSemantics: "Avoid %(element)s",
  InvalidCreditRole: "Invalid Credit Role",
  InvalidMediaTextType: 'type attribute must be "plain" or "html"',
  InvalidMediaHash: 'Invalid Media Hash',
  InvalidMediaRating: 'Invalid Media Rating',
  InvalidMediaRestriction: "media:restriction must be 'all' or 'none'",
  InvalidMediaRestrictionRel: "relationship must be 'allow' or 'disallow'",
  InvalidMediaRestrictionType: "type must be 'country' or 'uri'",
  InvalidMediaMedium: 'Invalid content medium: "%(value)s"',
  InvalidMediaExpression: 'Invalid content expression: "%(value)s"',
  DeprecatedMediaAdult: 'media:adult is deprecated',
  MediaGroupWithoutAlternatives: 'media:group must have multiple media:content children',
  CommentRSS: 'wfw:commentRSS should be wfw:commentRss',
  NonSpecificMediaType: '"%(contentType)s" media type is not specific enough',
  DangerousStyleAttr: "style attribute contains potentially dangerous content",
  NotURLEncoded: "%(element)s must be URL encoded",
  InvalidLocalRole: "Invalid local role",
  InvalidEncoding: "Invalid character encoding",
  ShouldIncludeExample: "OpenSearchDescription should include an example Query",
  InvalidAdultContent: "Non-boolean value for %(element)s",
  InvalidLocalParameter: "Invalid local parameter name",
  UndeclaredPrefix: "Undeclared %(element)s prefix",
  UseOfExtensionAttr: "Use of extension attribute on RSS 2.0 core element: %(attribute)s",
  DeprecatedDTD: "The use of this DTD has been deprecated by Netscape",
  MisplacedXHTMLContent: "Misplaced XHTML content",
  SchemeNotIANARegistered: "URI scheme not IANA registered",
  InvalidCoord: "Invalid coordinates",
  InvalidCoordList: "Invalid coordinate list",
  CoordComma: "Comma found in coordinate pair",
  AvoidNamespacePrefix: "Avoid Namespace Prefix: %(prefix)s",
  Deprecated: "%(element)s has been superceded by %(replacement)s.",
  DeprecatedRootHref: "root:// URLs have been superceded by full http:// URLs",
  InvalidAltitudeMode: "Invalid altitudeMode",
  InvalidAngle: "%(element)s must be between -360 and 360",
  InvalidColor: "Not a valid color",
  InvalidColorMode: "Invalid colorMode.",
  InvalidItemIconState: "Invalid state for Icon",
  InvalidListItemType: "Invalid list item type",
  InvalidKmlCoordList: "Invalid coordinate list. Make sure that coordinates are of the form longitude,latitude or longitude,latitude,altitude and seperated by a single space. It is also a good idea to avoid line breaks or other extraneous white space",
  InvalidKmlLatitude: "Invalid latitude found within coordinates. Latitudes have to be between -90 and 90.",
  InvalidKmlLongitude: "Invalid longitude found within coordinates. Longitudes have to be between -180 and 180.",
  InvalidKmlMediaType: "%(contentType)s is an invalid KML media type. Use application/vnd.google-earth.kml+xml or application/vnd.google-earth.kmz",
  InvalidKmlUnits: "Invalid units.",
  InvalidRefreshMode: "Invalid refreshMode",
  InvalidSchemaFieldType: "Invalid Schema field type",
  InvalidStyleState: "Invalid key for StyleMap.",
  InvalidViewRefreshMode: "Invalid viewRefreshMode.",
  InvalidZeroOne: "Invalid value. Should be 0 or 1.",
  MissingId: "%(parent)s should contain a %(element)s attribute. This is important if you want to link directly to features.",
  InvalidSseType: "sx:related type must be either 'aggregated' or 'compete'",
  FeedHistoryRelInEntry: "%(rel)s link relation found in entry",
  LinkPastEnd: "%(rel)s link in %(self)s entry in list",
  FeedRelInCompleteFeed: "%(rel)s link relation found in complete feed",
  MissingCurrentInArchive: "Current link not found in archive feed",
  CurrentNotSelfInCompleteFeed: "Current not self in complete feed",
  ArchiveIncomplete: "Archive incomplete",
  RelativeSelf: "Relative href value on self link",
  ConflictingCatAttr: "Categories can't have both href and %(attr)s attributes",
  ConflictingCatChildren: "Categories can't have both href attributes and children",
  UndefinedParam: "Undefined media-range parameter",
  CharacterData: 'Encode "&" and "<" in plain text using hexadecimal character references.',
  EmailFormat: 'Email address is not in the recommended format',
  MissingRealName: 'Email address is missing real name',
  MisplacedItem: 'Misplaced Item',
  ImageTitleDoesntMatch: "Image title doesn't match channel title",
  AvoidTextInput: "Avoid Text Input",
  NeedDescriptionBeforeContent: "Ensure description precedes content:encoded",
  SlashDate: "Ensure lastBuildDate is present when slash:comments is used",
  UseZeroForMidnight: "Use zero for midnight",
  UseZeroForUnknown: "Use zero for unknown length",
  UnknownHost: "Unknown host",
  UnknownNamespace: "Use of unknown namespace: %(namespace)s",
  IntegerOverflow: "%(element)s value too large",
  InvalidNSS: "Invalid Namespace Specific String: %(element)s",
  SinceAfterUntil: "Since After until",
  MissingByAndWhenAttrs: "Missing by and when attributes",
  QuestionableUsage: "Undocumented use of %(element)s",
  InvalidRSSVersion: "Invalid RSS Version",
}
| Python |
"""$Id: __init__.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
| Python |
from base import validatorBase
from validators import *
from extension import extension_everywhere
class service(validatorBase, extension_everywhere):
  """app:service document root; must contain at least one app:workspace."""
  def getExpectedAttrNames(self):
    # No attributes are expected (earlier drafts allowed scheme/fixed).
    return []
  def validate(self):
    if "app_workspace" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"app:workspace"}))
  def do_app_workspace(self):
    return workspace()
class workspace(validatorBase, extension_everywhere):
  """app:workspace; requires an atom:title and may hold app:collections."""
  def validate(self):
    if "atom_title" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"atom:title"}))
  def do_app_collection(self):
    return collection()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
class collection(validatorBase, extension_everywhere):
  """app:collection; requires an href attribute (IRI) and an atom:title."""
  def getExpectedAttrNames(self):
    return [(None,u'href')]
  def prevalidate(self):
    # href is mandatory and must be a valid IRI (RFC 3987).
    self.validate_required_attribute((None,'href'), rfc3987)
  def validate(self):
    if not "atom_title" in self.children:
      self.log(MissingElement({"parent":self.name, "element":"atom:title"}))
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_title(self):
    # An unprefixed <title> is only acceptable when Atom is a default
    # namespace; record it as atom_title so validate() finds it.
    from root import atom_namespace
    assert(atom_namespace in self.dispatcher.defaultNamespaces)
    self.child = 'atom_title'
    return self.do_atom_title()
  def do_app_categories(self):
    from categories import categories
    return categories()
  def do_app_accept(self):
    # Fix: removed an unused "from categories import categories" left over
    # from do_app_categories; app:accept holds a media range, not a
    # categories document.
    return MediaRange()
| Python |
"""$Id: author.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# author element.
#
class author(validatorBase):
  """Person construct for RSS 2.0, RSS 1.0 (FOAF) and Atom authors."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
  def validate(self):
    # Either a plain <name> or an <atom:name> satisfies the requirement.
    if "name" not in self.children and "atom_name" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"name"}))
  def do_name(self):
    return nonhtml(), nonemail(), nonblank(), noduplicates()
  def do_email(self):
    return addr_spec(), noduplicates()
  def do_uri(self):
    return nonblank(), rfc3987(), nows(), noduplicates()
  def do_foaf_workplaceHomepage(self):
    return rdfResourceURI()
  def do_foaf_homepage(self):
    return rdfResourceURI()
  def do_foaf_weblog(self):
    return rdfResourceURI()
  def do_foaf_plan(self):
    return text()
  def do_foaf_firstName(self):
    return text()
  def do_xhtml_div(self):
    from content import diveater
    return diveater()
  # Atom children share the validation rules of their unprefixed twins.
  do_atom_name = do_name
  do_atom_email = do_email
  do_atom_uri = do_uri
| Python |
"""$Id: image.py 988 2008-03-12 18:22:48Z sa3ruby $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 988 $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from extension import extension_everywhere
#
# image element.
#
class image(validatorBase, extension_everywhere):
  """Channel image: handles RSS 0.9x/2.0, RSS 1.0 and RSS 1.1 variants."""
  def getExpectedAttrNames(self):
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
      (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about'),
      (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
  def validate(self):
    # image is a container element; bare character data is unexpected
    if self.value.strip():
      self.log(UnexpectedText({"parent":self.parent.name, "element":"image"}))
    if self.attrs.has_key((rdfNS,"resource")):
      return # RSS 1.0 reference form: no children required
    if "title" not in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    if "url" not in self.children:
      self.log(MissingElement({"parent":self.name, "element":"url"}))
    if self.attrs.has_key((rdfNS,"parseType")):
      return # RSS 1.1 form: link is not required
    if "link" not in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
  def do_title(self):
    return title(), noduplicates()
  def do_link(self):
    return link(), noduplicates()
  def do_url(self):
    return url(), noduplicates()
  def do_width(self):
    return width(), noduplicates()
  def do_height(self):
    return height(), noduplicates()
  def do_description(self):
    return nonhtml(), noduplicates()
  def do_dc_creator(self):
    return text()
  def do_dc_subject(self):
    return text() # duplicates allowed
  def do_dc_date(self):
    return w3cdtf(), noduplicates()
  def do_cc_license(self):
    return eater()
class link(rfc2396_full):
  """Image link; should mirror the enclosing channel's link."""
  def validate(self):
    rfc2396_full.validate(self)
    channel = self.parent.parent
    if getattr(channel, 'link', None) and channel.link != self.value:
      self.log(ImageLinkDoesntMatch({"parent":self.parent.name, "element":self.name}))
class url(rfc2396_full):
  """Image URL; should point at a GIF, JPEG or PNG resource."""
  def validate(self):
    rfc2396_full.validate(self)
    import re
    extension = self.value.split('.')[-1].lower()
    # Only flag when the suffix looks like a real file extension.
    if re.match("^\w+$", extension) and extension not in ('jpg','jpeg','gif','png'):
      self.log(ImageUrlFormat({"parent":self.parent.name, "element":self.name}))
class title(nonhtml, noduplicates):
  """Image title; non-blank, HTML-free, and matching the channel title."""
  def validate(self):
    if self.value.strip():
      self.log(ValidTitle({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
    nonhtml.validate(self)
    channel = self.parent.parent
    if getattr(channel, 'title', None) and channel.title != self.value:
      self.log(ImageTitleDoesntMatch({"parent":self.parent.name, "element":self.name}))
class width(text, noduplicates):
  """Image width in pixels: an integer in the range 1..144."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 0 < pixels <= 144:
      self.log(ValidWidth({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(InvalidWidth({"parent":self.parent.name, "element":self.name, "value":self.value}))
class height(text, noduplicates):
  """Image height in pixels: an integer in the range 1..400."""
  def validate(self):
    try:
      pixels = int(self.value)
    except ValueError:
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    if 0 < pixels <= 400:
      self.log(ValidHeight({"parent":self.parent.name, "element":self.name}))
    else:
      self.log(InvalidHeight({"parent":self.parent.name, "element":self.name, "value":self.value}))
| Python |
from validators import *
class media_elements:
  """Mixin of dispatch methods for Media RSS children, shared by the
  containers that may carry them (channel, item, media:group, media:content)."""
  def do_media_adult(self):
    # media:adult was superseded by media:rating's urn:simple scheme
    self.log(DeprecatedMediaAdult({"parent":self.name, "element":"media:adult"}))
    return truefalse(), noduplicates()
  def do_media_category(self):
    return media_category()
  def do_media_copyright(self):
    return media_copyright(), noduplicates()
  def do_media_credit(self):
    return media_credit()
  def do_media_description(self):
    # description is validated with the same rules as media:title
    return media_title(), noduplicates()
  def do_media_keywords(self):
    return text()
  def do_media_hash(self):
    return media_hash()
  def do_media_player(self):
    return media_player()
  def do_media_rating(self):
    return media_rating()
  def do_media_restriction(self):
    return media_restriction()
  def do_media_text(self):
    return media_text()
  def do_media_title(self):
    return media_title(), noduplicates()
  def do_media_thumbnail(self):
    return media_thumbnail()
class media_category(nonhtml,rfc2396_full):
  """media:category; checks the label (no HTML) and scheme (URI) attributes."""
  def getExpectedAttrNames(self):
    return [(None,u'label'),(None, u'scheme')]
  def prevalidate(self):
    # Temporarily repoint name/value so the mixin validators run per-attribute.
    self.name, self.value = "label", self.attrs.get((None,u'label'))
    if self.value:
      nonhtml.validate(self)
    self.name, self.value = "scheme", self.attrs.get((None,u'scheme'))
    if self.value:
      rfc2396_full.validate(self)
    self.name, self.value = "media_category", ""
class media_copyright(nonhtml,rfc2396_full):
  """media:copyright; checks that the url attribute, if given, is a full URI."""
  def getExpectedAttrNames(self):
    return [(None,u'url')]
  def prevalidate(self):
    # Repoint name/value so the URI mixin validates the attribute.
    self.name, self.value = "url", self.attrs.get((None,u'url'))
    if self.value:
      rfc2396_full.validate(self)
    self.name, self.value = "media_copyright", ""
class media_credit(text,rfc2396_full):
  """media:credit; validates the role against the EBU role list (the default
  urn:ebu scheme) and validates any other scheme as a full URI."""
  # Known EBU role names; roles under the urn:ebu scheme must be one of these.
  # NOTE(review): the list contains repeated entries ("editor", "producer");
  # harmless for membership tests, but presumably unintentional — confirm.
  EBU = [
    "actor", "adaptor", "anchor person", "animal trainer", "animator",
    "announcer", "armourer", "art director", "artist/performer",
    "assistant camera", "assistant chief lighting technician",
    "assistant director", "assistant producer", "assistant visual editor",
    "author", "broadcast assistant", "broadcast journalist", "camera operator",
    "carpenter", "casting", "causeur", "chief lighting technician", "choir",
    "choreographer", "clapper loader", "commentary or commentator",
    "commissioning broadcaster", "composer", "computer programmer",
    "conductor", "consultant", "continuity checker", "correspondent",
    "costume designer", "dancer", "dialogue coach", "director",
    "director of photography", "distribution company", "draughtsman",
    "dresser", "dubber", "editor/producer", "editor", "editor", "ensemble",
    "executive producer", "expert", "fight director", "floor manager",
    "focus puller", "foley artist", "foley editor", "foley mixer",
    "graphic assistant", "graphic designer", "greensman", "grip",
    "hairdresser", "illustrator", "interviewed guest", "interviewer",
    "key character", "key grip", "key talents", "leadman", "librettist",
    "lighting director", "lighting technician", "location manager",
    "lyricist", "make up artist", "manufacturer", "matte artist",
    "music arranger", "music group", "musician", "news reader", "orchestra",
    "participant", "photographer", "post", "producer", "production assistant",
    "production company", "production department", "production manager",
    "production secretary", "programme production researcher",
    "property manager", "publishing company", "puppeteer", "pyrotechnician",
    "reporter", "rigger", "runner", "scenario", "scenic operative",
    "script supervisor", "second assistant camera",
    "second assistant director", "second unit director", "set designer",
    "set dresser", "sign language", "singer", "sound designer", "sound mixer",
    "sound recordist", "special effects", "stunts", "subtitles",
    "technical director", "term", "translation", "transportation manager",
    "treatment/programme proposal", "vision mixer", "visual editor",
    "visual effects", "wardrobe", "witness",
    # awaiting confirmation
    "artist", "performer", "editor", "producer", "treatment",
    "treatment proposal", "programme proposal",
  ]
  def getExpectedAttrNames(self):
    return [(None, u'role'),(None,u'scheme')]
  def prevalidate(self):
    # The scheme defaults to urn:ebu when absent.
    scheme = self.attrs.get((None, 'scheme')) or 'urn:ebu'
    role = self.attrs.get((None, 'role'))
    if role:
      # urn:ebu roles must be in the list above; all roles must be lowercase.
      if scheme=='urn:ebu' and role not in self.EBU:
        self.log(InvalidCreditRole({"parent":self.parent.name, "element":self.name, "attr":"role", "value":role}))
      elif role != role.lower():
        self.log(InvalidCreditRole({"parent":self.parent.name, "element":self.name, "attr":"role", "value":role}))
    # Any non-default scheme must itself be a full URI; name/value are
    # repointed temporarily so the URI mixin can do the work.
    self.value = scheme
    self.name = "scheme"
    if scheme != 'urn:ebu': rfc2396_full.validate(self)
    self.name = "media_credit"
    self.value = ""
class media_hash(text):
  """media:hash; a hex digest whose length must match the algo attribute."""
  def getExpectedAttrNames(self):
    return [(None,u'algo')]
  def prevalidate(self):
    self.algo = self.attrs.get((None, 'algo'))
    if self.algo and self.algo not in ('md5', 'sha-1'):
      self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "attr":"algo", "value":self.algo}))
  def validate(self):
    self.value = self.value.strip()
    if not re.match("^[0-9A-Za-z]+$",self.value):
      self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "value":self.value}))
      return
    # sha-1 digests are 40 hex characters; anything else is treated as md5 (32).
    expected = 40 if self.algo == 'sha-1' else 32
    if len(self.value) != expected:
      self.log(InvalidMediaHash({"parent":self.parent.name, "element":self.name, "algo":self.algo, "value":self.value}))
class media_rating(rfc2396_full):
  """media:rating; the value must fit the (possibly defaulted) scheme."""
  def getExpectedAttrNames(self):
    return [(None, u'scheme')]
  def validate(self):
    scheme = self.attrs.get((None, 'scheme')) or 'urn:simple'
    # Closed-vocabulary schemes and their permitted values.
    known = {
      'urn:simple': ['adult', 'nonadult'],
      'urn:mpaa': ['g', 'm', 'nc-17', 'pg', 'pg-13', 'r', 'x'],
      'urn:v-chip': ['14+', '18+', 'c', 'c8', 'g', 'pg',
        'tv-14', 'tv-g', 'tv-ma', 'tv-pg', 'tv-y', 'tv-y7', 'tv-y7-fv'],
    }
    if scheme in known:
      if self.value not in known[scheme]:
        self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
    elif scheme == 'urn:icra':
      # ICRA label: "r (code code ...)" per the ICRA vocabulary
      code = '([nsvlocx]z [01]|(n[a-c]|s[a-f]|v[a-j]|l[a-c]|o[a-h]|c[a-b]|x[a-e]) 1)'
      if not re.match(r"^r \(%s( %s)*\)$" %(code,code),self.value):
        self.log(InvalidMediaRating({"parent":self.parent.name, "element":self.name, "scheme":scheme, "value":self.value}))
    else:
      # Unknown scheme: validate the scheme itself as a full URI.
      self.value = scheme
      self.name = 'scheme'
      rfc2396_full.validate(self)
class media_restriction(text,rfc2396_full,iso3166):
  """media:restriction; relationship plus a country list, a URI, or all/none."""
  def getExpectedAttrNames(self):
    return [(None, u'relationship'),(None,u'type')]
  def validate(self):
    rel = self.attrs.get((None, 'relationship'))
    if not rel:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"relationship"}))
    elif rel not in ('allow','disallow'):
      self.log(InvalidMediaRestrictionRel({"parent":self.parent.name, "element":self.name, "attr":"relationship", "value":rel}))
    restriction_type = self.attrs.get((None, 'type'))
    if not restriction_type:
      # No type: the content itself must be "all" or "none" (or empty)
      if self.value and self.value not in ('all','none'):
        self.log(InvalidMediaRestriction({"parent":self.parent.name, "element":self.name, "value":self.value}))
    elif restriction_type == 'country':
      # Reuse the iso3166 mixin on each space-separated country code.
      self.name = 'country'
      for self.value in self.value.upper().split(' '):
        iso3166.validate(self)
    elif restriction_type == 'uri':
      rfc2396_full.validate(self)
    else:
      self.log(InvalidMediaRestrictionType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":restriction_type}))
class media_player(validatorBase,positiveInteger,rfc2396_full):
  """media:player; url is required, height/width must be positive integers."""
  def getExpectedAttrNames(self):
    return [(None,u'height'),(None,u'url'),(None, u'width')]
  def validate(self):
    self.value = self.attrs.get((None, 'url'))
    if self.value:
      self.name = "url"
      rfc2396_full.validate(self)
    else:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"url"}))
    # Validate each dimension attribute, if present, as a positive integer.
    for attr in ('height', 'width'):
      self.value = self.attrs.get((None, attr))
      self.name = attr
      if self.value: positiveInteger.validate(self)
class media_text(nonhtml):
  """media:text; timed text with NPT start/end and a plain/html type."""
  def getExpectedAttrNames(self):
    return [(None,u'end'),(None,u'lang'),(None,u'start'),(None, u'type')]
  def prevalidate(self):
    self.type = self.attrs.get((None, 'type'))
    if self.type and self.type not in ('plain', 'html'):
      self.log(InvalidMediaTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
    # start and end must both be normal-play-time values
    for attr in ('start', 'end'):
      stamp = self.attrs.get((None, attr))
      if stamp and not media_thumbnail.npt_re.match(stamp):
        self.log(InvalidNPTTime({"parent":self.parent.name, "element":self.name, "attr":attr, "value":stamp}))
      else:
        self.log(ValidNPTTime({"parent":self.parent.name, "element":self.name, "attr":attr, "value":stamp}))
    lang = self.attrs.get((None, 'lang'))
    if lang: iso639_validate(self.log,lang,'lang',self.parent)
  def validate(self):
    if self.type == 'html':
      self.validateSafe(self.value)
    else:
      nonhtml.validate(self, ContainsUndeclaredHTML)
class media_title(nonhtml):
  """media:title (also reused for media:description); plain or html text."""
  def getExpectedAttrNames(self):
    return [(None, u'type')]
  def prevalidate(self):
    self.type = self.attrs.get((None, 'type'))
    if self.type and self.type not in ('plain', 'html'):
      self.log(InvalidMediaTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
  def validate(self):
    if self.type == 'html':
      self.validateSafe(self.value)
    else:
      nonhtml.validate(self, ContainsUndeclaredHTML)
class media_thumbnail(validatorBase,positiveInteger,rfc2396_full):
  """Validates media:thumbnail: a representative image for the media object,
  with a url plus optional height, width, and NPT 'time' offset."""
  # RFC 2326 Normal Play Time: "now", seconds (with optional fraction), or
  # hh:mm:ss with an optional fraction.  The alternatives are grouped so the
  # ^/$ anchors apply to every alternative; in the previous pattern only the
  # first/last alternative was anchored, so strings such as "5abc" slipped
  # through via the unanchored middle alternative.
  npt_re = re.compile(r"^(now|\d+(\.\d*)?|\d+:\d\d:\d\d(\.\d*)?)$")
  def getExpectedAttrNames(self):
    return [(None,u'height'),(None,u'time'),(None,u'url'),(None, u'width')]
  def validate(self):
    time = self.attrs.get((None, 'time'))
    if time and not media_thumbnail.npt_re.match(time):
      self.log(InvalidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"time", "value":time}))
    elif time:
      # Only log ValidNPTTime when the optional attribute is present;
      # previously it was logged (with value None) for a missing attribute.
      self.log(ValidNPTTime({"parent":self.parent.name, "element":self.name, "attr":"time", "value":time}))
    self.value = self.attrs.get((None, 'url'))
    if self.value:
      self.name = "url"
      rfc2396_full.validate(self)
    else:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"url"}))
    self.value = self.attrs.get((None, 'height'))
    self.name = "height"
    if self.value: positiveInteger.validate(self)
    self.value = self.attrs.get((None, 'width'))
    self.name = "width"
    if self.value: positiveInteger.validate(self)
from extension import extension_everywhere
class media_content(validatorBase, media_elements, extension_everywhere,
    positiveInteger, rfc2396_full, truefalse, nonNegativeInteger):
  """Validates media:content and its attributes."""
  # Anchored non-negative decimal, used for bitrate/duration/framerate/
  # samplingrate.  The previous inline pattern was unanchored at the end,
  # so values such as "12abc" were accepted as valid floats.
  _float_re = re.compile(r'\d+\.?\d*$')
  def getExpectedAttrNames(self):
    return [
      (None,u'bitrate'),
      (None,u'channels'),
      (None,u'duration'),
      (None,u'expression'),
      (None,u'fileSize'),
      (None,u'framerate'),
      (None,u'height'),
      (None,u'isDefault'),
      (None,u'lang'),
      (None,u'medium'),
      (None,u'samplingrate'),
      (None,u'type'),
      (None,u'url'),
      (None,u'width')
    ]
  def _check_float(self, attr):
    # Log InvalidFloat unless the attribute is absent or a decimal number.
    self.value = self.attrs.get((None, attr))
    if self.value and not media_content._float_re.match(self.value):
      self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
                "attr": attr, "value":self.value}))
  def validate(self):
    # NOTE: self.value/self.name are shared scratch state consumed by the
    # mixed-in validators; the per-attribute order below is significant only
    # in that each check stages its own value first.
    self._check_float(u'bitrate')
    self.value = self.attrs.get((None, 'channels'))
    self.name = "channels"
    if self.value: nonNegativeInteger.validate(self)
    self._check_float(u'duration')
    self.value = self.attrs.get((None,u'expression'))
    if self.value and self.value not in ['sample', 'full', 'nonstop']:
      self.log(InvalidMediaExpression({"parent":self.parent.name, "element":self.name, "value": self.value}))
    self.value = self.attrs.get((None, 'fileSize'))
    self.name = "fileSize"
    if self.value: positiveInteger.validate(self)
    self._check_float(u'framerate')
    self.value = self.attrs.get((None, 'height'))
    self.name = "height"
    if self.value: positiveInteger.validate(self)
    self.value = self.attrs.get((None, 'isDefault'))
    if self.value: truefalse.validate(self)
    self.value = self.attrs.get((None, 'lang'))
    if self.value: iso639_validate(self.log,self.value,'lang',self.parent)
    self.value = self.attrs.get((None,u'medium'))
    if self.value and self.value not in ['image', 'audio', 'video', 'document', 'executable']:
      self.log(InvalidMediaMedium({"parent":self.parent.name, "element":self.name, "value": self.value}))
    self._check_float(u'samplingrate')
    self.value = self.attrs.get((None,u'type'))
    if self.value and not mime_re.match(self.value):
      self.log(InvalidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":'type'}))
    self.name = "url"
    self.value = self.attrs.get((None,u'url'))
    if self.value: rfc2396_full.validate(self)
    self.value = self.attrs.get((None, 'width'))
    self.name = "width"
    if self.value: positiveInteger.validate(self)
class media_group(validatorBase, media_elements):
  """Validates media:group: a container for alternative renditions of the
  same content, so it should hold at least two media:content children."""
  def do_media_content(self):
    return media_content()
  def validate(self):
    alternatives = sum(1 for child in self.children if child == 'media_content')
    if alternatives < 2:
      self.log(MediaGroupWithoutAlternatives({}))
| Python |
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parse a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

        ('application', 'xhtml', {'q': '0.5'})
    """
    parts = mime_type.split(';')
    params = dict([tuple([s.strip() for s in param.split('=', 1)])
                   for param in parts[1:]])
    full_type = parts[0].strip()
    # The Java URLConnection class sends an Accept header that includes a
    # single '*'.  Turn it into the legal wildcard '*/*'.
    if full_type == '*':
        full_type = '*/*'
    (type, subtype) = full_type.split('/')
    return (type.strip(), subtype.strip(), params)

def parse_media_range(range):
    """Parse a media-range into its component parts.

    Like parse_mime_type(), but additionally guarantees that there is a
    value for 'q' in the params dictionary: a missing, empty, non-numeric,
    or out-of-range quality value is normalized to '1'.  (Previously a
    non-numeric q such as 'q=x' raised ValueError.)
    """
    (type, subtype, params) = parse_mime_type(range)
    try:
        q = float(params['q'])
    except (KeyError, ValueError):
        q = None
    # NOTE: q == 0 is normalized to 1 as well, matching this module's
    # historical behavior (RFC 2616 would treat q=0 as "not acceptable").
    if q is None or not 0 < q <= 1:
        params['q'] = '1'
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Returns a (fitness, q) tuple for the best match of 'mime_type' against
    'parsed_ranges' (which must already have been parsed by
    parse_media_range()), or (-1, 0) if nothing matched.
    """
    (target_type, target_subtype, target_params) = parse_media_range(mime_type)
    best_fitness = -1
    best_fit_q = 0
    for (range_type, range_subtype, range_params) in parsed_ranges:
        if range_type != target_type and range_type != '*' and target_type != '*':
            continue
        if range_subtype != target_subtype and range_subtype != '*' and target_subtype != '*':
            continue
        # Exact type/subtype matches outweigh wildcard matches; matching
        # non-q parameters break the remaining ties.
        fitness = 100 if range_type == target_type else 0
        if range_subtype == target_subtype:
            fitness += 10
        for (key, value) in target_params.items():
            if key != 'q' and range_params.get(key) == value:
                fitness += 1
        if fitness > best_fitness:
            best_fitness = fitness
            best_fit_q = range_params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Return the 'q' of the best match for a mime-type, or 0 if none.

    Behaves the same as quality() except that 'parsed_ranges' must already
    have been parsed by parse_media_range().
    """
    (fitness, q) = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    'ranges' is a comma-separated, Accept-header-style string.  For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
                text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed = [parse_media_range(part) for part in ranges.split(',')]
    return quality_parsed(mime_type, parsed)
def best_match(supported, header):
    """Return the member of 'supported' that best satisfies an Accept header.

    'supported' is a list of mime-types sorted in order of increasing
    desirability (later entries win ties); 'header' must conform to the
    format of the HTTP Accept: header.  Returns '' when nothing in
    'supported' is acceptable (best quality is 0).

    >>> best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    parsed_header = [parse_media_range(r)
                     for r in _filter_blank(header.split(','))]
    weighted_matches = [
        (fitness_and_quality_parsed(mime_type, parsed_header), pos, mime_type)
        for (pos, mime_type) in enumerate(supported)]
    weighted_matches.sort()
    # The last entry now has the best (fitness, quality); the position
    # component breaks ties in favor of later (more desirable) entries.
    ((best_fitness, best_q), pos, best_type) = weighted_matches[-1]
    return best_type if best_q else ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# -*- coding: utf-8 -*-
#old way
from distutils.core import setup
#new way
#from setuptools import setup, find_packages
# Register the single-module 'mimeparse' distribution with distutils.
# The metadata strings below are published verbatim to PyPI.
setup(name='mimeparse',
      version='0.1.4',
      description='A module provides basic functions for parsing mime-type names and matching them against a list of media-ranges.',
      long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
""",
      classifiers=[
          # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.5',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          ],
      keywords='mime-type',
      author='Joe Gregorio',
      author_email='joe@bitworking.org',
      maintainer='Joe Gregorio',
      maintainer_email='joe@bitworking.org',
      url='http://code.google.com/p/mimeparse/',
      license='MIT',
      py_modules=['mimeparse']
      )
| Python |
"""
Python tests for Mime-Type Parser.
This module loads a json file and converts the tests specified therein to a set
of PyUnitTestCases. Then it uses PyUnit to run them and report their status.
"""
__version__ = "0.1"
__author__ = 'Ade Oshineye'
__email__ = "ade@oshineye.com"
__credits__ = ""
import mimeparse
import unittest
from functools import partial
# Conditional import to support Python 2.5
try:
import json
except ImportError:
import simplejson as json
def test_parse_media_range(args, expected):
    """Assert that mimeparse.parse_media_range(args) equals tuple(expected)."""
    actual = mimeparse.parse_media_range(args)
    assert tuple(expected) == actual, \
        "Expected: '%s' but got %s" % (tuple(expected), actual)
def test_quality(args, expected):
    """Assert that mimeparse.quality(*args) equals expected."""
    (mime_type, ranges) = args
    actual = mimeparse.quality(mime_type, ranges)
    assert expected == actual, "Expected: '%s' but got %s" % (expected, actual)
def test_best_match(args, expected):
    """Assert that mimeparse.best_match(*args) equals expected."""
    (supported, header) = args
    actual = mimeparse.best_match(supported, header)
    assert expected == actual, "Expected: '%s' but got %s" % (expected, actual)
def test_parse_mime_type(args, expected):
    """Assert that mimeparse.parse_mime_type(args) equals tuple(expected)."""
    actual = mimeparse.parse_mime_type(args)
    assert tuple(expected) == actual, \
        "Expected: '%s' but got %s" % (tuple(expected), actual)
def add_tests(suite, json_object, func_name, test_func):
    """Append one FunctionTestCase to 'suite' per datum under json_object[func_name].

    Each datum is [args, expected] with an optional third element giving a
    human-readable description.
    """
    for test_datum in json_object[func_name]:
        (args, expected) = (test_datum[0], test_datum[1])
        desc = "%s(%s) with expected result: %s" % (func_name, str(args), str(expected))
        if len(test_datum) == 3:
            desc = test_datum[2] + " : " + desc
        bound = partial(test_func, args, expected)
        # FunctionTestCase reports the callable's __name__; partial objects
        # have none by default, so copy it from the underlying function.
        bound.__name__ = test_func.__name__
        suite.addTest(unittest.FunctionTestCase(bound, description=desc))
def run_tests():
    """Load testdata.json and run every test case defined in it."""
    # Close the data file explicitly (try/finally rather than 'with', so the
    # script keeps working on the Python 2.5 interpreters this file still
    # supports); previously the handle from open() was leaked.
    data_file = open("testdata.json")
    try:
        json_object = json.load(data_file)
    finally:
        data_file.close()
    suite = unittest.TestSuite()
    add_tests(suite, json_object, "parse_media_range", test_parse_media_range)
    add_tests(suite, json_object, "quality", test_quality)
    add_tests(suite, json_object, "best_match", test_best_match)
    add_tests(suite, json_object, "parse_mime_type", test_parse_mime_type)
    test_runner = unittest.TextTestRunner(verbosity=1)
    test_runner.run(suite)
# Script entry point: run the json-driven test suite when executed directly.
if __name__ == "__main__":
    run_tests()
| Python |
from urllib import urlencode
import atompubbase
import atompubbase.events
from atompubbase.model import init_event_handlers
init_event_handlers()
def apply_credentials_file(filename, http, error):
  """Read credentials from 'filename' and install them on 'http'.

  The file contains either two lines (name, password -> HTTP basic
  credentials) or three lines (name, password, "ClientLogin <service>").
  'error' is called with a message code and text on malformed input.
  """
  cred_file = open(filename, "r")
  try:
    parts = cred_file.read().splitlines()
  finally:
    # Close the handle explicitly; previously file(...).read() leaked it.
    cred_file.close()
  if len(parts) == 2:
    name, password = parts
    http.add_credentials(name, password)
  elif len(parts) == 3:
    name, password, authtype = parts
    authname, service = authtype.split()
    if authname != "ClientLogin":
      # NOTE(review): 'msg' is not defined anywhere in this module, so this
      # call raises NameError if ever reached -- confirm where msg.CRED_FILE
      # is supposed to come from.
      error(msg.CRED_FILE, "Unknown type of authentication: %s ['ClientLogin' is the only good value at this time.]" % authname)
      return
    # ClientLogin registers itself for PRE events as a side effect; the
    # instance does not need to be retained here (the unused local is gone).
    ClientLogin(http, name, password, service)
  else:
    error(msg.CRED_FILE, "Wrong format for credentials file")
class ClientLogin:
  """
  Perform ClientLogin up front, save the auth token, and then
  register for all the PRE events so that we can add the auth token
  to all requests.
  """
  def __init__(self, http, name, password, service):
    # Request body for the (now retired) Google ClientLogin protocol.
    auth = dict(accountType="HOSTED_OR_GOOGLE", Email=name, Passwd=password, service=service,
       source='AtomPubBase-1.0')
    resp, content = http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
    # The response body is newline-separated key=value pairs.
    lines = content.split('\n')
    d = dict([tuple(line.split("=", 1)) for line in lines if line])
    if resp.status == 403:
      # Login refused: fall back to an empty token so later requests go out
      # (unauthenticated) instead of crashing here.
      self.Auth = ""
    else:
      # NOTE(review): assumes every non-403 response carries an 'Auth' line;
      # a KeyError escapes here otherwise -- confirm against the spec.
      self.Auth = d['Auth']
    # From now on, stamp the saved token onto every outgoing request.
    atompubbase.events.register_callback("PRE", self.pre_cb)
  def pre_cb(self, headers, body, filters):
    # Called before each wrapped request: inject the ClientLogin token.
    headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
| Python |
# Copyright Google 2007
"""
An eventing system for atompubbase objects.
Each class that is registered with add_event_handlers()
will be hooked into the event system. Clients can then
register for callbacks when a member function is called,
filtering on when to trigger the callback. There
are several axes that can be used to filter on:
Time PRE|POST
Method Name GET|PUT|DELETE|CREATE
Media MEDIA|NEXT
Class SERVICE|COLLECTION|ENTRY
Note that Class is really driven by which
classes use the eventing system. Media is triggered
if the method name ends in "_media".
For example, given the following class:
class Entry(object):
def get(self, headers, body = None):
pass
def put_media(self, headers, body = None):
pass
It can be added to the event system by calling:
add_event_handlers(Entry)
Now you can register callbacks for when methods of instances
of the class Entry are called. For example:
def mycb(headers, body, attributes):
pass
register_callback("PRE_ENTRY", mycb)
The 'mycb' callback will be called before any
method is called in the Entry class. The headers
and body parameters will be passed along. The
headers can be changed by the callback.
You can construct a filter string by selecting zero
or one value across each axis and concatenating
them with underscores, order is not important.
PRE calls contain the header and body of the request,
POST calls contain the header and body of the response.
If you wish to receive all the events then register
with the ANY filter.
Presuming registered classes:
class Service:
def get(headers, body=None): pass
class Entry:
def get(headers, body=None): pass
def get_media(headers, body=None): pass
def delete(headers, body=None): pass
def put(headers, body): pass
def put_media(headers, body): pass
class Collection:
def get(headers, body=None): pass
def create(headers, body): pass
These are all valid filters:
PRE_GET_MEDIA - Called before Entry.get_media() is called
PRE - Called before any classes member function is called.
COLLECTION - Called before any Collection classes member function is called.
POST_COLLECTION - Called after any Collection classes member function is called.
POST_COLLECTION_CREATE - Called after Collection.create() is called.
ANY - Called before and after every classes member function is called.
"""
import sys
# Recognized values for the "when" axis of a filter.
PREPOST = set(["PRE", "POST"])
# Method-name prefixes that add_event_handlers() will wrap.
WRAPPABLE = set(["get", "put", "delete", "create"])
class Events(object):
  """Registry of callbacks keyed by attribute-set filters.

  A filter is a set of upper-case attribute names (e.g. PRE, ENTRY, GET);
  a callback fires when its filter is a subset of the triggering event's
  attributes (plus the implicit ANY attribute).
  """
  def __init__(self):
    # List of (filter_set, callback) pairs, in registration order.
    self.callbacks = []
  def register(self, filter, cb):
    """Register cb under the underscore-separated filter string.

    A filter naming neither PRE nor POST (and not ANY) defaults to
    firing before the call, i.e. PRE is added to it.
    """
    coords = set(filter.upper().split("_"))
    if "ANY" not in coords and not coords.intersection(PREPOST):
      coords.add("PRE")
    self.callbacks.append((coords, cb))
  def clear(self):
    """Forget every registered callback."""
    self.callbacks = []
  def trigger(self, when, methodname, instance, headers, body):
    """Invoke each callback whose filter matches this event."""
    attributes = set(methodname.upper().split("_"))
    attributes.add(instance.__class__.__name__.upper())
    attributes.add(when)
    candidates = attributes.union(set(["ANY"]))
    for (flt, cb) in self.callbacks:
      if flt.issubset(candidates):
        cb(headers, body, attributes)
events = Events()
def _wrap(method, methodname):
  """
  Create a closure around the given method that calls into
  the eventing system: PRE before the call, POST after it.
  """
  def wrapped(self, headers=None, body=None):
    # 'is None' (identity), not '== None': the sentinel check was unidiomatic.
    if headers is None:
      headers = {}
    try:
      headers["-request-uri"] = self.uri()
    except AttributeError:
      # Not every wrapped object exposes uri(); skip the annotation.
      pass
    events.trigger("PRE", methodname, self, headers, body)
    (headers, body) = method(self, headers, body)
    events.trigger("POST", methodname, self, headers, body)
    return (headers, body)
  return wrapped
# Classes already processed by add_event_handlers(), to keep wrapping idempotent.
_wrapped = set()
def add_event_handlers(theclass):
  """
  Wrap each callable non-internal member function of the class
  with a wrapper function that calls into the eventing system.
  """
  if theclass in _wrapped:
    return
  for methodname in dir(theclass):
    if methodname.startswith("_"):
      continue
    method = getattr(theclass, methodname)
    if callable(method) and methodname.split("_")[0] in WRAPPABLE:
      setattr(theclass, methodname, _wrap(method, methodname))
  _wrapped.add(theclass)
def register_callback(filter, cb):
  """
  Add a callback (cb) to be called when it matches
  the filter. The filter is a string of attribute
  names separated by underscores.
  Example:
  register_callback("PRE_ENTRY", mycb)
  The 'mycb' callback will be called before any
  method is called in the Entry class.
  """
  # Thin module-level wrapper around the 'events' singleton.
  events.register(filter, cb)
def clear():
  """
  Unregister all callbacks.
  """
  events.clear()
# Public API of this module.
__all__ = ["add_event_handlers", "register_callback", "clear"]
| Python |
#!/usr/bin/env python
import glob, unittest, os, sys
from trace import fullmodname
try:
from tests.utils import cleanup
except:
def cleanup():
pass
# try to start in a consistent, predictable location
if sys.path[0]:
os.chdir(sys.path[0])
# find all of the planet test modules
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))
print "Running the tests found in the following modules:"
print modules
# load all of the tests into a suite
try:
suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
# attempt to produce a more specific message
for module in modules:
__import__(module)
raise
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
verbosity = 2
# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)
cleanup()
| Python |
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parse a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

        ('application', 'xhtml', {'q': '0.5'})
    """
    parts = mime_type.split(';')
    params = dict([tuple([s.strip() for s in param.split('=', 1)])
                   for param in parts[1:]])
    full_type = parts[0].strip()
    # The Java URLConnection class sends an Accept header that includes a
    # single '*'.  Turn it into the legal wildcard '*/*'.
    if full_type == '*':
        full_type = '*/*'
    (type, subtype) = full_type.split('/')
    return (type.strip(), subtype.strip(), params)

def parse_media_range(range):
    """Parse a media-range into its component parts.

    Like parse_mime_type(), but additionally guarantees that there is a
    value for 'q' in the params dictionary: a missing, empty, non-numeric,
    or out-of-range quality value is normalized to '1'.  (Previously a
    non-numeric q such as 'q=x' raised ValueError.)
    """
    (type, subtype, params) = parse_mime_type(range)
    try:
        q = float(params['q'])
    except (KeyError, ValueError):
        q = None
    # NOTE: q == 0 is normalized to 1 as well, matching this module's
    # historical behavior (RFC 2616 would treat q=0 as "not acceptable").
    if q is None or not 0 < q <= 1:
        params['q'] = '1'
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Returns a (fitness, q) tuple for the best match of 'mime_type' against
    'parsed_ranges' (which must already have been parsed by
    parse_media_range()), or (-1, 0) if nothing matched.
    """
    (target_type, target_subtype, target_params) = parse_media_range(mime_type)
    best_fitness = -1
    best_fit_q = 0
    for (range_type, range_subtype, range_params) in parsed_ranges:
        if range_type != target_type and range_type != '*' and target_type != '*':
            continue
        if range_subtype != target_subtype and range_subtype != '*' and target_subtype != '*':
            continue
        # Exact type/subtype matches outweigh wildcard matches; matching
        # non-q parameters break the remaining ties.
        fitness = 100 if range_type == target_type else 0
        if range_subtype == target_subtype:
            fitness += 10
        for (key, value) in target_params.items():
            if key != 'q' and range_params.get(key) == value:
                fitness += 1
        if fitness > best_fitness:
            best_fitness = fitness
            best_fit_q = range_params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Return the 'q' of the best match for a mime-type, or 0 if none.

    Behaves the same as quality() except that 'parsed_ranges' must already
    have been parsed by parse_media_range().
    """
    (fitness, q) = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    'ranges' is a comma-separated, Accept-header-style string.  For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
                text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed = [parse_media_range(part) for part in ranges.split(',')]
    return quality_parsed(mime_type, parsed)
def best_match(supported, header):
    """Return the member of 'supported' that best satisfies an Accept header.

    'supported' is a list of mime-types sorted in order of increasing
    desirability (later entries win ties); 'header' must conform to the
    format of the HTTP Accept: header.  Returns '' when nothing in
    'supported' is acceptable (best quality is 0).

    >>> best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    parsed_header = [parse_media_range(r)
                     for r in _filter_blank(header.split(','))]
    weighted_matches = [
        (fitness_and_quality_parsed(mime_type, parsed_header), pos, mime_type)
        for (pos, mime_type) in enumerate(supported)]
    weighted_matches.sort()
    # The last entry now has the best (fitness, quality); the position
    # component breaks ties in favor of later (more desirable) entries.
    ((best_fitness, best_q), pos, best_type) = weighted_matches[-1]
    return best_type if best_q else ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# -*- coding: utf-8 -*-
#old way
from distutils.core import setup
#new way
#from setuptools import setup, find_packages
# Register the single-module 'mimeparse' distribution with distutils.
# The metadata strings below are published verbatim to PyPI.
setup(name='mimeparse',
      version='0.1.4',
      description='A module provides basic functions for parsing mime-type names and matching them against a list of media-ranges.',
      long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
""",
      classifiers=[
          # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.5',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          ],
      keywords='mime-type',
      author='Joe Gregorio',
      author_email='joe@bitworking.org',
      maintainer='Joe Gregorio',
      maintainer_email='joe@bitworking.org',
      url='http://code.google.com/p/mimeparse/',
      license='MIT',
      py_modules=['mimeparse']
      )
| Python |
"""
Python tests for Mime-Type Parser.
This module loads a json file and converts the tests specified therein to a set
of PyUnitTestCases. Then it uses PyUnit to run them and report their status.
"""
__version__ = "0.1"
__author__ = 'Ade Oshineye'
__email__ = "ade@oshineye.com"
__credits__ = ""
import mimeparse
import unittest
from functools import partial
# Conditional import to support Python 2.5
try:
import json
except ImportError:
import simplejson as json
def test_parse_media_range(args, expected):
    """Assert that mimeparse.parse_media_range(args) equals tuple(expected)."""
    actual = mimeparse.parse_media_range(args)
    assert tuple(expected) == actual, \
        "Expected: '%s' but got %s" % (tuple(expected), actual)
def test_quality(args, expected):
    """Assert that mimeparse.quality(*args) equals expected."""
    (mime_type, ranges) = args
    actual = mimeparse.quality(mime_type, ranges)
    assert expected == actual, "Expected: '%s' but got %s" % (expected, actual)
def test_best_match(args, expected):
    """Assert that mimeparse.best_match(*args) equals expected."""
    (supported, header) = args
    actual = mimeparse.best_match(supported, header)
    assert expected == actual, "Expected: '%s' but got %s" % (expected, actual)
def test_parse_mime_type(args, expected):
    """Assert that mimeparse.parse_mime_type(args) equals tuple(expected)."""
    actual = mimeparse.parse_mime_type(args)
    assert tuple(expected) == actual, \
        "Expected: '%s' but got %s" % (tuple(expected), actual)
def add_tests(suite, json_object, func_name, test_func):
    """Append one FunctionTestCase to 'suite' per datum under json_object[func_name].

    Each datum is [args, expected] with an optional third element giving a
    human-readable description.
    """
    for test_datum in json_object[func_name]:
        (args, expected) = (test_datum[0], test_datum[1])
        desc = "%s(%s) with expected result: %s" % (func_name, str(args), str(expected))
        if len(test_datum) == 3:
            desc = test_datum[2] + " : " + desc
        bound = partial(test_func, args, expected)
        # FunctionTestCase reports the callable's __name__; partial objects
        # have none by default, so copy it from the underlying function.
        bound.__name__ = test_func.__name__
        suite.addTest(unittest.FunctionTestCase(bound, description=desc))
def run_tests():
    """Load testdata.json and run every test case defined in it."""
    # Close the data file explicitly (try/finally rather than 'with', so the
    # script keeps working on the Python 2.5 interpreters this file still
    # supports); previously the handle from open() was leaked.
    data_file = open("testdata.json")
    try:
        json_object = json.load(data_file)
    finally:
        data_file.close()
    suite = unittest.TestSuite()
    add_tests(suite, json_object, "parse_media_range", test_parse_media_range)
    add_tests(suite, json_object, "quality", test_quality)
    add_tests(suite, json_object, "best_match", test_best_match)
    add_tests(suite, json_object, "parse_mime_type", test_parse_mime_type)
    test_runner = unittest.TextTestRunner(verbosity=1)
    test_runner.run(suite)
# Script entry point: run the json-driven test suite when executed directly.
if __name__ == "__main__":
    run_tests()
| Python |
"""
There are four classes that make up the core
of the atompub model.
class Context
class Service
class Collection
class Entry
Context represents the current state, as represented
by a service document, a collection and an entry.
Each atompub object (Service, Collection, or Entry)
is just instantiated with a URI (or with a Context)
that it then uses to perform its work. Each object can produce
a list of URIs (actually Context objects) (possibly filtered)
for the next level down. The only parsing done will be xpaths to
pick out URIs, e.g. collections from service documents.
Here is an example of how the classes are used together:
# Note that httplib2.Http is passed in so you
# can pass in your own instrumented version, etc.
from httplib2 import Http
h = httplib2.Http()
c = Context(h, service_document_uri)
service = Service(c)
collection = Collection(service.iter()[0])
entry = Entry(collection.iter()[0])
(headers, body) = entry.get()
body = "<entry>...some updated stuff </entry>"
entry.put(body)
# saving and restoring is a matter of pickling/unpickling the Context.
import pickle
f = file("somefile", "w")
pickle.dump(entry.context(), f)
import pickle
f = file("somefile", "r")
context = pickle.load(f)
# You pass the class names into restore() for it to use to restore the context.
(service, collection, entry) = context.restore(Service, Collection, Entry)
# You don't have to use the context, Entries
# and Collections can be instantiated from URIs instead
# of Context instances.
entry = Entry(entry_edit_uri)
"""
import events
from mimeparse import mimeparse
import urlparse
import httplib2
import copy
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
from xml.parsers.expat import ExpatError
# XML namespace URIs used throughout the atompub model.
ATOM = "http://www.w3.org/2005/Atom"
XHTML = "http://www.w3.org/1999/xhtml"
APP = "http://www.w3.org/2007/app"
# Frequently-used element names in Clark ("{uri}local") notation,
# the form ElementTree's find/findall expect.
ATOM_ENTRY = "{%s}entry" % ATOM
LINK = "{%s}link" % ATOM
ATOM_TITLE= "{%s}title" % ATOM
APP_COLL = "{%s}collection" % APP
APP_MEMBER_TYPE = "{%s}accept" % APP
XHTML_DIV = "{%s}div" % XHTML
class ParseException(Exception):
    """Raised when a response body that should be XML is not well-formed.
    The offending HTTP response headers and body are kept for inspection."""
    def __init__(self, headers, body):
        self.headers = headers
        self.body = body

    def __str__(self):
        return "XML is non-well-formed"
def get_child_title(node):
    """
    Return the title text of the first atom:title child of 'node',
    or "" if there is no title.  For an xhtml title the text of the
    wrapping div and its immediate children is concatenated.
    """
    title = node.find(".//" + ATOM_TITLE)
    if title is None:
        return ""
    title_type = title.get('type', 'text')
    if title_type in ['text', 'html']:
        # normalize an empty element (text is None) to ""
        return title.text or ""
    else:
        div = title.find(".//" + XHTML_DIV)
        if div is None:
            # malformed xhtml title with no inner div
            return ""
        # Guard each text/tail with "" -- any of them may be None,
        # which used to raise TypeError during concatenation.
        parts = [div.text or ""]
        for c in div.getchildren():
            parts.append(c.text or "")
            parts.append(c.tail or "")
        return "".join(parts)
def absolutize(baseuri, uri):
    """
    Given a baseuri, return the absolute
    version of the given uri. Works whether
    uri is relative or absolute.
    """
    if uri is None:
        return None
    # A uri with an authority component is already absolute.
    authority = urlparse.urlsplit(uri)[1]
    if authority:
        return uri
    return urlparse.urljoin(baseuri, uri)
def link_value(etree, xpath, relation):
    """
    Given an elementtree element 'etree', find all link
    elements under the given xpath and return the @href
    of the first link with the given relation, or None.
    """
    for link in etree.findall(xpath + "/" + LINK):
        if relation == link.get('rel'):
            return link.get('href')
    return None
class Context(object):
    """
    Encapsulates the current service document,
    the current collection and the current
    entry. Can be pickled and un-pickled to
    achieve persistence of context.
    """
    # Class-level defaults are immutable placeholders only; __init__
    # always rebinds them on the instance.  (The previous class-level
    # '_collection_stack = []' was a shared mutable class attribute --
    # a classic Python pitfall, even though __init__ masked it.)
    _service = None
    _collection = None
    _entry = None
    http = None
    _collection_stack = None

    def __init__(self, http = None, service=None, collection=None, entry=None):
        """http is either an instance of httplib2.Http() or something that
        acts like it. For this module the only two functions that need to
        be implemented are request() and add_credentials().
        """
        self._collection_stack = []
        if http:
            self.http = http
        else:
            self.http = httplib2.Http()
        self._service = service
        self._collection = collection
        self._entry = entry

    def _get_service(self):
        return self._service

    def _set_service(self, service):
        # Changing the service invalidates everything below it.
        self._service = service
        self._collection = None
        self._collection_stack = []
        self._entry = None

    service = property(_get_service, _set_service, None, "The URI of the Service Document. None if not set yet.")

    def _get_collection(self):
        return self._collection

    def _set_collection(self, collection):
        # Changing the collection invalidates the stack and the entry.
        self._collection = collection
        self._collection_stack = []
        self._entry = None

    collection = property(_get_collection, _set_collection, None, "The URI of the collection. None if not set yet.")

    def _get_entry(self):
        return self._entry

    def _set_entry(self, entry):
        self._entry = entry

    entry = property(_get_entry, _set_entry, None, "The URI of the entry. None if not set yet.")

    def restore(self, service_type, collection_type, entry_type):
        """
        Restore the state from a Context. The types of the objects
        to be instantiated for the service, collection and entry
        are passed in. If no URI is set for a specific level
        then None is returned for that instance.
        """
        service = self._service and service_type(self) or None
        collection = self._collection and collection_type(self) or None
        entry = self._entry and entry_type(self) or None
        return (service, collection, entry)

    def collpush(self, uri):
        """
        The collpush and collpop members are similar to the
        command line 'pushd' and 'popd' commands. They let you
        change to a different collection and then pop back
        to the older collection when you are done.
        """
        # Bypasses the 'collection' property on purpose: pushing must
        # not clear the stack the way the property setter does.
        self._collection_stack.append((self._collection, self._entry))
        self._collection = uri
        self._entry = None

    def collpop(self):
        """
        See collpush.
        """
        self._collection, self._entry = self._collection_stack.pop()
class Service(object):
    """
    An Atom Publishing Protocol Service Document.
    """
    def __init__(self, context_or_uri):
        # BUG FIX: the Context used to be stored as 'self.context', which
        # shadowed the context() method below, so service.context() raised
        # TypeError.  It is now stored as '_context', matching Collection
        # and Entry.
        if isinstance(context_or_uri, Context):
            self._context = context_or_uri
        else:
            self._context = Context(service=context_or_uri)
        self.representation = None   # raw body of the service document
        self._etree = None           # parsed form of the service document

    def context(self):
        """
        Get the current Context associated with this Service Document.
        """
        return self._context

    def uri(self):
        return self._context.service

    def get(self, headers=None, body=None):
        """
        Retrieve the current Service Document from the server.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self._context.service, headers=headers)
        if headers.status == 200:
            self.representation = body
            try:
                self._etree = fromstring(body)
            except ExpatError:
                raise ParseException(headers, body)
        return (headers, body)

    def etree(self):
        """
        Returns an ElementTree representation of the Service Document.
        """
        if not self._etree:
            self.get()
        return self._etree

    def iter_match(self, mimerange):
        """
        Returns a generator that iterates over
        the collections in the service document
        that accept the given mimerange. The mimerange
        can be a specific mimetype - "image/png" - or
        a range - "image/*".
        """
        if not self.representation:
            headers, body = self.get()
        for coll in self._etree.findall(".//" + APP_COLL):
            accept_type = [t.text for t in coll.findall(APP_MEMBER_TYPE)]
            if len(accept_type) == 0:
                # a collection with no app:accept takes Atom entries
                accept_type.append("application/atom+xml")
            coll_type = [t for t in accept_type if mimeparse.best_match([t], mimerange)]
            if coll_type:
                context = copy.copy(self._context)
                context.collection = absolutize(self._context.service, coll.get('href'))
                yield context

    def iter(self):
        """
        Returns a generator that iterates over all
        the collections in the service document.
        """
        return self.iter_match("*/*")

    def iter_info(self):
        """
        Returns a generator that iterates over all
        the collections in the service document.
        Each yield tuple contains the collection
        URI, the collection title and the workspace title
        """
        if not self.representation:
            headers, body = self.get()
        for workspace in self._etree.findall(".//{%s}workspace" % APP):
            workspace_title = get_child_title(workspace)
            for coll in workspace.findall(".//" + APP_COLL):
                coll_title = get_child_title(coll)
                coll_uri = absolutize(self._context.service, coll.get('href'))
                yield (workspace_title, coll_title, coll_uri)
class Collection(object):
    """
    An Atom Publishing Protocol collection, possibly paged
    across several Atom feed documents.
    """
    def __init__(self, context_or_uri):
        """
        Create a Collection from either the URI of the
        collection, or from a Context object.
        """
        if isinstance(context_or_uri, Context):
            self._context = context_or_uri
        else:
            # BUG FIX: a bare URI names the *collection*; it used to be
            # stored as Context(service=...), so context.collection stayed
            # None and every request targeted None.
            self._context = Context(collection=context_or_uri)
        self.representation = None   # raw body of the current feed page
        self._etree = None           # parsed form of the current page
        self.next = None             # absolute URI of the next page, if any

    def context(self):
        """
        The Context associated with this Collection.
        """
        return self._context

    def uri(self):
        return self._context.collection

    def etree(self):
        """
        Returns an ElementTree representation of the
        current page of the collection.
        """
        if not self.representation:
            self.get()
        return self._etree

    def _record_next(self, base_uri, headers, body):
        """
        Cache the page just fetched and remember the absolute URI of the
        next page (from the feed's 'next' link), if any.
        """
        if headers.status == 200:
            self.representation = body
            try:
                self._etree = fromstring(body)
            except ExpatError:
                raise ParseException(headers, body)
            self.next = link_value(self._etree, ".", "next")
            if self.next:
                self.next = absolutize(base_uri, self.next)
        else:
            # BUG FIX: 'selfnext' was a typo for 'self.next', so a failed
            # fetch left a stale next-page URI behind.
            self.representation = self._etree = self.next = None

    def get(self, headers=None, body=None):
        """
        Retrieves the first feed in a paged series of
        collection documents.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self._context.collection, headers=headers, body=body)
        self._record_next(self._context.collection, headers, body)
        return (headers, body)

    def has_next(self):
        """
        Collections can be paged across many
        Atom feeds. Returns True if there is a
        'next' feed we can get.
        """
        return self.next != None

    def get_next(self, headers=None, body=None):
        """
        Collections can be paged across many
        Atom feeds. Gets the next feed in the
        paging.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self.next, headers=headers, body=body)
        self._record_next(self.next, headers, body)
        return (headers, body)

    def create(self, headers=None, body=None):
        """
        Create a new member in the collection.
        Can be used to create members of regular
        and media collections. Be sure to set the
        'content-type' header appropriately.
        Returns a tuple of the HTTP response headers
        and the body.
        """
        headers, body = self._context.http.request(self._context.collection, method="POST", headers=headers, body=body)
        return (headers, body)

    def entry_create(self, headers=None, body=None):
        """
        Convenience method that returns a Context for the new member
        if the create has succeeded, or None if it fails.
        """
        headers, body = self._context.http.request(self._context.collection, method="POST", headers=headers, body=body)
        if headers.status == 201 and 'location' in headers:
            context = copy.copy(self._context)
            context.entry = headers['location']
            return context
        else:
            return None

    def iter(self):
        """
        Returns an iterable that produces a Context
        object for every Entry in the collection.
        """
        self.get()
        while True:
            for entry in self._etree.findall(ATOM_ENTRY):
                context = copy.copy(self._context)
                edit_link = link_value(entry, ".", "edit")
                context.entry = absolutize(self._context.collection, edit_link)
                yield context
            if self.has_next():
                self.get_next()
            else:
                break

    def iter_entry(self):
        """
        Returns an iterable that produces an elementtree
        Entry for every Entry in the collection. Note that this
        Entry is the possibly incomplete Entry in the collection
        feed.
        """
        self.get()
        while True:
            for entry in self._etree.findall(ATOM_ENTRY):
                yield entry
            if self.has_next():
                self.get_next()
            else:
                break
class Entry(object):
    def __init__(self, context_or_uri):
        """
        Create an Entry from either the URI of the
        entry edit URI, or from a Context object.
        """
        if isinstance(context_or_uri, Context):
            self._context = context_or_uri
        else:
            self._context = Context(entry=context_or_uri)
        self._clear()

    def _clear(self):
        # Forget all cached server state; the next accessor refetches.
        self.representation = None
        self._etree = None
        self.edit_media = None

    def etree(self):
        """
        Returns an ElementTree representation of the Entry.
        """
        if not self.representation:
            self.get()
        return self._etree

    def context(self):
        return self._context

    def uri(self):
        return self._context.entry

    def get(self, headers=None, body=None):
        """
        Retrieve the representation for this entry.
        """
        resp, content = self._context.http.request(self._context.entry, headers=headers)
        self.representation = content
        try:
            self._etree = fromstring(content)
        except ExpatError:
            raise ParseException(resp, content)
        self.edit_media = absolutize(self._context.entry, link_value(self._etree, ".", "edit-media"))
        return (resp, content)

    def has_media(self):
        """
        Returns True if this is a Media Link Entry.
        """
        if not self.representation:
            self.get()
        return self.edit_media is not None

    def get_media(self, headers=None, body=None):
        """
        If this entry is a Media Link Entry, then retrieve
        the associated media.
        """
        if not self.representation:
            self.get()
        return self._context.http.request(self.edit_media, headers=headers)

    def put(self, headers=None, body=None):
        """
        Update the entry on the server. If the body to send
        is not supplied then the internal elementtree element
        will be serialized and sent to the server.
        """
        if headers is None:
            headers = {}
        headers.setdefault('content-type', 'application/atom+xml;type=entry')
        if not self.representation:
            self.get()
        if body is None:
            body = tostring(self._etree)
        resp, content = self._context.http.request(self._context.entry, headers=headers, method="PUT", body=body)
        if resp.status < 300:
            self._clear()
        return (resp, content)

    def put_media(self, headers=None, body=None):
        """
        If this entry is a Media Link Entry, then update
        the associated media.
        """
        if not self.representation:
            self.get()
        resp, content = self._context.http.request(self.edit_media, headers=headers, method="PUT", body=body)
        if resp.status < 300:
            self._clear()
        return (resp, content)

    def delete(self, headers=None, body=None):
        """
        Delete the entry from the server.
        """
        resp, content = self._context.http.request(self._context.entry, headers=headers, method="DELETE")
        if resp.status < 300:
            self._clear()
        return (resp, content)
def init_event_handlers():
    """
    Add in hooks to the Service, Collection
    and Entry classes to enable Events.
    """
    for model_class in (Service, Collection, Entry):
        events.add_event_handlers(model_class)
| Python |
# Canned APP service document: two workspaces, the first containing an
# entry collection and a media ("image/*") collection.
SERVICE1 = """<?xml version="1.0" encoding='utf-8'?>
<service xmlns="http://www.w3.org/2007/app">
<workspace title="Main Site" >
<collection
title="My Blog Entries"
href="http://example.org/reilly/main" />
<collection
title="Pictures"
href="http://example.org/reilly/pic" >
<accept>image/*</accept>
</collection>
</workspace>
<workspace title="Side Bar Blog">
<collection title="Remaindered Links"
href="http://example.org/reilly/list" />
</workspace>
</service>"""

# Canned Atom entry exercising the text, html and xhtml text constructs.
ENTRY1 = """<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title type="text">third</title>
<id>http://bitworking.org/foo/app/main/third</id>
<author>
<name>Joe Gregorio</name>
</author>
<updated>2006-08-04T15:52:00-05:00</updated>
<summary type="html"><p>not much</p></summary>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml"><p>Some stuff</p>
<p><i>[Update: The Atom draft is finished.]</i></p>
outside a child element.
<p>More stuff.</p></div>
</content>
</entry>
"""
import apptools
import unittest
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
class parseAtomTest(unittest.TestCase):
    def testSimple(self):
        # Parse the canned entry and spot-check each extracted field.
        parsed = apptools.parse_atom_entry(".", fromstring(ENTRY1))
        self.assertEqual("third", parsed['title'])
        self.assertEqual("<p>not much</p>", parsed['summary'])
        self.assertTrue(parsed['content'].startswith("""\n    <html:p xmlns:html="http://www.w3.org/1999/xhtml">Some stuff</html:p>"""))
class unparseAtomEntryTest(unittest.TestCase):
    def testEntry(self):
        # Round-trip: parse, overwrite content/summary, unparse, re-parse.
        element = fromstring(ENTRY1)
        parsed = apptools.parse_atom_entry(".", fromstring(ENTRY1))
        parsed['content'] = "This is text"
        parsed['content__type'] = 'text'
        parsed['summary'] = "<p>This is text</p>"
        parsed['summary__type'] = 'xhtml'
        apptools.unparse_atom_entry(element, parsed)
        reparsed = apptools.parse_atom_entry(".", fromstring(tostring(element)))
        self.assertEqual("This is text", reparsed['content'])
        self.assertEqual('<html:p xmlns:html="http://www.w3.org/1999/xhtml">This is text</html:p>', reparsed['summary'])
class wrapTest(unittest.TestCase):
    def testWrap(self):
        # (source, width, expected) triples covering embedded newlines,
        # no wrapping needed, and forced wrapping.
        cases = [
            ("This\nis", 80, "This\nis"),
            ("This is", 80, "This is "),
            ("This is", 3, "This\nis "),
            ("This is\n", 3, "This\nis\n"),
        ]
        for source, width, expected in cases:
            self.assertEqual(expected, apptools.wrap(source, width))

unittest.main()
| Python |
from pretty import pretty
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
# Sample xhtml fragment (with redundant per-element namespace
# declarations) used to exercise pretty().
src = """<html:div xmlns:html="http://www.w3.org/1999/xhtml">
<html:p >I took a couple of days off work
and we drove down to Florida to visit family in "The Villages",
a 55+ golf cart community that currently has about 50,000 residents.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">That is not a typo. Check out the <html:a href="http://en.wikipedia.org/wiki/The_Villages">wikipedia</html:a> <html:a href="http://en.wikipedia.org/wiki/The_Villages%2C_Florida">entries</html:a>.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">On Monday we went out to feed the ducks at a nearby pond, but well fed
by everyone else, they weren't interested in our bread. Instead the bread was
attacked from below by the fish in the pond, which wasn't very interesting, that is, until
a local heron came over and started feasting on the fish we'd attracted. There's nothing
like the sight of a still living fish wiggling down the throat of a heron to make
a young boy's day.
</html:p>
<html:table style="width: 194px;" xmlns:html="http://www.w3.org/1999/xhtml"><html:tr><html:td align="center" style="height: 194px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida"><html:img height="160" src="http://lh6.google.com/joe.gregorio/RoK-XGNIkuE/AAAAAAAAAA8/ePqbYyHlxvU/s160-c/TheVillagesFlorida.jpg" style="margin: 1px 0 0 4px;" width="160" /></html:a></html:td></html:tr><html:tr><html:td style="text-align: center; font-family: arial,sans-serif; font-size: 11px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida" style="color: #4D4D4D; font-weight: bold; text-decoration: none;">The Villages, Florida</html:a></html:td></html:tr>
</html:table>
</html:div>"""

# Python 2 print statement: dump the pretty-printed tree to stdout.
print pretty(fromstring(src))
| Python |
#!/usr/bin/env python
import glob, unittest, os, sys
from trace import fullmodname
# Fall back to a no-op cleanup when the project's test utilities are
# unavailable.  NOTE(review): bare except -- also hides real import errors
# inside tests.utils.
try:
    from tests.utils import cleanup
except:
    def cleanup():
        pass

# try to start in a consistent, predictable location
if sys.path[0]:
    os.chdir(sys.path[0])

# find all of the planet test modules
# (fullmodname turns 'tests/test_foo.py' into a dotted module name)
modules = map(fullmodname, glob.glob(os.path.join('tests', 'test_*.py')))

print "Running the tests found in the following modules:"
print modules

# load all of the tests into a suite
try:
    suite = unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
    # attempt to produce a more specific message
    # by importing each module individually, then re-raise the original
    for module in modules:
        __import__(module)
    raise

# crude command-line handling: -q/--quiet and -v/--verbose adjust verbosity
verbosity = 1
if "-q" in sys.argv or '--quiet' in sys.argv:
    verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
    verbosity = 2

# run test suite
unittest.TextTestRunner(verbosity=verbosity).run(suite)

cleanup()
| Python |
from model import init_event_handlers, Context, Service, Collection, Entry, ATOM, XHTML
from httplib2 import Http
import unittest
import events
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
# xpath from an entry element to the xhtml div inside its atom:content
ATOM_CONTENT = "{%s}content/{%s}div" % (ATOM, XHTML)

class Test(unittest.TestCase):
    def test(self):
        # Live round-trip against a real server: create an entry,
        # update it, then delete it, recording events along the way.
        c = Context(http = Http(".cache"), service = "http://bitworking.org/projects/apptestsite/app.cgi/service/;service_document")
        s = Service(c)
        collection = Collection(s.iter_match("application/atom+xml;type=entry").next())
        init_event_handlers()

        class EventListener(object):
            # NOTE(review): 'events' here is a class attribute shared by
            # all instances and shadows the module name only as an
            # attribute; events.register_callback below still refers to
            # the module.
            events = []
            def callback(self, headers, body, attributes):
                self.events.append(attributes)
        listener = EventListener()
        events.register_callback("ANY", listener.callback)

        CONTENT = """<entry xmlns="http://www.w3.org/2005/Atom">
<title>Test Post From AtomPubBase Live Test</title>
<id>urn:uuid:1225c695-ffb8-4ebb-aaaa-80da354efa6a</id>
<updated>2005-09-02T10:30:00Z</updated>
<summary>Hi!</summary>
<author>
<name>Joe Gregorio</name>
</author>
<content>Plain text content for this test.</content>
</entry>
"""
        entry_context = collection.entry_create(body=CONTENT, headers={'content-type':'application/atom+xml;type=entry'})
        self.assertNotEqual(None, entry_context)
        entry = Entry(entry_context)
        entry.etree().find(ATOM_CONTENT).text = "Bye"
        headers, body = entry.put()
        self.assertEqual(200, headers.status)
        headers, body = entry.delete()
        self.assertEqual(200, headers.status)
        print listener.events

unittest.main()
| Python |
try:
from xml.etree.ElementTree import fromstring, tostring, SubElement
import xml.etree.ElementTree as ElementTree
except:
from elementtree.ElementTree import fromstring, tostring, SubElement
import elementtree.ElementTree as ElementTree
class namespace(object):
    """Callable that qualifies a bare element name into Clark notation
    ("{uri}name"), caching each result in self.memoized."""
    def __init__(self, uri):
        self.ns_uri = uri
        self.memoized = {}

    def __call__(self, element):
        try:
            return self.memoized[element]
        except KeyError:
            qualified = "{%s}%s" % (self.ns_uri, element)
            self.memoized[element] = qualified
            return qualified
# Callable namespace qualifiers for the vocabularies used here.
ATOM = namespace("http://www.w3.org/2005/Atom")
APP = namespace("http://www.w3.org/2007/app")
XHTML = namespace("http://www.w3.org/1999/xhtml")

# Preferred prefixes for serialization.
my_namespaces = {
    "http://www.w3.org/1999/xhtml": "xhtml",
    "http://www.w3.org/2007/app" : "app",
    "http://www.w3.org/2005/Atom" : "atom"
}
# NOTE(review): _namespace_map is a private ElementTree detail; this
# relies on it to control the prefixes tostring() emits.
ElementTree._namespace_map.update(my_namespaces)

import re
from urlparse import urljoin
from xml.sax.saxutils import quoteattr, escape
import time
import calendar
def get_element(etree, name):
    """Return the text of the first child of 'etree' matching 'name'
    (qualified into the Atom namespace when no '{uri}' prefix is given),
    or "" when no such child exists."""
    if '}' not in name:
        name = ATOM(name)
    matches = etree.findall(name)
    if matches:
        return matches[0].text
    return ""
# RFC 3339 timestamp, e.g. 2006-08-04T15:52:00-05:00: captures the date
# and time fields plus 'timezone' ('Z' or +/-HH:MM; signed hours in 'tzhour').
RFC3339 = re.compile("^(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d)T(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)(\.\d*)?" +
        "(?P<timezone>Z|((?P<tzhour>[+-]\d\d):\d\d))$")
def get_date(etree, name):
    """
    Returns the Date Construct value as seconds from the epoch
    in UTC. The 'name' should be the element name of an
    RFC 4287 Date Construct, such as ATOM('published'), ATOM('updated')
    or APP('edited'). The parameter 'etree' is an elementtree
    element. Note that you don't need to add the namespace
    to elements in the ATOM namespace.
    Raises ValueError if the value is not valid RFC 3339.
    """
    date = get_element(etree, name)
    m = RFC3339.search(date)
    if not m:
        raise ValueError("Not a valid RFC 3339 format.")
    d = m.groupdict()
    ndate = [int(x) for x in [d['year'], d['month'], d['day'], d['hour'], d['minute'], d['second']]]
    ndate.append(0) # weekday (ignored by timegm)
    ndate.append(1) # year day (ignored by timegm)
    if d['timezone'] != 'Z':
        # BUG FIX: subtract the *full* offset.  The old code only
        # subtracted the hours, mis-converting offsets such as +05:30.
        # calendar.timegm does plain arithmetic on the fields, so
        # out-of-range hour/minute values are safe here.
        offset_minutes = int(d['timezone'][-2:])
        if d['tzhour'].startswith('-'):
            offset_minutes = -offset_minutes
        ndate[3] -= int(d['tzhour'])
        ndate[4] -= offset_minutes
        ndate.append(0)
    return calendar.timegm(tuple(ndate))
def serialize_nons(element, top):
    """
    Serialize 'element' to markup without namespace prefixes: the
    '{uri}' part of each tag is stripped.  When 'top' is True the
    element's own tag and attributes are omitted and only its (escaped)
    text and serialized children are emitted.
    NOTE(review): assumes every tag is namespace-qualified -- split("}")
    raises IndexError otherwise.
    """
    tag = element.tag.split("}", 1)[1]
    tail = u""
    if element.tail != None:
        tail = escape(element.tail)
    text = u""
    if element.text != None:
        text = element.text
    # quoteattr adds the surrounding quotes itself
    attribs = " ".join(["%s=%s" % (k, quoteattr(v)) for k, v in element.attrib.iteritems()])
    if attribs:
        attribs = " " + attribs
    if top:
        value = escape(text)
        close = u""
    else:
        value = "<%s%s>%s" % (tag, attribs, escape(text))
        close = "</%s>" % tag
    # NOTE(review): this branch appears unreachable -- 'value' is always
    # assigned a string above.
    if value == None:
        value = u""
    # children are serialized with their tags (top=False); tails follow
    return value + "".join([serialize_nons(c, False) for c in element.getchildren()]) + close + tail
def get_text(name, entry):
    """
    Return (type, value) for the Atom text construct 'name' of 'entry'.
    type is 'text', 'html' or 'xhtml' (MIME equivalents are mapped via
    mime2atom); value is "" when the element is absent, empty, or of an
    unrecognized type.  xhtml values are the serialized inner div.
    """
    value = ""
    texttype = "text"
    l = entry.findall(ATOM(name))
    if l:
        value = l[0].text
        texttype = mime2atom(l[0].get('type', 'text'))
        if texttype in ["text", "html"]:
            # element text is already the right value
            pass
        elif texttype == "xhtml":
            div = l[0].find("{http://www.w3.org/1999/xhtml}div")
            value = serialize_nons(div, True)
        else:
            # unknown type: report the type but return no content
            value = ""
    if value == None:
        value = ""
    return (texttype, value)
def set_text(entry, name, ttype, value):
    """
    Set the Atom text construct 'name' (e.g. 'title', 'summary') on
    'entry' to 'value' with type 'ttype' ('text', 'html' or 'xhtml'),
    creating the child element if necessary.  An 'xhtml' value that
    fails to parse as XML is downgraded to type 'html'.
    """
    elements = entry.findall(ATOM(name))
    if not elements:
        element = SubElement(entry, ATOM(name))
    else:
        element = elements[0]
    element.set('type', ttype)
    # drop any existing children before installing the new content
    [element.remove(e) for e in element.getchildren()]
    if ttype in ["html", "text"]:
        element.text = value
    elif ttype == "xhtml":
        element.text = ""
        try:
            div = fromstring((u"<div xmlns='http://www.w3.org/1999/xhtml'>%s</div>" % value).encode('utf-8'))
            element.append(div)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit still propagate.  Not well-formed XML: fall
            # back to embedding the value as html.
            element.text = value
            element.set('type', 'html')
# MIME types that map onto Atom text-construct type names.
mime_to_atom = {
    "application/xhtml+xml": "xhtml",
    "text/html": "html",
    "text/plain": "text"
}

def mime2atom(t):
    """Map a MIME type to its Atom text-construct name ('text', 'html',
    'xhtml'); anything unrecognized passes through unchanged."""
    return mime_to_atom.get(t, t)
def wrap(text, width):
    """Greedy word-wrap: emit a newline once the accumulated word length
    (spaces not counted) exceeds 'width'.  A word that already contains a
    newline resets the count without adding another separator."""
    pieces = []
    count = 0
    for word in text.split(' '):
        pieces.append(word)
        count += len(word)
        has_newline = '\n' in word
        if count > width or has_newline:
            count = 0
            if not has_newline:
                pieces.append('\n')
        else:
            pieces.append(' ')
    return "".join(pieces)
| Python |
import urlparse
import urllib
import httplib2
from email import message_from_string, message_from_file
import os
class MockHttp:
    """
    A mock for httplib2.Http that takes its
    response headers and bodies from files on disk
    """
    def __init__(self, directory):
        self.directory = directory
        self.hit_counter = {}

    def request(self, uri, method="GET", body=None, headers=None, redirections=5):
        """
        Replay the canned response stored under
        <directory>/<method>/<quoted path>.file; the Nth repeat of the
        same request prefers a '.N'-suffixed file.  Missing files
        produce a 404 response.
        """
        key = method + uri
        counter = self.hit_counter.get(key, 0) + 1
        self.hit_counter[key] = counter
        path = urlparse.urlparse(uri)[2]
        fname = os.path.join(self.directory, method, urllib.quote(path.strip("/")) + ".file")
        numbered = fname + "." + str(counter)
        if os.path.exists(numbered):
            fname = numbered
        if not os.path.exists(fname):
            return (httplib2.Response({"status": "404"}), "")
        f = file(fname, "r")
        response = message_from_file(f)
        f.close()
        body = response.get_payload()
        headers = httplib2.Response(response)
        return (headers, body)

    def add_credentials(self, name, password):
        # credentials are irrelevant when replaying canned responses
        pass
class MockRecorder(httplib2.Http):
    """
    Wraps a real httplib2.Http-like instance and records every response
    to disk in the layout MockHttp replays from.
    """
    def __init__(self, h, directory):
        # NOTE: deliberately does not call httplib2.Http.__init__;
        # every request is delegated to the wrapped instance 'h'.
        self.h = h
        self.directory = directory
        self.hit_counter = {}

    def request(self, uri, method="GET", body=None, headers=None, redirections=5):
        """
        Forward the request to the wrapped client and write the
        response (headers, blank line, body) to
        <directory>/<method>/<quoted path>.file, suffixing '.N' for the
        Nth repeat of the same request.
        """
        counter = self.hit_counter.get(method+uri, 0)
        counter += 1
        self.hit_counter[method+uri] = counter
        headers, body = self.h.request(uri, method, body, headers, redirections)
        path = urlparse.urlparse(uri)[2]
        fname = os.path.join(self.directory, method, urllib.quote(path.strip("/")) + ".file")
        if counter >= 2:
            fname = fname + "." + str(counter)
        dirname = os.path.dirname(fname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        f = file(fname, "w")
        f.write(
            "\r\n".join(["%s: %s" % (key, value) for key, value in headers.iteritems()])
        )
        f.write("\r\n\r\n")
        f.write(body)
        f.close()
        return (headers, body)

    def add_credentials(self, name, password):
        # BUG FIX: was 'h.add_credentials(...)', a NameError at call
        # time; delegate to the wrapped client instead.
        self.h.add_credentials(name, password)
| Python |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# (low, high) codepoint ranges from RFC 3987 ucschar / iprivate that
# must be percent-encoded when converting an IRI to a URI; kept sorted
# so the scan below can stop early.
escape_range = [
    (0xA0, 0xD7FF ),
    (0xE000, 0xF8FF ),
    (0xF900, 0xFDCF ),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD ),
    (0x20000, 0x2FFFD ),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD ),
    (0x50000, 0x5FFFD ),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD ),
    (0x80000, 0x8FFFD ),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD ),
    (0xB0000, 0xBFFFD ),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD ),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD ),
    (0x100000, 0x10FFFD)
]

def encode(c):
    """Percent-encode the single character 'c' (each utf-8 octet becomes
    %XX) if it falls inside one of the escape_range ranges; otherwise
    return it unchanged."""
    code = ord(c)
    for low, high in escape_range:
        if code < low:
            # ranges are sorted ascending, so no later range can match
            break
        if code <= high:
            return "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    # Non-unicode input is assumed to already be a URI.
    if not isinstance(uri, unicode):
        return uri
    (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
    # The authority uses IDNA encoding; the rest of the IRI is
    # percent-encoded per character:
    # 1. encode as utf-8
    # 2. then %-encode each octet of that utf-8
    authority = authority.encode('idna')
    recombined = urlparse.urlunsplit((scheme, authority, path, query, fragment))
    return "".join([encode(c) for c in recombined])
# Self-test: run with 'python iri2uri.py'.
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # idempotence: converting twice must give the same result
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # byte strings are passed through untouched, hence not equal
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| Python |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev: 276 $"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove depracated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
_ssl_wrap_socket = ssl.wrap_socket
except ImportError:
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
    """True when 'timeout' is an actual timeout value -- i.e. neither
    None nor (on Python 2.6+) the socket module's global-default
    sentinel."""
    if not hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return timeout is not None
    return (timeout is not None
            and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    # Intentionally shadows the sorted() builtin, which does not exist
    # on 2.3.  NOTE(review): unlike the builtin, this sorts the caller's
    # list in place and returns the same object.
    def sorted(seq):
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Patch the method in only where the stdlib doesn't already provide it.
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # keep the failed response and its body so callers can turn
        # the error back into a (response, content) pair
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    pass

class RedirectLimit(HttpLib2ErrorWithResponse):
    pass

class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    pass

class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    pass

class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    pass

class RelativeURIError(HttpLib2Error):
    pass

class ServerNotFoundError(HttpLib2Error):
    pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
    """Split *uri* into (scheme, authority, path, query, fragment).

    Uses the reference regex from Appendix B of RFC 3986; components
    that are absent come back as None.
    """
    match = URI.match(uri)
    scheme = match.group(2)
    authority = match.group(4)
    path = match.group(5)
    query = match.group(7)
    fragment = match.group(9)
    return (scheme, authority, path, query, fragment)
def urlnorm(uri):
    """Normalize *uri* for use as a cache key.

    Lower-cases the scheme and authority (they are case-insensitive per
    RFC 3986), defaults an empty path to "/", and drops any fragment.
    Returns (scheme, authority, request_uri, defrag_uri) where
    request_uri is the path plus optional "?query" and defrag_uri is
    the reassembled fragment-less URI.

    Raises RelativeURIError when *uri* lacks a scheme or authority.

    Fix: the original lower-cased the scheme twice (once before and
    once after building request_uri); the redundant second call is gone.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')  # leading "scheme://" of an absolute URL
re_slash = re.compile(r'[?/:|]+')  # runs of characters unsafe in filenames
def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        # For absolute URLs, IDNA-encode so non-ASCII hostnames still
        # map to an ASCII, filesystem-safe name.  (Python 2: a str may
        # hold UTF-8 bytes, so decode to unicode before encoding.)
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        # Not IDNA-encodable; fall through and use the raw name.
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # MD5 of the full original name is appended below so that names
    # truncated or collapsed by the substitutions stay distinct.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of:
      "FRESH"       -- the cached entry may be served as-is
      "STALE"       -- the entry must be revalidated with the origin
      "TRANSPARENT" -- bypass the cache entirely

    We don't handle the following:
    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:
        no-cache
        only-if-cached
        max-age
        min-fresh

    Fix: the mix of dict.has_key() and the 'in' operator is unified on
    'in' (has_key is deprecated and removed in Python 3).
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif 'no-cache' in cc:
        retval = "TRANSPARENT"
    elif 'no-cache' in cc_response:
        retval = "STALE"
    elif 'only-if-cached' in cc:
        retval = "FRESH"
    elif 'date' in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if 'max-age' in cc_response:
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif 'expires' in response_headers:
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # A max-age in the *request* overrides the response's lifetime.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh effectively ages the entry by the requested margin.
        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Return *new_content* decoded per the response's Content-Encoding.

    Handles 'gzip' and 'deflate'.  On success, content-length is
    corrected and the original encoding is preserved under the
    '-content-encoding' key (the leading '-' keeps it from clashing
    with real header names).

    Raises FailedToDecompressContent when the body cannot be decoded.
    Fix: zlib.error is now caught as well -- previously a corrupt
    deflate body escaped as a raw zlib.error instead of the documented
    FailedToDecompressContent.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except (IOError, zlib.error):
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in *cache* under *cachekey*.

    Honors 'no-store' from either side's Cache-Control by deleting any
    existing entry; otherwise serializes the headers (minus transport
    ones) plus *content*.  A 304 is recorded as a 200, since the cached
    entry is by definition the full representation.

    Bug fix: the serialized status line previously used the raw
    response_headers.status, discarding the 304->200 mapping computed
    just above it, so revalidated entries were cached with status 304.
    Also: has_key()/iteritems() replaced with 'in'/items().
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if 'no-store' in cc or 'no-store' in cc_response:
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            # Transport-level headers must not be replayed from cache.
            for key, value in response_headers.items():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value
            status = response_headers.status
            if status == 304:
                status = 200
            status_header = 'status: %d\r\n' % status
            header_str = info.as_string()
            # Normalize bare CR or LF to CRLF so the cached blob parses
            # back as a valid header block.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])
            cache.set(cachekey, text)
def _cnonce():
    """Return a 16-character client nonce for Digest/HMACDigest auth.

    Mixes the current time with 20 random digits and hashes the result.
    Fix: randrange's upper bound is exclusive, so (0, 9) could never
    select the digit '9'; (0, 10) covers all ten digits.
    """
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the base64 SHA-1 PasswordDigest for a WSSE UsernameToken
    header from the client nonce, ISO timestamp and password."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.encodestring(digest).strip()
# Credential handling needs two things: a pool of credentials to try
# (not necessarily tied to Basic, Digest, etc.) and a record of which
# URIs have already demanded authentication.  That record is tricky,
# since sub-URIs can share the same auth -- or the scheme may change
# as you descend the tree -- so every Auth instance must also be able
# to report how close to the 'top' of the tree it applies.
class Authentication(object):
    """One authentication scope: credentials bound to a (host, path) subtree."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        self.host = host
        self.path = path
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Count how many path segments *request_uri* lies below this scope."""
        (_scheme, _authority, _path, _query, _fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """Return True when (host, request_uri) falls inside this scope."""
        # XXX Should we normalize the request_uri?
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Add the appropriate Authorization header to *headers*.
        Subclasses override this; the base implementation does nothing."""
        pass

    def response(self, response, content):
        """Inspect the last authorized response, e.g. to pick up fresh
        nonces.  Return True if the request should be retried (as Digest
        does for stale=true); the base class never retries."""
        return False
class BasicAuthentication(Authentication):
    """HTTP Basic authentication: send the credentials preemptively."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the base64-encoded user:password pair as the
        Authorization header."""
        token = base64.encodestring("%s:%s" % self.credentials).strip()
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        # Keep 'auth' if offered, else None (auth-int is unsupported).
        # NOTE(review): qop is a comma-separated list per RFC 2617 but
        # is split on whitespace here, so a challenge of "auth,auth-int"
        # would be rejected -- confirm against real server challenges.
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = user:realm:password (RFC 2617 3.2.2.2); constant for the
        # lifetime of this challenge, so compute it once.
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        self.challenge['nc'] = 1  # nonce-use count, incremented per request

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD are the hash helpers named in RFC 2617 section 3.2.1.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        # The cnonce parameter exists for deterministic testing; normally
        # a fresh client nonce is generated per request.
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        self.challenge['nc'] += 1

    def response(self, response, content):
        # Returns True when the request must be retried (stale nonce).
        if not response.has_key('authentication-info'):
            # No Authentication-Info: check for a stale-nonce challenge
            # and, if present, restart the nonce count with the new nonce.
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # The server may proactively hand us the next nonce to use.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        # A server nonce is mandatory for this scheme.
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        # Select the digest used for the request HMAC ...
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        # ... and the one used to hash the password into the key.
        # NOTE(review): pwhashmod is used via .new(...) below, which is
        # the old md5/sha module interface, while hashmod is passed to
        # hmac.new() as a constructor -- confirm _md5/_sha support both.
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Long-lived key: hash(user ":" hash(password+salt) ":" realm),
        # computed once since it never changes for this challenge.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The end-to-end request headers are folded into the digest so
        # the server can verify their integrity.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest  = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server reports an integrity failure or a stale
        # challenge; anything else is final.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication.

    Thinly tested -- do not rely on it.  There is currently no third
    party server to test against: Blogger once implemented this
    algorithm but has since switched to Basic over HTTPS, and TypePad
    implements it incorrectly (it never issues a 401 challenge; the
    client is simply expected to know the endpoint wants WSSE
    profile="UsernameToken").
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the Authorization and X-WSSE UsernameToken headers."""
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['Authorization'] = 'WSSE profile="UsernameToken"'
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    # NOTE: __init__ performs a network round-trip to Google's
    # ClientLogin endpoint, exchanging the credentials for a token that
    # request() then attaches to every request.
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and  request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # Response body is "key=value" lines; we need the Auth token.
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login was refused; an empty token means later requests
            # will simply come back 401.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Maps the scheme token from a WWW-Authenticate challenge to the class
# that implements it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Preference order when a server offers several schemes: tried first
# to last until one stops the 401.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Store cached responses as files in a local directory.

    Not really safe to use if multiple threads or processes are going
    to be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached value for *key*, or None when absent or unreadable."""
        path = os.path.join(self.cache, self.safe(key))
        try:
            fp = open(path, "rb")
            data = fp.read()
            fp.close()
        except IOError:
            return None
        return data

    def set(self, key, value):
        """Write *value* under *key*, replacing any previous entry."""
        fp = open(os.path.join(self.cache, self.safe(key)), "wb")
        fp.write(value)
        fp.close()

    def delete(self, key):
        """Remove the entry for *key*, if one exists."""
        path = os.path.join(self.cache, self.safe(key))
        if os.path.exists(path):
            os.remove(path)
class Credentials(object):
    """An ordered pool of (name, password) pairs, each optionally scoped
    to a domain; an empty domain matches every host."""
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a credential; *domain* is stored lower-cased."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to *domain*."""
        for (cred_domain, name, password) in self.credentials:
            if cred_domain == "" or cred_domain == domain:
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert.

    Used for SSL client certificates: add(key, cert, domain) registers
    a pair and iter(domain) yields (key, cert) tuples.
    """
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:
        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings as the 6-tuple that setproxy() expects."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """Truthy when the socks module is available and host/port are set."""
        return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts"""
    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        # Plain httplib.HTTPConnection knows nothing of timeouts or
        # proxies; stash both for connect() to apply.
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        msg = "getaddrinfo returns an empty list"
        # Try each resolved address (IPv4/IPv6) until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route through a SOCKS proxy when one is configured.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                # This address failed; close and fall through to the next.
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    "This class allows communication via SSL."
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None):
        # Stash the extras before delegating; the base class knows
        # nothing of timeouts or proxies.
        self.timeout = timeout
        self.proxy_info = proxy_info
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)

    def connect(self):
        "Connect to a host on a given (SSL) port."
        # Bug fix: the proxy branch previously called setproxy() on a
        # not-yet-created self.sock and then on an unbound local 'sock',
        # so every proxied HTTPS connection failed.  It must first
        # create the SOCKS socket, mirroring
        # HTTPConnectionWithTimeout.connect().
        if self.proxy_info and self.proxy_info.isgood():
            sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setproxy(*self.proxy_info.astuple())
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if has_timeout(self.timeout):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
    """The value of proxy_info is a ProxyInfo instance.
    If 'cache' is a string then it is used as a directory name
    for a disk cache. Otherwise it must be an object that supports
    the same interface as FileCache.

    'timeout' (seconds) is applied to every connection this client
    opens; None leaves the platform default in place.
    """
    self.proxy_info = proxy_info
    # Map domain name to an httplib connection
    self.connections = {}
    # The location of the cache, for now a directory
    # where cached responses are held.
    if cache and isinstance(cache, str):
        self.cache = FileCache(cache)
    else:
        self.cache = cache

    # Name/password
    self.credentials = Credentials()

    # Key/cert
    self.certificates = KeyCerts()

    # authorization objects
    self.authorizations = []

    # If set to False then no redirects are followed, even safe ones.
    self.follow_redirects = True

    # Which HTTP methods do we apply optimistic concurrency to, i.e.
    # which methods get an "if-match:" etag header added to them.
    self.optimistic_concurrency_methods = ["PUT"]

    # If 'follow_redirects' is True, and this is set to True then
    # all redirects are followed, including unsafe ones.
    self.follow_all_redirects = False

    # NOTE(review): consumed by request() (outside this chunk);
    # appears to disable ETag-based conditional requests when True.
    self.ignore_etag = False

    # NOTE(review): when True, exceptions appear to be reported as
    # synthetic error responses instead of raised -- confirm in
    # request(), which is outside this chunk.
    self.force_exception_to_status_code = False

    self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
    """A generator that creates Authorization objects
    that can be applied to requests.

    Walks AUTH_SCHEME_ORDER crossed with every credential registered
    for *host*, yielding an instance of the matching Authentication
    subclass for each scheme the server actually challenged with.

    Fix: dict.has_key() (deprecated, removed in Python 3) replaced by
    the 'in' operator.
    """
    challenges = _parse_www_authenticate(response, 'www-authenticate')
    for cred in self.credentials.iter(host):
        for scheme in AUTH_SCHEME_ORDER:
            if scheme in challenges:
                yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
    """Add a name and password that will be used
    any time a request requires authentication.

    With a non-empty *domain* the pair is only offered to that host;
    the default "" matches every host.
    """
    self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
    """Add a key and cert that will be used
    any time a request requires authentication.

    The pair is passed as key_file/cert_file to the HTTPS connection
    for hosts matching *domain* (SSL client authentication).
    """
    self.certificates.add(key, cert, domain)
def clear_credentials(self):
    """Remove all the names and passwords
    that are used for authentication"""
    self.credentials.clear()
    # Authorization objects were built from those credentials, so they
    # are invalid now too.
    self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
    """Issue a single request on *conn*, retrying once if the
    connection proves stale.

    Returns (Response, content); the body is decompressed per its
    Content-Encoding for everything except HEAD requests.  Raises
    ServerNotFoundError when the host cannot be resolved.

    (Also removes a stray trailing semicolon after 'break'.)
    """
    for i in range(2):
        try:
            conn.request(method, request_uri, body, headers)
            response = conn.getresponse()
        except socket.gaierror:
            conn.close()
            raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
        except (socket.error, httplib.HTTPException):
            # Likely a dropped keep-alive connection: reconnect and
            # retry exactly once, then give up.
            if i == 0:
                conn.close()
                conn.connect()
                continue
            else:
                raise
        else:
            content = response.read()
            response = Response(response)
            if method != "HEAD":
                content = _decompressContent(response, content)
            break
    return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
    """Do the actual request using the connection object
    and also follow one level of redirects if necessary"""

    # Preemptively apply the deepest (most specific) Authorization
    # object whose scope covers this URI, if any.
    auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
    auth = auths and sorted(auths)[0][1] or None
    if auth:
        auth.request(method, request_uri, headers, body)

    (response, content) = self._conn_request(conn, request_uri, method, body, headers)

    if auth:
        # Let the auth object inspect the response (e.g. Digest stale
        # nonce); True means re-send the request once.
        if auth.response(response, body):
            auth.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers )
            response._stale_digest = 1

    if response.status == 401:
        # Challenged: try each applicable scheme/credential pair until
        # one stops the 401, then remember it for future requests.
        for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
            authorization.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
            if response.status != 401:
                self.authorizations.append(authorization)
                authorization.response(response, body)
                break

    if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
        if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
            # Pick out the location header and basically start from the beginning
            # remembering first to strip the ETag header and decrement our 'depth'
            if redirections:
                if not response.has_key('location') and response.status != 300:
                    raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                # Fix-up relative redirects (which violate an RFC 2616 MUST)
                if response.has_key('location'):
                    location = response['location']
                    (scheme, authority, path, query, fragment) = parse_uri(location)
                    if authority == None:
                        response['location'] = urlparse.urljoin(absolute_uri, location)
                if response.status == 301 and method in ["GET", "HEAD"]:
                    # Permanent redirect: cache it so future requests go
                    # straight to the new URI.
                    response['-x-permanent-redirect-url'] = response['location']
                    if not response.has_key('content-location'):
                        response['content-location'] = absolute_uri
                    _updateCache(headers, response, content, self.cache, cachekey)
                # Conditional headers refer to the old URI; drop them
                # before re-requesting.
                if headers.has_key('if-none-match'):
                    del headers['if-none-match']
                if headers.has_key('if-modified-since'):
                    del headers['if-modified-since']
                if response.has_key('location'):
                    location = response['location']
                    old_response = copy.deepcopy(response)
                    if not old_response.has_key('content-location'):
                        old_response['content-location'] = absolute_uri
                    # A 303 (or an unsafe method being redirected) must
                    # be re-fetched with GET; otherwise keep the method.
                    redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
                    (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                    response.previous = old_response
            else:
                raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content)
    elif response.status in [200, 203] and method == "GET":
        # Don't cache 206's since we aren't going to handle byte range requests
        if not response.has_key('content-location'):
            response['content-location'] = absolute_uri
        _updateCache(headers, response, content, self.cache, cachekey)

    return (response, content)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
The maximum number of redirect to follow before raising an
exception is 'redirections. The default is 5.
The return value is a tuple of (response, content), the first
being and instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = _normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'deflate, gzip'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except IndexError:
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""

    # The headers live in the dict itself (lowercased keys); the class
    # attributes below carry response metadata.

    """Is this response from our local cache"""
    fromcache = False

    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11

    "Status code returned by server. "
    status = 200

    """Reason phrase returned by server."""
    reason = "Ok"

    # The Response for the request that preceded this one in a
    # redirect chain, or None for a direct response.
    previous = None

    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            # Live response from the wire: copy headers with lowercased
            # names and mirror the status into the dict as a string.
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            # Parsed cache entry: keys are already header names and a
            # 'status' header is expected to be present.
            for key, value in info.items():
                self[key] = value
            self.status = int(self['status'])
        else:
            # A plain mapping (e.g. a synthetic error response).
            for key, value in info.iteritems():
                self[key] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Backwards-compatibility shim: response.dict returns the headers.
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
| Python |
__author__ = "Joe Gregorio <http://bitworking.org/>"
__version__ = "$Revision: 150 $"
__copyright__ = "Copyright (c) 2006 Joe Gregorio"
__license__ = "MIT"
import os
import sys
import httplib2
try:
from xml.etree.ElementTree import fromstring, tostring, SubElement
except:
from elementtree.ElementTree import fromstring, tostring, SubElement
import atompubbase
import atompubbase.auth
from atompubbase.model import Entry, Collection, Service, Context, init_event_handlers, ParseException
import urlparse
import cStringIO
import sys
from optparse import OptionParser
import time
import feedvalidator
from feedvalidator import compatibility
from mimeparse import mimeparse
from xml.sax.saxutils import escape
from feedvalidator.formatter.text_plain import Formatter as text_formatter
import xml.dom.minidom
import random
import base64
import urllib
import re
import copy
# By default we'll check the bitworking collection
INTROSPECTION_URI = "http://bitworking.org/projects/apptestsite/app.cgi/service/;service_document"

# Command-line interface for the test driver. Options are parsed at import
# time; 'options' and 'cmd_line_args' are consumed by main() below.
parser = OptionParser()
parser.add_option("--credentials", dest="credentials",
    help="FILE that contains a name and password on separate lines with an optional third line with the authentication type of 'ClientLogin <service>'.",
    metavar="FILE")
parser.add_option("--output", dest="output",
    help="FILE to store test results",
    metavar="FILE")
parser.add_option("--verbose",
    action="store_true",
    dest="verbose",
    default=False,
    help="Print extra information while running.")
parser.add_option("--quiet",
    action="store_true",
    dest="quiet",
    default=False,
    help="Do not print anything while running.")
parser.add_option("--debug",
    action="store_true",
    dest="debug",
    default=False,
    help="Print low level HTTP information while running.")
parser.add_option("--html",
    action="store_true",
    dest="html",
    default=False,
    help="Output is formatted in HTML")
# --record and --playback allow runs to be captured and replayed offline
# via atompubbase.mockhttp (see main()).
parser.add_option("--record",
    dest="record",
    metavar="DIR",
    help="Record all the responses to be used later in playback mode.")
parser.add_option("--playback",
    dest="playback",
    metavar="DIR",
    help="Playback responses stored from a previous run.")
options, cmd_line_args = parser.parse_args()
# Restructure so that we use atompubbase
# Add hooks that do validation of the documents at every step
# Add hooks to specific actions that validate other things (such as response status codes)
# Add hooks that log the requests and responses for later inspection (putting them on the HTML page).
#
# Need an object to keep track of the current state, i.e. the test and
# request/response pair that each error/warning/informational is about.
#
# Need to track the desired output format.
#
# Might have to fix up the anchors that the html formatter produces.
#
# Create an httplib2 instance for atompubbase that has a memory based cache.

# Install atompubbase's default event dispatch so that the Recorder
# callbacks registered below actually fire on each request/response.
atompubbase.model.init_event_handlers()
def get_test_data(filename):
    """Read *filename* from the directory containing this script and return
    its contents decoded from UTF-8 as a unicode object."""
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
    # try/finally so the handle is closed promptly -- the original leaked
    # the open file until garbage collection.
    f = open(path, "r")
    try:
        return unicode(f.read(), "utf-8")
    finally:
        f.close()
def get_test_data_raw(filename):
    """Read *filename* from the directory containing this script and return
    its raw, undecoded contents."""
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
    # try/finally so the handle is closed promptly -- the original leaked
    # the open file until garbage collection.
    f = open(path, "r")
    try:
        return f.read()
    finally:
        f.close()
def method_from_filters(filters):
    """Map an event-filter set to the HTTP method it represents.

    "CREATE" wins over "PUT", which wins over "DELETE"; anything else is
    treated as a plain GET.
    """
    for marker, verb in (("CREATE", "POST"), ("PUT", "PUT"), ("DELETE", "DELETE")):
        if marker in filters:
            return verb
    return "GET"
class MemoryCache:
    """Minimal in-memory cache exposing the get/set/delete interface that
    httplib2 expects of a cache object."""

    def __init__(self):
        # The store used to be a class attribute, so every MemoryCache
        # instance silently shared one dict; give each instance its own.
        self.mem = {}

    def set(self, key, value):
        """Store *value* under *key*, replacing any existing entry."""
        self.mem[key] = value

    def get(self, key):
        """Return the cached value for *key*, or None if absent."""
        return self.mem.get(key, None)

    def delete(self, key):
        """Remove *key* from the cache; deleting a missing key is a no-op."""
        if key in self.mem:
            del self.mem[key]
class Enum:
    """A tiny enum: Enum(NAME="description", ...) assigns each NAME a small
    integer value exposed as an attribute, with name()/desc() reverse
    lookups from the value."""

    def __init__(self, **entries):
        self.entries = entries
        # Materialize the key order as a list: dict.keys() is not indexable
        # on Python 3's dict views, and name() indexes self.order.
        self.order = list(entries.keys())
        # Enumerate self.order (not entries.keys() again) so attribute
        # values are guaranteed to match the order used by name().
        self.__dict__.update([(name, i) for (i, name) in enumerate(self.order)])

    def name(self, index):
        """Return the symbolic name for an enum value."""
        return self.order[index]

    def desc(self, index):
        """Return the description string for an enum value."""
        return self.entries[self.name(index)]
# Make this an enum
# Catalog of everything the recorder can report. Each entry's value is the
# spec citation shown (and linkified by expand_spec_reference) in reports;
# the attribute itself (msg.VALID_ATOM, ...) is the message identifier.
msg = Enum(
    VALID_ATOM = "[RFC4287]",
    ENTRIES_ORDERED_BY_ATOM_EDITED = "[RFC5023] Section 10",
    CREATE_RETURNS_201 = "[RFC5023] Section 9.2",
    CREATE_RETURNS_LOCATION = "[RFC5023] Section 9.2",
    CREATE_CONTENT_LOCATION = "[RFC5023] Section 9.2",
    CREATE_RETURNS_ENTRY = "[RFC5023] Section 9.2",
    CREATE_APPEAR_COLLECTION = "[RFC5023] Section 9.1",
    PUT_STATUS_CODE = "[RFC2616] Section 9.6",
    GET_STATUS_CODE = "[RFC2616] Section 10.2.1",
    DELETE_STATUS_CODE = "[RFC2616] Section 9.7",
    SLUG_HEADER = "[RFC5023] Section 9.7",
    ENTRY_LINK_EDIT = "[RFC5023] Section 9.1",
    MEDIA_ENTRY_LINK_EDIT = "[RFC5023] Section 9.6",
    HTTP_ETAG = "[RFC2616] Section 13.3.4",
    HTTP_LAST_MODIFIED = "[RFC2616] Section 13.3.4",
    HTTP_CONTENT_ENCODING = "[RFC2616] Section 14.11",
    WELL_FORMED_XML = "[W3C XML 1.0] Section 2.1 sec-well-formed",
    INTERNATIONALIZATION = "[W3C XML 1.0] Section 2.1 charsets",
    CRED_FILE = "[AppClietTest]",
    INFO = "Info",
    SUCCESS = "",
    REQUEST = "Request",
    RESPONSE = "Response",
    BEGIN_TEST = ""
    )
class StopTest(Exception):
    """Raised by a check helper to abort the currently running test early."""
    pass
# Patterns recognizing spec citations such as "[RFC5023] Section 9.2" and
# "[W3C XML 1.0] Section 2.1 sec-well-formed" (see the msg Enum above);
# the matching *_URI template is filled from the pattern's named groups to
# produce a deep link into the online spec.
RFC = re.compile("\[RFC(?P<number>\d{4})\] Section (?P<section>\S+)$")
RFC_URI = "http://tools.ietf.org/html/rfc%(number)s#section-%(section)s"
W3C = re.compile("\[W3C XML 1.0\] Section (\S+) (?P<section>\S+)$")
W3C_URI = "http://www.w3.org/TR/REC-xml/#%(section)s"
def expand_spec_reference(message):
    """Return the description for *message* as HTML, wrapping any RFC or
    W3C section citation it contains in a link to the online spec.

    msg.INFO has no citation and renders as a single space.
    """
    if message == msg.INFO:
        return ' '
    html_message = msg.desc(message)
    for pattern, uri_template in ((RFC, RFC_URI), (W3C, W3C_URI)):
        match = pattern.search(html_message)
        if match:
            uri = uri_template % match.groupdict()
            html_message = "<a href='%s'>%s</a>" % (uri, html_message)
    return html_message
class Recorder:
    """
    Records all the warning, errors, etc. and is able to
    spit the results out as a text or html report.
    """
    # NOTE(review): these are class attributes, so every Recorder instance
    # shares the same transcript/tests lists. Only one Recorder is ever
    # created in this script, so it works, but a second instance would
    # silently share state.
    transcript = [] # a list of (MSG_TYPE, message, details)
    tests = []      # completed transcripts, one list per test
    html = False    # when True, tostr() emits the HTML report
    verbosity = 0   # > 2 also records informational validator events
    has_errors = False
    has_warnings = False

    def __init__(self):
        # Wire this recorder into atompubbase's event stream: every
        # request/response is logged, and create/get responses are
        # validated as they happen.
        atompubbase.events.register_callback("ANY", self.log_request_response)
        atompubbase.events.register_callback("POST_CREATE", self.create_validation_cb)
        atompubbase.events.register_callback("POST_GET", self.get_check_response_cb)
        atompubbase.events.register_callback("POST_GET", self.content_validation_cb)

    def error(self, message, detail):
        # Record a spec violation; errors drive the exit status (2).
        self.has_errors = True
        self.transcript.append(("Error", message, detail))

    def warning(self, message, detail):
        # Record legal-but-problematic behavior; warnings drive exit status 1.
        self.has_warnings = True
        self.transcript.append(("Warning", message, detail))

    def info(self, detail):
        self.transcript.append(("Info", msg.INFO, detail))

    def success(self, detail):
        self.transcript.append(("Success", msg.SUCCESS, detail))

    def log(self, message, detail):
        # Raw request/response dumps; filtered out of the text report.
        self.transcript.append(("Log", message, detail))

    def _end_test(self):
        # Push the current transcript (if any) onto the finished-tests list.
        if self.transcript:
            self.tests.append(self.transcript)
            self.transcript = []

    def begin_test(self, detail):
        # detail is "<test name>:<description>"; _tohtml splits on ':'.
        self._end_test()
        self.transcript.append(("Begin_Test", msg.BEGIN_TEST, detail))

    def tostr(self):
        """Render all recorded transcripts as HTML or plain text."""
        self._end_test()
        if self.html:
            return self._tohtml()
        else:
            return self._totext()

    def _tohtml(self):
        # The report template; filled with the run timestamp.
        resp = [u"""<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<link href="validator/res/base.css" type="text/css" rel="stylesheet">
<script type="text/javascript" src="validator/res/jquery-1.2.3.js"></script>
<script type="text/javascript" src="validator/res/report.js" ></script>
<link href="validator/res/prettify.css" type="text/css" rel="stylesheet" />
<script type="text/javascript" src="validator/res/prettify.js"></script>
<title>AppClientTest - Results</title>
</head>
<body>
<h1>Test Report</h1>
<dl>
<dt>Date</dt>
<dd>%s</dd>
</dl>
<div class='legend'>
<h3>Legend</h3>
<dl>
<dt><img src='validator/res/info.gif'> Informational</dt>
<dd>Information on what was being tested.</dd>
<dt><img src='validator/res/warning.gif'> Warning</dt>
<dd>Warnings indicate behavior that, while legal, may cause<br/>
either performance or interoperability problems in the field.</dd>
<dt><img src='validator/res/error.gif'> Error</dt>
<dd>Errors are violations of either the Atom, AtomPub<br/> or HTTP specifications.</dd>
<dt><img src='validator/res/log.gif'> Log</dt>
<dd>Detailed information on the transaction to help you<br/> debug your service.</dd>
<dt><img src='validator/res/success.gif'> Success</dt>
<dd>A specific sub-test has been passed successfully.</dd>
</div>
""" % (time.asctime())]
        for transcript in self.tests:
            # The first record is always the Begin_Test marker whose detail
            # is "<name>:<description>".
            # NOTE(review): tuple(detail.split(":")) assumes exactly one ':'
            # in the detail; a test docstring containing a colon would make
            # the %-format raise. Confirm test docstrings stay colon-free.
            (code, message, detail) = transcript[0]
            transcript = transcript[1:]
            resp.append(u"<h2>%s</h2><p>%s</p>\n" % tuple(detail.split(":")))
            resp.append(u"<ol>\n")
            # convert msg.desc(message) into a link if possible
            resp.extend([u" <li class='%s'><img src='validator/res/%s.gif'> %s <span class='%s'>%s</span></li>\n" %
                (code, code.lower(), expand_spec_reference(message), code, detail) for (code, message, detail) in transcript])
            resp.append(u"</ol>\n")
        return (u"".join(resp)).encode("utf-8")

    def _totext(self):
        # Plain-text report: one line per record, Log/Info suppressed.
        resp = []
        for transcript in self.tests:
            resp.extend([u"%s:%s %s" % (code, msg.name(message), detail) for (code, message, detail) in transcript if code not in ["Log", "Info"]])
        return (u"\n".join(resp)).encode("utf-8")

    def _validate(self, headers, body):
        # Run the feed validator over a 200/201 response body and fold its
        # errors/warnings (and, at high verbosity, infos) into the transcript.
        if headers.status in [200, 201]:
            baseuri = headers.get('content-location', '')
            try:
                events = feedvalidator.validateStream(cStringIO.StringIO(body),
                    firstOccurrenceOnly=1,
                    base=baseuri)['loggedEvents']
            except feedvalidator.logging.ValidationFailure, vf:
                events = [vf.event]
            errors = [event for event in events if isinstance(event, feedvalidator.logging.Error)]
            if errors:
                self.error(msg.VALID_ATOM, "\n".join(text_formatter(errors)))
            warnings = [event for event in events if isinstance(event, feedvalidator.logging.Warning)]
            if warnings:
                self.warning(msg.VALID_ATOM, "\n".join(text_formatter(warnings)))
            if self.verbosity > 2:
                infos = [event for event in events if isinstance(event, feedvalidator.logging.Info)]
                if infos:
                    self.info("\n".join(text_formatter(infos)))

    def content_validation_cb(self, headers, body, filters):
        # POST_GET hook: validate retrieved documents.
        self._validate(headers, body)

    def create_validation_cb(self, headers, body, filters):
        # POST_CREATE hook: validate the entry echoed back on creation.
        self._validate(headers, body)

    def get_check_response_cb(self, headers, body, filters):
        """
        For operations that should return 200, like get, put and delete.
        """
        if headers.status != 200:
            # NOTE(review): calls the module-level error() alias rather than
            # self.error(); it works only because the alias is bound to the
            # single module-level Recorder instance.
            error(msg.GET_STATUS_CODE, "Could not successfully retrieve the document. Got a status code of: %d" % headers.status)
            raise StopTest
        if not headers.has_key('etag'):
            self.warning(msg.HTTP_ETAG, "No ETag: header was sent with the response.")
        if not headers.has_key('last-modified'):
            self.warning(msg.HTTP_LAST_MODIFIED, "No Last-Modified: header was sent with the response.")
        # NOTE(review): header values are strings, so this is a Py2
        # string-vs-int comparison which is True for any non-missing
        # content-length; presumably int(...) was intended -- confirm.
        if headers.get('content-length', 0) > 0 and not headers.has_key('-content-encoding'):
            self.warning(msg.HTTP_CONTENT_ENCODING, "No Content-Encoding: header was sent with the response indicating that a compressed entity body was not returned.")

    def log_request_response(self, headers, body, filters):
        # "ANY" hook: pretty-print each request or response into the log.
        request_line = u""
        if "PRE" in filters:
            direction = msg.REQUEST
            if "-request-uri" in headers:
                uri = headers["-request-uri"]
                del headers["-request-uri"]
                method = method_from_filters(filters)
                request_line = method + u" " + uri
        else:
            direction = msg.RESPONSE
        headers_str = request_line + "\n" + u"\n".join(["%s: %s" % (k, v) for (k, v) in headers.iteritems()])
        need_escape = True
        if body == None or len(body) == 0:
            body = u""
        else:
            if 'content-type' in headers:
                mtype, subtype, params = mimeparse.parse_mime_type(headers['content-type'])
                if subtype[-4:] == "+xml":
                    # Pretty-print XML bodies; fall back through charset
                    # decodings, and URL-quote as a last resort.
                    try:
                        dom = xml.dom.minidom.parseString(body)
                        body = dom.toxml()
                        if len(body.splitlines()) < 2:
                            body = dom.toprettyxml()
                    except xml.parsers.expat.ExpatError:
                        try:
                            body = unicode(body, params.get('charset', 'utf-8'))
                        except UnicodeDecodeError:
                            try:
                                body = unicode(body, 'iso-8859-1')
                            except UnicodeDecodeError:
                                body = urllib.quote(body)
                elif 'charset' in params:
                    body = unicode(body, params['charset'])
                elif mtype == 'image' and self.html:
                    # Inline images as data: URIs in the HTML report.
                    body = "<img src='data:%s/%s;base64,%s'/>" % (mtype, subtype, base64.b64encode(body))
                    need_escape = False
                else:
                    body = "Could not safely serialize the body"
            else:
                body = "Could not safely serialize the body"
        if headers_str or body or request_line:
            if self.html and need_escape:
                body = escape(body)
            if self.html:
                log = u"<pre><code>\n" + escape(headers_str) + "\n\n\n</code></pre>\n<pre><code class='prettyprint'>" + body + u"</code></pre>"
            else:
                log = headers_str + "\n\n" + body
            self.log(direction, log)
# The single Recorder for the whole run. The module-level names below are
# bound methods of it, so the test helpers can simply call error(...),
# warning(...), etc.
recorder = Recorder()
error = recorder.error
warning = recorder.warning
info = recorder.info
success = recorder.success
begin_test = recorder.begin_test
class Test:
    """Base class for all the tests. Has a 'run' member
    function which runs over all member functions
    that begin with 'test' and executes them.
    """
    def __init__(self):
        self.reports = []
        self.context = ""
        self.collection_uri = ""
        self.entry_uri = ""

    def run(self):
        # Discover test* methods by introspection and run each one inside
        # the recorder's begin_test/transcript machinery.
        methods = [ method for method in dir(self) if callable(getattr(self, method)) and method.startswith("test")]
        for method in methods:
            if not options.quiet:
                # Progress dot per test.
                # NOTE(review): flushes stdout although the dot went to
                # stderr -- presumably sys.stderr.flush() was intended.
                print >>sys.stderr, ".",
                sys.stdout.flush()
            test_member_function = getattr(self, method)
            try:
                # The method's docstring becomes the human-readable test
                # description, so subclass docstrings are report content.
                self.description = str(test_member_function.__doc__)
                self.context = method
                begin_test(method.split("test", 1)[1].replace("_", " ") + ":" + self.description)
                test_member_function()
            except StopTest:
                # A check helper decided the test cannot continue; the
                # failure has already been recorded.
                pass
            except ParseException, e:
                # Server returned a document that wouldn't parse as XML.
                recorder.log_request_response(e.headers, e.body, set(["POST"]))
                error(msg.WELL_FORMED_XML, "Not well-formed XML")
            except Exception, e:
                # Anything else is a bug in the harness itself; log it and
                # keep going with the next test.
                import traceback
                info("Internal error occured while running tests: " + str(e) + traceback.format_exc())
def check_order_of_entries(entries, order):
    """Check that the atom:ids listed in *order* occur in that relative
    order among the collection's *entries*.

    Records an error on an out-of-order id, a warning if some expected ids
    never appear, and success otherwise.
    """
    info("Check order of entries in the collection document")
    failed = False
    for entry in entries:
        idelement = entry.find("{%s}id" % atompubbase.model.ATOM)
        if idelement is None or idelement.text not in order:
            # Entries we did not create are ignored.
            continue
        if order[0] != idelement.text:
            error(msg.ENTRIES_ORDERED_BY_ATOM_EDITED, "Failed to preserve order of entries, was expecting %s, but found %s" % (order[0], idelement.text))
            failed = True
            break
        # Expected id found in position; consume it.
        order = order[1:]
        if len(order) == 0:
            break
    if len(order) and not failed:
        warning(msg.CREATE_APPEAR_COLLECTION, "All entries did not appear in the collection. The following ids never appeared: %s" % str(order))
        failed = True
    if not failed:
        success("Order of entries is correct")
def check_create_response(h, b):
    """Validate the HTTP response to a member-creation POST.

    Raises StopTest for a non-201 status or a missing Location: header;
    a missing Content-Location: or an empty body only draws a warning.
    """
    if h.status != 201:
        error(msg.CREATE_RETURNS_201, "Entry creation failed with status: %d %s" % (h.status, h.reason))
        raise StopTest
    if 'location' not in h:
        error(msg.CREATE_RETURNS_LOCATION, "Location: not returned in response headers.")
        raise StopTest
    # Non-fatal checks from here on.
    if 'content-location' not in h:
        warning(msg.CREATE_CONTENT_LOCATION, "Content-Location: not returned in response headers.")
    if len(b) == 0:
        warning(msg.CREATE_RETURNS_ENTRY, "Atom Entry not returned on member creation.")
def check_entry_slug(e, slug):
    """Record success if the Slug: header we sent is reflected in the
    entry's alternate link href, otherwise warn that it was ignored."""
    links = e.findall("{%s}link" % atompubbase.model.ATOM)
    # A link counts as "alternate" when rel is absent or explicitly so.
    honored = [link for link in links
               if link.attrib.get('rel', 'alternate') == 'alternate' and slug in link.attrib['href']]
    if honored:
        success("Slug was honored")
    else:
        warning(msg.SLUG_HEADER, "Slug was ignored")
def check_entry_links(e, ismedia):
    """Check that the member entry carries an 'edit' link, and -- for media
    entries -- an 'edit-media' link as well."""
    def links_with_rel(rel):
        # atom:link children whose rel attribute matches exactly.
        return [link for link in e.findall("{%s}link" % atompubbase.model.ATOM)
                if link.attrib.get('rel', None) == rel]

    if links_with_rel("edit"):
        success("Member contained an 'edit' link")
    else:
        warning(msg.ENTRY_LINK_EDIT, "Member Entry did not contain an atom:link element with a relation of 'edit'")
    if ismedia:
        if links_with_rel("edit-media"):
            success("Member contained an 'edit-media' link")
        else:
            warning(msg.MEDIA_ENTRY_LINK_EDIT, "Member Entry did not contain an atom:link element with a relation of 'edit-media'")
def check_update_response(h, b, desc):
    """Record success if the server accepted a PUT (200 or 204), otherwise
    record an error against the PUT status-code requirement."""
    accepted = h.status in (200, 204)
    if accepted:
        success("Updated %s" % desc)
    else:
        error(msg.PUT_STATUS_CODE, "Failed to accept updated %s" % desc)
def check_remove_response(h, b):
    """Abort the current test (StopTest) unless the DELETE came back with
    a 200, 202 or 204 status."""
    if h.status in (200, 202, 204):
        return
    error(msg.DELETE_STATUS_CODE, "Entry removal failed with status: %d %s" % (h.status, h.reason))
    raise StopTest
def get_entry_id(context, h, b):
    """Fetch the entry named by the creation response's Location: header and
    return (atom:id text, Entry).

    Aborts the test (StopTest) when no Location: header or no atom:id is
    found, since later checks key off the id.
    """
    entry_id = None
    e = None
    if 'location' in h:
        entry_context = copy.copy(context)
        entry_context.entry = h['location']
        e = Entry(entry_context)
        idelement = e.etree().find("{%s}id" % atompubbase.model.ATOM)
        if idelement is not None:
            entry_id = idelement.text
    if entry_id is None:
        info("Atom entry did not contain the required atom:id, can't continue with test.")
        raise StopTest
    return (entry_id, e)
def remove_entries_by_id(entries, ids):
    """DELETE every entry in *entries* whose atom:id appears in *ids*,
    checking each response, and record success once they are all gone."""
    num_entries = len(ids)
    for econtext in entries:
        e = Entry(econtext)
        idelement = e.etree().find("{%s}id" % atompubbase.model.ATOM)
        if idelement is not None and idelement.text in ids:
            info("Remove entry")
            h, b = e.delete()
            check_remove_response(h, b)
            num_entries -= 1
            # Stop scanning once everything we created is deleted.
            if num_entries == 0:
                break
    if num_entries == 0:
        # Fixed report wording: was "we're previously added".
        success("Removed all entries that were previously added.")
class EntryCollectionTests(Test):
    # Test suite run against an AtomPub collection that accepts Atom entries.

    def __init__(self, collection):
        Test.__init__(self)
        self.collection = collection

    def testBasic_Entry_Manipulation(self):
        """Add and remove three entries to the collection"""
        # NB: this docstring is emitted verbatim in the report (and split on
        # ':' by the HTML formatter), so leave it untouched.
        info("Service Document: %s" % self.collection.context().collection)
        # i18n.atom is a template with %-placeholders for an index and a
        # timestamp, making each created entry unique.
        body = get_test_data("i18n.atom").encode("utf-8")
        h, b = self.collection.get()
        # Add in a slug and category if allowed.
        slugs = []
        ids = []
        added_entries = {}
        for i in range(3):
            info("Create new entry #%d" % (i+1))
            # Random 10-letter slug so slug honoring can be detected later.
            slugs.append("".join([random.choice("abcdefghijkl") for x in range(10)]))
            h, b = self.collection.create(headers = {
                'content-type': 'application/atom+xml',
                'slug': slugs[i]
                },
                body = body % (i+1, repr(time.time())))
            check_create_response(h, b)
            (entry_id, entry) = get_entry_id(self.collection.context(), h, b)
            ids.append(entry_id)
            added_entries[entry_id] = entry
            # Sleep between creations so app:edited timestamps differ and
            # the expected ordering is well-defined.
            if i < 2:
                time.sleep(1.1)
        entries = list(self.collection.iter_entry())
        # Entries should appear in reverse chronological order of their creation.
        ids.reverse()
        # Confirm the order
        check_order_of_entries(entries, ids)
        # Retrieve the second entry added
        entry = added_entries[ids[1]]
        e = entry.etree()
        if e == None:
            raise StopTest
        # Check the slug and links
        check_entry_slug(e, slugs[1])
        check_entry_links(e, ismedia=False)
        e.find(atompubbase.model.ATOM_TITLE).text = "Internationalization - 2"
        info("Update entry #2 and write back to the collection")
        h, b = entry.put()
        check_update_response(h, b, "Entry #2")
        # Confirm new order: the updated entry should now sort first, since
        # its app:edited changed.
        ids[0:0] = ids[1:2]
        del ids[2]
        check_order_of_entries(self.collection.iter_entry(), ids)
        # Remove Entries by atom:id
        remove_entries_by_id(self.collection.iter(), ids)
class MediaCollectionTests(Test):
    # Test suite run against an AtomPub collection that accepts media
    # (GIF image) members.

    def __init__(self, collection):
        Test.__init__(self)
        self.collection = collection

    def testBasic_Media_Manipulation(self):
        """Add and remove an image in the collection"""
        # NB: this docstring is emitted verbatim in the report (and split on
        # ':' by the HTML formatter), so leave it untouched.
        info("Service Document: %s" % self.collection.context().collection)
        body = get_test_data_raw("success.gif")
        info("Create new media entry")
        # Random 10-letter slug so slug honoring can be detected below.
        slug = "".join([random.choice("abcdefghijkl") for x in range(10)])
        h, b = self.collection.create(headers = {
            'content-type': 'image/gif',
            'slug': slug
            },
            body = body)
        check_create_response(h, b)
        entry_id, entry = get_entry_id(self.collection.context(), h, b)
        entries = list(self.collection.iter_entry())
        check_order_of_entries(entries, [entry_id])
        h, b = entry.get()
        e = entry.etree()
        if e == None:
            raise StopTest
        # Check the slug
        check_entry_slug(e, slug)
        check_entry_links(e, ismedia=True)
        # The media link entry may lack a title; add one if needed before
        # updating it.
        title = e.find(atompubbase.model.ATOM_TITLE)
        if None == title:
            title = SubElement(e, atompubbase.model.ATOM_TITLE)
        title.text = "Success"
        info("Update Media Link Entry and write back to the collection")
        h, b = entry.put()
        check_update_response(h, b, "Media Link Entry")
        # Remove Entry
        info("Remove entry")
        h, b = entry.delete()
        check_remove_response(h, b)
        success("Removed Media Entry")
class TestIntrospection(Test):
    # Entry point of the suite: reads the service document and dispatches
    # the entry and media collection tests against what it advertises.

    def __init__(self, uri, http):
        Test.__init__(self)
        self.http = http
        self.introspection_uri = uri

    def testEntry_Collection(self):
        """Find the first entry collection listed in an Introspection document and run the Entry collection tests against it."""
        # (Docstring is report content; kept verbatim.)
        ctx = Context(self.http, self.introspection_uri)
        svc = Service(ctx)
        entry_collections = list(svc.iter_match("application/atom+xml;type=entry"))
        if entry_collections:
            EntryCollectionTests(Collection(entry_collections[0])).run()
        else:
            info("Didn't find any Entry Collections to test")
        media_collections = list(svc.iter_match("image/gif"))
        if media_collections:
            MediaCollectionTests(Collection(media_collections[0])).run()
        else:
            info("Didn't find any Media Collections that would accept GIF images")
def main(options, cmd_line_args):
    # Run the AtomPub test suite against each target URI and return the
    # process exit status (0 = clean, 1 = warnings, 2 = errors).
    if options.debug:
        httplib2.debuglevel = 5
    if options.verbose:
        recorder.verbosity = 3
    if options.html:
        recorder.html = True
    http = httplib2.Http(MemoryCache())
    # Let exceptions surface instead of being converted into synthetic
    # 4xx/5xx Response objects.
    http.force_exception_to_status_code = False
    if options.credentials:
        atompubbase.auth.apply_credentials_file(options.credentials, http, error)
    # Optionally wrap the transport so all traffic is recorded to disk, or
    # replayed from a previous recording (no network needed).
    if options.record:
        from atompubbase.mockhttp import MockRecorder
        http = MockRecorder(http, options.record)
    elif options.playback:
        from atompubbase.mockhttp import MockHttp
        http = MockHttp(options.playback)
    if not cmd_line_args:
        cmd_line_args = [INTROSPECTION_URI]
    for target_uri in cmd_line_args:
        if not options.quiet:
            print >>sys.stderr, "Testing the service at <%s>" % target_uri
            print >>sys.stderr, "Running: ",
        test = TestIntrospection(target_uri, http)
        test.run()
        if not options.quiet:
            print >>sys.stderr, "\n\n",
    # Emit the accumulated report to stdout or to --output FILE.
    outfile = sys.stdout
    if options.output:
        outfile = file(options.output, "w")
    print >>outfile, recorder.tostr()
    status = 0
    if recorder.has_warnings:
        status = 1
    if recorder.has_errors:
        status = 2
    return status

if __name__ == '__main__':
    sys.exit(main(options, cmd_line_args))
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing."""

    # Table mapping API paths to the canned capture file that answers them
    # (replaces the long if/elif chain in handle_url).
    _ROUTES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        """Serve the canned XML capture for the requested API path."""
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        # (Removed the unused query parsing: the old
        #  "[pair[0] for pair in query]" iterated the dict's keys and
        #  collected each key's first character, and was never read.)
        response = self.handle_url(url)
        if response != None:
            self.send_200()
            try:
                shutil.copyfileobj(response, self.wfile)
            finally:
                # Close the capture file -- the original leaked the handle
                # on every request.
                response.close()
            self.wfile.close()

    # POST requests are handled exactly like GETs.
    do_POST = do_GET

    def handle_url(self, url):
        """Return an open file containing the canned response for *url*,
        or send a 404 and return None when the path is unknown."""
        path = self._ROUTES.get(url.path)
        if path is None:
            self.send_error(404)
            return None
        logging.warn('Using: %s' % path)
        return open(path)

    def send_200(self):
        """Write a 200 status line plus the text/xml content-type header."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
    # Serve the mock foursquare API forever. The listening port can be
    # given as the sole command-line argument; default is 8080.
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 8080
    server_address = ('0.0.0.0', port)
    httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
# Driver script: for each captured API example, regenerate the matching
# Java type and parser sources via gen_class.py / gen_parser.py.

# Where the generated Java sources live, relative to this script.
BASEDIR = '../main/src/com/joelapenna/foursquare'
# Directory of captured API type examples used as generator input.
TYPESDIR = '../captures/types/v1'

# Specific capture files can be passed on the command line; by default
# every capture in TYPESDIR is processed.
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)

for f in captures:
    # e.g. "check_in.xml" -> basename "check_in" -> Java name "CheckIn".
    basename = f.split('.')[0]
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    # The generators write to stdout; shell redirection captures it into
    # the target .java file.
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Host:port every signed API request is sent to.
SERVER = 'api.foursquare.com:80'
# All requests in this script are form-encoded POSTs.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# OAuth requests are signed with HMAC-SHA1.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that exchanges a username/password for an OAuth access token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract (token, secret) from an authexchange XML response body."""
    token = re.search('<oauth_token>(.*)</oauth_token>', auth_response).group(1)
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).group(1)
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build and HMAC-SHA1-sign a POST request for the authexchange endpoint."""
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters={'fs_username': username, 'fs_password': password})
    # No token yet -- this request is what obtains one.
    request.sign_request(SIGNATURE_METHOD, consumer, None)
    return request
def main():
url = urlparse.urlparse(sys.argv[1])
# Nevermind that the query can have repeated keys.
parameters = dict(urlparse.parse_qsl(url.query))
password_file = open(os.path.join(user.home, '.oget'))
lines = [line.strip() for line in password_file.readlines()]
if len(lines) == 4:
cons_key, cons_key_secret, username, password = lines
access_token = None
else:
cons_key, cons_key_secret, username, password, token, secret = lines
access_token = oauth.OAuthToken(token, secret)
consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
if not access_token:
oauth_request = create_signed_oauth_request(username, password, consumer)
connection = httplib.HTTPConnection(SERVER)
headers = {'Content-Type' :'application/x-www-form-urlencoded'}
connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
body=oauth_request.to_postdata(), headers=headers)
auth_response = connection.getresponse().read()
token = parse_auth_response(auth_response)
access_token = oauth.OAuthToken(*token)
open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
cons_key, cons_key_secret, username, password, token[0], token[1])))
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
access_token, http_method='POST', http_url=url.geturl(),
parameters=parameters)
oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
connection = httplib.HTTPConnection(SERVER)
connection.request(oauth_request.http_method, oauth_request.to_url(),
body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
print connection.getresponse().read()
#print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
# Outer Java source template for a generated parser class. Interpolated
# keys: %(type_name)s, %(top_node_name)s, %(timestamp)s, %(stanzas)s.
# NOTE(review): the template text is kept byte-for-byte as found; only the
# surrounding comments are new.
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""

# Per-attribute "} else if" branch templates, chosen by attribute type.
# GenerateClass strips the leading "} else " from the first stanza emitted.
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""

# A repeated sub-element parsed via GroupParser wrapping the element parser.
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""

# A nested complex type parsed by its own generated parser.
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""

# Default: a simple text element.
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Entry point: generate a parser for the capture XML named in argv[1]."""
    source_path = sys.argv[1]
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        source_path)
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one XML element."""
    # Class name derived from the top-level node, e.g. "venue" -> "Venue".
    type_name = ''.join(word.capitalize() for word in top_node_name.split('_'))
    # Class-style name for this element, e.g. "city_name" -> "CityName".
    camel_name = ''.join(word.capitalize() for word in name.split('_'))
    # Capitalized lowercase form, e.g. "Cityname".
    attribute_name = camel_name.lower().capitalize()
    # Java member-field name, e.g. "mCityName".
    field_name = 'm' + camel_name
    # Parser class for group children: use the declared child type when
    # present, otherwise guess by stripping a trailing plural character.
    if children[0]:
        sub_parser = children[0] + 'Parser'
    else:
        sub_parser = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser,
        'sub_type': children[0],
    }
# Run as a script; importing the module (e.g. for Replacements) stays side-effect free.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type markers recognized by the code generators (see TYPES below).
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursquareTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

# Extra per-class import lines for generated Java types (all disabled).
CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Element types that map to their own generated Java classes;
# WalkNodesForAttributes does not descend into these.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# Every recognized type marker; anything else is treated as STRING.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    Expects a document shaped like:

    <venue>
    <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {'attribute': (type, [child])} where type is one of TYPES
          (or STRING) and child is the element's 'child' XML attribute.
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # level > 0 means we are inside a COMPLEX element and are skipping its
    # subtree: count nested starts/ends until we leave it.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use.
            # The first start element is the document root; it names the type.
            if type_name is None:
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                # Arm the subtree-skipping counter above.
                level = 1
            elif typ not in TYPES:
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: only the first occurrence of a tag name is recorded.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
# -*- coding: UTF-8 -*-
import os, sys
# Directory containing this script, with a trailing '/' (empty string when
# the script was invoked without any path component).
script_parts = sys.argv[0].split('/')
home_dir = '/'.join(script_parts[:-1] + [''])

# Regenerate the keyword list consumed by the crawlers below.
os.system('python %slist/make_kwl.py' % (home_dir))
# API-based sources.
os.system('python %spy/meituan.py' % (home_dir))
os.system('python %spy/aibang.py' % (home_dir))
os.system('python %spy/58.py' % (home_dir))
#os.system('python %spy/kutuan.py' % (home_dir))
#os.system('python %spy/cocotuan.py' % (home_dir))
os.system('python %spy/ynet.py' % (home_dir))
# Web-scraping sources.
os.system('python %spy/qq.py' % (home_dir))
os.system('python %spy/nuomi.py' % (home_dir))
os.system('python %spy/manzuo.py' % (home_dir))
#os.system('python %spy/24quan.py' % (home_dir))
os.system('python %spy/55tuan.py' % (home_dir))
os.system('python %spy/zhaozhe.py' % (home_dir))
| Python |
import sys
# Directory containing this script, with a trailing '/'.
script_parts = sys.argv[0].split('/')
home_dir = '/'.join(script_parts[:-1] + [''])

# Convert the "key:value" lines of 'keyword' into a Python dict literal
# written to 'keywordlist'.
fin = open('%s/keyword' % (home_dir), 'r')
fout = open('%s/keywordlist' % (home_dir), 'w')
fout.write('{\n')
first_entry = True
while True:
    # Stop at EOF or at the first empty line; tolerate CRLF endings.
    line = fin.readline().replace('\r', '').replace('\n', '')
    if not line:
        break
    if not first_entry:
        fout.write(',\n')
    fields = line.split(':')
    fout.write("'%s':'%s'" % (fields[0], fields[1]))
    first_entry = False
fout.write('\n}')
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import urllib2
from deal_info_api import *
def deal_city(tuan, city):
    """Fetch Meituan deals for one city and write them to the site's XML file.

    Args:
      tuan: source-site key, e.g. 'meituan' (used in the output filename).
      city: Meituan city identifier taken from the /divisions API.
    """
    # judge_begin/judge/judge_end and the deal_info_* helpers come from
    # deal_info_api (star import). NOTE(review): f_id/d_id appear to track
    # already-recorded deal ids and orig the previously written file body --
    # TODO confirm against deal_info_api.
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.meituan.com/api/v1/%s/deals' % (city))
    root_url = ET.fromstring(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deals = root_url.getiterator('deal')
    for deal in deals:
        # Skip deals judged as already recorded.
        if judge(f_id, d_id, deal, 'id'):
            continue
        f_xml.write('\t' + '<deal>\n')
        # Source metadata: key, display name (unicode), homepage, referral URL.
        deal_info_source(f_xml, 'meituan', '美团网'.decode('utf-8'), 'http://www.meituan.com/', 'http://www.meituan.com/r/i2188')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        # Copy simple fields (source tag -> output tag, at indent depth 2).
        deal_info_add(f_xml, deal, 'id', 'id', 2)
        deal_info_add(f_xml, deal, 'title', 'name', 2)
        deal_info_cats(f_xml, deal, 'title')
        deal_info_vendor_1(f_xml, deal)
        deal_info_add(f_xml, deal, 'start_date', 'start_time', 2)
        deal_info_add(f_xml, deal, 'end_date', 'end_time', 2)
        deal_info_add(f_xml, deal, 'value', 'o_price', 2)
        deal_info_add(f_xml, deal, 'price', 'c_price', 2)
        deal_info_discount_and_save_money(f_xml, deal, 'price', 'value')
        deal_info_add(f_xml, deal, 'large_image_url', 'img', 2)
        deal_info_add(f_xml, deal, 'deal_url', 'url', 2)
        f_xml.write('\t' + '</deal>\n')
    # Append the previously saved content; orig[8:] presumably drops the old
    # '<deals>\n' header -- TODO confirm against judge_begin.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Enumerate every Meituan city division and crawl each city's deals.
# Runs at import time (this file is executed as a script by the runner).
f_city = urllib2.urlopen('http://www.meituan.com/api/v1/divisions')
root_city = ET.fromstring(f_city.read())
city_id = root_city.getiterator('id')
for node in city_id:
    deal_city('meituan', node.text)
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.8.1"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # This element becomes the new last child of its parent.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        # Swap this element for replaceWith at the same tree position.
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def replaceWithChildren(self):
        # Splice this element's own children into its place.
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        # Inserting at a fixed index in reverse order restores the
        # children's original order.
        for child in reversedChildren:
            myParent.insert(myIndex, child)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        # Insert newChild at the given index, converting plain strings to
        # NavigableStrings and rewiring next/previous/sibling links.
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Walk up until an ancestor with a next sibling is found; that
            # sibling is the next element in document order.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Delegate to the matching findAll* method with limit=1.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True)
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name')
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replace the %SOUP-ENCODING% placeholder with the actual encoding.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A text node: a unicode string that also participates in the parse
    tree via the PageElement navigation machinery."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: re-create from the encoded byte-string form.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # encoding=None returns the unicode object itself.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    # Text that renders wrapped in a CDATA section.
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
    # A <?...?> node; a %SOUP-ENCODING% placeholder inside it is replaced
    # with the output encoding at render time.
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        output = self
        if "%SOUP-ENCODING%" in output:
            output = self.substituteEncoding(output, encoding)
        return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
    # Text that renders wrapped in an HTML/XML comment.
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
    # Text that renders wrapped in a <!...> declaration (e.g. DOCTYPE).
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        # Render each attribute as key="value", choosing the quote
        # style per-value below.
        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        # Self-closing tags render as "<foo />" and have no close tag.
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the soup's root [document] tag, whose
            # hidden flag is set in reset()) render only their contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            # Keep the close tag on its own line when pretty-printing.
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def decompose(self):
        """Recursively destroys the contents of this tree.

        Detaches this tag from its parent, then walks every descendant
        in document order, emptying each Tag's contents list and
        severing all parse-tree links so the nodes can be garbage
        collected even if user code still holds references to some of
        them."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            # Grab the successor first: the links are about to be cut.
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    'name' and each value in 'attrs' may be a string, a list of strings,
    a compiled regular expression object, or a callable; 'text' matches
    text nodes the same way."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for 'attrs' is shorthand for a CSS class match.
        if isinstance(attrs, basestring):
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                # Copy before updating so the caller's dict (or a
                # mutable default) is never modified.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Match a tag, given either a Tag object (as markupName) or a
        name string plus an attribute list. Returns the matched object
        or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name is invoked with (name, attrs) instead of
        # being matched as a string -- but only when we were given a
        # raw name, not a Tag.
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Build the attribute map lazily, only if there are
                # attribute criteria to check.
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k, v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Match any kind of markup node: a list of nodes, a Tag, or a
        text node. Returns the matched node or None."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            # Parenthesized raise form: the original comma form is a
            # syntax error on Python 3.
            raise Exception("I don't know how to match against a %s"
                            % markup.__class__)
        return found

    def _matches(self, markup, matchAgainst):
        """Core matching rule: compare one value (a Tag, string, or
        None) against one criterion (True, callable, regexp, list,
        dict, or string)."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # Dict-like: test key membership. The original called
                # markup.has_key(matchAgainst), which raised
                # AttributeError because markup is a plain string by
                # this point; the intent is "is markup a key of the
                # dict".
                result = markup in matchAgainst
            elif matchAgainst and isinstance(markup, basestring):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # The original called list.__init__([]), which initialized a
        # throwaway empty list instead of this instance. Initializing
        # self is the intended (if equally no-op-on-empty) call.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps.

    Map arguments are merged as-is; list/tuple arguments map each item
    to 'default'; scalar arguments map the scalar itself to 'default'.
    Later arguments win on key collisions."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for k, v in portion.items():
                built[k] = v
        elif hasattr(portion, '__iter__') and not isinstance(portion, str):
            #It's a list. Map each item to the default. The isinstance
            #guard keeps single strings in the scalar branch on
            #Python 3, where str grew an __iter__ method; on Python 2
            #str has no __iter__, so behavior is unchanged there.
            for k in portion:
                built[k] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):

    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Per-dialect tag behavior tables; subclasses override these.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied to raw markup before feeding it to sgmllib:
    # add the space sgmllib needs in "<br/>", and strip the stray
    # whitespace in "<! --Comment-->".
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""

        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            # StopParsing is raised by start_meta when a re-feed with a
            # newly discovered encoding makes the first pass moot.
            self._feed(isHTML=isHTML)
        except StopParsing:
            pass
        self.markup = None                 # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Converts the stored markup to Unicode, massages it, and runs
        it through the SGML parser to build the tree."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # markupMassage=True selects the default fixups; an
                # iterable supplies custom (regex, replacement) pairs.
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        if methodName.startswith('start_') or methodName.startswith('end_') \
               or methodName.startswith('do_'):
            return SGMLParser.__getattr__(self, methodName)
        elif not methodName.startswith('__'):
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        """Re-initializes this soup as an empty root Tag and resets all
        parser state, so the same object can be fed fresh markup."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        # The root [document] tag is hidden: it renders only its contents.
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        """Pops the innermost open tag; the new stack top becomes the
        current tag."""
        tag = self.tagStack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Appends the tag to the current tag's contents and makes it
        the new current (innermost open) tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flushes accumulated character data into the tree as a node
        of the given containerClass (NavigableString by default)."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace runs to a single '\n' or
            # ' ', unless we're inside a whitespace-preserving tag.
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # When a SoupStrainer is active, top-level text it rejects
            # is dropped entirely.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The root tag is never popped.
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):

        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """

        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the innermost tag outwards.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):

                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        """sgmllib callback: a start tag (or, when selfClosing is true,
        an empty-element tag) was encountered."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # When a SoupStrainer is active, top-level tags it rejects are
        # not built at all.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        """sgmllib callback: an end tag was encountered."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        """sgmllib callback: accumulate character data; it is flushed
        to the tree by endData()."""
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
            data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA section: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        """Same as BeautifulStoneSoup, but defaults smart-quote handling
        to HTML entities and flags the markup as HTML so that encoding
        sniffing inspects META tags."""
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base', 'col'))

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        # Leave a substitution slot; the real encoding
                        # is plugged in when the tag is rendered.
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort the now-obsolete first parsing pass.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort a parsing pass early -- e.g. by start_meta after
    it discovers the document's real encoding and re-feeds the markup.
    Caught and swallowed in BeautifulStoneSoup.__init__."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # 'strong' and 'big' were each listed twice in the original tuple;
    # the duplicates were harmless (buildTagMap produces a dict) but
    # redundant, and have been removed.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'var', 'b')

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap('noscript') passes 'noscript' as the
    # *default* argument with zero tag arguments, so this evaluates to
    # {} -- i.e. NO tag resets nesting. If 'noscript' was meant to be a
    # nesting-reset tag this should read buildTagMap(None, 'noscript');
    # confirm intent before changing, since {} is the long-standing
    # shipped behavior.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # Before the ordinary pop, maybe promote the closing tag's lone
        # string child to an attribute on its parent.
        if len(self.tagStack) > 1:
            child = self.tagStack[-1]
            holder = self.tagStack[-2]
            holder._getAttrMap()
            promotable = (isinstance(child, Tag)
                          and len(child.contents) == 1
                          and isinstance(child.contents[0], NavigableString)
                          and not holder.attrMap.has_key(child.name))
            if promotable:
                holder[child.name] = child.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): mutable default for overrideEncodings -- only safe
        # because the list is never mutated here.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already unicode (or trivially empty): nothing to convert.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        # Try encodings in priority order: caller-supplied overrides, then
        # the document's declared encoding and the byte-signature sniff,
        # then chardet's guess, then utf-8 / windows-1252 fallbacks.
        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break
        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # Tuple entries are (HTML entity name, hex code point).
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        # Attempt to decode self.markup as *proposed*; returns the unicode
        # markup on success, None on failure.  Each codec is tried once.
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present); a BOM also overrides the
        # caller-supplied encoding, since it is unambiguous.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the leading byte signature / BOM.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # Any decoding failure during sniffing: proceed with the raw
            # data and no sniffed encoding.
            xml_encoding_match = None
        # Look for an explicit XML declaration: <?xml ... encoding="..."?>
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            # ...or, for HTML, a <meta ... charset=...> declaration.
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                # The sniffed byte signature is more specific than these
                # generic declared names, so prefer it.
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        # Resolve a charset name to a codec Python knows about, trying the
        # alias table and hyphenless/underscored variants; falls back to
        # returning the input unchanged.
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return *charset* if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Translation table for the EBCDIC branch above; built lazily on
    # first use and cached on the class.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" punctuation: byte -> (entity name, hex code
    # point).  Plain-string values are substituted literally.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Parse an HTML document from stdin and pretty-print it.
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import os, sys
def get_home_dir():
    """Derive the project root from the script's own path.

    Drops the last two '/'-separated components of sys.argv[0] and keeps
    a trailing slash after every remaining part, e.g.
    '/opt/app/bin/run.py' -> '/opt/app/'.  Returns '' when argv[0] has
    fewer than two separators.
    """
    parts = sys.argv[0].split('/')
    return ''.join(part + '/' for part in parts[:-2])
def deal_info_add(f_xml, subdeal, li_from, li_to, tabsum):
    """Copy the text of every <li_from> element under *subdeal* into the
    output as a <li_to> element, indented *tabsum* tab stops."""
    # NOTE(review): getiterator() is the ElementTree 1.2 / Python 2 API;
    # it was removed in Python 3.9 (use Element.iter() on modern Python).
    subdeal_li = subdeal.getiterator(li_from)
    for node in subdeal_li:
        # Encoded to UTF-8 bytes: the output file is written in binary/str mode.
        f_xml.write(('\t' * tabsum + '<%s>%s</%s>\n' % (li_to, node.text, li_to)).encode('utf-8'))
def deal_info_source(f_xml, name, full, url, invite):
    """Write the <source> element describing the deal provider: short
    name, full (display) title, site URL and invite/referral URL."""
    f_xml.write('\t' * 2 + '<source>\n')
    for tag, value in (('name', name), ('full', full),
                       ('url', url), ('invite', invite)):
        f_xml.write(('\t' * 3 + '<%s>%s</%s>\n' % (tag, value, tag)).encode('utf-8'))
    f_xml.write('\t' * 2 + '</source>\n')
def deal_info_cats(f_xml, deal, source):
    """Categorize the deal: concatenate the text of every <source>
    element as the title, scan it against the keyword file, and write
    the matching category (default "other")."""
    deal_title = deal.getiterator(source)
    title = ''
    for node in deal_title:
        title += node.text
    f_kw = open('%slist/keyword' % (get_home_dir()), 'r')
    # Read the keyword file line by line; each line starts "keyword:".
    while 1 + 1 == 2:
        keyword = f_kw.readline()
        if not len(keyword):
            break
        keyword = keyword.split(':')
        if title.find(keyword[0].decode('utf-8')) > -1:
            f_kwl = open('%slist/keywordlist' % (get_home_dir()), 'r')
            # NOTE(review): eval() of a local data file -- the keywordlist
            # file holds a Python dict literal (keyword -> category) and
            # must be trusted.
            keywordlist = eval(f_kwl.read())
            f_xml.write(('\t' * 2 + '<cats>%s</cats>\n' % (keywordlist[keyword[0]].decode('utf-8'))).encode('utf-8'))
            return
    f_xml.write('\t' * 2 + '<cats>other</cats>\n')
def deal_info_vendor_1(f_xml, deal):
    """Write the <vendor> block from feed elements (vendor_name /
    vendor_website_url); when the URL is missing, fall back to a Google
    search link for the vendor name."""
    f_xml.write('\t' * 2 + '<vendor>\n')
    # NOTE(review): relies on getiterator() returning a list (Python 2
    # ElementTree); len() would fail on the lazy iterator of iter().
    vendor_name = deal.getiterator('vendor_name')
    if len(vendor_name):
        f_xml.write(('\t' * 3 + '<name>%s</name>\n' % (vendor_name[0].text)).encode('utf-8'))
    vendor_url = deal.getiterator('vendor_website_url')
    if len(vendor_url):
        if str(vendor_url[0].text) == 'None':
            # No website in the feed: link to a Google search instead.
            f_xml.write(('\t' * 3 + '<url><![CDATA[https://encrypted.google.com/search?hl=zh-CN&q=%s]]></url>\n' % (vendor_name[0].text)).encode('utf-8'))
        else:
            f_xml.write(('\t' * 3 + '<url>%s</url>\n' % (vendor_url[0].text)).encode('utf-8'))
    f_xml.write('\t' * 2 + '</vendor>\n')
def deal_info_vendor_2(f_xml):
    """Write an empty <vendor> placeholder for feeds that carry no
    vendor information."""
    placeholder = ('\t' * 2 + '<vendor>\n',
                   '\t' * 3 + '<name></name>\n',
                   '\t' * 3 + '<url></url>\n',
                   '\t' * 2 + '</vendor>\n')
    for line in placeholder:
        f_xml.write(line)
def deal_info_discount_and_save_money(f_xml, deal, price, value):
    """Write <discount> (price/value*10, the Chinese "zhe" scale) and
    <save_money> (value - price) from the feed's price/value elements."""
    deal_price = deal.getiterator(price)
    deal_value = deal.getiterator(value)
    f_xml.write('\t' * 2 + '<discount>%0.1f</discount>\n' % (float(deal_price[0].text) / float(deal_value[0].text) * 10))
    f_xml.write('\t' * 2 + '<save_money>%0.1f</save_money>\n' % (float(deal_value[0].text) - float(deal_price[0].text)))
def deal_info_img(f_xml, deal, image):
    """Write every <image> URL from the feed as an <img> element wrapped
    in CDATA (URLs may contain '&')."""
    deal_image = deal.getiterator(image)
    for node in deal_image:
        f_xml.write(('\t' * 2 + '<img><![CDATA[%s]]></img>\n' % (node.text)).encode('utf-8'))
def judge(f_id, d_id, deal, source):
    """Return 1 when the deal's id (text of its first <source> element)
    is already recorded in *d_id*; otherwise append it to the id file
    *f_id* and return 0."""
    deal_id = deal.getiterator(source)
    if d_id.find(deal_id[0].text) > -1:
        return 1
    f_id.write((deal_id[0].text + '\n').encode('utf-8'))
    return 0
def judge_begin(tuan, city):
    """Prepare the output files for one (site, city) crawl.

    Ensures xml/<tuan>_<city>.xml exists (seeding it with an empty
    <deals> document), snapshots it to .bak, makes sure the per-city id
    list exists, and returns (f_id, d_id, orig):
      f_id -- id-list file handle (read + append),
      d_id -- ids already recorded (one per line),
      orig -- the previous XML document text from the snapshot.
    """
    if not os.path.isfile('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city)):
        # Seed a minimal document so the later orig[8:] slicing (which
        # strips the leading "<deals>\n") always has content to append.
        os.system('echo "<deals>\n</deals>" > %sxml/%s_%s.xml' % (get_home_dir(), tuan, city))
    os.system('cp %sxml/%s_%s.xml %sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city, get_home_dir(), tuan, city))
    f_orig = open('%sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city), 'r')
    orig = f_orig.read()
    f_orig.close()
    if not os.path.isfile('%slist/%s_%s_id' % (get_home_dir(), tuan, city)):
        os.system('touch %slist/%s_%s_id' % (get_home_dir(), tuan, city))
    # NOTE(review): mode 'r+a' is non-standard; behavior depends on the C
    # library -- 'a+' was probably intended.  Confirm before changing.
    f_id = open('%slist/%s_%s_id' % (get_home_dir(), tuan, city), 'r+a')
    d_id = f_id.read()
    return (f_id, d_id, orig)
def judge_end(tuan, city):
    """Clean up after a crawl: if the generated XML still contains no
    deals (only the empty skeleton), delete it and its .bak snapshot."""
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'r')
    d_xml = f_xml.read()
    f_xml.close()
    if d_xml.find('<deals>\n</deals>') > -1:
        os.system('rm %sxml/%s_%s.xml' % (get_home_dir(), tuan, city))
        os.system('rm %sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city))
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city, city_id):
    """Scrape today's 55tuan deal for one city and append it to the
    per-city XML feed (xml/<tuan>_<city>.xml)."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.55tuan.com/?c=%s' % (city_id))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deal_id = soup.find(attrs={'name':'group_buy_id'})['value']
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, '55tuan', '窝窝团'.decode('utf-8'), 'http://www.55tuan.com/', 'http://www.55tuan.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'class':'biaoti'})('span')[1].contents[0]
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        deal_vendor_name = soup.find(attrs={'class':'adress'}).p.strong.span.contents[0]
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        # Bug fix: the page text is unicode, so the currency sign must be
        # decoded from this file's UTF-8 source bytes before replace();
        # a raw byte string triggers an implicit ASCII decode and raises
        # UnicodeDecodeError in Python 2.  This also matches every other
        # scraper in the project (24quan, nuomi, zhaozhe).
        deal_value = soup.find(attrs={'class':'deal-discount'})('tr')[1]('td')[0].contents[0].replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        deal_price = soup.find(attrs={'class':'buy'}).ul.li.contents[0].replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = 'http://www.55tuan.com/' + soup.find(attrs={'class':'pic'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://www.55tuan.com/tuan-%s' % (deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Beijing only for now; the other cities are disabled.
deal_city('55tuan', 'beijing', '1')
#deal_city('55tuan', 'shanghai', '2')
#deal_city('55tuan', 'guangzhou', '3')
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city, ename):
    """Scrape today's 24quan deal for one city (site name *ename*) and
    append it to the per-city XML feed."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.24quan.com/city.php?ename=%s&r=' % (ename))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # The deal id is embedded in the "today" link's href.
    deal_id = soup.find(attrs={'class':'deal-today-link'})['href'].replace('/team.php?id=', '')
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, '24quan', '24券'.decode('utf-8'), 'http://www.24quan.com/', 'http://www.24quan.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'id':'deal-title'}).contents[0]
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        deal_vendor_name = soup.find(attrs={'id':'side-business'}).h2.contents[0]
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        # Strip the currency sign (decoded from this file's UTF-8 bytes).
        deal_value = soup.find(attrs={'class':'deal-discount'})('tr')[1]('td')[0].contents[0].replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        deal_price = soup.find(attrs={'class':'deal-price'}).strong.contents[0].replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = soup.find(attrs={'class':'deal-buy-cover-img'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://www.24quan.com/team.php?id=%s' % (deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# One call per supported city: (site key, our city key, site city name).
deal_city('24quan', 'beijing', 'bj')
deal_city('24quan', 'shanghai', 'sh')
deal_city('24quan', 'wuhan', 'wh')
deal_city('24quan', 'changsha', 'cs')
deal_city('24quan', 'shenzhen', 'sz')
deal_city('24quan', 'zhengzhou', 'zz')
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city):
    """Scrape today's nuomi deal for one city and append it to the
    per-city XML feed."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.nuomi.com/%s' % (city))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # Recover the deal id from the URL embedded in the renren share link.
    deal_id = soup.find(attrs={'class':'renren'})['href'].replace('http://share.renren.com/share/buttonshare?link=http%3A%2F%2Fwww.nuomi.com%2F', '').replace('.html', '')
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'nuomi', '糯米网'.decode('utf-8'), 'http://www.nuomi.com/', 'http://www.nuomi.com/i/1FM1nZnU')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'class':'deal-main'}).h1.contents[0]
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        # The page exposes no vendor; write an empty name.
        deal_vendor_name = ''
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        deal_value = soup.find(attrs={'class':'original'}).contents[0].replace('¥'.decode('utf-8'), '').replace(',', '')
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        # Price is derived as value minus the amount shown in the
        # discount table cell -- presumably the "saved" figure; confirm
        # against the live page layout.
        deal_price = str(float(deal_value) - float(soup.find(attrs={'class':'deal-discount'}).table('tr')[1]('td')[2].strong.contents[0].replace('¥'.decode('utf-8'), '').replace(',', '')))
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = soup.find(attrs={'class':'product-pic'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://www.nuomi.com/%s.html' % (deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
deal_city('nuomi', 'beijing')
deal_city('nuomi', 'shanghai')
deal_city('nuomi', 'hangzhou')
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import urllib2
from deal_info_api import *
def deal_info_time(f_xml, subdeal, li_from, li_to, tabsum, hms):
    """Parse a Chinese date such as u'2011年5月1日' from the first
    <li_from> element and write it as '<li_to>YYYY-M-D hms+0800</li_to>'."""
    subdeal_li = subdeal.getiterator(li_from)
    zt = subdeal_li[0].text
    # Split on the CJK characters for year / month / day.
    year = zt.split('年'.decode('utf-8'))
    month = year[1].split('月'.decode('utf-8'))
    day = month[1].split('日'.decode('utf-8'))
    f_xml.write(('\t' * tabsum + '<%s>%s-%s-%s %s+0800</%s>\n' % (li_to, year[0], month[0], day[0], hms, li_to)).encode('utf-8'))
def deal_info_url(f_xml, deal, city_enname):
    """Write the deal's public URL built from the city's English name
    and the deal id."""
    deal_id = deal.getiterator('id')
    f_xml.write('\t' * 2 + '<url>http://t.58.com/home/%s/%s</url>\n' % (city_enname, deal_id[0].text))
def deal_city(tuan, city_node, city, city_id, city_enname):
    """Write every deal from one <city> node of the 58.com feed into
    the per-city XML file."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deals = city_node.getiterator('product')
    for deal in deals:
        # Skip deals whose id has already been recorded.
        if judge(f_id, d_id, deal, 'id'):
            continue
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, '58', '58同城'.decode('utf-8'), 'http://t.58.com/', 'http://t.58.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, deal, 'id', 'id', 2)
        deal_info_add(f_xml, deal, 'name', 'name', 2)
        deal_info_cats(f_xml, deal, 'name')
        deal_info_vendor_1(f_xml, deal)
        # Feed dates carry no time of day: expand to full-day bounds.
        deal_info_time(f_xml, deal, 'begin_date', 'start_time', 2, '00:00:00')
        deal_info_time(f_xml, deal, 'end_date', 'end_time', 2, '23:59:59')
        deal_info_add(f_xml, deal, 'market_price', 'o_price', 2)
        deal_info_add(f_xml, deal, 'group_price', 'c_price', 2)
        deal_info_discount_and_save_money(f_xml, deal, 'group_price', 'market_price')
        deal_info_img(f_xml, deal, 'image')
        deal_info_url(f_xml, deal, city_enname)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Map site city names (UTF-8) to our canonical city keys.
city_list = open('%slist/citylist' % (get_home_dir()), 'r')
# NOTE(review): eval() on a local data file -- the file must be trusted.
cities = eval(city_list.read())
f_url = urllib2.urlopen('http://t.58.com/api/products')
root_url = ET.fromstring(f_url.read())
root = root_url.getiterator('city')
for node in root:
    city_id = node.getiterator('id')
    city_name = node.getiterator('name')
    city_listname = node.getiterator('listname')
    try:
        deal_city('58', node, cities[city_name[0].text.encode('utf-8')], city_id[0].text, city_listname[0].text)
    except KeyError:
        # Unknown city: log it.  Bug fix: the format string has four %s
        # placeholders but only three values were supplied, and the home
        # directory was passed as an invalid second positional argument
        # to os.system() -- both raised TypeError.  Supply get_home_dir()
        # inside the tuple for the second "%serror.log" instead.
        os.system(('date >> %serror.log && echo "%s KeyError: %s\n" >> %serror.log' % (get_home_dir(), '58', city_name[0].text, get_home_dir())).encode('utf-8'))
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city):
    """Scrape today's zhaozhe deal for one city and append it to the
    per-city XML feed."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.zhaozhe.com/%s/index.html' % (city))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # Recover the deal id from the renren share link's URL-encoded href.
    deal_id = soup.find(attrs={'class':'renren'})['href'].split('link=')[1].replace('http%3a%2f%2fwww.zhaozhe.com%2fshare%2fbeijing%2fr0%2f', '').replace('.html', '')
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'zhaozhe', '找折'.decode('utf-8'), 'http://www.zhaozhe.com/', 'http://www.zhaozhe.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'class':'deal-title'}).h1.contents[2]
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        # Strip layout whitespace from the scraped strings.
        deal_vendor_name = soup.find(attrs={'class':'clientinfo'}).h2.contents[0].replace('\r', '').replace('\n', '').replace(' ', '')
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        deal_value = soup.find(attrs={'class':'discount-font'}).contents[0].replace('\r', '').replace('\n', '').replace(' ', '').replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        deal_price = soup.find(attrs={'class':'discount-price'}).contents[0].replace('\r', '').replace('\n', '').replace(' ', '').replace('¥'.decode('utf-8'), '')
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = 'http://www.zhaozhe.com' + soup.find(attrs={'id':'img_deals'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://www.zhaozhe.com/%s/%s.html' % (city, deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
deal_city('zhaozhe', 'beijing')
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import os, time
from deal_info_api import get_home_dir, deal_info_source, judge_begin, judge_end
def deal_info_add(f_xml, li, text, tabsum):
    """Write one simple element, <li>text</li>, indented *tabsum* tabs."""
    element = '%s<%s>%s</%s>\n' % ('\t' * tabsum, li, text, li)
    f_xml.write(element.encode('utf-8'))
def deal_info_source(f_xml, name, full, url, invite):
    """Write the <source> element describing the deal provider: short
    name, full (display) title, site URL and invite/referral URL."""
    f_xml.write('\t' * 2 + '<source>\n')
    for tag, value in (('name', name), ('full', full),
                       ('url', url), ('invite', invite)):
        f_xml.write(('\t' * 3 + '<%s>%s</%s>\n' % (tag, value, tag)).encode('utf-8'))
    f_xml.write('\t' * 2 + '</source>\n')
def deal_info_cats(f_xml, title):
    """Categorize the deal by matching *title* against the keyword file;
    writes <cats>other</cats> when nothing matches."""
    f_kw = open('%slist/keyword' % (get_home_dir()), 'r')
    # Read the keyword file line by line; each line starts "keyword:".
    while 1 + 1 == 2:
        keyword = f_kw.readline()
        if not len(keyword):
            break
        keyword = keyword.split(':')
        if title.find(keyword[0].decode('utf-8')) > -1:
            f_kwl = open('%slist/keywordlist' % (get_home_dir()), 'r')
            # NOTE(review): eval() of a local data file -- the keywordlist
            # file holds a Python dict literal (keyword -> category) and
            # must be trusted.
            keywordlist = eval(f_kwl.read())
            f_xml.write(('\t' * 2 + '<cats>%s</cats>\n' % (keywordlist[keyword[0]].decode('utf-8'))).encode('utf-8'))
            return
    f_xml.write('\t' * 2 + '<cats>other</cats>\n')
def deal_info_vendor(f_xml, text):
    """Write the <vendor> element: the vendor name plus either an empty
    <url> (when the name is unknown) or a Google-search link for it."""
    f_xml.write('\t' * 2 + '<vendor>\n')
    f_xml.write(('\t' * 3 + '<name>%s</name>\n' % (text)).encode('utf-8'))
    if text == '':
        f_xml.write('\t' * 3 + '<url></url>\n')
    else:
        # Bug fix: encode the whole formatted line, not just the name.
        # Previously "... % (text).encode('utf-8')" bound encode() to
        # text alone (method call binds tighter than %), leaving the
        # surrounding line unencoded -- inconsistent with every other
        # writer in this module.
        f_xml.write(('\t' * 3 + '<url><![CDATA[https://encrypted.google.com/search?hl=zh-CN&q=%s]]></url>\n' % (text)).encode('utf-8'))
    f_xml.write('\t' * 2 + '</vendor>\n')
def deal_info_time(f_xml):
    """Write start/end times spanning the whole of "today" in UTC+8."""
    # gmtime(now + 8h) approximates Beijing local time without relying
    # on the host timezone.
    ut = time.gmtime(time.time() + 28800)
    t = '%Y-%m-%d'
    f_xml.write('\t' * 2 + '<start_time>%s 00:00:00+0800</start_time>\n' % (time.strftime(t, ut)))
    f_xml.write('\t' * 2 + '<end_time>%s 23:59:59+0800</end_time>\n' % (time.strftime(t, ut)))
def deal_info_discount_and_save_money(f_xml, price, value):
    """Write <discount> (price/value*10, the Chinese "zhe" scale) and
    <save_money> (value - price), both to one decimal place."""
    paid = float(price)
    worth = float(value)
    f_xml.write('\t' * 2 + '<discount>%0.1f</discount>\n' % (paid / worth * 10))
    f_xml.write('\t' * 2 + '<save_money>%0.1f</save_money>\n' % (worth - paid))
def deal_info_img(f_xml, img):
    """Write the image URL as an <img> element wrapped in CDATA (URLs
    may contain '&')."""
    line = '\t' * 2 + '<img><![CDATA[%s]]></img>\n' % (img,)
    f_xml.write(line.encode('utf-8'))
def judge(f_id, d_id, deal_id):
    """Return 1 when *deal_id* is already present in the recorded ids
    *d_id*; otherwise append it to the id file *f_id* and return 0."""
    if deal_id in d_id:
        return 1
    f_id.write((deal_id + '\n').encode('utf-8'))
    return 0
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import urllib2, time
from deal_info_api import *
def deal_info_time(f_xml, deal, li_from, li_to, tabsum):
    """Write <start_time> as "now" (UTC+8) and <li_to> from the feed's
    <li_from> element."""
    # gmtime(now + 8h) approximates Beijing local time.
    ut = time.gmtime(time.time() + 28800)
    t = '%Y-%m-%d %X'
    f_xml.write(('\t' * tabsum + '<start_time>%s+0800</start_time>\n' % (time.strftime(t, ut))).encode('utf-8'))
    deal_end_time = deal.getiterator(li_from)
    f_xml.write(('\t' * tabsum + '<%s>%s+0800</%s>\n' % (li_to, deal_end_time[0].text, li_to)).encode('utf-8'))
def deal_city(tuan, city):
    """Write today's cocotuan deals for one city (namespaced XML API
    feed) into the per-city XML file."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.cocotuan.com/api/cocotuan.asmx/GetTodayProductsWithoutFullDescriptionByCity?cityName=%s' % (city))
    root_url = ET.fromstring(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # Element names carry the feed's XML namespace.
    deals = root_url.getiterator('{http://www.cocotuan.com/}' + 'Product')
    for deal in deals:
        # The deal Url doubles as its unique id.
        if judge(f_id, d_id, deal, '{http://www.cocotuan.com/}' + 'Url'):
            continue
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'cocotuan', '可可团'.decode('utf-8'), 'http://www.cocotuan.com/', 'http://www.cocotuan.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}Url', 'id', 2)
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}Name', 'name', 2)
        deal_info_cats(f_xml, deal, '{http://www.cocotuan.com/}Name')
        deal_info_vendor_2(f_xml)
        deal_info_time(f_xml, deal, '{http://www.cocotuan.com/}AlertTime', 'end_time', 2)
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}MarketPrice', 'o_price', 2)
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}LocalPrice', 'c_price', 2)
        deal_info_discount_and_save_money(f_xml, deal, '{http://www.cocotuan.com/}LocalPrice', '{http://www.cocotuan.com/}MarketPrice')
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}Picture', 'img', 2)
        deal_info_add(f_xml, deal, '{http://www.cocotuan.com/}Url', 'url', 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Crawl every city the site reports as opened.
f_city = urllib2.urlopen('http://www.cocotuan.com/api/cocotuan.asmx/GetOpenedCities')
root_city = ET.fromstring(f_city.read())
city_id = root_city.getiterator('{http://www.cocotuan.com/}CityID')
for node in city_id:
    deal_city('cocotuan', node.text.lower())
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import urllib2
from deal_info_api import *
def deal_info_url(f_xml, deal, url):
    """Write every <url> value from the feed wrapped in CDATA (URLs may
    contain '&')."""
    deal_url = deal.getiterator(url)
    for node in deal_url:
        f_xml.write(('\t' * 2 + '<url><![CDATA[%s]]></url>\n' % (node.text)).encode('utf-8'))
def deal_city(tuan, city, city_id):
    """Write every deal from the ynet API feed for one city into the
    per-city XML file."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://tuangou.ynet.com/index.php?m=API&a=goods&city=%s' % (city_id))
    root_url = ET.fromstring(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deals = root_url.getiterator('goods')
    for deal in deals:
        # Skip deals whose id has already been recorded.
        if judge(f_id, d_id, deal, 'id'):
            continue
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'ynet', '北青团购'.decode('utf-8'), 'http://tuangou.ynet.com/', 'http://tuangou.ynet.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, deal, 'id', 'id', 2)
        deal_info_add(f_xml, deal, 'title', 'name', 2)
        deal_info_cats(f_xml, deal, 'title')
        # The feed carries no vendor info; write the empty placeholder.
        deal_info_vendor_2(f_xml,)
        deal_info_add(f_xml, deal, 'begintime', 'start_time', 2)
        deal_info_add(f_xml, deal, 'endtime', 'end_time', 2)
        deal_info_add(f_xml, deal, 'marketprice', 'o_price', 2)
        deal_info_add(f_xml, deal, 'groupprice', 'c_price', 2)
        deal_info_discount_and_save_money(f_xml, deal, 'groupprice', 'marketprice')
        deal_info_img(f_xml, deal, 'bigimg')
        deal_info_url(f_xml, deal, 'url')
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Map site city names (UTF-8) to our canonical city keys.
city_list = open('%slist/citylist' % (get_home_dir()), 'r')
# NOTE(review): eval() on a local data file -- the file must be trusted.
cities = eval(city_list.read())
f_city = urllib2.urlopen('http://tuangou.ynet.com/index.php?m=API&a=citys')
root_city = ET.fromstring(f_city.read())
city_id = root_city.getiterator('id')
city_name = root_city.getiterator('name')
for i in range(len(city_name)):
    try:
        deal_city('ynet', cities[city_name[i].text.encode('utf-8')], city_id[i].text)
    except KeyError:
        # Unknown city: log it.  Bug fix: the format string has four %s
        # placeholders but only three values were supplied, and the home
        # directory was passed as an invalid second positional argument
        # to os.system() -- both raised TypeError.  Supply get_home_dir()
        # inside the tuple for the second "%serror.log" instead.
        os.system(('date >> %serror.log && echo "%s KeyError: %s\n" >> %serror.log' % (get_home_dir(), 'ynet', city_name[i].text, get_home_dir())).encode('utf-8'))
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import urllib2
from deal_info_api import *
def deal_info_id(f_xml, deal, city):
    """Write the deal's <id> element(s).  Returns 1 (after deleting the
    half-written XML file) when the feed contains no id, 0 on success."""
    deal_id = deal.getiterator('id')
    if str(deal_id[0].text) == 'None':
        # Empty feed: discard the partial file and signal the caller to
        # abort this city.
        f_xml.close()
        os.system('rm %sxml/aibang_%s.xml' % (get_home_dir(), city))
        return 1
    for node in deal_id:
        f_xml.write(('\t' * 2 + '<id>%s</id>\n' % (node.text)).encode('utf-8'))
    return 0
def deal_city(tuan, city, city_id):
    """Write every deal from the aibang API feed for one city into the
    per-city XML file."""
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://tuan.aibang.com/api/v1/%s/deals' % (city_id))
    root_url = ET.fromstring(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deals = root_url.getiterator('deal')
    for deal in deals:
        # Skip deals whose id has already been recorded.
        if judge(f_id, d_id, deal, 'id'):
            continue
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'aibang', '爱帮网'.decode('utf-8'), 'http://tuan.aibang.com/', 'http://tuan.aibang.com/t/722313')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        # An id-less feed aborts the whole city (the partial file is
        # removed inside deal_info_id).
        if deal_info_id(f_xml, deal, city):
            return
        deal_info_add(f_xml, deal, 'title', 'name', 2)
        deal_info_cats(f_xml, deal, 'title')
        deal_info_vendor_1(f_xml, deal)
        deal_info_add(f_xml, deal, 'start_date', 'start_time', 2)
        deal_info_add(f_xml, deal, 'end_date', 'end_time', 2)
        deal_info_add(f_xml, deal, 'value', 'o_price', 2)
        deal_info_add(f_xml, deal, 'price', 'c_price', 2)
        deal_info_discount_and_save_money(f_xml, deal, 'price', 'value')
        deal_info_img(f_xml, deal, 'large_image_url')
        deal_info_add(f_xml, deal, 'deal_url', 'url', 2)
        f_xml.write('\t' + '</deal>\n')
    # Re-append everything after the previous file's "<deals>\n" header.
    f_xml.write(orig[8 : ])
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Map site city names (UTF-8) to our canonical city keys.
city_list = open('%slist/citylist' % (get_home_dir()), 'r')
# NOTE(review): eval() on a local data file -- the file must be trusted.
cities = eval(city_list.read())
f_city = urllib2.urlopen('http://tuan.aibang.com/api/v1/divisions')
root_city = ET.fromstring(f_city.read())
city_id = root_city.getiterator('id')
city_name = root_city.getiterator('city')
for i in range(len(city_name)):
    try:
        deal_city('aibang', cities[city_name[i].text.encode('utf-8')], city_id[i].text)
    except KeyError:
        # Unknown city: log it.  Bug fix: the format string has four %s
        # placeholders but only three values were supplied, and the home
        # directory was passed as an invalid second positional argument
        # to os.system() -- both raised TypeError.  Supply get_home_dir()
        # inside the tuple for the second "%serror.log" instead.
        os.system(('date >> %serror.log && echo "%s KeyError: %s\n" >> %serror.log' % (get_home_dir(), 'aibang', city_name[i].text, get_home_dir())).encode('utf-8'))
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city):
    """Scrape the manzuo front page for <city> and append today's single deal
    to xml/<tuan>_<city>.xml unless its id was already recorded.

    The previous run's output (minus its '<deals>\n' header) is appended so
    older deals are preserved.
    """
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://www.manzuo.com/%s/index.htm' % (city))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # The deal id is buried in the share button's onclick handler arguments.
    deal_id = soup.find(attrs={'class':'ainput share share_s'})['onclick'].split(',')[2].split(')')[0].replace("'", '')
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'Manzuo', '满座网'.decode('utf-8'), 'http://www.manzuo.com/', 'http://www.manzuo.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'class':'con_title'}).contents[1]
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        deal_vendor_name = soup.find(attrs={'class':'con_18_2'}).h3.contents[0]
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        # o_price = original value, c_price = current (discounted) price
        deal_value = soup.find(attrs={'class':'con_04 flo_le'}).table('td')[0].b.contents[0]
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        deal_price = soup.find(attrs={'class':'new_con_buy_01'}).span.contents[0]
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = soup.find(attrs={'class':'con_12'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://www.manzuo.com/his/%s.htm' % (deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    f_xml.write(orig[8 : ])  # previous output without its leading '<deals>\n'
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
# Crawl the manzuo front page for each supported city, in the same order
# as before.
for manzuo_city in ('beijing', 'shanghai', 'qingdao', 'hangzhou'):
    deal_city('manzuo', manzuo_city)
| Python |
# -*- coding: UTF-8 -*-
from xml.etree import ElementTree as ET
import os, urllib2, time
from deal_info_api import *
def deal_info_time(f_xml, subdeal, li_from, li_to, tabsum):
    """Convert epoch-seconds element <li_from> of subdeal into an element
    <li_to> formatted as 'YYYY-MM-DD HH:MM:SS+0800'.

    The feed's timestamps are UTC epoch seconds; adding 28800 (8h) before
    formatting with gmtime yields Beijing local time, matching the literal
    '+0800' suffix. tabsum is the indentation depth (number of tabs).
    """
    subdeal_li = subdeal.getiterator(li_from)
    ut = time.gmtime(int(subdeal_li[0].text) + 28800)
    t = '%Y-%m-%d %X'
    f_xml.write(('\t' * tabsum + '<%s>%s+0800</%s>\n' % (li_to, time.strftime(t, ut), li_to)).encode('utf-8'))
def deal_city(city_node, city):
    """Append one kutuan deal (city_node) to xml/kutuan_<city>.xml.

    The file is opened in append mode: the surrounding script later wraps the
    accumulated <deal> fragments in a <deals> root and deduplicates them
    against previous runs.

    Fix: the file handle was never closed, leaking one descriptor per deal
    and risking unflushed buffered output.
    """
    f_xml = open('%sxml/kutuan_%s.xml' % (get_home_dir(), city), 'a')
    deal = city_node
    f_xml.write('\t' + '<deal>\n')
    deal_info_source(f_xml, 'kutuan', '酷团'.decode('utf-8'), 'http://www.kutuan.com/', 'http://www.kutuan.com/')
    f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
    deal_info_add(f_xml, deal, 'url', 'id', 2)  # the deal's url doubles as its id
    deal_info_add(f_xml, deal, 'title', 'name', 2)
    deal_info_cats(f_xml, deal, 'title')
    deal_info_vendor_2(f_xml)
    deal_info_time(f_xml, deal, 'beginTime', 'start_time', 2)
    deal_info_time(f_xml, deal, 'endTime', 'end_time', 2)
    deal_info_add(f_xml, deal, 'primaryPrice', 'o_price', 2)
    deal_info_add(f_xml, deal, 'price', 'c_price', 2)
    deal_info_discount_and_save_money(f_xml, deal, 'price', 'primaryPrice')
    deal_info_img(f_xml, deal, 'image')
    deal_info_add(f_xml, deal, 'url', 'url', 2)
    f_xml.write('\t' + '</deal>\n')
    f_xml.close()  # fix: handle was previously leaked
# Archive every kutuan xml file left by the previous run as *.orig so new
# deals can later be diffed against it.
os.system('ls %sxml/kutuan_*.xml > %slist/kutuan_xml_list' % (get_home_dir(), get_home_dir()))
xml_list = open('%slist/kutuan_xml_list' % (get_home_dir()), 'r')
while True:  # was the obfuscated "while 1 + 1 == 2"
    xml_file = xml_list.readline().replace('\n', '')
    if len(xml_file):
        # fix: `ls` was given the %sxml/ prefix, so each listed path already
        # contains get_home_dir(); prepending it a second time produced a
        # doubled, non-existent path and the mv could never succeed.
        os.system('mv %s %s.orig' % (xml_file, xml_file))
    else:
        break
xml_list.close()
os.system('rm %slist/kutuan_xml_list' % (get_home_dir()))
# Read the Chinese-name -> pinyin city map, then crawl the kutuan feed,
# appending each deal to its city's xml file and remembering which cities
# were actually seen (cities[0:city_sum]).
f_city_list = open('%slist/citylist' % (get_home_dir()), 'r')
city_list = eval(f_city_list.read())  # NOTE(review): eval of a local data file; input assumed trusted
f_url = urllib2.urlopen('http://www.kutuan.com/tuangou/api')
root_url = ET.fromstring(f_url.read())
cities = [0] * 1000
city_sum = 0
exist = '|'  # '|'-delimited membership string of cities already recorded
root = root_url.getiterator('tuangou')
for node in root:
    city_name = node.getiterator('city')
    try:
        deal_city(node, city_list[city_name[0].text.encode('utf-8')])
        if exist.find(city_list[city_name[0].text.encode('utf-8')]) == -1:
            cities[city_sum] = city_list[city_name[0].text.encode('utf-8')]
            exist += city_list[city_name[0].text.encode('utf-8')] + '|'
            city_sum += 1
    except KeyError:
        # Unknown city: append a line to the error log.
        # fix: get_home_dir() was passed as a bogus 2nd positional argument to
        # os.system() (TypeError) instead of filling the 4th %s placeholder,
        # which itself left the % formatting one argument short (TypeError).
        os.system(('date >> %serror.log && echo "%s KeyError: %s\n" >> %serror.log' % (get_home_dir(), 'kutuan', city_name[0].text, get_home_dir())).encode('utf-8'))
for i in range(city_sum):
    # Pass 1: wrap the per-deal fragments appended by deal_city in a
    # <deals> root element (via a .bak copy).
    os.system('mv %sxml/kutuan_%s.xml %sxml/kutuan_%s.xml.bak' % (get_home_dir(), cities[i], get_home_dir(), cities[i]))
    f_tmp = open('%sxml/kutuan_%s.xml' % (get_home_dir(), cities[i]), 'w')
    f_bak = open('%sxml/kutuan_%s.xml.bak' % (get_home_dir(), cities[i]), 'r')
    f_tmp.write('<deals>\n')
    f_tmp.write(f_bak.read())
    f_tmp.write('</deals>\n')
    f_tmp.close()
    f_bak.close()
    os.system('rm %sxml/kutuan_%s.xml.bak' % (get_home_dir(), cities[i]))
    # Pass 2: inline version of the judge_begin/judge/judge_end bookkeeping
    # used by the other crawlers -- drop deals whose id is already in the
    # per-city id list and append the previous run's output.
    tuan = 'kutuan'
    city = cities[i]
    os.system('cp %sxml/%s_%s.xml %sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city, get_home_dir(), tuan, city))
    f_bak = open('%sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city), 'r')
    bak = f_bak.read()
    f_bak.close()
    if not os.path.isfile('%sxml/%s_%s.xml.orig' % (get_home_dir(), tuan, city)):
        # seed an empty previous-run file on the first crawl of a city
        os.system('echo "<deals>\n</deals>" > %sxml/%s_%s.xml.orig' % (get_home_dir(), tuan, city))
    f_orig = open('%sxml/%s_%s.xml.orig' % (get_home_dir(), tuan, city), 'r')
    orig = f_orig.read()
    f_orig.close()
    if not os.path.isfile('%slist/%s_%s_id' % (get_home_dir(), tuan, city)):
        os.system('touch %slist/%s_%s_id' % (get_home_dir(), tuan, city))
    global f_id  # NOTE(review): `global` at module level is a no-op; kept as-is
    f_id = open('%slist/%s_%s_id' % (get_home_dir(), tuan, city), 'r+a')
    global d_id
    d_id = f_id.read()
    root = ET.fromstring(bak)
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    deals = root.getiterator('deal')
    for deal in deals:
        if judge(f_id, d_id, deal, 'id'):
            continue  # deal id already recorded by an earlier run
        f_xml.write('\t' + '<deal>\n')
        for x in deal:
            if x.tag == 'source' or x.tag == 'vendor':
                # these two elements have children and are copied recursively
                f_xml.write('\t' * 2 + '<%s>\n' % (x.tag))
                for y in x:
                    f_xml.write(('\t' * 3 +'<%s>%s</%s>\n' % (y.tag, y.text, y.tag)).encode('utf-8'))
                f_xml.write('\t' * 2 + '</%s>\n' % (x.tag))
            else:
                f_xml.write(('\t' * 2 +'<%s>%s</%s>\n' % (x.tag, x.text, x.tag)).encode('utf-8'))
        f_xml.write('\t' + '</deal>\n')
    f_xml.write(orig[8 : ])  # previous run's deals, minus the '<deals>\n' header
    f_xml.close()
    f_id.close()
    # Pass 3: remove files for cities that ended up with no deals at all.
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'r')
    d_xml = f_xml.read()
    f_xml.close()
    if d_xml.find('<deals>\n</deals>') > -1:
        # NOTE(review): indentation of these three lines was lost in this
        # copy; read as all inside the empty-output branch -- confirm the
        # .bak removal was not meant to run unconditionally
        os.system('rm %sxml/%s_%s.xml' % (get_home_dir(), tuan, city))
        os.system('rm %sxml/%s_%s.xml.orig' % (get_home_dir(), tuan, city))
        os.system('rm %sxml/%s_%s.xml.bak' % (get_home_dir(), tuan, city))
| Python |
# -*- coding: UTF-8 -*-
from BeautifulSoup import BeautifulSoup
import urllib2
from deal_info_web import *
def deal_city(tuan, city):
    """Scrape the tuan.qq.com front page for <city> and append today's single
    deal to xml/<tuan>_<city>.xml unless its id was already recorded.

    Fix/generalization: the buy-button prefix and the deal URL previously
    hard-coded 'shenzhen', so any other city produced a wrong id and a wrong
    link; both now use the city parameter (identical for the existing
    shenzhen caller).
    """
    (f_id, d_id, orig) = judge_begin(tuan, city)
    f_url = urllib2.urlopen('http://tuan.qq.com/%s/' % (city))
    soup = BeautifulSoup(f_url.read())
    f_xml = open('%sxml/%s_%s.xml' % (get_home_dir(), tuan, city), 'w')
    f_xml.write('<deals>\n')
    # The deal id is embedded in the buy button's onclick redirect.
    deal_id = soup.find(attrs={'class':'current_price'}).button['onclick'].replace('location.href="/%s/deal/buy/' % (city), '').replace('"', '')
    if not judge(f_id, d_id, deal_id):
        f_xml.write('\t' + '<deal>\n')
        deal_info_source(f_xml, 'QQ', 'QQ团购'.decode('utf-8'), 'http://tuan.qq.com/', 'http://tuan.qq.com/')
        f_xml.write('\t' * 2 + '<city>%s</city>\n' % city)
        deal_info_add(f_xml, 'id', deal_id, 2)
        deal_title = soup.find(attrs={'class':'tit'}).h3.contents[2].replace('\t', '').replace('\n', '')
        deal_info_add(f_xml, 'name', deal_title, 2)
        deal_info_cats(f_xml, deal_title)
        deal_vendor_name = soup.find(attrs={'class':'shop_address'}).h4.contents[0]
        deal_info_vendor(f_xml, deal_vendor_name)
        deal_info_time(f_xml)
        # o_price = original value, c_price = current (discounted) price
        deal_value = soup.find(attrs={'class':'primary_price'}).contents[1].contents[0].replace('¥','')
        deal_info_add(f_xml, 'o_price', deal_value, 2)
        deal_price = soup.find(attrs={'class':'current_price'}).span.contents[0].replace('¥', '')
        deal_info_add(f_xml, 'c_price', deal_price, 2)
        deal_info_discount_and_save_money(f_xml, deal_price, deal_value)
        deal_img = soup.find(attrs={'class':'photo'}).img['src']
        deal_info_img(f_xml, deal_img)
        deal_url = 'http://tuan.qq.com/%s/deal/show/%s' % (city, deal_id)
        deal_info_add(f_xml, 'url', deal_url, 2)
        f_xml.write('\t' + '</deal>\n')
    f_xml.write(orig[8 : ])  # previous output without its leading '<deals>\n'
    f_xml.close()
    f_id.close()
    judge_end(tuan, city)
deal_city('qq', 'shenzhen')
| Python |
#!/usr/bin/env python
# CGI endpoint: emits a JavaScript snippet defining `timeskew`, the offset in
# milliseconds between the client's clock and this server's clock at the
# moment the request was served.
import time
t = time.time()
u = time.gmtime(t)
# NOTE(review): %e and %T are platform-specific strftime extensions (not in
# the portable C89 set) -- confirm the deployment libc supports them.
s = time.strftime('%a, %e %b %Y %T GMT', u)
print 'Content-Type: text/javascript'
print 'Cache-Control: no-cache'
# Date == Expires, i.e. already expired: the skew must be recomputed on
# every request, never served from a cache.
print 'Date: ' + s
print 'Expires: ' + s
print ''
print 'var timeskew = new Date().getTime() - ' + str(t*1000) + ';'
| Python |
'''
Created on 21-03-2011
@author: maciek
'''
from formater import formatString
import os
class IndexGenerator(object):
    """Generates index.html for iOS app OTA distribution.

    Renders templates/index.tmpl (next to this module) with the application
    metadata supplied to the constructor and a generated download list.
    """
    basePath = os.path.dirname(__file__)
    templateFile = os.path.join(basePath,"templates/index.tmpl")
    # Class-level defaults; instances overwrite these in __init__.
    releaseUrls = ""
    appName = ""
    changeLog = ""
    description = ""
    version = ""
    release = ""
    def __init__(self,appName, releaseUrls, changeLog, description, version, releases):
        """Store the release metadata later rendered by get().

        releaseUrls and releases are comma-separated lists and must have the
        same number of entries (paired by position).
        """
        self.appName = appName
        self.releaseUrls = releaseUrls
        self.changeLog = changeLog
        self.description = description
        self.version = version
        self.releases = releases
    def get(self):
        """Return the index.html source rendered from the template file."""
        urls = self.releaseUrls.split(",")
        names = self.releases.split(",")
        # One <li> per release, linking to the matching download URL.
        fragments = []
        for idx, name in enumerate(names):
            fragments.append(" <li>\n")
            fragments.append(" <h3><a href=\"javascript:load('" + urls[idx] + "')\">" + name + "</a></h3>\n")
            fragments.append(" </li>\n")
        downloadsHtml = "".join(fragments)
        template = open(self.templateFile).read()
        return formatString(template,
                            downloads=downloadsHtml,
                            changeLog=self.changeLog,
                            appName=self.appName,
                            description=self.description,
                            version=self.version)
'''
Created on 21-03-2011
@author: maciek
'''
def formatString(format, **kwargs):
    """Substitute {name} placeholders from kwargs, leaving every other curly
    brace in the template literal.

    Works by shielding the known placeholders, escaping all remaining braces
    for str.format, restoring the placeholders, formatting, and finally
    un-escaping. Falsy input (None, '') yields ''.
    """
    if not format:
        return ''
    # Shield the placeholders we intend to fill.
    for name in kwargs.keys():
        format = format.replace("{" + name + "}", "##" + name + "##")
    # Escape every brace that is NOT one of our placeholders.
    format = format.replace("{", "{{").replace("}", "}}")
    # Restore the shielded placeholders so format() fills only those.
    for name in kwargs.keys():
        format = format.replace("##" + name + "##", "{" + name + "}")
    rendered = format.format(**kwargs)
    # Undo the escaping so stray braces come back out verbatim.
    return rendered.replace("{{", "{").replace("}}", "}")
'''
Created on 21-03-2011
@author: maciek
'''
from IndexGenerator import IndexGenerator
from optparse import OptionParser
import os
import tempfile
import shutil
import logging
logging.basicConfig(level = logging.DEBUG)
# Command-line interface. Every option is mandatory; optparse has no
# built-in "required" support, so missing options are rejected below.
parser = OptionParser()
parser.add_option('-n', '--app-name', action='store', dest='appName', help='aplication name')
parser.add_option('-u', '--release-urls', action='store', dest='releaseUrls', help='URLs of download files - as coma separated list of entrires')
parser.add_option('-d', '--destination-directory', action='store', dest='otaAppDir', help='Directory where OTA files are created')
parser.add_option('-v', '--version', action='store', dest='version', help='Version of the application')
parser.add_option('-r', '--releases', action='store', dest='releases', help='Release names of the application')
parser.add_option('-R', '--release-notes', action='store', dest='releaseNotes', help='Release notes of the application (in txt2tags format)')
parser.add_option('-D', '--description', action='store', dest='description', help='Description of the application (in txt2tags format)')
(options, args) = parser.parse_args()
if options.appName == None:
    parser.error("Please specify the appName.")
elif options.releaseUrls == None:
    parser.error("Please specify releaseUrls")
elif options.otaAppDir == None:
    parser.error("Please specify destination directory")
elif options.version == None:
    parser.error("Please specify version")
elif options.releases == None:
    parser.error("Please specify releases")
elif options.releaseNotes == None:
    parser.error("Please specify releaseNotes")
elif options.description == None:
    parser.error("Please specify description")
# Unpack into module-level names used by the helper functions below.
appName = options.appName
releaseUrls = options.releaseUrls
otaAppDir = options.otaAppDir
version = options.version
releases = options.releases
releaseNotes = options.releaseNotes
description = options.description
def findIconFilename():
    """Return the path of the Android launcher icon, preferring the highest
    density variant that exists on disk.

    Falls back to res/drawable/icon.png (unchecked) when no density-specific
    icon is found.
    """
    densitySpecific = (
        "res/drawable-hdpi/icon.png",
        "res/drawable-mdpi/icon.png",
        "res/drawable-ldpi/icon.png",
    )
    iconPath = "res/drawable/icon.png"
    for candidate in densitySpecific:
        if os.path.exists(candidate):
            iconPath = candidate
            break
    logging.debug("IconPath: "+iconPath)
    return iconPath
def createOTApackage():
    '''
    creates all needed files in tmp dir

    Reads the release-notes and description files named on the command line,
    renders index.html through IndexGenerator, and returns an open
    TemporaryFile rewound to the start so the caller can copy from it.
    '''
    releaseNotesContent = open(releaseNotes).read()
    descriptionContent = open(description).read()
    indexGenerator = IndexGenerator(appName, releaseUrls, releaseNotesContent, descriptionContent, version, releases)
    index = indexGenerator.get();
    tempIndexFile = tempfile.TemporaryFile()
    tempIndexFile.write(index)
    tempIndexFile.flush()
    tempIndexFile.seek(0)  # rewind so shutil.copyfileobj reads from the start
    return tempIndexFile
# Render the index and copy it, plus the app icon, into the OTA directory.
tempIndexFile = createOTApackage()
if not os.path.isdir(otaAppDir):
    logging.debug("creating dir: "+otaAppDir)
    os.mkdir(otaAppDir)
else:
    logging.warning("dir: "+otaAppDir+" exists")
indexFile = open(os.path.join(otaAppDir,"index.html"),'w')
shutil.copyfileobj(tempIndexFile, indexFile)
indexFile.close()  # fix: handle was never closed; ensure index.html is flushed
tempIndexFile.close()  # fix: releases (and deletes) the unnamed temp file
srcIconFileName = findIconFilename()
disIconFileName = os.path.join(otaAppDir,"Icon.png")
shutil.copy(srcIconFileName,disIconFileName)
| Python |
"""Will attempt to get a lock, and quit if unavailable.
Adapted from http://code.activestate.com/recipes/576572/"""
import contextlib, errno, os, sys
@contextlib.contextmanager
def flock(path):
try:
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError, e:
if e.errno != errno.EEXIST:
raise
print 'Cannot get lockfile; exiting.'
sys.exit(1)
try:
yield fd
finally:
os.unlink(path)
| Python |
from django.core.management import setup_environ
import settings
setup_environ(settings)
from server.feedme.models import *
import datetime
import sys
import numpy
import nltk
import time
from django.core.mail import EmailMultiAlternatives
from django.utils import html
from django.contrib.auth.models import User
from django.db.models import F
from datetime import timedelta
from django.template import Context, loader
from django.forms import EmailField, ValidationError
from django.conf import settings
# Maximum number of shared posts sampled into each survey email.
NUM_POSTS = 5
# We don't want to show up in statistics
admins = ['msbernst@mit.edu', 'marcua@csail.mit.edu',
          'karger@csail.mit.edu', 'rthe@media.mit.edu',
          'ericker@gmail.com']
# Addresses that must never receive the survey email (opt-outs/test users).
noreceive = [u'marcua@mit.edu', u'pooch@redwater.net', u'msbernst@gmail.com', u'marcua@csail.mit.edu', u'emax@csail.mit.edu', u'meredith.blumenstock@gmail.com', u'glittle@gmail.com', u'atibbetts@gmail.com', u'jdtibbs@gmail.com', u'cowling@csail.mit.edu']
if __name__ == "__main__":
email_validator = EmailField()
for receiver in Receiver.objects.exclude(user__email__in = noreceive):
try:
email_validator.clean(receiver.user.email)
except ValidationError:
print (receiver.user.email + ' is not a valid email').encode('ascii', 'backslashreplace')
continue
posts = SharedPost.objects \
.filter(sharedpostreceiver__receiver = receiver) \
.filter(sharedpostreceiver__sent = True) \
.exclude(sharer__user__email__in = admins) \
.distinct() \
.order_by('?')[:5]
# if posts.count() > NUM_POSTS:
# posts = posts[0:NUM_POSTS]
if posts.count() > 0:
context = Context({"shared_posts": posts})
template = loader.get_template("recipient_survey.html")
html_content = template.render(context)
plaintext_template = loader.get_template("recipient_survey_plaintext.html")
text_content = plaintext_template.render(context)
text_content = nltk.clean_html(text_content)
subject = u"FeedMe Survey---$30 Raffle for Participating"
to_emails = [receiver.user.email]
print (u'sending ' + subject + u' to ' + unicode(to_emails)).encode('ascii', 'backslashreplace')
#print (u'Text: ' + html_content).encode('ascii', 'backslashreplace')
#print "-------------"
from_email = settings.DEFAULT_FROM_EMAIL
email = EmailMultiAlternatives(subject, text_content, from_email, to_emails)
#email.attach_alternative(html_content, "text/html")
email.send()
time.sleep(1)
#sys.exit(0)
| Python |
# Minimal local mail sink for development: accepts SMTP on localhost:1025
# and dumps each message to stdout instead of delivering it anywhere
# (remote address None = no relay).
import smtpd
import asyncore
server = smtpd.DebuggingServer(('127.0.0.1', 1025), None)
asyncore.loop()  # blocks forever servicing connections
| Python |
from __future__ import with_statement
from django.core.management import setup_environ
import settings
setup_environ(settings)
from django.contrib.auth.models import User
from server.feedme.models import *
from django.db import transaction
import math
import datetime
import sys, codecs, traceback
from flock import flock
# set stdout to Unicode so we can write Unicode strings to stdout
# todo: create some sort of startup script which calls this
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
@transaction.commit_manually
def reindex_all():
    """Intended as an offline process -- creates term vectors to describe
    individuals, and attaches them to the individuals.

    Runs the whole rebuild in one manually-managed transaction: commit on
    success, log the traceback and roll back on any failure.
    """
    try:
        receivers = Receiver.objects.all()
        update_receivers(receivers)
    except:
        # bare except is the deliberate top-level boundary of this offline job
        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
        traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback, file=sys.stdout)
        transaction.rollback()
    else:
        print 'before committing: ' + str(datetime.datetime.now())
        transaction.commit()
        print 'after committing: ' + str(datetime.datetime.now())
@transaction.commit_manually
def incremental_update():
"""Updates the term vectors for anyone whose dirty bit is set"""
receivers = Receiver.objects.filter(term_vector_dirty = True)
print str(len(receivers)) + ' users to update incrementally'
for receiver in receivers:
# we do them individually to make any other blocking activity
# not need to wait quite as long
update_receivers([receiver])
transaction.commit()
def update_receivers(receivers):
    """Rebuild the tf*idf term vectors of the given receivers from scratch.

    Deletes their existing TermVectorCells, then runs four separate passes
    (tokenize, create terms, score tf*idf, trim) and finally clears each
    receiver's dirty bit.
    """
    # clear old vectors
    print 'deleting old vectors'
    TermVectorCell.objects.filter(receiver__in = receivers).delete()
    print 'deleted old vectors '
    print str(datetime.datetime.now())
    # do online updating of profiles of people who received the post
    # it is very inefficient to loop through three times, but this
    # guarantees that all the terms are added to all people
    # before tf*idf is calculated and before vectors are trimmed.
    print "beginning recalculation of tf-idf scores"
    token_dict = dict()
    # first cache all the tokens
    print "tokenizing"
    for receiver in receivers:
        #print " tokenizing " + receiver.user.username
        token_dict[receiver.user.username] = receiver.tokenize()
    print str(datetime.datetime.now())
    print "creating terms in database"
    for receiver in receivers:
        #print u' creating terms for: ' + receiver.user.username
        create_profile_terms(receiver, token_dict[receiver.user.username])
    print str(datetime.datetime.now())
    print "updating tf-idf"
    for receiver in receivers:
        #print u' preliminary tf*idf for: ' + receiver.user.username
        update_tf_idf(receiver, token_dict[receiver.user.username])
    print str(datetime.datetime.now())
    print "trimming to top terms"
    for receiver in receivers:
        #print u' trimming tf*idf for: ' + receiver.user.username
        trim_profile_terms(receiver)
    print str(datetime.datetime.now())
    print 'saving new info'
    for receiver in receivers:
        # vector is now up to date; clear the incremental-update flag
        receiver.term_vector_dirty = False
        receiver.save()
    print str(datetime.datetime.now())
def create_profile_terms(receiver, frequency_distribution):
    """creates profile terms for this user -- does not set tf-idf"""
    # print ' ' + str(len(frequency_distribution.samples())) + ' words'
    for frequency_item in frequency_distribution.items():
        # get or create the Term and the TermVectorCell
        try:
            term = Term.objects.get(term=frequency_item[0])
        except Term.DoesNotExist:
            term = Term(term=frequency_item[0])
            term.save()
        try:
            term_vector_cell = TermVectorCell.objects \
                .filter(receiver = receiver) \
                .get(term = term)
        except TermVectorCell.DoesNotExist:
            term_vector_cell = TermVectorCell(
                term=term,
                count=0,  # placeholder; tf*idf is filled in by update_tf_idf
                receiver = receiver)
            # NOTE(review): indentation was lost in this copy; read as saving
            # only newly-created cells -- confirm save() wasn't meant to run
            # for existing cells too
            term_vector_cell.save()
def update_tf_idf(receiver, frequency_distribution):
    """assumes that all terms have been updated for all users. updates tf*idf scores."""
    num_people = Receiver.objects.count() * 1.0 # we need a double
    for term_vector_cell in TermVectorCell.objects.filter(receiver = receiver).select_related('term'):
        term = term_vector_cell.term
        tf = frequency_distribution.freq(term.term)
        # document frequency: how many receivers' vectors contain this term
        num_receivers_with_term_shared = Receiver.objects.filter(termvectorcell__term = term).count()
        # +1 smoothing keeps the log argument positive and finite
        idf = math.log(num_people / (1 + num_receivers_with_term_shared))
        tf_idf = tf*idf
        term_vector_cell.count = tf_idf
        term_vector_cell.save()
def trim_profile_terms(receiver):
    """trims a person's term vector to just the top 100 terms by tf*idf score"""
    MAX_TERMS = 100
    query = TermVectorCell.objects.filter(receiver = receiver).order_by('-count')
    if query.count() > 0:
        # weight of the MAX_TERMS-th cell (or the smallest cell if fewer exist)
        cutoff = query[min(len(query), MAX_TERMS)-1].count
        # NOTE: count__lt keeps ties at the cutoff, so slightly more than
        # MAX_TERMS cells can survive
        TermVectorCell.objects.filter(receiver = receiver).filter(count__lt = cutoff).delete()
def describe_receiver(receiver):
    """Debug helper: print a receiver's term vector (term: tf*idf weight),
    highest weight first."""
    print u'describing ' + receiver.user.username
    vector = TermVectorCell.objects.filter(receiver = receiver) \
        .order_by('-count')
    for term_vector_cell in vector:
        output = term_vector_cell.term.term + \
            u': ' + unicode(term_vector_cell.count)
        print output.encode('utf-8')
    print '-------------------------------------------'
if __name__ == '__main__':
    # Two cron entry points: "incremental" refreshes only dirty receivers;
    # "reindex" prints a daily activity report then rebuilds everyone.
    if len(sys.argv) == 2:
        mode = str(sys.argv[1])
        lock_directory = '/tmp/'
        if mode == "incremental":
            # lock file guards against overlapping cron runs
            with flock(lock_directory + '.feedme-incremental-termvector'):
                incremental_update()
        elif mode == "reindex":
            with flock(lock_directory + '.feedme-reindex-termvector'):
                # ---- daily activity report over the last 24 hours ----
                print str(datetime.datetime.now()) + ' starting'
                yesterday = datetime.datetime.now() - datetime.timedelta(days = 1)
                newposts = SharedPost.objects \
                    .filter(sharedpostreceiver__time__gte = yesterday) \
                    .distinct()
                newpost_sharers = Sharer.objects \
                    .filter(sharedpost__in = newposts) \
                    .distinct()
                print str(newpost_sharers.count()) + ' people shared since yesterday'
                print str(newposts.count()) + ' shared posts since yesterday'
                sp_clicked = SharedPost.objects \
                    .filter(sharedpostreceiver__time__gte = yesterday) \
                    .filter(clickthroughs__gte = 1) \
                    .distinct()
                print str(sp_clicked.count()) + ' FeedMe links sent yesterday had at least one clickthrough to the link'
                sp_thanked = SharedPost.objects \
                    .filter(sharedpostreceiver__time__gte = yesterday) \
                    .filter(thanks__gte = 1) \
                    .distinct()
                print str(sp_thanked.count()) + ' FeedMe links sent yesterday had a thank you'
                logins = LoggedIn.objects.filter(time__gte = yesterday)
                print str(logins.count()) + ' GReader views/refreshes since yesterday'
                viewed = ViewedPost.objects.filter(time__gte = yesterday)
                print str(viewed.count()) + ' posts viewed since yesterday'
                clicked = ViewedPost.objects.filter(time__gte = yesterday) \
                    .filter(link_clickthrough = True)
                print str(clicked.count()) + ' GReader posts with clicked-through links yesterday'
                signups = Sharer.objects.filter(user__is_active = True) \
                    .filter(user__date_joined__gte = \
                        yesterday) \
                    .distinct() \
                    .count()
                print str(signups) + ' sharers signed up for FeedMe'
                print str(datetime.datetime.now())
                print
                # per-sharer breakdown of yesterday's shares
                print 'sharing records:'
                sharers = Sharer.objects.filter(sharedpost__in = newposts).distinct()
                for sharer in sharers:
                    shared_by_person = newposts.filter(sharer = sharer)
                    print sharer.user.email + ': ' \
                        + str(len(shared_by_person)) + ' posts'
                print str(datetime.datetime.now())
                # ---- the actual reindex ----
                print u'Updating receiver term vectors...'
                reindex_all()
                print str(datetime.datetime.now())
                print u'term vectors updated!'
    else:
        print 'Requires one argument: "incremental" or "reindex"'
| Python |
"""Disk And Execution MONitor (Daemon)
Configurable daemon behaviors:
1.) The current working directory set to the "/" directory.
2.) The current file creation mode mask set to 0.
3.) Close all open files (1024).
4.) Redirect standard I/O streams to "/dev/null".
A failed call to fork() now raises an exception.
References:
1) Advanced Programming in the Unix Environment: W. Richard Stevens
2) Unix Programming Frequently Asked Questions:
http://www.erlenstar.demon.co.uk/unix/faq_toc.html
"""
__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__revision__ = "$Id$"
__version__ = "0.2"
# Standard Python modules.
import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions.
# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0
# Default working directory for the daemon.
WORKDIR = "/"
# Default maximum for the number of available file descriptors.
MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default.
# (os.devnull only exists on Python 2.4+, hence the fallback literal.)
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"
def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Returns 0 in the daemonized grandchild; the original process and the
    intermediate first child both call os._exit() and never return. Raises
    Exception if either fork() fails. After this call the daemon's cwd is
    WORKDIR, its umask is UMASK, and fds 0-2 point at REDIRECT_TO.
    """
    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to insure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0): # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()
        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        # 1) When the process that dies is the session leader of a session that
        # is attached to a terminal device, SIGHUP is sent to all processes
        # in the foreground process group of that terminal device.
        # 2) When the death of a process causes a process group to become
        # orphaned, and one or more processes in the orphaned group are
        # stopped, then SIGHUP and SIGCONT are sent to all members of the
        # orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork() # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)
        if (pid == 0): # The second child.
            # Since the current working directory may be a mounted filesystem, we
            # avoid the issue of not being able to unmount the filesystem at
            # shutdown time by changing it to the root directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            os.umask(UMASK)
        else:
            # exit() or _exit()? See below.
            os._exit(0) # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        os._exit(0) # Exit parent of the first child.
    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exists, use
    # the default value (configurable).
    #
    # try:
    # maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    # maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    # maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    # maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is not limit on the
    # resource, use the default value.
    #
    import resource # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError: # ERROR, fd wasn't open to begin with (ignored)
            pass
    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.
    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1) # standard output (1)
    os.dup2(0, 2) # standard error (2)
    return(0)
if __name__ == "__main__":
retCode = createDaemon()
# The code, as is, will create a new file in the root directory, when
# executed with superuser privileges. The file will contain the following
# daemon related process parameters: return code, process ID, parent
# process group ID, session ID, user ID, effective user ID, real group ID,
# and the effective group ID. Notice the relationship between the daemon's
# process ID, process group ID, and its parent's process ID.
procParams = """
return code = %s
process ID = %s
parent process ID = %s
process group ID = %s
session ID = %s
user ID = %s
effective user ID = %s
real group ID = %s
effective group ID = %s
""" % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
os.getuid(), os.geteuid(), os.getgid(), os.getegid())
open("createDaemon.log", "w").write(procParams + "\n")
sys.exit(retCode) | Python |
from django.core.management import setup_environ
import settings
setup_environ(settings)
from django.contrib.auth.models import User
from server.feedme.models import *
import urllib
import feedparser
from django.utils import simplejson
import operator
from feedme.recommend import *
import datetime
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
def rank_posts():
    """Weekly digest job: for every receiver who opted into recommendations
    and received at least one share within the past week, rank the broadcast
    posts of that receiver's sharers by cosine similarity against the
    receiver's stored term vector, then email the top ten via
    send_digest_posts().
    """
    week_ago = datetime.datetime.now() - datetime.timedelta(days = 7)
    for receiver in Receiver.objects.filter(recommend = True):
        # Only recommend to people with recent activity (received a share
        # within the last week).
        if SharedPostReceiver.objects.filter(receiver = receiver).filter( \
            time__gte = week_ago).count() > 0:
            # Candidate posts: broadcasts from this receiver's sharers,
            # excluding the receiver's own posts and anything already shared
            # directly to them.
            sharers = Sharer.objects.filter(sharedpost__sharedpostreceiver__receiver = receiver)
            broadcasts = SharedPost.objects.filter(sharer__in = sharers) \
                .exclude(sharer__user = receiver.user) \
                .exclude(sharedpostreceiver__receiver = receiver) \
                .filter(broadcast = True)
            print str(len(broadcasts)) + ' posts to broadcast'
            if len(broadcasts) > 0:
                # COSINE RANKING
                # -------------------
                print 'reviewing friend: ' + receiver.user.username
                term_vector = TermVectorCell.objects.filter(receiver = receiver) \
                    .order_by('term__term').select_related('term')
                cosine_ranked_posts = []
                for sharedpost in broadcasts:
                    # Tokenize the post and score it against the receiver's
                    # term vector; a higher cosine means more relevant.
                    freq_dist_counts = sharedpost.post.tokenize()
                    freq_dist = sorted(freq_dist_counts)
                    cosine = cosine_distance(freq_dist = freq_dist, freq_dist_counts = freq_dist_counts, \
                        term_vector = term_vector)
                    cosine_ranked_posts.append((sharedpost.post, cosine))
                # Sort descending by score and send only the top ten.
                cosine_ranked_posts.sort(key=operator.itemgetter(1))
                cosine_ranked_posts.reverse()
                print 'friendrank for ' + receiver.user.username + ': '
                print cosine_ranked_posts[:10]
                send_digest_posts(cosine_ranked_posts[:10], receiver)
            else:
                print 'don\'t have enough posts'
        else:
            print receiver.user.username + ' hasn\'t received posts recently'
def send_digest_posts(posts, receiver):
    """Sends the list of posts in an email to the recipient.

    posts -- list of (post, score) tuples, best first; the top post's title
             becomes the subject line.
    receiver -- Receiver whose user.email is the destination address.
    """
    subject = u"FeedMe Personalized Newspaper: " + posts[0][0].title
    from_email = settings.DEFAULT_FROM_EMAIL
    to_emails = [receiver.user.email]
    print (u'sending ' + subject + u' to ' + unicode(to_emails)).encode('utf-8')
    # Build the HTML body: intro paragraph, one link per recommended post,
    # then a footer with contact / unsubscribe information.
    html_content = u''
    html_content += u'Your friends on FeedMe thought that these posts ' +\
                    u'might be interesting. We\'ve selected just the ones ' +\
                    u'you\'re most likely to be interested in, and we\'ll ' +\
                    u'send them to you weekly. (Though we have another design' +\
                    u' building on this in the works.)<br />'
    for post_array in posts:
        post = post_array[0]
        html_content += u"<a href='" + post.url + \
                        u"'>" + post.title + u"</a> " +\
                        u"[<a href='" + post.feed.rss_url + u"'>" + \
                        post.feed.title + u"</a>] <br />\n"
    html_content += u"<br /><br /><span style='color: gray'>Sent via FeedMe: " +\
                    u"a (very) alpha tool at MIT. Have comments, or are your " +\
                    u"friends spamming you? Email us at feedme@csail.mit.edu." +\
                    u"<br /><br /><a href='http://feedme.csail.mit.edu" +\
                    u"/unsubscribe/'>Change your e-mail receiving settings" +\
                    u"</a> to get only a digest, or never be recommended posts."
    print html_content.encode('utf-8')
    # Plain-text alternative for mail clients that don't render HTML.
    # NOTE(review): nltk is never imported in this module -- this line will
    # raise NameError at runtime unless nltk is in scope some other way; confirm.
    text_content = nltk.clean_html(html_content)
    email = EmailMultiAlternatives(subject, text_content, from_email, to_emails)
    email.attach_alternative(html_content, "text/html")
    email.send()
# Run the weekly recommendation/digest job when invoked as a script.
if __name__ == '__main__':
    rank_posts()
## # POSTRANK
## # ---------------
## # make the call to postrank -- set up post args correctly
## # http://www.postrank.com/developers/api#postrank
## posts = [sharedpost.post.url for sharedpost in broadcasts]
## post_args = dict()
## post_args['url[]'] = posts
## post_args = urllib.urlencode(post_args, True)
## url = 'http://api.postrank.com/v1/postrank?appkey=feedme.csail.mit.edu&format=json'
## url_handle = urllib.urlopen(url, post_args)
## # get the results back and sort by postrank score
## result_dict = simplejson.loads(url_handle.read())
## ranked_posts = []
## for post in result_dict.keys():
## ranked_posts.append((post, result_dict[post]['postrank']))
## ranked_posts.sort(key=operator.itemgetter(1))
## ranked_posts.reverse()
## print 'postrank: '
## print ranked_posts[:10]
| Python |
# encoding: utf-8
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import authenticate
try:
    # If you have django-registration installed, this is a form you can
    # use for users that signup.
    from registration.forms import RegistrationFormUniqueEmail
    from registration.forms import attrs_dict
    class EmailRegistrationForm(RegistrationFormUniqueEmail):
        # Registration form that hides the username field and reuses the
        # (unique) email address as the account's username on save.
        tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
                                 label=_(u'I understand FeedMe and agree to use this research software. (FeedMe has been reviewed by MIT\'s human subjects board; your data will not be shared.)'),
                                 error_messages={ 'required': u"You must agree to the terms to register" })
        def __init__(self, *args, **kwargs):
            super(EmailRegistrationForm, self).__init__(*args, **kwargs)
            # The email doubles as the username, so don't render that field.
            del self.fields['username']
        def save(self, *args, **kwargs):
            # Note: if the username column has not been altered to allow 75 chars, this will not
            # work for some long email addresses.
            self.cleaned_data['username'] = self.cleaned_data['email']
            return super(EmailRegistrationForm, self).save(*args, **kwargs)
except ImportError:
    # django-registration is optional; without it this form simply isn't defined.
    pass
class EmailLoginForm(forms.Form):
    """Login form keyed on an email address instead of a username.

    On success, the authenticated user is stored on ``self.user`` so the
    login view can pick it up.
    """
    email = forms.CharField(label=_("Email"), max_length=75, widget=forms.TextInput(attrs=dict(maxlength=75)))
    password = forms.CharField(label=_(u"Password"), widget=forms.PasswordInput)
    def clean(self):
        cleaned = self.cleaned_data
        # Only attempt authentication when both credentials were supplied;
        # otherwise the per-field "required" errors already apply.
        if not (cleaned.get('email') and cleaned.get('password')):
            return cleaned
        account = authenticate(username=cleaned['email'], password=cleaned['password'])
        if account is None:
            raise forms.ValidationError(_("Please enter a correct username and password. Note that both fields are case-sensitive."))
        if not account.is_active:
            raise forms.ValidationError(_("This account is inactive."))
        self.user = account # So the login view can access it
        return cleaned
| Python |
# encoding: utf-8
from django.conf import settings
from django.contrib.auth.models import User
from django import forms
# Shared EmailField instance; is_email() below reuses its validation logic.
dummy_field = forms.EmailField()
def is_email(username):
    """Return True when *username* parses as a valid e-mail address.

    Delegates to Django's EmailField validation (via the module-level
    dummy_field) rather than hand-rolling a regex.
    """
    try:
        dummy_field.clean(username)
    except forms.ValidationError:
        return False
    return True
# This is an authentication backend, that allows email addresses to be used as usernames,
# which the default auth backend doesn't.
class EmailOrUsernameModelBackend(object):
    """Authentication backend that accepts either an email address or a plain
    username, which the default ModelBackend doesn't.

    Keeping plain usernames working matters because superusers still need to
    reach the admin, whose login screen can't handle email addresses yet.
    """
    def authenticate(self, username=None, password=None):
        # Route the lookup through the appropriate column: email addresses
        # match on User.email, everything else on User.username.
        if is_email(username):
            lookup = {'email': username}
        else:
            lookup = {'username': username}
        try:
            candidate = User.objects.get(**lookup)
        except User.DoesNotExist:
            return None
        if candidate.check_password(password):
            return candidate
        # Wrong password: fall through and implicitly return None.
    def get_user(self, user_id):
        """Resolve a primary key back to a User, or None if it's gone."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
from django.conf.urls.defaults import *
# Login URL backed by the email-aware login view (email_usernames.views.email_login).
urlpatterns = patterns('email_usernames.views',
    url(r'^login/$', 'email_login', name="email-login"),
)
try:
    # Registration is only wired up when django-registration is installed.
    from registration.views import register
    from email_usernames.forms import EmailRegistrationForm
    urlpatterns += patterns('',
        url(r'^register/$', register, { 'form_class':EmailRegistrationForm }, name="email-register"),
    )
except ImportError:
    # django-registration absent: expose the login URL only.
    pass
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth import login
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from email_usernames.forms import EmailLoginForm
def email_login(request, template_name="registration/login.html", extra_context=None):
    """A generic view that you can use instead of the default auth.login view, for email logins.
    On GET:
        will render the specified template and pass an empty EmailLoginForm as 'form'
        in its context, that you can use for the login.
    On POST:
        will try to validate and authenticate the user, using the EmailLoginForm. Upon successful
        login, will redirect to whatever the standard LOGIN_REDIRECT_URL is set to, or the 'next'
        parameter, if specified."""
    if request.method == 'POST':
        login_form = EmailLoginForm(data=request.POST)
        if login_form.is_valid():
            # The user has been authenticated, so log in and redirect.
            # BUG FIX: auth.login() returns None; the old code bound its
            # result to an unused local, misleadingly implying it returned
            # the user object.
            login(request, login_form.user)
            # Redirect to the page pointed to by the 'next' param, or else the default.
            # NOTE(review): request.REQUEST also honors a GET 'next' on a POST
            # request and is deprecated in later Django -- confirm intent
            # before switching to request.POST.get.
            next_page = request.REQUEST.get('next', settings.LOGIN_REDIRECT_URL)
            return HttpResponseRedirect(next_page)
        # Invalid form: fall through and re-render with the bound form's errors.
    else:
        login_form = EmailLoginForm()
    context = { 'form':login_form }
    if extra_context is None: extra_context = {}
    for key, value in extra_context.items():
        # Callables are invoked lazily, mirroring Django's generic-view convention.
        if callable(value):
            context[key] = value()
        else:
            context[key] = value
    return render_to_response(template_name, context, context_instance=RequestContext(request))
| Python |
from django.db.models.signals import post_syncdb
# Prompt shown to the admin during syncdb (see query_fix_usertable below);
# the trailing "(y/N): " lets raw_input read the answer on the same line.
message = """
'django-email-accounts' has detected that you just installed Django's authentication system (django.auth).
For your convenience, django-email-accounts can alter the user table's username field to allow 75 characters instead
of the default 35 chars. Unless you do this, emails that are longer than 30 characters will be cut off, and this
app will probably not work!
NOTE: this will only work if the SQL user account you have created for django has the privileges
to run ALTER statements.
Do you want django-email-accounts to try to alter the auth_user.username column to allow 75 characters?
(y/N): """
def query_fix_usertable(sender, app, created_models, verbosity, interactive, **kwargs):
    """post_syncdb handler: interactively offer to widen auth_user.username
    from the default to varchar(75) so full email addresses fit as usernames.

    Only acts when the auth User model was just created and the run is
    interactive; requires the DB account to have ALTER privileges.
    """
    model_names = [m.__name__ for m in created_models]
    if not interactive or app.__name__ != 'django.contrib.auth.models' or "User" not in model_names:
        return
    answer = raw_input(message)
    while not answer.lower() in ('y', 'n', 'yes', 'no'):
        # BUG FIX: re-bind answer so an invalid response doesn't loop forever.
        answer = raw_input("You need to either decide yes ('y') or no ('n'). Default is no. (y/N): ")
    # BUG FIX: the original ran the ALTER unconditionally; honor a "no" answer
    # (which is also the documented default).
    if answer.lower() not in ('y', 'yes'):
        return
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute("ALTER TABLE auth_user MODIFY COLUMN username varchar(75) NOT NULL")
#post_syncdb.connect(query_fix_usertable)
| Python |
#!/usr/bin/env python
# Standard (pre-1.4 style) Django management entry point.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    # Fail loudly with guidance rather than an obscure traceback.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    # Delegate to Django's command dispatcher with this project's settings.
    execute_manager(settings)
import os

# --- Database ---------------------------------------------------------------
DATABASE_ENGINE = ''    # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''      # Or path to database file if using sqlite3.
DATABASE_USER = ''      # Not used with sqlite3.
DATABASE_PASSWORD = ''  # Not used with sqlite3.
DATABASE_HOST = ''      # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''      # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {}   # Set to empty dictionary for default.

# --- Email ------------------------------------------------------------------
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25

# Fill in with a unique, unpredictable value before deploying.
SECRET_KEY = ''

TEMPLATE_DIRS = ()

# BUG FIX: the template left the right-hand side blank ("DEBUG ="), which is a
# SyntaxError.  Default to the safe production value; set True for development.
DEBUG = False
TEMPLATE_DEBUG = DEBUG # Or override this

# Writable directory for unpacked Python eggs; adjust the path per host.
os.environ['PYTHON_EGG_CACHE'] = '/absolute/path/to/a/writable/directory/.python-eggs'
| Python |
from django.http import HttpResponse
from django.contrib.auth.models import User
from models import *
from django.contrib.auth.decorators import login_required
import datetime
from server.feedme.models import *
def start_email_daemon():
    """Creates a daemon to send emails every 20 seconds. Uses daemon.py
    to fork the process. Warning: under default settings, this won't write
    error messages to disk, so may just crash. I think there's a way to
    make it write errors somewhere.
    Source: http://code.activestate.com/recipes/278731/
    """
    from django.core.management import setup_environ
    import settings
    setup_environ(settings)
    import daemon
    # BUG FIX: time.sleep() below raised NameError -- this module never
    # imported time; bring it into scope here alongside the other
    # function-local imports.
    import time
    daemon.createDaemon()
    # NOTE(review): send_post_email is not defined or imported in this module;
    # confirm it is in scope at runtime before relying on this loop.
    while True:
        # Poll for shared posts with at least one receiver still unsent,
        # deliver them, then wait 20 seconds before polling again.
        unsent = SharedPost.objects \
            .filter(sharedpostreceiver__sent__exact = False)
        for shared_post in unsent:
            send_post_email(shared_post)
        time.sleep(20)
| Python |
from django.shortcuts import render_to_response
from models import *
import codecs, sys
import versionutils
# set stdout to Unicode so we can write Unicode strings to stdout
# (Python 2: wraps the byte stream in a utf-8 StreamWriter)
# todo: create some sort of startup script which calls this
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
def homepage(request):
    """Render the site index with the latest bookmarklet URL and current user."""
    context = {
        'feedme_url': versionutils.latest_url(),
        'user': request.user,
    }
    return render_to_response('index.html', context)
| Python |
from django.http import HttpResponse
from django.utils import simplejson
from django.contrib.auth.models import User
from models import *
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.db import transaction
from django.forms.fields import email_re
import math
import operator
import datetime
import time
from django.core.cache import cache
from django.shortcuts import render_to_response
def robots(request):
    """Serve robots.txt through the template loader as plain text."""
    response = render_to_response('robots.txt', mimetype='text/plain')
    return response
| Python |
from django.shortcuts import render_to_response
def bookmarklet_install(request):
    """Render the static bookmarklet installation instructions page."""
    page = render_to_response('bookmarklet_install.html')
    return page
| Python |
from django.utils import simplejson
import urllib2
def latest_url():
    """Fetch the current FeedMe client version descriptor and return its URL.

    The endpoint serves a JSONP-ish payload ("versionData = {...}"); strip
    the assignment prefix before parsing the remaining JSON.
    """
    response = urllib2.urlopen('http://groups.csail.mit.edu/haystack/feedme/current_version.js')
    try:
        html = response.read()
    finally:
        # BUG FIX: the HTTP handle was never closed, leaking the connection.
        response.close()
    html = html.replace("versionData = ", "")
    version = simplejson.loads(html)
    return version['url']
| Python |
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from html5lib import treebuilders
from BeautifulSoup import BeautifulSoup
import nltk
import re
import html5lib
import versionutils
@login_required
def bookmarklet(request):
    """Sanitize a shared post's HTML and render the bookmarklet share page.

    Expects POST fields: post_contents, post_url, post_title, feed_url,
    feed_title. Strips <script> tags and class/id/style attributes before
    rendering.
    """
    if 'post_contents' not in request.POST:
        return HttpResponse("Not a valid request.")
    text = request.POST['post_contents']
    parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
    soup = parser.parse(text)
    # kill all javascript
    # BUG FIX: the original called soup.script.extract(), which always removed
    # the *first* remaining <script> instead of the current loop element --
    # same net effect only by coincidence. Extract the loop variable itself.
    for script in soup("script"):
        script.extract()
    text = soup.prettify()
    new_request = request.POST.copy() # because the request dict is immutable
    new_request['post_contents'] = text
    new_request['bookmarklet'] = True
    # NOTE(review): new_request is built but never used below -- confirm
    # whether it was meant to be passed along before removing it.
    # Drop styling/identity attributes so the post renders cleanly in our page.
    # (Patterns made raw strings; the byte values are unchanged.)
    text = re.sub(r'class\s?=\s?".*?"', '', text)
    text = re.sub(r"class\s?=\s?'.*?'", '', text)
    text = re.sub(r'id\s?=\s?".*?"', '', text)
    text = re.sub(r"id\s?=\s?'.*?'", '', text)
    text = re.sub(r'style\s?=\s?".*?"', '', text)
    text = re.sub(r"style\s?=\s?'.*?'", '', text)
    return render_to_response('bookmarklet.html', \
        {
            'post_url' : request.POST['post_url'],
            'post_title' : request.POST['post_title'],
            'feed_url' : request.POST['feed_url'],
            'feed_title' : request.POST['feed_title'],
            'text' : text.decode('utf-8'),
            'jsurl' : versionutils.latest_url()
        })
| Python |
from django.http import HttpResponse
from django.utils import simplejson
from django.contrib.auth.models import User
from models import *
from django.contrib.auth.decorators import login_required
def address_book(request):
    """Return, as JSON, the email addresses of everyone this user has shared to.

    Anonymous users get an empty list.
    """
    # BUG FIX: is_authenticated is a method in this Django version; the bare
    # attribute reference was always truthy, so anonymous users incorrectly
    # took the authenticated branch.
    if request.user.is_authenticated():
        # Every Receiver who got at least one SharedPost from this user.
        receivers = Receiver.objects.filter(
            sharedpostreceiver__shared_post__sharer__user = request.user) \
            .distinct()
        response = [ { 'email': receiver.user.email } for receiver in receivers ]
    else:
        response = []
    response_json = simplejson.dumps(response)
    return HttpResponse(response_json, mimetype='application/json')
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.