code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import feedparser
import socket
import urllib2
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from django.utils import simplejson
from django.conf import settings
from django.utils.html import escape
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from feedmanager.models import FeedItem, FeedAbo, FeedData, FeedError
from feedmanager.parser import getEntryFingerprint, getEntryFingerprint_MD5
_FEEDMAIL_JSON_VERSION = '0.1'


def _getJSONData(request, value):
    """Wrap *value* in the site's JSON envelope and return it serialized.

    The envelope carries the site name and the protocol version so the
    JavaScript client can sanity-check responses.
    """
    envelope = {
        'type': settings.SITE_FULL_NAME,
        'version': _FEEDMAIL_JSON_VERSION,
        'data': value.strip(),
    }
    return simplejson.dumps(envelope)
@login_required
def feedmanager_get_feed_table(request):
    """Render the current user's subscribed feeds as an HTML table.

    The rendered table is returned JSON-wrapped for the AJAX frontend.
    """
    feed_list = [{'name': abo.feed.name,
                  'id': abo.feed.id,
                  'url': abo.feed.url,
                  'is_gone': abo.feed.is_gone}
                 for abo in FeedAbo.objects.filter(user=request.user)]
    # Case-insensitive sort by feed name for a stable display order.
    feed_list.sort(key=lambda feed: feed['name'].lower())
    table_html = render_to_string('feedmanager/feed_table.html',
                                  {'feed_list': feed_list},
                                  context_instance=RequestContext(request))
    return HttpResponse(_getJSONData(request, table_html),
                        mimetype="application/json")
@login_required
def feedmanager_remove_feed(request):
    """Remove the current user's subscription to the feed given by ?id=.

    When the last subscriber leaves, the feed and its error log entries
    are deleted as well.  Returns a JSON-wrapped 'success' message.
    """
    try:
        id = request.GET.get('id')
        feed = FeedItem.objects.get(id=id)
        abo = FeedAbo.objects.get(user=request.user, feed=feed)
    # Narrowed from a bare `except:` which hid real bugs: only "unknown
    # feed id", "not subscribed" or a non-numeric id are a client error.
    except (FeedItem.DoesNotExist, FeedAbo.DoesNotExist, ValueError):
        return HttpResponseForbidden()
    abo.delete()
    abos_left = FeedAbo.objects.filter(feed=feed)
    if not abos_left:
        # This was the last abo for this feed, so remove the feed completely
        # But first remove all errors associated with this feed
        errors = FeedError.objects.filter(feed=feed)
        errors.delete()
        feed.delete()
    return HttpResponse(_getJSONData(request, 'success'),
                        mimetype="application/json")
@login_required
def feedmanager_dashboard(request, is_dashboard=True):
    """Render the feed manager dashboard page."""
    context = {'is_dashboard': is_dashboard}
    return render_to_response('feedmanager/dashboard.html', context,
                              context_instance=RequestContext(request))
def _check_feed(url):
    """Fetch *url* with feedparser and sanity-check the result.

    Returns a tuple ``(error, parse_result)``: *error* is an empty string
    when the feed looks usable, otherwise a German, human-readable error
    message (or the raised exception object for IO/attribute errors).
    *parse_result* is always the raw feedparser result so callers can
    reuse the already-fetched data.
    """
    r = feedparser.parse(url, agent=settings.FEEDPARSER_USER_AGENT)
    # Defaults cover parse results that carry no HTTP metadata at all.
    http_status = r.get('status', 200)
    http_headers = r.get('headers', {'content-type': 'application/rss+xml', 'content-length':'1'})
    exc_type = r.get("bozo_exception", Exception()).__class__
    # 304 (not modified) and 302 (redirect) may legally come without a
    # parsed feed version; any other response without a version is suspect.
    if http_status != 304 and http_status != 302 and not r.get('version', ''):
        # Collect the socket error classes available in this Python build
        # (guarded with hasattr for portability across versions).
        socket_errors = []
        for e in ['error', 'gaierror']:
            if hasattr(socket, e):
                socket_errors.append(getattr(socket, e))
        if http_status == 410:
            return u'[410] An dieser Adresse existiert nichts mehr', r
        elif http_status == 404:
            return u'[404] Diese URL ist ungültig', r
        elif http_status not in [200, 302]:
            return u'[%s] Unbekannter Fehler' % http_status, r
        elif http_headers.get('content-type', 'rss').find('html') != -1:
            # HTML content type: the URL points to a web page, not a feed.
            return u'Sieht nach HTML aus! Das ist die Adresse einer Webseite, keines Feeds.', r
        elif http_headers.get('content-length', '1') == '0':
            return u'Leere Seite', r
        elif hasattr(socket, 'timeout') and exc_type == socket.timeout:
            return u'Feed brauchte zu lange zum Antworten', r
        elif exc_type == IOError:
            return r.bozo_exception, r
        elif hasattr(feedparser, 'zlib') and exc_type == feedparser.zlib.error:
            return u'Fehlerhafte Kompression des Feeds', r
        elif exc_type in socket_errors:
            return r.bozo_exception.args[1], r
        elif exc_type == urllib2.URLError:
            # URLError may wrap a socket error; unwrap it for a clearer message.
            if r.bozo_exception.reason.__class__ in socket_errors:
                exc_reason = r.bozo_exception.reason.args[1]
            else:
                exc_reason = r.bozo_exception.reason
            return exc_reason, r
        elif exc_type == AttributeError:
            return r.bozo_exception, r
        elif r.bozo:
            # Parser flagged a problem we cannot classify more precisely.
            return u'Kann diese Adresse nicht verarbeiten', r
    return '', r
def _save_existing_fingerprints(feed_result):
    """Store a fingerprint row for every entry currently in the feed.

    Called when a feed is subscribed for the first time, so that only
    entries published *after* the subscription are mailed out.
    """
    for entry in feed_result:
        # Prefer the feed-supplied GUID; fall back to a computed fingerprint.
        fingerprint = entry.get('id', getEntryFingerprint(entry))
        fingerprint_md5 = getEntryFingerprint_MD5(entry, fingerprint=fingerprint)
        # get_or_create is the idiomatic (and race-safer) replacement for
        # the previous try/get/except-DoesNotExist/save sequence.
        FeedData.objects.get_or_create(fingerprint=fingerprint,
                                       fingerprint_md5=fingerprint_md5)
@login_required
def feedmanager_add_feed(request):
    """Subscribe the current user to the feed URL POSTed as 'url'.

    Validates the URL via _check_feed(), creates the FeedItem if no one
    subscribed it yet, and records the current entries' fingerprints so
    only future entries get mailed.  All responses are JSON-wrapped
    German status messages; only POST is allowed.
    """
    if not request.method == "POST":
        return HttpResponseForbidden()
    url = request.POST.get('url', '')
    check_result, feed_result = _check_feed(url)
    if check_result:
        msg = u'Dies ist kein gültiger RSS- oder Atom-Feed und kann nicht abonniert werden. Folgender Fehler trat auf:<br /><br /><strong>%s</strong>' % check_result
        return HttpResponse(_getJSONData(request, msg),
                            mimetype="application/json")
    try:
        # Feed already exists (URL must be unique), but we need its id.
        feed = FeedItem.objects.get(url=url)
        # Known feed: its entries are already fingerprinted, skip that step.
        feed_result = None
    except FeedItem.DoesNotExist:
        feed = FeedItem(url=url)
        feed.save()
    try:
        f = FeedAbo.objects.get(user=request.user, feed=feed)
        # User already subscribes to this feed -- tell them so.
        return HttpResponse(_getJSONData(request, 'Du hast diesen Feed schon abonniert!'),
                            mimetype="application/json")
    except FeedAbo.DoesNotExist:
        pass
    abo = FeedAbo()
    abo.user = request.user
    abo.feed = feed
    abo.save()
    if feed_result:
        # Brand-new feed: remember all current entries so only future
        # ones are delivered by mail.
        _save_existing_fingerprints(feed_result.entries)
    return HttpResponse(_getJSONData(request, u'Der Feed wurde abonniert! Der Feed lieferte diesen Namen:<br /><br /><strong>%s</strong><br /><br /><i>Beachte, dass Dir nur <strong>zukünftige</strong> Einträge per Mail zugeschickt werden.</i>' % escape(feed.name)),
                        mimetype="application/json")
@login_required
def feedmanager_get_error_table(request):
    """Render one page of the error log for the user's feeds.

    The page number comes from ?page=; invalid numbers fall back to the
    first page, out-of-range numbers to the last one.  The rendered
    table is returned JSON-wrapped.
    """
    subscribed_feeds = [abo.feed for abo in FeedAbo.objects.filter(user=request.user)]
    error_list = FeedError.objects.filter(feed__in=subscribed_feeds).order_by('-timestamp')
    paginator = Paginator(error_list, settings.PAGINATION_ITEMS_PER_PAGE)
    requested_page = request.GET.get('page')
    if not requested_page:
        errors = paginator.page(1)
    else:
        try:
            errors = paginator.page(requested_page)
        except PageNotAnInteger:
            errors = paginator.page(1)
        except EmptyPage:
            errors = paginator.page(paginator.num_pages)
    data = render_to_string('feedmanager/error_table.html',
                            {'errors': errors},
                            context_instance=RequestContext(request))
    return HttpResponse(_getJSONData(request, data),
                        mimetype="application/json")
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import feedparser
from django.conf import settings
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.template import Context
from django.template.loader import get_template
from html2text import html2text
def contains(a, b):
    """Return True when *b* occurs somewhere inside *a*."""
    return b in a
def _is_html(t):
return type(t) is type(())
def getFeedName(url):
    """Fetch *url* and return the feed's title, UTF-8 encoded.

    Falls back to u"noname" when the parse result carries no usable
    title (missing feed metadata or an unencodable title).
    """
    feed = feedparser.parse(url, agent=settings.FEEDPARSER_USER_AGENT)
    try:
        return feed.feed.title.encode('utf-8')
    # Narrowed from a bare `except:`: only "title missing / not
    # encodable" should fall back; anything else is a real bug that
    # must surface.
    except (AttributeError, KeyError, UnicodeError):
        return u"noname"
def getEntryName(feed, entry):
    """Return '<feed name>: <author name>' for a feed entry.

    The author part is appended only when the entry carries an
    author_detail with a non-empty name; otherwise the plain feed name
    is returned.
    """
    name = feed.name
    if 'name' in entry.get('author_detail', []): # normally {} but py2.1
        if entry.author_detail.name:
            if name:
                name += ": "
            try:
                name += entry.author_detail.name
            except UnicodeDecodeError:
                # Python 2: the author name arrived as a UTF-8 byte
                # string; decode it before concatenating with the
                # unicode feed name.
                name += unicode(entry.author_detail.name, 'utf-8')
    return name
def getEntryContent(entry):
    """Return the best available content of a feed entry.

    Preference order: any HTML content (returned as a ('HTML', value)
    tuple), then text/plain content, then the first content block.
    Returns '' when the entry carries no usable content.
    """
    # Copy the list: the += below must not append the summary to the
    # entry's own 'content' list (the old code aliased and mutated it,
    # so repeated calls saw a growing content list).
    conts = list(entry.get('content', []))
    if entry.get('summary_detail', {}):
        conts += [entry.summary_detail]
    try:
        if conts:
            for c in conts:
                if 'html' in c.type:
                    return ('HTML', c.value)
            for c in conts:
                if c.type == 'text/plain':
                    return c.value
            return conts[0].value
    except AttributeError:
        # Some feeds deliver content without a 'type' attribute, e.g.
        # 'content': [{'value': u''}] -- example:
        # http://sourceforge.net/api/file/index/project-id/170561/mtime/desc/limit/20/rss
        pass
    return ''
def getEntryFingerprint_MD5(entry, fingerprint=None):
    """Return the hex MD5 digest of the entry's fingerprint.

    When *fingerprint* is not supplied it is derived from the entry via
    getEntryFingerprint().
    """
    if not fingerprint:
        fingerprint = getEntryFingerprint(entry)
    return hashlib.md5(fingerprint.encode('utf-8')).hexdigest()
def getEntryFingerprint(entry):
    """Return a stable identifier for a feed entry.

    Preference order: the feed-supplied id, an MD5 over the entry
    content, the entry link, an MD5 over the title.  Raises ValueError
    when none of these sources is available.
    """
    if entry.get('id'):
        return entry.id
    content = getEntryContent(entry)
    if content and content != "\n":
        return hashlib.md5(smart_unicode(content)).hexdigest()
    if 'link' in entry:
        return entry.link
    if 'title' in entry:
        return hashlib.md5(smart_unicode(entry.title)).hexdigest()
    raise ValueError("can't find ID in getID")
def getEntryTitle(entry):
    """Return the entry title as stripped, single-line plain text.

    Falls back to the first 40 characters of the entry content when the
    feed supplies no title; HTML is converted via html2text in both paths.
    """
    if entry.get('title_detail'):
        title = entry.title_detail.value
        if 'html' in entry.title_detail.type:
            title = html2text(title)
    else:
        title = getEntryContent(entry)[:40]
        if _is_html(title):
            # Slicing the ('HTML', value) tuple above is a no-op, so
            # convert the full HTML value here instead.
            title = html2text(title[1])
    return _get_stripped(title.replace("\n", " "))
def getEntryBody(entry, title):
    """Render the HTML mail body for a feed entry.

    Combines the entry content, its enclosures (listed as attachment
    links, images also embedded inline) and the site footer via the
    'mail/entry.html' template.
    """
    link = entry.get('link', '')
    entry_content = getEntryContent(entry)
    if _is_html(entry_content):
        # ('HTML', value) tuple -- use the raw HTML value.
        body = _get_stripped(entry_content[1])
    else:
        body = _get_stripped(entry_content)
    enclosure_list = ''
    if hasattr(entry, 'enclosures'):
        for enclosure in entry.enclosures:
            if (hasattr(enclosure, 'url') and enclosure.url != ""):
                enc_url = smart_unicode(enclosure.url)
                enclosure_list += (u'<br/>Anhang: <a href="%s">%s</a>\n' % (enc_url, enc_url))
            if (hasattr(enclosure, 'src') and enclosure.src != ""):
                # src-enclosures are additionally embedded as an inline image.
                enc_src = smart_unicode(enclosure.src)
                enclosure_list += (u'<br/>Anhang: <a href="%s">%s</a><br/><img src="%s"\n' % (enc_src, enc_src, enc_src))
    mail_body = get_template('mail/entry.html')
    mail_body_data = Context({
        'css': settings.MAIL_STYLE_SHEET,
        'link': link,
        'title': title,
        'body': body,
        'enclosures': enclosure_list,
        'site_url': settings.ROOT_URL,
        'site_title': settings.SITE_TITLE
    })
    rendered_body = mail_body.render(mail_body_data)
    return rendered_body
def _get_stripped(s):
if s:
return s.strip()
else:
return s
def getEntryTags(entry):
    """Return the entry's tags as one comma-separated unicode line.

    Returns u'' when the entry carries no tags.  Embedded newlines are
    removed so the result always fits on a single line.
    """
    tagline = u''
    if 'tags' in entry:
        tags = entry.get('tags', [])
        taglist = [smart_unicode(_get_stripped(tag['term'])) for tag in tags]
        if taglist:
            tagline = u','.join(taglist)
            # Some feeds (e.g. web.de) embed raw "\n" inside tag terms
            # that strip() does not catch.  The old check used
            # find("\n") > 0, which missed a newline at position 0.
            if "\n" in tagline:
                tagline = tagline.replace("\n", '')
    return tagline
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import connection
from django.db.models.signals import post_syncdb
def convert_MySQL_tables_to_UTF8(sender, **kwargs):
    """Convert all created tables to UTF-8 after syncdb was executed."""
    cursor = connection.cursor()
    template = 'ALTER TABLE %s CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;'
    for model in kwargs['created_models']:
        # Table names come from Django model metadata, not user input.
        cursor.execute(template % model._meta.db_table)


post_syncdb.connect(convert_MySQL_tables_to_UTF8)
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4) management entry point: loads the sibling
# settings module and hands control to django.core.management.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4) management entry point: loads the sibling
# settings module and hands control to django.core.management.
# NOTE(review): this file is byte-identical to the other manage.py in
# this dump -- presumably one copy per deployment directory; verify.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls.defaults import *
# URL routing: the account and feed-management apps live under their own
# prefixes; everything else is handled by the base app.
urlpatterns = patterns('',
    url(r'^account/', include('account.urls'), name='account'),
    url(r'^manage/', include('feedmanager.urls'), name='feedmanager'),
    url(r'', include('base.urls')),
)
if settings.DEBUG:
    # The revision-prefixed media URL matches MEDIA_URL in settings.py
    # (cache busting per Subversion revision).
    urlpatterns += patterns('',
        # Serve static media in development-server (NEVER use this in your production environment)
        (r'^%s/media/(?P<path>.*)$' % (settings.CURRENT_REVISION),
            'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    )
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Django settings for feedmail project.
import os
import logging
import locale
import socket
# Process-wide socket timeout so hanging feed servers cannot block the
# fetching cron job indefinitely.
SOCKET_TIMEOUT = 30.0
socket.setdefaulttimeout(SOCKET_TIMEOUT)
from revision import svnrev
# NOTE(review): DEBUG is committed as True -- must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ENABLE_DEBUG_TOOLBAR = False
# Subversion revision of the working copy; used e.g. for cache-busting
# media URLs and the user agent string.
CURRENT_REVISION = svnrev()
# Title of this site
SITE_TITLE = 'Feedmail'
# Full name of this site
SITE_FULL_NAME = '%s (Rev %s)' % (SITE_TITLE, CURRENT_REVISION)
ADMINS = (
    ('Benjamin Heil', 'kontakt@bheil.net'),
)
MANAGERS = ADMINS
########################################################################
# USE "CREATE DATABASE <dbname> CHARACTER SET utf8;" TO CREATE THE DB! #
########################################################################
# NOTE(review): real-looking database credentials are committed to
# source control -- they should be rotated and loaded from the
# environment or an untracked local settings file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'feedmail_dev', # Or path to database file if using sqlite3.
        'USER': 'feedmail_dev', # Not used with sqlite3.
        'PASSWORD': 'bAjGNT8W7DV5Oqwp', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
        'OPTIONS': {
            # Force InnoDB so new tables support transactions.
            "init_command": "SET storage_engine=INNODB",
        },
    }
}
DEFAULT_CHARSET = 'utf-8'
TEST_CHARSET = 'utf-8'
TEST_COLLATION = 'utf8_general_ci'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-de'
# strftime() formats used when rendering dates/times.
DATE_FORMAT = '%d.%m.%Y'
TIME_FORMAT = '%H:%M:%S'
TIME_SHORT_FORMAT = '%H:%M'
DATETIME_SHORT_FORMAT = DATE_FORMAT + ' ' + TIME_SHORT_FORMAT
DATETIME_FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
# Force the entire process environment to German.
LOCALE = 'de_DE.UTF-8'
locale.setlocale(locale.LC_ALL, LOCALE)
# All feed mails come from this address
DEFAULT_FROM_EMAIL = 'feedmail@bheil.net'
# Inline CSS injected into the rendered feed mails.
MAIL_STYLE_SHEET = 'h1{font-size:12pt;font-weight:bold;}body{font:11pt Ubuntu,Verdana,Arial;}a:link{color:#0066CC;}a:hover{color:#88BB22;}code{font-family:monospace;}blockquote,pre{border:1px solid #DADADA;background-color:#EEE;-moz-border-radius:4px;padding:8px;font-family:monospace;}#footer{margin-top:16px;padding:8px;-moz-border-radius:4px;background-color:#EEE;border:1px solid #DADADA;color:#444;}#feedmail-footer{float:right;font-size:xx-small;margin-bottom: 4px;color:#999;}#feedmail-footer a{color:#999;}'
# Alle server mails like broken links or status mails come from this account
SERVER_EMAIL = 'webmaster@bheil.net'
# Send status mail when 404
SEND_BROKEN_LINK_EMAILS = True
# Send status mails to managers when a new user registered or activated his account
SEND_REGISTER_STATUS_EMAILS = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = './media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://127.0.0.1:8000/%s/media/' % (CURRENT_REVISION)
# URL prefix of the Django admin site.
ADMIN_PREFIX = 'chef'
# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed to source control; it should
# be regenerated and loaded from the environment.
SECRET_KEY = '&dz30$wod^2rk51(qoc%5b*@q9cdn(#t7be%1yj#c1ee)fn6m2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'feedmail.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    './templates',
)
# Split Django Apps and local apps. This way we can only run our own tests
# with 'manage.py test' and not all django test cases
# http://groups.google.com/group/django-developers/msg/ec7508651e9e9fb8?pli=1
FEEDMAIL_APPS = (
    'base',
    'account',
    'feedmanager',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
) + FEEDMAIL_APPS
#TEST_RUNNER = 'local_tests.run_tests'
TEST_RUNNER = 'local_tests.FeedMailTestSuiteRunner'
CACHES = {
    'default': {
        # If you want to use Memcache, comment this line out and enable Memcached-Backend below
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        #'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        #'LOCATION': '127.0.0.1:11211',
        #'TIMEOUT': 35 * 60, # since the cron job should run every 30 min, make this a little bit larger
        #'OPTIONS': {
        #    'KEY_PREFIX': 'feedmail-udh2386dh3',
        #    'MAX_ENTRIES': 1000,
        #}
    }
}
# Root URL (with trailing slash)
ROOT_URL = 'http://127.0.0.1:8000/'
# Root URL without the trailing slash
ROOT_URL_WO_SLASH = ROOT_URL[:len(ROOT_URL) - 1]
# If you want to change this URLs you also have to modify urls.py
LOGIN_REDIRECT_URL = ROOT_URL
LOGIN_URL = '/account/login.html'
LOGOUT_URL = '/account/logout.html'
EMAIL_SUBJECT_PREFIX = ''
# User agent presented to feed servers when fetching.
FEEDPARSER_USER_AGENT = '%s/%s +%s' % (SITE_TITLE, CURRENT_REVISION, ROOT_URL)
# Extending Django's user profile with our own account information
AUTH_PROFILE_MODULE = 'account.AccountProfile'
# The user has x hours to activate the new account
ACCOUNT_HOURS_TO_ACTIVATE = 48
# If false the user can't remove his own account
ACCOUNT_CAN_DELETE = True
CRON_LOG_LEVEL = logging.DEBUG
CRON_LOG_FILE = './cron/feedmail.log'
CRON_SEND_EXCEPTION_MAILS = True
# Send a warning mail to admins if cron jobs takes longer than X minutes
CRON_SEND_WARNING_AFTER_MIN = 20
ROBOTS_TXT = """User-agent: *
"""
# Use Unicode characters instead of their ascii psuedo-replacements
TEXTMODE_UNICODE_SNOB = 1
# Put the links after each paragraph instead of at the end.
TEXTMODE_LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
TEXTMODE_BODY_WIDTH = 78
# The date/time display formats (DATE_FORMAT, TIME_FORMAT, ...) are
# defined once further up in this file; the identical duplicate
# definitions that used to be repeated here were removed.
# How much items per page? (on e.g. Errorlog-page)
PAGINATION_ITEMS_PER_PAGE = 25
# entries in FeedError table, which are older than this number of weeks, will be
# deleted during cron job
FEEDERROR_WEEKS = 4
# When running the test suite this feed will be fetched for some tests
# You can change it to a local feed or a feed in your network to speed up things
# It should be a valid feed!
TEST_FEED_URL = 'https://www.bheil.net/blog/rss.xml'
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.contrib.auth.models import User
from account.models import AccountProfile
class register_form(forms.ModelForm):
    """
    Form to register a new user.

    Collects username, password (entered twice) and mail address.  On
    save() the user is created inactive; activation happens via the
    mailed activation code.  All labels and help texts are German
    user-facing strings.
    """
    username = forms.RegexField(label=u"Benutzername", max_length=30,
                                regex=r'^\w+$',
                                error_message=u"Der Login-Name darf nur aus Buchstaben, Zahlen und Unterstrichen bestehen")
    password1 = forms.CharField(label=u"Passwort", max_length=64,
                                widget=forms.PasswordInput)
    password2 = forms.CharField(label=u"Passwort wiederholen", max_length=64,
                                widget=forms.PasswordInput)
    email = forms.EmailField(label=u"Mail-Adresse", max_length=128,
                             help_text=u"An diese Adresse wird eine Mail mit einem Aktivierungscode versendet. Dieser Code wird benötigt, um den Account zu aktivieren. Achte also darauf, eine gültige Adresse anzugeben. Du wirst Dich erst einloggen können, sobald Du Deinen Account bestätigt hast.")

    def clean_username(self):
        """
        Checks if this username already exists.

        Raises ValidationError (German message) when it is taken.
        """
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(u"Ein Nutzer mit diesem Namen existiert schon.")

    def clean_email(self):
        """
        Checks if this mail-adress is already in use.
        """
        email = self.cleaned_data["email"]
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            return email
        raise forms.ValidationError(u"Diese Mail-Adresse wird schon verwendet!")

    def clean_password2(self):
        """
        Checks that both typed passwords are the same.
        """
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(u"Die beiden Passwörter stimmen nicht überein")
        return password2

    def save(self, commit=True, *args, **kwargs):
        """
        Saves the new user and creates a new user account associated with the new user.

        The user starts inactive (is_active=False) until the activation
        mail is confirmed; an AccountProfile is created alongside.
        """
        user = super(register_form, self).save(commit=False, *args, **kwargs)
        user.set_password(self.cleaned_data["password1"])
        user.email = self.cleaned_data['email']
        user.is_active = False
        if commit:
            user.save()
            profile = AccountProfile.objects.create(user=user)
            profile.save()
        return user

    class Meta:
        model = User
        fields = ("username",)
class profile_form(forms.Form):
    """
    Form to edit a user profile.

    Lets the user change login name and mail address; a changed mail
    address is re-confirmed by the calling view.
    """
    username = forms.RegexField(label=u"Login-Name", max_length=30,
                                regex=r'^\w+$',
                                error_message=u"Der Login-Name darf nur aus Buchstaben, Zahlen und Unterstrichen bestehen")
    email = forms.EmailField(label=u"Mail-Adresse", max_length=128,
                             help_text=u"Wenn Du Deine Mail-Adresse ändern willst, musst Du die neue bestätigen. An diese Adresse wird eine Mail mit einem Aktivierungscode versendet.")

    def save(self, user):
        """
        Saves changes in user data and profile.

        Only the User row is touched; the profile currently carries no
        directly editable fields.
        """
        user.email = self.cleaned_data['email']
        user.username = self.cleaned_data['username']
        #p = user.get_profile()
        #p.save()
        user.save()
class resend_activation_mail_form(forms.Form):
    """Form to request a fresh activation mail for the given address."""
    email = forms.EmailField(label=u'Mail-Adresse', max_length=128)
class delete_my_account_form(forms.Form):
    """Confirmation checkbox shown before irrevocably deleting an account."""
    confirmation = forms.BooleanField(label=u"Ja, meinen Account unwiderruflich löschen",
                                      required=False, initial=False)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import datetime
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template import Context, loader
class AccountProfile(models.Model):
    """
    Extended user profile.
    http://www.b-list.org/weblog/2006/jun/06/django-tips-extending-user-model/
    """
    # To add a new field in UserProfile you must also include this field
    # in all forms in account.forms
    user = models.ForeignKey(User, unique=True)
    # Reset to False whenever the user changes the mail address (see
    # AccountActivation.send_activation_mail).
    mail_validated = models.BooleanField(default=False)

    # The previous save() override only delegated to super() and was
    # removed -- the inherited models.Model.save() behaves identically.

    def __unicode__(self):
        return "Profil %s" % (self.user.username)

    class Meta:
        verbose_name = 'Benutzerprofil'
        verbose_name_plural = 'Benutzerprofile'
class AccountActivation(models.Model):
    """
    Model to save an activation_id for every new registration.

    The activation_id is mailed to the user and must be used within
    settings.ACCOUNT_HOURS_TO_ACTIVATE hours.
    """
    user = models.ForeignKey(User)
    activation_id = models.CharField(max_length=20, unique=True)
    registered = models.DateTimeField()
    # True when this activation only confirms a changed mail address.
    only_mail_change = models.BooleanField()

    # Activation ids are security tokens: draw them from the OS entropy
    # pool via SystemRandom instead of the seedable Mersenne Twister
    # (the old code even re-seeded random on every call).
    _rng = random.SystemRandom()

    def save(self, *args, **kwargs):
        """Assign activation id and registration time on first save."""
        if not self.id:
            self.activation_id = self._get_activation_id()
            self.registered = datetime.datetime.now()
        super(AccountActivation, self).save(*args, **kwargs)

    def _get_activation_id(self):
        """Return a fresh, unique 20-character lowercase activation id."""
        while True:
            id = ''.join(chr(self._rng.randint(97, 122)) for _ in range(20))
            try:
                AccountActivation.objects.get(activation_id=id)
            except AccountActivation.DoesNotExist:
                # Not taken yet -- use it.
                return id

    def is_activation_in_time(self):
        """True while the activation window is still open."""
        return (datetime.datetime.now() - self.registered < \
                datetime.timedelta(hours=settings.ACCOUNT_HOURS_TO_ACTIVATE))

    def send_activation_mail(self):
        """Mail the activation (or mail-confirmation) link to the user."""
        if self.only_mail_change:
            t = loader.get_template('account/mail_confirmation_email.html')
        else:
            t = loader.get_template('account/activation_email.html')
        c = {
            'site_name': settings.SITE_TITLE,
            'root_url_2': settings.ROOT_URL[0:len(settings.ROOT_URL) - 1],
            'sublink': reverse('account_activate', args=[self.activation_id]),
            'hours_to_activate': settings.ACCOUNT_HOURS_TO_ACTIVATE,
            'root_url': settings.ROOT_URL,
        }
        if self.only_mail_change:
            c['sublink'] = reverse('account_mail_confirmation', args=[self.activation_id])
            # When changing mail only set account active again, but disable
            # the mail_validated state, so the user can login but will get
            # a hint to validate the new address.
            self.user.is_active = True
            self.user.save()
            profile = AccountProfile.objects.get(user=self.user)
            profile.mail_validated = False
            profile.save()
        self.user.email_user(u'Aktivierungsmail von %s' % (settings.SITE_TITLE),
                             t.render(Context(c)))

    def __unicode__(self):
        return "%s | %s | %s" % (self.registered.strftime(settings.DATETIME_FORMAT),
                                 self.user.username, self.activation_id)

    class Meta:
        verbose_name = 'Benutzeraktivierung'
        verbose_name_plural = 'Benutzeraktivierungen'
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from account.models import *
def add_testuser(c, username, password, mail):
    """Register and activate a fresh user via the public register view.

    *c* is the running TestCase instance; returns the activated User.
    """
    register_url = reverse('account_register')
    response = c.client.get(register_url)
    c.assertEqual(response.status_code, 200)
    response = c.client.post(register_url, {
        'username': username,
        'password1': password,
        'password2': password,
        'email': mail,
    })
    c.assertEqual(response.status_code, 200)
    return activate_user(c, username)
def activate_user(c, username):
    """Follow the activation link for *username* and return the User."""
    user = User.objects.get(username=username)
    activation = AccountActivation.objects.get(user=user)
    activation_url = reverse('account_activate', args=[activation.activation_id])
    # Visiting the link must succeed; the view activates the account.
    c.assertEqual(c.client.get(activation_url).status_code, 200)
    return user
class TestAccountApp(TestCase):
    """Integration tests for the account app: login redirects, profile
    changes (username / mail address) and account deletion."""

    def _check_redirect_for_login_required_page(self, url, redirect_url):
        """Assert that an anonymous request to *url* redirects to the login page."""
        response = self.client.get(url)
        self.assertRedirects(response, redirect_url)

    def test_profile(self):
        """Profile page requires login and handles username/mail changes."""
        url = reverse('account_profile')
        add_testuser(self, username='testprofile', password='testpassword', mail='test@bheil.net')
        self._check_redirect_for_login_required_page(url, 'http://testserver/account/login.html?next=/account/profile.html')
        # failUnless is a deprecated alias; use assertTrue.
        login = self.client.login(username='testprofile', password='testpassword')
        self.assertTrue(login, 'Could not log in')
        # Request testprofile's profile page
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testprofile')
        # Change username; request.user in this response still shows the
        # old name (asserted below).
        response = self.client.post(url, {'username': 'testprofile2', 'email': 'test@bheil.net'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testprofile')
        self.client.logout()
        self._check_redirect_for_login_required_page(url, 'http://testserver/account/login.html?next=/account/profile.html')
        login = self.client.login(username='testprofile2', password='testpassword')
        self.assertTrue(login, 'Could not log in')
        # Change mail and activate again
        response = self.client.post(url, {'username': 'testprofile2', 'email': 'test2@bheil.net'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testprofile2')
        activate_user(self, 'testprofile2')
        self.client.logout()
        self._check_redirect_for_login_required_page(url, 'http://testserver/account/login.html?next=/account/profile.html')

    def test_delete_account(self):
        """A user can delete his own account when the feature is enabled."""
        if not settings.ACCOUNT_CAN_DELETE:
            # Report the test as skipped instead of silently passing.
            self.skipTest('account deletion is disabled (ACCOUNT_CAN_DELETE)')
        url = reverse('account_delete_my_account')
        add_testuser(self, username='testdelete', password='testpassword', mail='testdelete@bheil.net')
        self._check_redirect_for_login_required_page(url, 'http://testserver/account/login.html?next=/account/delete-my-account.html')
        login = self.client.login(username='testdelete', password='testpassword')
        self.assertTrue(login, 'Could not log in')
        # get confirmation form
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(url, {'confirmation': True})
        self.assertEqual(response.status_code, 200)
        # The user row must be gone afterwards.
        self.assertRaises(User.DoesNotExist, User.objects.get, username='testdelete')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Signals emitted around account deletion so other apps can react
# (e.g. remove a user's feed subscriptions). Plain object() sentinels
# cannot be used with Django 1.x signal dispatch, so use real Signals.
from django.dispatch import Signal

# Sent just before a user account (and its profile) is deleted.
account_pre_delete = Signal(providing_args=['user'])
# Sent right after the user account has been deleted.
account_post_delete = Signal(providing_args=['user'])
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from account.views import *
# URL routes of the account app. All patterns are anchored with '^...$'.
urlpatterns = patterns('',
    url(r'^login.html$', 'django.contrib.auth.views.login', {'template_name': 'account/login.html'}, name='account_login'),
    url(r'^relog.html$', 'django.contrib.auth.views.logout_then_login', name='account_logout_then_login'),
    url(r'^logout.html$', 'django.contrib.auth.views.logout', {'template_name': 'account/logout.html'}, name='account_logout'),
    url(r'^password-change.html$', 'django.contrib.auth.views.password_change', {'template_name': 'account/password_change.html'}, name='account_password_change'),
    url(r'^password-change-done.html$', 'django.contrib.auth.views.password_change_done', {'template_name': 'account/password_change_done.html'}, name='account_password_change_done'),
    url(r'^password-reset.html$', 'django.contrib.auth.views.password_reset', {'template_name': 'account/password_reset.html', 'email_template_name': 'account/password_reset_email.html'}, name='account_password_reset'),
    url(r'^password-reset-done.html$', 'django.contrib.auth.views.password_reset_done', {'template_name': 'account/password_reset_done.html'}, name='account_password_reset_done'),
    url(r'^register.html$', register, name='account_register'),
    url(r'^profile.html$', profile, name='account_profile'),
    url(r'^activate/(?P<activation_id>[a-z]+).html$', activate, name='account_activate'),
    url(r'^mail-confirmation/(?P<activation_id>[a-z]+).html$', mail_confirmation, name='account_mail_confirmation'),
    # BUG FIX: this pattern was missing the '$' anchor, so any URL merely
    # *starting* with 'resend-activation-mail.html' matched.
    url(r'^resend-activation-mail.html$', resend_activation_mail, name='resend_activation_mail'),
    url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+).html$', 'django.contrib.auth.views.password_reset_confirm', {'template_name': 'account/password_reset_confirm.html'}, name='account_reset'),
    url(r'^reset/done.html$', 'django.contrib.auth.views.password_reset_complete', {'template_name': 'account/password_reset_complete.html'}, name='account_reset_done'),
    url(r'^delete-my-account.html$', delete_my_account, name='account_delete_my_account'),
)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from account.models import AccountProfile, AccountActivation
class AccountProfileAdmin(admin.ModelAdmin):
    """Admin options for user profiles."""
    # BUG FIX: 'user' is a ForeignKey; the admin search needs a concrete
    # text field, so follow the relation to the username.
    search_fields = ['user__username', 'website']

class AccountActivationAdmin(admin.ModelAdmin):
    """Admin options for pending account activations."""
    date_hierarchy = 'registered'
    ordering = ['-registered']
    # Same FK fix as above: search on the related username.
    search_fields = ['activation_id', 'user__username']

admin.site.register(AccountProfile, AccountProfileAdmin)
admin.site.register(AccountActivation, AccountActivationAdmin)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.core.urlresolvers import reverse
from django.core.mail import mail_managers
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_control
from django.template import RequestContext
from account.forms import register_form, profile_form, delete_my_account_form, resend_activation_mail_form
from account.models import AccountActivation, AccountProfile
def _set_user_active_and_validated(user):
    """Activate *user*, flag the mail address as validated and, if
    configured, notify the site managers."""
    user.is_active = True
    user.save()
    account_profile = AccountProfile.objects.get(user=user)
    account_profile.mail_validated = True
    account_profile.save()
    if settings.SEND_REGISTER_STATUS_EMAILS:
        details = u'Der User wurde aktiviert: \n\nName: %s\nMail: %s\nID: %s' % \
                  (user.username, user.email, user.id)
        mail_managers(u'User wurde aktiviert', details)
def _generate_activation_id(user, only_mail_change):
    """Create a fresh AccountActivation for *user* and mail the link.

    *only_mail_change* distinguishes a mail-address confirmation from a
    full account activation.
    """
    activation = AccountActivation(user=user, only_mail_change=only_mail_change)
    activation.save()
    activation.send_activation_mail()
def activate(request, activation_id):
    """Handle a click on a registration activation link.

    state 0 = success, -1 = unknown or expired activation id.
    """
    state = -1
    try:
        activation = AccountActivation.objects.get(activation_id=activation_id,
                                                   only_mail_change=False)
    except AccountActivation.DoesNotExist:
        activation = None
    if activation is not None:
        if activation.is_activation_in_time():
            state = 0
            _set_user_active_and_validated(activation.user)
        # The record is consumed either way; expired ones simply
        # disappear without activating the account.
        activation.delete()
    return render_to_response('account/activation.html', {'state': state},
                              context_instance=RequestContext(request))
def resend_activation_mail(request):
    """Send a new activation mail - directly for a logged-in user, or for
    a mail address an anonymous visitor enters into a form."""
    template = 'account/resend_activation_mail.html'
    if not request.user.is_anonymous():
        # Logged in: no form needed, just resend to the stored address.
        _generate_activation_id(request.user, False)
        msg = u'Es wurde eine neue Aktivierungsmail an %s verschickt. Bitte rufe Deine Mails ab!' % (request.user.email)
        return render_to_response(template, {'msg': msg},
                                  context_instance=RequestContext(request))
    if request.method == 'POST':
        form = resend_activation_mail_form(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            try:
                user = User.objects.get(email=email)
                _generate_activation_id(user, False)
                msg = u'Es wurde eine neue Aktivierungsmail an %s verschickt. Bitte rufe Deine Mails ab!' % (email)
            except User.DoesNotExist:
                msg = u'Diese Mail-Adresse ist nicht registriert. Du kannst damit aber einen <a href="%s">neuen Account anlegen</a>!' % (reverse('account_register'))
            return render_to_response(template, {'msg': msg},
                                      context_instance=RequestContext(request))
    else:
        form = resend_activation_mail_form()
    # GET, or POST with an invalid (bound) form: show the form again.
    return render_to_response(template, {'form': form},
                              context_instance=RequestContext(request))
def mail_confirmation(request, activation_id):
    """Confirm a changed mail address via the link mailed to the new
    address. state 0 = success, -1 = unknown or expired id."""
    state = -1
    try:
        confirmation = AccountActivation.objects.get(activation_id=activation_id,
                                                     only_mail_change=True)
    except AccountActivation.DoesNotExist:
        confirmation = None
    if confirmation is not None:
        if confirmation.is_activation_in_time():
            state = 0
            _set_user_active_and_validated(confirmation.user)
        # Consume the record whether it was still valid or not.
        confirmation.delete()
    return render_to_response('account/mail_confirmation.html', {'state': state},
                              context_instance=RequestContext(request))
@cache_control(private=True)
def register(request):
    """Display and process the registration form; on success create the
    user, send the activation mail and optionally notify the managers."""
    if request.method != 'POST':
        form = register_form()
        return render_to_response('account/register.html', {'form': form},
                                  context_instance=RequestContext(request))
    form = register_form(request.POST)
    if not form.is_valid():
        # Re-display the bound form with its validation errors.
        return render_to_response('account/register.html', {'form': form},
                                  context_instance=RequestContext(request))
    new_user = form.save()
    _generate_activation_id(new_user, False)
    if settings.SEND_REGISTER_STATUS_EMAILS:
        mail_managers(u'Neue User-Registrierung',
                      u'Ein neuer User hat sich gerade auf %s registriert:\n\nName: %s\nMail: %s\nID: %s' % \
                      (settings.SITE_TITLE, new_user.username, new_user.email, new_user.id))
    return render_to_response('account/registered.html',
                              context_instance=RequestContext(request))
@login_required
@cache_control(private=True)
def profile(request):
    """Show and process the profile form (username / mail address).

    A changed username must be unique; a changed mail address must be
    unique as well and is only adopted after the user confirms it via an
    activation link (only_mail_change=True).
    """
    # NOTE(review): fetched but never used below - presumably ensures the
    # profile row exists; confirm before removing.
    profile = request.user.get_profile()
    if request.method == 'POST':
        form = profile_form(request.POST)
        if form.is_valid():
            user = User.objects.get(username=request.user.username)
            msg = "Deine Daten wurden geändert!"
            try:
                # Reject a new username that is already taken. The raised
                # ValidationError skips the 'else: form.save(user)' below.
                if form.cleaned_data['username'] != user.username:
                    try:
                        User.objects.get(username=form.cleaned_data['username'])
                        msg = "Dieser Benutzername ist schon vergeben!"
                        raise forms.ValidationError(msg)
                    except User.DoesNotExist:
                        pass
                # A changed mail address must be unique as well ...
                if form.cleaned_data['email'] != user.email:
                    try:
                        User.objects.get(email=form.cleaned_data['email'])
                        msg = "Diese Mail-Adresse wird schon verwendet!"
                        raise forms.ValidationError(msg)
                    except User.DoesNotExist:
                        pass
                    # ... and must be re-confirmed via an activation mail.
                    user.email = form.cleaned_data['email']
                    user.save()
                    #user.is_active = False
                    _generate_activation_id(user, True)
                    msg = "Dir wurde eine Mail mit einem Aktivierungslink zugeschickt. Bitte rufe diesen Link auf, um die Änderung abzuschließen."
            except forms.ValidationError:
                # msg already carries the reason; fall through to render it.
                pass
            else:
                # No conflicts: persist the remaining form fields.
                form.save(user)
            return render_to_response('account/profile.html', {'msg': msg},
                                      context_instance=RequestContext(request))
    else:
        data = {
            'username': request.user.username,
            'email': request.user.email,
        }
        form = profile_form(data)
    # GET, or POST with an invalid (bound) form.
    return render_to_response('account/profile.html',
                              {'form': form,
                               'del_own_acc': settings.ACCOUNT_CAN_DELETE},
                              context_instance=RequestContext(request))
@login_required
def delete_my_account(request):
    """Let the logged-in user delete his own account after an explicit
    checkbox confirmation (feature-gated by settings.ACCOUNT_CAN_DELETE)."""
    if not settings.ACCOUNT_CAN_DELETE:
        # Feature disabled: bounce back to the profile page.
        return HttpResponseRedirect(reverse('account_profile'))
    if request.method == 'POST':
        form = delete_my_account_form(request.POST)
        if form.is_valid():
            if form.cleaned_data['confirmation']:
                username = request.user.username
                user = User.objects.get(username=username)
                profile = AccountProfile.objects.get(user=user)
                # TODO: check the dispatcher wiring
                # TODO: remove feed subscriptions when the user is deleted
                #dispatcher.send(signal=account_signals.account_pre_delete, user=user)
                # Log out first so the session no longer references the
                # user that is about to disappear.
                auth.logout(request)
                profile.delete()
                user.delete()
                #dispatcher.send(signal=account_signals.account_post_delete, user=user)
                msg = u"Dein Account wurde gelöscht!"
                if settings.SEND_REGISTER_STATUS_EMAILS:
                    mail_managers(u'Account gelöscht',
                                  u'Der User %s hat seinen Account gelöscht!' % username)
            else:
                # Checkbox not ticked: ask for explicit confirmation.
                msg = u"Du musst das Löschen explizit bestätigen, indem Du die Box aktivierst!"
            return render_to_response('account/delete.html', {'msg': msg},
                                      context_instance=RequestContext(request))
    else:
        form = delete_my_account_form()
    return render_to_response('account/delete.html', {'form': form},
                              context_instance=RequestContext(request))
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mysql_convert_tables_utf8
| Python |
'''
Revisionist: a module to get the latest source code revision for a Django project,
for use as a static content versioning device. Currently supports Mercurial,
Subversion and Bazaar.
http://www.fairviewcomputing.com/blog/2008/03/03/automatic-django-static-content-versioning/
Copyright (c) 2008 Fairview Computing LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import logging
import os
import sys
import time
# Day of the year as a decimal number [001,366].
# Fallback revision used whenever no VCS revision can be determined; it
# changes daily, so cached static files still expire eventually.
DEFAULT_REVISION = time.strftime("%j", time.localtime())

logger = logging.getLogger('revisionist')
# Attach a stderr handler only once, so repeated imports do not produce
# duplicate log output.
if not logger.handlers:
    logger.addHandler(logging.StreamHandler(sys.stderr))
def _get_django_module_dir():
django_module_dir = '.'
if os.environ.has_key('DJANGO_SETTINGS_MODULE'):
django_settings_module = __import__(os.environ['DJANGO_SETTINGS_MODULE'])
return os.path.dirname(django_settings_module.__file__)
else:
logger.warn('DJANGO_SETTINGS_MODULE not set; using current directory.')
return django_module_dir
def _findhgrepo(path = None):
if not path:
path = os.getcwd()
while not os.path.isdir(os.path.join(path, '.hg')):
oldpath, path = path, os.path.dirname(path)
if path == oldpath:
return ''
return path
def bzrrev(path=None):
'''
Get the latest revision of the Django project from its Bazaar repository.
'''
revision = DEFAULT_REVISION
try:
from bzrlib import workingtree
if not path:
path = _get_django_module_dir()
wt = workingtree.WorkingTree.open(path)
revision = str(wt.branch.revno())
except Exception, e:
logger.warn('Could not determine Bazaar revision for %s: %s' % (path, e))
logger.warn('Using default revision: %s' % DEFAULT_REVISION)
return revision
def hgrev(path=None):
'''
Get the latest revision of the Django project from its Mercurial repository.
'''
revision = DEFAULT_REVISION
try:
from mercurial.hg import repository
from mercurial.ui import ui
if not path:
path = _get_django_module_dir()
repo = _findhgrepo(path)
revision = str(repository(ui(interactive=False), repo).changelog.count())
except Exception, e:
logger.warn('Could not determine Mercurial revision for %s: %s' % (path, e))
logger.warn('Using default revision: %s' % DEFAULT_REVISION)
return revision
def svnrev(path=None):
'''
Get the latest revision of the Django project from its Subversion repository.
Requires pysvn, available from http://pysvn.tigris.org/.
'''
revision = DEFAULT_REVISION
try:
import pysvn
client = pysvn.Client()
if not path:
path = _get_django_module_dir()
info = client.info(path)
revision = str(info.revision.number)
except Exception, e:
logger.warn('Could not determine Subversion revision for %s: %s' % (path, e))
logger.warn('Using default revision: %s' % DEFAULT_REVISION)
return revision
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.core import mail
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from feedmanager.models import FeedItem, FeedAbo, FeedData
from feedmail_cron import send_feed_gone_mail, send_new_location_mail, run_cron
from account.tests import add_testuser
class TestFeedmailCron(TestCase):
    """Tests for the feedmail cron job (mail generation for feed events).

    NOTE(review): setUp subscribes to live external feeds, so these tests
    need network access and depend on third-party availability.
    """

    def runTest(self):
        # NOTE(review): calling the test_* methods manually makes them run
        # twice under unittest discovery; kept for backwards compatibility
        # when the case is instantiated without a method name.
        self.test_send_feed_gone_mail()
        self.test_send_new_location_mail()
        self.test_run_cron()

    def setUp(self):
        """Adds a test user and creates abos for some feeds."""
        add_testuser(self, username='testmanager_user', password='password', mail='testmanager_user@bheil.net')
        login = self.client.login(username='testmanager_user', password='password')
        # failUnless is a deprecated alias; use assertTrue.
        self.assertTrue(login, 'Could not log in')
        # Add some feeds with unicode-chars like japanese, french etc.
        url = reverse('feedmanager_add_feed')
        # german
        response = self.client.post(url, {'url': 'http://www.spiegel.de/schlagzeilen/index.rss'})
        self.assertEqual(response.status_code, 200)
        # arabic
        response = self.client.post(url, {'url': 'http://www.aljazeera.net/AljazeeraRss/Rss.aspx?URL=RSS-Portal.xml'})
        self.assertEqual(response.status_code, 200)
        # japan
        response = self.client.post(url, {'url': 'http://mainichi.pheedo.jp/f/mainichijp_flash'})
        self.assertEqual(response.status_code, 200)

    def test_send_new_location_mail(self):
        """One info mail per subscribed feed must be generated."""
        mail.outbox = []
        feeds = FeedItem.objects.all()
        for feed in feeds:
            send_new_location_mail(feed, feed.url)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(mail.outbox), len(feeds))

    def test_send_feed_gone_mail(self):
        """One 'feed gone' mail per subscribed feed must be generated."""
        mail.outbox = []
        feeds = FeedItem.objects.all()
        for feed in feeds:
            send_feed_gone_mail(feed)
        self.assertEqual(len(mail.outbox), len(feeds))

    def test_run_cron(self):
        """Just run the normal cron job."""
        mail.outbox = []
        # Clear FeedData - all items will be fetched and mails will be generated
        FeedData.objects.all().delete()
        run_cron()
        self.assertTrue(len(mail.outbox) > 0)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import feedparser
import logging
import datetime
import time
import socket
import urllib2
import locale
import MySQLdb
from datetime import timedelta
from time import mktime
# NOTE(review): appears unused below - confirm before removing.
warn = sys.stderr

# Make the Django project importable when this script runs stand-alone.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os_path = os.path.abspath(__file__)
# <repository root>/feedmail is the Django project directory.
root_path = os.path.join(os.path.split(os.path.split(os_path)[0])[0], 'feedmail')
sys.path.append(root_path)

# Collect the socket exception classes available on this platform so feed
# fetch errors can be matched against them in process_feed().
socket_errors = []
for e in ['error', 'gaierror']:
    if hasattr(socket, e):
        socket_errors.append(getattr(socket, e))

from feedmanager.models import FeedAbo, FeedItem, FeedData, FeedError
from feedmanager.parser import getEntryFingerprint, getEntryFingerprint_MD5, getEntryName, getEntryContent, getEntryBody, contains, getEntryTags, getEntryTitle
import feedmanager.html2text as h2t
from django.conf import settings
from django.core.mail import EmailMessage, mail_admins, EmailMultiAlternatives
from django.template import Context
from django.template.loader import get_template
from django.utils.encoding import smart_unicode

# Configure the HTML-to-text converter from the project settings.
h2t.UNICODE_SNOB = settings.TEXTMODE_UNICODE_SNOB
h2t.LINKS_EACH_PARAGRAPH = settings.TEXTMODE_LINKS_EACH_PARAGRAPH
h2t.BODY_WIDTH = settings.TEXTMODE_BODY_WIDTH
html2text = h2t.html2text
def _add_error_entry(feed, error_code, error_msg, url):
    """Persist one FeedError row describing a fetch problem for *feed*."""
    FeedError(feed=feed, error_no=error_code, msg=error_msg, url=url).save()
def _add_logging_error(feed, error_code, error_msg, url, add_error_entry=True):
    """Log a feed error and, unless disabled, also store it in the DB."""
    if add_error_entry:
        _add_error_entry(feed, error_code, error_msg, url)
    detail = u"%s (%s)" % (smart_unicode(error_msg), url)
    logging.error(u'FEED ERROR: %s\n%s\n%s\n' % (smart_unicode(feed),
                                                 smart_unicode(error_code),
                                                 detail))
def send_new_location_mail(feed, old_url):
    """Inform every subscriber of *feed* that its URL changed permanently."""
    template = get_template('mail/new_location.html')
    context = Context({
        'old_url': old_url,
        'new_url': feed.url,
        'site_title': settings.SITE_TITLE,
        'site_url': settings.ROOT_URL
    })
    subject = u'[%s] Geänderte Adresse eines Feeds' % settings.SITE_TITLE
    # The body is identical for all recipients, render it once.
    rendered_body = template.render(context)
    for abo in FeedAbo.objects.filter(feed=feed):
        logging.debug(u">> Sending INFO-Mail to %s with subject '%s'" % (abo.user.email, subject))
        EmailMessage(subject=subject,
                     body=rendered_body,
                     to=[abo.user.email]).send(fail_silently=False)
def send_feed_gone_mail(feed):
    """Tell every subscriber of *feed* that the feed is gone (HTTP 410)."""
    template = get_template('mail/feed_gone.html')
    context = Context({
        'feed_url': feed.url,
        'site_title': settings.SITE_TITLE,
        'site_url': settings.ROOT_URL
    })
    subject = u'[%s] Feed wurde entfernt' % settings.SITE_TITLE
    # The body is identical for all recipients, render it once.
    rendered_body = template.render(context)
    for abo in FeedAbo.objects.filter(feed=feed):
        logging.debug(u">> Sending FEED_GONE-Mail to %s with subject '%s'" % (abo.user.email, subject))
        EmailMessage(subject=subject,
                     body=rendered_body,
                     to=[abo.user.email]).send(fail_silently=False)
def send_mail(entry, feed):
    """Send one feed *entry* as mail to all subscribers of *feed*.

    A fingerprint per entry is stored in FeedData, so each entry is
    mailed at most once, even across cron runs.
    """
    # Prefer the feed-provided GUID; fall back to a computed fingerprint.
    fingerprint = entry.get('id', getEntryFingerprint(entry))
    fingerprint_md5 = getEntryFingerprint_MD5(entry, fingerprint)
    try:
        f = FeedData.objects.get(fingerprint_md5=fingerprint_md5)
        # Already mailed on an earlier run - nothing to do.
        return
    except FeedData.DoesNotExist:
        pass
    try:
        f = FeedData(fingerprint=fingerprint,
                     fingerprint_md5=fingerprint_md5)
        f.save()
    except MySQLdb.IntegrityError, e:
        # Example: Duplicate entry 'http://www.corriere.it/rss/Una%20circolare%20della%20procura%20d' for key 'fingerprint'
        # When MySQL raises an IntegrityError here, the fingerprint was
        # truncated by the UTF-8 column encoding on an earlier run, so the
        # lookup above missed it - but the mail for this entry already went
        # out. Leave the function to avoid sending it a second time.
        logging.error('IntegrityError when saving fingerprint "%s" from feed "%s": %s' % (fingerprint, feed, e))
        return
    title = getEntryTitle(entry)
    # Pick a timestamp the feed offers; defaults to "now" (UTC).
    # NOTE(review): no break in the loop - the last available of the three
    # date kinds wins; confirm this ordering is intended.
    entry_datetime = time.gmtime()
    for datetype in ('modified', 'issued', 'created'):
        kind = datetype + "_parsed"
        if kind in entry and entry[kind]:
            entry_datetime = entry[kind]
    name = getEntryName(feed, entry)
    from_addr = '"%s" <%s>' % (name, settings.DEFAULT_FROM_EMAIL)
    # Extra headers let mail clients sort/filter by feed and entry id.
    extra_headers = {
        'Date': time.strftime("%a, %d %b %Y %H:%M:%S -0000", entry_datetime),
        'User-Agent': '%s -- %s' % (settings.SITE_TITLE, settings.ROOT_URL),
        'X-RSS-Feed': feed.url,
        'X-RSS-ID': fingerprint,
        'X-RSS-Tags': getEntryTags(entry)
    }
    try:
        body = getEntryBody(entry, title)
    except UnicodeDecodeError:
        # This exception has occurred only once so far, with the title:
        # <title>Akismet 2.5 for WordPress Released – Film at 11</title>
        # It only blows up while assembling the body - and only in the
        # production environment. Catch it for now and skip this entry.
        logging.error(u'UnicodeDecodeError when parsing feed "%s" (entry with fingerprint "%s")' % (feed, fingerprint))
        return
    text_body = html2text(body)
    recipients = FeedAbo.objects.filter(feed=feed)
    for recipient in recipients:
        logging.debug(u">> Sending Mail to %s with subject '%s'" % (recipient.user.email, title))
        # Multipart mail: plain-text body with the HTML as alternative.
        msg = EmailMultiAlternatives(subject=title,
                                     body=text_body,
                                     from_email=from_addr,
                                     to=[recipient.user.email],
                                     headers=extra_headers)
        msg.attach_alternative(body, "text/html")
        msg.send(fail_silently=False)
def process_feed(feed):
    """Fetch *feed*, handle HTTP/parse errors and redirects, then mail any
    new entries to the subscribers.

    Conditional-GET state (ETag / Last-Modified) is persisted on the feed
    so that unchanged feeds are cheap on the next run.
    """
    try:
        logging.debug(u"Processing '%s'" % feed.url)
        r = {}
        # Use the stored validators for a conditional request.
        etag = feed.etag or None
        modified = feed.feed_modified or None
        if modified and isinstance(modified, datetime.datetime):
            # feedparser expects a time tuple, not a datetime.
            modified = modified.timetuple()
        try:
            r = feedparser.parse(feed.url, etag=etag, modified=modified, agent=settings.FEEDPARSER_USER_AGENT)
        except UnicodeDecodeError:
            logging.error(u'UnicodeDecodeError when parsing feed "%s"' % (feed))
            return
        http_status = r.get('status', 200)
        http_headers = r.get('headers', {'content-type': 'application/rss+xml', 'content-length':'1'})
        exc_type = r.get("bozo_exception", Exception()).__class__
        # No parsable feed version and not a 304/302 answer: classify the
        # failure, record it, and give up on this feed for this run.
        if http_status != 304 and http_status != 302 and not r.get('version', ''):
            if http_status not in [200, 302, 404]:
                _add_logging_error(feed, http_status, u'Unbekannter Fehler', feed.url)
            elif http_status == 404:
                _add_logging_error(feed, http_status, u'URL nicht gefunden', feed.url)
            elif http_headers.get('content-type', 'rss').find('html') != -1:
                _add_logging_error(feed, http_status, u'Sieht nach HTML aus!', feed.url)
            elif http_headers.get('content-length', '1') == '0':
                _add_logging_error(feed, http_status, u'Leere Seite', feed.url)
            elif hasattr(socket, 'timeout') and exc_type == socket.timeout:
                _add_logging_error(feed, http_status, u'Feed brauchte zu lange zum Antworten', feed.url)
            elif exc_type == IOError:
                _add_logging_error(feed, http_status, r.bozo_exception, feed.url)
            elif hasattr(feedparser, 'zlib') and exc_type == feedparser.zlib.error:
                _add_logging_error(feed, http_status, u'Fehlerhafte Kompression des Feeds', feed.url)
            elif exc_type in socket_errors:
                _add_logging_error(feed, http_status, r.bozo_exception.args[1], feed.url)
            elif exc_type == urllib2.URLError:
                # Unwrap the underlying socket error message if present.
                if r.bozo_exception.reason.__class__ in socket_errors:
                    exc_reason = r.bozo_exception.reason.args[1]
                else:
                    exc_reason = r.bozo_exception.reason
                _add_logging_error(feed, http_status, exc_reason, feed.url)
            elif exc_type == AttributeError:
                _add_logging_error(feed, http_status, r.bozo_exception, feed.url)
            elif exc_type == KeyboardInterrupt:
                # Let a manual abort propagate instead of swallowing it.
                raise r.bozo_exception
            elif r.bozo:
                _add_logging_error(feed, http_status, r.get("bozo_exception", u"kann nicht verarbeiten"), feed.url)
            return
        if http_status == 301: # Moved permanently
            # Adopt the new URL permanently and tell the subscribers.
            _old_url = feed.url
            feed.url = r.get('href', feed.url)
            feed.save()
            send_new_location_mail(feed, _old_url)
            warning_msg = "FEED MOVED PERMANENTLY, new location at '%s'" % feed.url
            _add_error_entry(feed, http_status, warning_msg, feed.url)
            logging.warning(warning_msg)
            return
        if http_status == 410: # Resource gone
            # Disable the feed for good and notify the subscribers.
            feed.is_gone = True
            feed.save()
            send_feed_gone_mail(feed)
            warning_msg = "FEED HAS GONE, DISABLED: '%s'" % feed.url
            _add_error_entry(feed, http_status, warning_msg, feed.url)
            logging.warning(warning_msg)
            return
        if http_status == 304: # Not modified
            logging.debug(u"[304] Completed, feed not changed '%s'" % feed.url)
            return
        if http_status == 302: # Temporarily moved
            logging.debug(u"[302] Feed temporarily moved to '%s', will continue to fetch old url" % r.href)
        # Reverse so entries are processed in opposite feed order
        # (feeds typically list newest first, so mails go out oldest-first).
        r.entries.reverse()
        for entry in r.entries:
            send_mail(entry, feed)
        # Remember the conditional-GET validators for the next run.
        feed.etag = r.get('etag', None)
        feed.feed_modified = None
        last_modified = r.get('modified', None)
        if last_modified and isinstance(last_modified, time.struct_time):
            feed.feed_modified = datetime.datetime.fromtimestamp(mktime(last_modified))
        feed.save()
        logging.debug(u"[%d] Completed '%s'" % (http_status, feed.url))
    except Exception, e:
        # Catch-all: one broken feed must not abort the whole cron run.
        logging.error('Exception in process_feed for feed "%s"' % feed.url)
        logging.exception(e)
        if settings.CRON_SEND_EXCEPTION_MAILS:
            mail_admins('Exception in process_feed for feed "%s"' % feed.url, e)
def clean_feederrors_up():
    """Delete all FeedError entries older than the number of weeks
    configured in settings.FEEDERROR_WEEKS."""
    logging.debug('Will delete FeedError-entries older than %s weeks ...' % settings.FEEDERROR_WEEKS)
    cutoff = datetime.datetime.now() - datetime.timedelta(weeks=settings.FEEDERROR_WEEKS)
    stale = FeedError.objects.filter(timestamp__lte=cutoff)
    # count() issues SELECT COUNT(*) instead of loading every row as the
    # previous len(queryset) did; the log output is identical.
    logging.debug('%s entries to delete ...' % stale.count())
    stale.delete()
    logging.debug('FeedError-entries deleted')
def set_US_time_locale():
    """Set time locale to en_US.utf-8. That's important for time.strftime,
    because Date should be in english format for Thunderbird and other
    mail programs.
    IMPORTANT: This locale must be available. Check with 'locale -a'!
    """
    # Only LC_TIME is switched, so number/currency formatting is untouched.
    locale.setlocale(locale.LC_TIME, ('en_US', 'utf-8'))
def run_cron():
    """Cron entry point: process every feed that is not flagged as gone,
    purge old FeedError rows, and mail the admins when the run is too slow
    or raises an unexpected exception."""
    logging.basicConfig(filename=settings.CRON_LOG_FILE,
                        level=settings.CRON_LOG_LEVEL,
                        format='%(asctime)s %(levelname)-8s %(message)s')
    try:
        starttime = datetime.datetime.now()
        # logging.warn is a deprecated alias of logging.warning.
        logging.warn('feedmail cron begins')
        set_US_time_locale()
        feeds = FeedItem.objects.all()
        for feed in feeds:
            if not feed.is_gone:
                process_feed(feed)
        clean_feederrors_up()
        run_time = datetime.datetime.now() - starttime
        logging.warn('feedmail cron ends (elapsed %s)' % run_time)
        if run_time > timedelta(minutes=settings.CRON_SEND_WARNING_AFTER_MIN):
            msg = 'Warning: feedmail cron job takes %s to run (> %s min)' % (run_time, settings.CRON_SEND_WARNING_AFTER_MIN)
            mail_admins(msg, msg)
    except Exception, e:
        logging.error('General exception in feedmail_cron:')
        logging.exception(e)
        if settings.CRON_SEND_EXCEPTION_MAILS:
            mail_admins('General exception in feedmail_cron, check log file', e)
if __name__ == '__main__':
    run_cron()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Benjamin Heil
#
# This file is part of Feedmail.
#
# Feedmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import feedparser
import logging
import datetime
import time
import socket
import urllib2
import locale
import MySQLdb
from datetime import timedelta
from time import mktime
# NOTE(review): this alias is never used below — candidate for removal.
warn = sys.stderr
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Put the project directory ('<repo>/feedmail') on sys.path so the
# feedmanager/django imports below resolve when run from cron.
os_path = os.path.abspath(__file__)
root_path = os.path.join(os.path.split(os.path.split(os_path)[0])[0], 'feedmail')
sys.path.append(root_path)
# Socket exception classes available in this Python build; process_feed
# matches feedparser's bozo_exception against this list.
socket_errors = []
for e in ['error', 'gaierror']:
    if hasattr(socket, e):
        socket_errors.append(getattr(socket, e))
from feedmanager.models import FeedAbo, FeedItem, FeedData, FeedError
from feedmanager.parser import getEntryFingerprint, getEntryFingerprint_MD5, getEntryName, getEntryContent, getEntryBody, contains, getEntryTags, getEntryTitle
import feedmanager.html2text as h2t
from django.conf import settings
from django.core.mail import EmailMessage, mail_admins, EmailMultiAlternatives
from django.template import Context
from django.template.loader import get_template
from django.utils.encoding import smart_unicode
# Configure the HTML-to-text converter from the site settings.
h2t.UNICODE_SNOB = settings.TEXTMODE_UNICODE_SNOB
h2t.LINKS_EACH_PARAGRAPH = settings.TEXTMODE_LINKS_EACH_PARAGRAPH
h2t.BODY_WIDTH = settings.TEXTMODE_BODY_WIDTH
html2text = h2t.html2text
def _add_error_entry(feed, error_code, error_msg, url):
    """Persist one FeedError row describing a failure for *feed*."""
    FeedError(feed=feed, error_no=error_code, msg=error_msg, url=url).save()
def _add_logging_error(feed, error_code, error_msg, url, add_error_entry=True):
    """Log a feed error and, unless disabled, also persist it as FeedError."""
    if add_error_entry:
        _add_error_entry(feed, error_code, error_msg, url)
    detail = u"%s (%s)" % (smart_unicode(error_msg), url)
    logging.error(u'FEED ERROR: %s\n%s\n%s\n' % (smart_unicode(feed),
                                                 smart_unicode(error_code),
                                                 detail))
def send_new_location_mail(feed, old_url):
    """Mail every subscriber of *feed* that its URL changed permanently."""
    template = get_template('mail/new_location.html')
    context = Context({
        'old_url': old_url,
        'new_url': feed.url,
        'site_title': settings.SITE_TITLE,
        'site_url': settings.ROOT_URL
    })
    subject = u'[%s] Geänderte Adresse eines Feeds' % settings.SITE_TITLE
    for abo in FeedAbo.objects.filter(feed=feed):
        logging.debug(u">> Sending INFO-Mail to %s with subject '%s'" % (abo.user.email, subject))
        mail = EmailMessage(subject=subject,
                            body=template.render(context),
                            to=[abo.user.email])
        mail.send(fail_silently=False)
def send_feed_gone_mail(feed):
    """Mail every subscriber of *feed* that the feed was removed (HTTP 410)."""
    template = get_template('mail/feed_gone.html')
    context = Context({
        'feed_url': feed.url,
        'site_title': settings.SITE_TITLE,
        'site_url': settings.ROOT_URL
    })
    subject = u'[%s] Feed wurde entfernt' % settings.SITE_TITLE
    for abo in FeedAbo.objects.filter(feed=feed):
        logging.debug(u">> Sending FEED_GONE-Mail to %s with subject '%s'" % (abo.user.email, subject))
        mail = EmailMessage(subject=subject,
                            body=template.render(context),
                            to=[abo.user.email])
        mail.send(fail_silently=False)
def send_mail(entry, feed):
    """Send one feed entry as mail to every subscriber of *feed*.

    Deduplicates via a fingerprint stored in FeedData: if the fingerprint
    is already known the entry was mailed in an earlier run and nothing
    is sent.
    """
    fingerprint = entry.get('id', getEntryFingerprint(entry))
    fingerprint_md5 = getEntryFingerprint_MD5(entry, fingerprint)
    try:
        f = FeedData.objects.get(fingerprint_md5=fingerprint_md5)
        # Already processed in an earlier run — do not mail it again.
        return
    except FeedData.DoesNotExist:
        pass
    try:
        f = FeedData(fingerprint=fingerprint,
                     fingerprint_md5=fingerprint_md5)
        f.save()
    except MySQLdb.IntegrityError, e:
        # Example: Duplicate entry 'http://www.corriere.it/rss/Una%20circolare%20della%20procura%20d' for key 'fingerprint'
        # When MySQL raises an IntegrityError the fingerprint was truncated
        # (UTF-8 encoding) on a previous run and therefore was not found by
        # the lookup above — but the mail for this entry already went out.
        # Return here to avoid sending it a second time.
        logging.error('IntegrityError when saving fingerprint "%s" from feed "%s": %s' % (fingerprint, feed, e))
        return
    title = getEntryTitle(entry)
    # Fall back to "now" when the entry carries no usable timestamp.
    entry_datetime = time.gmtime()
    for datetype in ('modified', 'issued', 'created'):
        kind = datetype + "_parsed"
        if kind in entry and entry[kind]:
            entry_datetime = entry[kind]
    name = getEntryName(feed, entry)
    from_addr = '"%s" <%s>' % (name, settings.DEFAULT_FROM_EMAIL)
    extra_headers = {
        'Date': time.strftime("%a, %d %b %Y %H:%M:%S -0000", entry_datetime),
        'User-Agent': '%s -- %s' % (settings.SITE_TITLE, settings.ROOT_URL),
        'X-RSS-Feed': feed.url,
        'X-RSS-ID': fingerprint,
        'X-RSS-Tags': getEntryTags(entry)
    }
    try:
        body = getEntryBody(entry, title)
    except UnicodeDecodeError:
        # This exception has occurred only once so far, for the feed title
        # <title>Akismet 2.5 for WordPress Released – Film at 11</title>.
        # It only crashes while assembling the body — and only in the
        # production environment. Catch it for now and skip the entry.
        logging.error(u'UnicodeDecodeError when parsing feed "%s" (entry with fingerprint "%s")' % (feed, fingerprint))
        return
    text_body = html2text(body)
    recipients = FeedAbo.objects.filter(feed=feed)
    for recipient in recipients:
        logging.debug(u">> Sending Mail to %s with subject '%s'" % (recipient.user.email, title))
        msg = EmailMultiAlternatives(subject=title,
                                     body=text_body,
                                     from_email=from_addr,
                                     to=[recipient.user.email],
                                     headers=extra_headers)
        msg.attach_alternative(body, "text/html")
        msg.send(fail_silently=False)
def process_feed(feed):
    """Fetch *feed* once, classify fetch/parse errors, react to HTTP
    redirect/gone/not-modified statuses, and mail every returned entry to
    the subscribers. Unexpected exceptions are logged and mailed to admins."""
    try:
        logging.debug(u"Processing '%s'" % feed.url)
        r = {}
        # Stored etag/modified values enable HTTP conditional GETs (-> 304).
        etag = feed.etag or None
        modified = feed.feed_modified or None
        if modified and isinstance(modified, datetime.datetime):
            modified = modified.timetuple()
        try:
            r = feedparser.parse(feed.url, etag=etag, modified=modified, agent=settings.FEEDPARSER_USER_AGENT)
        except UnicodeDecodeError:
            logging.error(u'UnicodeDecodeError when parsing feed "%s"' % (feed))
            return
        http_status = r.get('status', 200)
        http_headers = r.get('headers', {'content-type': 'application/rss+xml', 'content-length':'1'})
        exc_type = r.get("bozo_exception", Exception()).__class__
        # No feed version detected and not a redirect/not-modified answer:
        # map the failure to a specific error entry, then bail out.
        if http_status != 304 and http_status != 302 and not r.get('version', ''):
            if http_status not in [200, 302, 404]:
                _add_logging_error(feed, http_status, u'Unbekannter Fehler', feed.url)
            elif http_status == 404:
                _add_logging_error(feed, http_status, u'URL nicht gefunden', feed.url)
            elif http_headers.get('content-type', 'rss').find('html') != -1:
                _add_logging_error(feed, http_status, u'Sieht nach HTML aus!', feed.url)
            elif http_headers.get('content-length', '1') == '0':
                _add_logging_error(feed, http_status, u'Leere Seite', feed.url)
            elif hasattr(socket, 'timeout') and exc_type == socket.timeout:
                _add_logging_error(feed, http_status, u'Feed brauchte zu lange zum Antworten', feed.url)
            elif exc_type == IOError:
                _add_logging_error(feed, http_status, r.bozo_exception, feed.url)
            elif hasattr(feedparser, 'zlib') and exc_type == feedparser.zlib.error:
                _add_logging_error(feed, http_status, u'Fehlerhafte Kompression des Feeds', feed.url)
            elif exc_type in socket_errors:
                _add_logging_error(feed, http_status, r.bozo_exception.args[1], feed.url)
            elif exc_type == urllib2.URLError:
                # URLError wraps the real reason; unwrap socket errors.
                if r.bozo_exception.reason.__class__ in socket_errors:
                    exc_reason = r.bozo_exception.reason.args[1]
                else:
                    exc_reason = r.bozo_exception.reason
                _add_logging_error(feed, http_status, exc_reason, feed.url)
            elif exc_type == AttributeError:
                _add_logging_error(feed, http_status, r.bozo_exception, feed.url)
            elif exc_type == KeyboardInterrupt:
                raise r.bozo_exception
            elif r.bozo:
                _add_logging_error(feed, http_status, r.get("bozo_exception", u"kann nicht verarbeiten"), feed.url)
            return
        if http_status == 301: # Moved permanently
            # Follow the redirect for good: store the new URL and tell the
            # subscribers about it.
            _old_url = feed.url
            feed.url = r.get('href', feed.url)
            feed.save()
            send_new_location_mail(feed, _old_url)
            warning_msg = "FEED MOVED PERMANENTLY, new location at '%s'" % feed.url
            _add_error_entry(feed, http_status, warning_msg, feed.url)
            logging.warning(warning_msg)
            return
        if http_status == 410: # Resource gone
            # Disable the feed permanently and notify the subscribers.
            feed.is_gone = True
            feed.save()
            send_feed_gone_mail(feed)
            warning_msg = "FEED HAS GONE, DISABLED: '%s'" % feed.url
            _add_error_entry(feed, http_status, warning_msg, feed.url)
            logging.warning(warning_msg)
            return
        if http_status == 304: # Not modified
            logging.debug(u"[304] Completed, feed not changed '%s'" % feed.url)
            return
        if http_status == 302: # Temporarily moved
            logging.debug(u"[302] Feed temporarily moved to '%s', will continue to fetch old url" % r.href)
        # Oldest entry first, so mails go out in chronological order.
        r.entries.reverse()
        for entry in r.entries:
            send_mail(entry, feed)
        # Remember etag/modified for the next conditional request.
        feed.etag = r.get('etag', None)
        feed.feed_modified = None
        last_modified = r.get('modified', None)
        if last_modified and isinstance(last_modified, time.struct_time):
            feed.feed_modified = datetime.datetime.fromtimestamp(mktime(last_modified))
        feed.save()
        logging.debug(u"[%d] Completed '%s'" % (http_status, feed.url))
    except Exception, e:
        logging.error('Exception in process_feed for feed "%s"' % feed.url)
        logging.exception(e)
        if settings.CRON_SEND_EXCEPTION_MAILS:
            mail_admins('Exception in process_feed for feed "%s"' % feed.url, e)
def clean_feederrors_up():
    """Delete all FeedError-entries with a timestamp older than in settings
    defined number of weeks."""
    logging.debug('Will delete FeedError-entries older than %s weeks ...' % settings.FEEDERROR_WEEKS)
    d = datetime.datetime.now() - datetime.timedelta(weeks=settings.FEEDERROR_WEEKS)
    f = FeedError.objects.filter(timestamp__lte=d)
    # count() issues a single SELECT COUNT(*) instead of materialising every
    # row the way len(queryset) did — the rows are only deleted, never used.
    logging.debug('%s entries to delete ...' % f.count())
    f.delete()
    logging.debug('FeedError-entries deleted')
def set_US_time_locale():
    """Set time locale to en_US.utf-8. That's important for time.strftime,
    because Date should be in english format for Thunderbird and other
    mail programs.
    IMPORTANT: This locale must be available. Check with 'locale -a'!
    """
    # Raises locale.Error when the en_US locale is not installed on the host.
    locale.setlocale(locale.LC_TIME, ('en_US', 'utf-8'))
def run_cron():
logging.basicConfig(filename=settings.CRON_LOG_FILE,
level=settings.CRON_LOG_LEVEL,
format='%(asctime)s %(levelname)-8s %(message)s')
try:
starttime = datetime.datetime.now()
logging.warn('feedmail cron begins')
set_US_time_locale()
feeds = FeedItem.objects.all()
for feed in feeds:
if not feed.is_gone:
process_feed(feed)
clean_feederrors_up()
run_time = datetime.datetime.now() - starttime
logging.warn('feedmail cron ends (elapsed %s)' % run_time)
if run_time > timedelta(minutes=settings.CRON_SEND_WARNING_AFTER_MIN):
msg = 'Warning: feedmail cron job takes %s to run (> %s min)' % (run_time, settings.CRON_SEND_WARNING_AFTER_MIN)
mail_admins(msg, msg)
except Exception, e:
logging.error('General exception in feedmail_cron:')
logging.exception(e)
if settings.CRON_SEND_EXCEPTION_MAILS:
mail_admins('General exception in feedmail_cron, check log file', e)
if __name__ == '__main__':
run_cron()
| Python |
#!/usr/bin/env python
# CheckMentioned.py
# Find all the properties used in SciTE source files and check if they
# are mentioned in scite/doc/SciTEDoc.html.
import os
import string
import stat
# Locations of the SciTE sources and of the documentation/properties files
# that every property name must be mentioned in.
srcRoot = "../../scite"
srcDir = os.path.join(srcRoot, "src")
docFileName = os.path.join(srcRoot, "doc", "SciTEDoc.html")
propsFileName = os.path.join(srcDir, "SciTEGlobal.properties")
try: # Old Python
    identCharacters = "_*." + string.letters + string.digits
except AttributeError: # Python 3.x
    identCharacters = "_*." + string.ascii_letters + string.digits
# These properties are for debugging or for optionally attached features or are archaic
# and kept to preserve compatibility.
# lexerpath.*.lpeg is a special case for LPEG lexers associated with Scintillua projects.
knownDebugOptionalAndArchaicProperties = {
    "asynchronous.sleep":1, # Debug
    "dwell.period":1, # Debug
    "bookmark.pixmap":1, # Debug
    "lexerpath.*.lpeg":1, # Option for Scintillua
    "ipc.director.name":1, # Archaic
}
# These properties are either set by SciTE and used (not set) in property files or
# should only be located in known lexer-specific property files.
knownOutputAndLexerProperties = {
    "find.directory":1,
    "find.what":1,
    "xml.auto.close.tags":1,
    "indent.python.colon":1,
}
knownOutputAndLexerProperties.update(knownDebugOptionalAndArchaicProperties)
# Convert all punctuation characters except '_', '*', and '.' into spaces.
def depunctuate(s):
    """Return *s* with every character outside identCharacters replaced
    by a space, so identifiers can be extracted with str.split()."""
    return "".join(ch if ch in identCharacters else " " for ch in s)
# Collect every .cxx file from each subdirectory of the SciTE tree.
srcPaths = []
for filename in os.listdir(srcRoot):
    dirname = os.path.join(srcRoot, filename)
    if stat.S_ISDIR(os.stat(dirname)[stat.ST_MODE]):
        for src in os.listdir(dirname):
            if ".cxx" in src and ".bak" not in src:
                srcPaths.append(dirname + os.sep + src)
# Collect the lexer .properties files, skipping generated and user-level ones.
propertiesPaths = []
for src in os.listdir(srcDir):
    if ".properties" in src and \
       "Embedded" not in src and \
       "SciTE.properties" not in src and \
       ".bak" not in src:
        propertiesPaths.append(os.path.join(srcDir, src))
def nameOKSrc(src):
    """Return True when *src* is a C++ source/header whose literal strings
    should be checked for localisation.

    Bug fix: the original ignored its parameter and read the global
    ``srcPath`` instead; it only worked because every caller passed the
    current value of that global as the argument.
    """
    if os.path.splitext(src)[1] not in [".cxx", ".h"]:
        return False
    if "lua" in src.lower():
        return False
    if "IFaceTable" in src:
        return False
    if "Exporters" in src:
        return False
    return True
def grabQuoted(s):
    """Return the text between the first pair of double quotes in *s*,
    or "" when there is no complete quoted span."""
    first = s.find('"')
    if first >= 0:
        second = s.find('"', first + 1)
        if second >= 0:
            return s[first + 1:second]
    return ""
def stripComment(s):
    """Drop a C++ // comment (and everything after it) from *s*."""
    pos = s.find("//")
    return s[:pos] if pos != -1 else s
# Pass over the C++ sources: collect property names used through
# props.Get*/ColourOfProperty calls, plus user-visible literal strings.
propertyNames = {}
literalStrings = {}
dontLook = False # ignore contributor names as they don't get localised
#print srcPaths
for srcPath in srcPaths:
    try: # Python 3.0
        srcFile = open(srcPath, encoding='latin_1')
    except TypeError: # Python 2.6
        srcFile = open(srcPath)
    except NameError: # Python 2.3
        srcFile = open(srcPath)
    for srcLine in srcFile.readlines():
        srcLine = stripComment(srcLine).strip()
        # A property access: the first quoted string is the property name.
        if '"' in srcLine and "props" in srcLine and ("Get" in srcLine or "ColourOfProperty" in srcLine):
            parts = srcLine.split('\"')
            #print parts
            if len(parts) > 1:
                propertyName = parts[1]
                if propertyName:
                    propertyNames[propertyName] = 0
                    #print propertyName
        if '"' in srcLine and nameOKSrc(srcPath):
            if "Atsuo" in srcLine or '{"IDM_' in srcLine or dontLook:
                # Keep skipping until the statement ends with ';'.
                dontLook = ";" not in srcLine
            elif not srcLine.startswith("#") and \
                 not srcLine.startswith("//") and \
                 "SendDirector" not in srcLine and \
                 "gtk_signal_connect" not in srcLine:
                srcLine = grabQuoted(srcLine)
                if srcLine:
                    if srcLine[:1] not in ["<"]:
                        literalStrings[srcLine] = 1
    srcFile.close()
# Mark every property that is mentioned in the documentation.
docFile = open(docFileName, "rt")
for docLine in docFile.readlines():
    for word in depunctuate(docLine).split():
        if word in propertyNames.keys():
            propertyNames[word] = 1
docFile.close()
print("# Not mentioned in %s" % docFileName)
identifiersSorted = list(propertyNames.keys())
identifiersSorted.sort()
for identifier in identifiersSorted:
    if not propertyNames[identifier] and identifier not in knownDebugOptionalAndArchaicProperties:
        print(identifier)
# Reset flags for searching properties file
for identifier in identifiersSorted:
    propertyNames[identifier] = 0
def keyOfLine(line):
    """Return the property key of a .properties line, or None when the
    line contains no '='. A single leading '#' (commented-out setting)
    is skipped so disabled settings still count."""
    if '=' not in line:
        return None
    text = line.strip()
    if text.startswith("#"):
        text = text[1:]
    return text[:text.find("=")].strip()
# Mark every property that is set (or commented out) in SciTEGlobal.properties.
propsFile = open(propsFileName, "rt")
for propLine in propsFile.readlines():
    if propLine:
        key = keyOfLine(propLine)
        if key:
            if key in propertyNames.keys():
                propertyNames[key] = 1
propsFile.close()
print("\n# Not mentioned in %s" % propsFileName)
for identifier in identifiersSorted:
    if not propertyNames[identifier] and identifier not in knownOutputAndLexerProperties:
        # Keys ending in '.' are prefixes, not real properties.
        if "." != identifier[-1:]:
            print(identifier)
# This is a test to see whether properties are defined in more than one file.
# It doesn't understand the if directive so yields too many false positives to run often.
print("\n# Duplicate mentions")
"""
fileOfProp = {}
notRealProperties = ["abbrev.properties", "SciTE.properties", "Embedded.properties"]
for filename in os.listdir(srcRoot + os.sep + "src"):
if filename.count(".properties") and filename not in notRealProperties:
propsFile = open(srcRoot + os.sep + "src" + os.sep + filename, "rt")
for line in propsFile.readlines():
if line and not line.startswith("#"):
key = keyOfLine(line)
if key:
if key in fileOfProp:
print("Clash for %s %s %s" % (key, fileOfProp[key], filename))
else:
fileOfProp[key] =filename
propsFile.close()
"""
propertiesSet = {}
for k in propertyNames.keys():
    propertiesSet[k] = 1
# Collect the lower-cased keys of every translated text.
localeFileName = srcRoot + "/win32/locale.properties"
localeSet = {}
for line in open(localeFileName):
    if not line.startswith("#"):
        line = line.strip().strip("=")
        localeSet[line.lower()] = 1
# Collect user-visible strings from the Windows resource script, skipping
# structural statements and accelerator/version definitions.
resourceFileName = srcRoot + "/win32/SciTERes.rc"
resourceSet = {}
for line in open(resourceFileName):
    line = line.strip()
    if "VIRTKEY" not in line and \
       "VALUE" not in line and \
       "1234567" not in line and \
       not line.startswith("BLOCK") and \
       not line.startswith("FONT") and \
       not line.startswith("ICON") and \
       not line.startswith("ID") and \
       "#include" not in line:
        line = grabQuoted(line)
        if line:
            if '\\t' in line:
                # Drop the keyboard-shortcut part after the tab.
                line = line[:line.find('\\t')]
            line = line.replace('&','')
            line = line.replace('...','')
            if len(line) > 2:
                resourceSet[line] = 1
print("\n# Missing localisation of resource")
resourceSet = list(resourceSet.keys())
resourceSet.sort()
for l in resourceSet:
    if l.lower() not in localeSet:
        print(l)
def present(l, n):
    """Return True when the menu/resource text *n* is present in the
    localisation set *l*, either verbatim or after removing the '_' and
    '&' shortcut markers.

    Bug fix: the original ignored the *l* parameter and read the global
    ``localeSet`` instead; callers pass localeSet as *l*, so behaviour at
    the existing call site is unchanged.
    """
    low = n.lower()
    if low in l:
        return True
    return low.replace("_", "").replace("&", "") in l
# Literal strings that are neither property names nor already localised.
literalStrings = [l for l in literalStrings.keys() if l not in identifiersSorted]
literalStrings = [l for l in list(literalStrings) if not present(localeSet, l)]
# NOTE(review): this re-scan of SciTEGlobal.properties repeats the pass done
# earlier and has no further effect; it looks like leftover code.
propsFile = open(propsFileName, "rt")
for propLine in propsFile.readlines():
    if propLine:
        key = keyOfLine(propLine)
        if key:
            if key in propertyNames.keys():
                propertyNames[key] = 1
propsFile.close()
# Map each property key to the list of .properties files that define it.
propToFile = {}
for propPath in propertiesPaths:
    base = os.path.basename(propPath)
    base = propPath  # NOTE(review): overwrites the basename immediately.
    propsFile = open(propPath)
    for propLine in propsFile.readlines():
        if propLine and not propLine.startswith("#"):
            key = keyOfLine(propLine)
            if key:
                if key not in propToFile:
                    propToFile[key] = []
                propToFile[key].append(base)
    propsFile.close()
print("\n# Duplicate properties")
propToFileKeys = list(propToFile.keys())
propToFileKeys.sort()
for k in propToFileKeys:
    files = propToFile[k]
    if len(files) > 1:
        # Only report keys defined in more than one distinct file.
        if files.count(files[0]) < len(files):
            print(k + (", ".join(propToFile[k])))
| Python |
#!/usr/bin/env python
# RegenerateSource.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the SciTE source files that list all the lexers and all the
# properties files.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.5 or later
# Most files are regenerated in place with templates stored in comments.
# The VS .NET project file is generated into a different file as the
# VS .NET environment will not retain comments when modifying the file.
# The format of generation comments is documented in FileGenerator.py.
# Regenerates Scintilla files by calling LexGen.RegenerateAll
import glob, os, sys
srcRoot = "../.."
sys.path.append(srcRoot + "/scintilla/scripts")
from FileGenerator import Generate, Regenerate, UpdateLineInFile, ReplaceREInFile
import ScintillaData
import LexGen
def UpdateVersionNumbers(sci, root):
    """Stamp the current Scintilla version and modification dates (taken
    from *sci*) into the SciTE sources and documentation under *root*."""
    UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_SCITE",
                     "#define VERSION_SCITE \"" + sci.versionDotted + "\"")
    UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_WORDS",
                     "#define VERSION_WORDS " + sci.versionCommad)
    UpdateLineInFile(root + "scite/src/SciTE.h", "#define COPYRIGHT_DATES",
                     '#define COPYRIGHT_DATES "December 1998-' + sci.myModified + '"')
    UpdateLineInFile(root + "scite/src/SciTE.h", "#define COPYRIGHT_YEARS",
                     '#define COPYRIGHT_YEARS "1998-' + sci.yearModified + '"')
    UpdateLineInFile(root + "scite/doc/SciTEDownload.html", " Release",
                     " Release " + sci.versionDotted)
    # Rewrite versioned scintilla download links, e.g. .../scintilla/xyz123.
    ReplaceREInFile(root + "scite/doc/SciTEDownload.html",
                    r"/scintilla/([a-zA-Z]+)\d\d\d",
                    r"/scintilla/\g<1>" + sci.version)
    UpdateLineInFile(root + "scite/doc/SciTE.html",
                     ' <font color="#FFCC99" size="3"> Release version',
                     ' <font color="#FFCC99" size="3"> Release version ' + \
                     sci.versionDotted + '<br />')
    UpdateLineInFile(root + "scite/doc/SciTE.html",
                     ' Site last modified',
                     ' Site last modified ' + sci.mdyModified + '</font>')
    UpdateLineInFile(root + "scite/doc/SciTE.html",
                     ' <meta name="Date.Modified"',
                     ' <meta name="Date.Modified" content="' + sci.dateModified + '" />')
def RegenerateAll():
    """Regenerate every SciTE source/build/doc file that lists lexers or
    properties files, then delegate to LexGen for the Scintilla side."""
    root="../../"
    sci = ScintillaData.ScintillaData(root + "scintilla/")
    # Generate HTML to document each property
    # This is done because tags can not be safely put inside comments in HTML
    documentProperties = list(sci.propertyDocuments.keys())
    ScintillaData.SortListInsensitive(documentProperties)
    propertiesHTML = []
    for k in documentProperties:
        propertiesHTML.append("\t<tr id='property-%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
                              (k, k, sci.propertyDocuments[k]))
    # Find all the SciTE properties files
    # (excluding the generated/user-level ones listed in otherProps).
    otherProps = [
        "abbrev.properties",
        "Embedded.properties",
        "SciTEGlobal.properties",
        "SciTE.properties"]
    propFilePaths = glob.glob(root + "scite/src/*.properties")
    ScintillaData.SortListInsensitive(propFilePaths)
    propFiles = [os.path.basename(f) for f in propFilePaths if os.path.basename(f) not in otherProps]
    ScintillaData.SortListInsensitive(propFiles)
    # Rewrite the generated sections of the build and doc files in place.
    Regenerate(root + "scite/win32/makefile", "#", propFiles)
    Regenerate(root + "scite/win32/scite.mak", "#", propFiles)
    Regenerate(root + "scite/src/SciTEProps.cxx", "//", sci.lexerProperties)
    Regenerate(root + "scite/doc/SciTEDoc.html", "<!--", propertiesHTML)
    Generate(root + "scite/boundscheck/vcproj.gen",
             root + "scite/boundscheck/SciTE.vcproj", "#", sci.lexFiles)
    UpdateVersionNumbers(sci, root)
    LexGen.RegenerateAll("../../scintilla/")
RegenerateAll()
| Python |
#!/usr/bin/env python3
# CheckMentioned.py
# Find all the symbols in scintilla/include/Scintilla.h and check if they
# are mentioned in scintilla/doc/ScintillaDoc.html.
import string
# Symbols that need no mention in the documentation.
uninteresting = {
    "SCINTILLA_H", "SCI_START", "SCI_LEXER_START", "SCI_OPTIONAL_START",
    # These archaic names are #defined to the Sci_ prefixed modern equivalents.
    # They are not documented so they are not used in new code.
    "CharacterRange", "TextRange", "TextToFind", "RangeToFormat",
}
srcRoot = "../.."
incFileName = srcRoot + "/scintilla/include/Scintilla.h"
docFileName = srcRoot + "/scintilla/doc/ScintillaDoc.html"
try: # Old Python
    identCharacters = "_" + string.letters + string.digits
except AttributeError: # Python 3.x
    identCharacters = "_" + string.ascii_letters + string.digits
# Convert all punctuation characters except '_' into spaces.
def depunctuate(s):
    """Return *s* with every non-identifier character replaced by a space."""
    out = []
    for ch in s:
        out.append(ch if ch in identCharacters else " ")
    return "".join(out)
# Collect every #define-d identifier from Scintilla.h, mapped to 0
# (= not yet seen in the documentation).
symbols = {}
incFile = open(incFileName, "rt")
for line in incFile.readlines():
    if line.startswith("#define"):
        identifier = line.split()[1]
        symbols[identifier] = 0
incFile.close()
# Mark each symbol that appears anywhere in ScintillaDoc.html.
docFile = open(docFileName, "rt")
for line in docFile.readlines():
    for word in depunctuate(line).split():
        if word in symbols.keys():
            symbols[word] = 1
docFile.close()
# Report undocumented symbols, skipping the known-uninteresting ones.
identifiersSorted = list(symbols.keys())
identifiersSorted.sort()
for identifier in identifiersSorted:
    if not symbols[identifier] and identifier not in uninteresting:
        print(identifier)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os, sys
scintillaDirectory = os.path.join("..", "..", "scintilla")
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
def cell(s):
    """Wrap *s* in an HTML table-data element."""
    return "<td>{0}</td>".format(s)
def faceFeatures(out):
    """Write an HTML table of Scintilla key commands to *out*.

    A key command is any 'fun' feature from Scintilla.iface that returns
    void and takes no parameters.
    """
    out.write("<h2>Scintilla key commands</h2>\n")
    out.write("<table>\n")
    out.write("<thead>%s%s%s</thead>\n" % (cell("Command"), cell("Name"), cell("Explanation")))
    face = Face.Face()
    face.ReadFromFile(os.path.join(scintillaDirectory, "include", "Scintilla.iface"))
    texts = []
    for name in face.features:
        #~ print name
        f = face.features[name]
        if f["FeatureType"] == "fun" and \
           f["ReturnType"] == "void" and \
           not (f["Param1Type"] or f["Param2Type"]):
            texts.append([name, f["Value"], " ".join(f["Comment"])])
    texts.sort()
    for t in texts:
        # Columns: numeric message value, feature name, comment text.
        out.write("<tr>%s%s%s</tr>\n" % (cell(t[1]), cell(t[0]), cell(t[2])))
    out.write("</table>\n")
def menuFeatures(out):
    """Write an HTML table of SciTE menu commands, parsed from the Windows
    resource script SciTERes.rc, to *out*."""
    out.write("<h2>SciTE menu commands</h2>\n")
    out.write("<table>\n")
    out.write("<thead>%s%s</thead>\n" % (cell("Command"), cell("Menu text")))
    with open(os.path.join("..", "win32", "SciTERes.rc"), "rt") as f:
        for l in f:
            l = l.strip()
            if l.startswith("MENUITEM") and "SEPARATOR" not in l:
                l = l.replace("MENUITEM", "").strip()
                # Resource lines look like: "Text\tShortcut", IDM_SYMBOL
                text, symbol = l.split('",', 1)
                symbol = symbol.strip()
                # Drop the opening quote, shortcut markers and ellipses.
                text = text[1:].replace("&", "").replace("...", "")
                if "\\t" in text:
                    # Cut off the keyboard shortcut after the literal tab.
                    text = text.split("\\t",1)[0]
                if text:
                    out.write("<tr><td>%s</td><td>%s</td></tr>\n" % (symbol, text))
    out.write("</table>\n")
# Document prologue written before the generated tables.
startFile = """
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<!--Generated by scite/scripts/commandsdoc.py -->
<style type="text/css">
table { border: 1px solid #1F1F1F; border-collapse: collapse; }
td { border: 1px solid; border-color: #E0E0E0 #000000; padding: 1px 5px 1px 5px; }
th { border: 1px solid #1F1F1F; padding: 1px 5px 1px 5px; }
thead { background-color: #000000; color: #FFFFFF; }
</style>
<body>
"""
# Generate doc/CommandValues.html containing both command tables.
if __name__ == "__main__":
    with open(os.path.join("..", "doc", "CommandValues.html"), "w") as out:
        out.write(startFile)
        menuFeatures(out)
        faceFeatures(out)
        out.write("</body>\n</html>\n")
| Python |
#!/usr/bin/env python
# makerpms.py
# Copy files all over the place build RPMs and copy to top level directory
import os
import shutil
srcRoot = "../../"
rpmRoot = "/usr/src/redhat/SOURCES/"
rpmBin = "/usr/src/redhat/RPMS/i386/"
rpmSource = "/usr/src/redhat/SRPMS/"
# version.txt holds just the digits of the version, e.g. "139".
verFileName = srcRoot + "scintilla/version.txt"
vers = open(verFileName)
#139
vFull = vers.read().strip()
vers.close()
#1.39
vPoint = vFull[0] + "." + vFull[1:]
#1, 3, 9, 0
vComma = vFull[0] + ", " + vFull[1] + ", " + vFull[2] + ", 0"
print("[ %s | %s | %s ]" % (vFull, vPoint, vComma))
tgzV = "scite" + vFull + ".tgz"
tgzFileName = srcRoot + "scite.tgz"
tgzVFileName = srcRoot + tgzV
print("[ %s | %s ]" % (tgzFileName, tgzVFileName))
if not os.access(tgzFileName, os.F_OK):
    print("Base file '" + tgzFileName + "' does not exist.")
else:
    # Rename the tarball to a versioned name, copy it into the RPM build
    # tree, build the RPMs, and copy the results back to the top level.
    shutil.copyfile(tgzFileName, tgzVFileName)
    os.unlink(tgzFileName)
    rpmVFileName = rpmRoot + tgzV
    shutil.copyfile(tgzVFileName, rpmVFileName)
    # Run the rpm build command
    os.system("rpm -ba scite.spec")
    rpmB = "scite-" + vPoint + "-1.i386.rpm"
    shutil.copyfile(rpmBin + rpmB, srcRoot + rpmB)
    rpmS = "scite-" + vPoint + "-1.src.rpm"
    shutil.copyfile(rpmSource + rpmS, srcRoot + rpmS)
| Python |
#! /usr/bin/env python
# Produces a .api file for SciTE's identifier completion and calltip features.
# invoke as
# python tags2api.py tags >x.api
# before running this program, create a tags file with
# ctags --excmd=number --c-types=pcdgstu <header files>
import fileinput
import time
# Definitions that start with _ are often used for infrastructure
# like include guards and can be removed with
# Drop identifiers starting with '_' (include guards etc.) when set.
removePrivate=1
# The Windows headers include both ANSI and UniCode versions
# of many functions with macros that default to one or the other.
# When winMode is on, these 4 lines are replaced with either the
# ANSI or UniCode prototype.
winMode = 1
include="A" # Set to "W" if you want the UniCode prototypes.
class FileCache:
    '''Caches the contents of a set of files.
    Avoids reading files repeatedly from disk by holding onto the
    contents of each file as a list of strings.
    '''
    def __init__(self):
        self.filecache = {}
    def grabFile(self, filename):
        '''Return the contents of a file as a list of strings.
        New line characters are removed.
        '''
        try:
            return self.filecache[filename]
        except KeyError:
            pass
        lines = []
        f = open(filename)
        for raw in f.readlines():
            if raw.endswith('\n'):
                raw = raw[:-1]
            lines.append(raw)
        f.close()
        self.filecache[filename] = lines
        return lines
def bracesDiff(s):
    ''' Counts the number of '(' and ')' in a string and returns the difference between the two.
    Used to work out when a function prototype is complete.
    Parentheses inside double-quoted strings and /* */ comments are ignored,
    and scanning stops at a // comment.
    '''
    DEFAULT, COMMENT, STRING = 0, 1, 2
    state = DEFAULT
    depth = 0
    prev = ''
    for idx, ch in enumerate(s):
        if state == DEFAULT:
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            elif ch == '"':
                state = STRING
            elif idx > 0 and prev == '/' and ch == '/':
                return depth
            elif idx > 0 and prev == '/' and ch == '*':
                state = COMMENT
        elif state == COMMENT:
            if idx > 0 and prev == '*' and ch == '/':
                state = DEFAULT
        elif state == STRING:
            if ch == '"':
                state = DEFAULT
        prev = ch
    return depth
# Walk the ctags output on stdin/argv and build the set of API entries.
# Each tags line: entityName \t fileName \t lineNo;" \t tagType ...
fc = FileCache()
apis = {}
for line in fileinput.input():
    if line[0] != '!': # Not a comment.
        (entityName, fileName, lineNo, tagType) = line.split("\t")[:4]
        curLineNo = int(lineNo[:-2]) - 1 # -1 because line numbers in tags file start at 1.
        contents = fc.grabFile(fileName)
        if (not removePrivate or entityName[0] != '_') and not entityName.startswith("operator "):
            if tagType[0] in "pf": # Function prototype.
                try:
                    braces = bracesDiff(contents[curLineNo])
                    curDef = contents[curLineNo]
                    while braces > 0: # Search for end of prototype.
                        curLineNo = curLineNo + 1
                        braces = braces + bracesDiff(contents[curLineNo])
                        curDef = curDef + contents[curLineNo]
                    # Normalise the appearance of the prototype.
                    curDef = curDef.strip()
                    # Replace whitespace sequences with a single space character.
                    curDef = " ".join(curDef.split())
                    # Remove space around the '('.
                    curDef = curDef.replace(" (", '(').replace("( ", '(')
                    # Remove trailing semicolon.
                    curDef = curDef.replace(";", '')
                    # Remove implementation if present.
                    if "{" in curDef and "}" in curDef:
                        startImpl = curDef.find("{")
                        endImpl = curDef.find("}")
                        curDef = curDef[:startImpl] + curDef[endImpl+1:]
                    else:
                        # Remove trailing brace.
                        curDef = curDef.rstrip("{")
                    # Remove return type.
                    curDef = curDef[curDef.find(entityName):]
                    # Remove virtual indicator.
                    if curDef.replace(" ", "").endswith(")=0"):
                        curDef = curDef.rstrip("0 ")
                        curDef = curDef.rstrip("= ")
                    # Remove trailing space.
                    curDef = curDef.rstrip()
                    if winMode:
                        if curDef.find("A(") >= 0:
                            if "A" in include:
                                apis[curDef.replace("A(", '(')] = 1
                        elif curDef.find("W(") >= 0:
                            if "W" in include:
                                apis[curDef.replace("W(", '(')] = 1
                        else: # A character set independent function.
                            apis[curDef] = 1
                    else:
                        # BUG FIX: was ``apis.add[curDef] = 1`` which raises
                        # AttributeError on a dict; store the key directly.
                        apis[curDef] = 1
                except IndexError:
                    pass
            elif tagType[0] == 'd': # Macro definition.
                curDef = contents[curLineNo]
                if (not winMode) or (curDef[-1] not in "AW"):
                    apis[entityName] = 1
            else:
                apis[entityName] = 1
apisKeys = list(apis.keys())
apisKeys.sort()
print("\n".join(apisKeys))
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PySide.QtCore import *
from PySide.QtGui import *
import ScintillaConstants as sci
sys.path.append("../..")
from bin import ScintillaEditPy
# Initial buffer contents: a tiny C program that gives the C++ lexer
# something to colour when the demo starts.
txtInit = "int main(int argc, char **argv) {\n" \
" // Start up the gnome\n" \
" gnome_init(\"stest\", \"1.0\", argc, argv);\n}\n";
# C++ keyword list handed to the lexer as keyword set 0 (see setKeyWords).
keywords = \
"and and_eq asm auto bitand bitor bool break " \
"case catch char class compl const const_cast continue " \
"default delete do double dynamic_cast else enum explicit export extern false float for " \
"friend goto if inline int long mutable namespace new not not_eq " \
"operator or or_eq private protected public " \
"register reinterpret_cast return short signed sizeof static static_cast struct switch " \
"template this throw true try typedef typeid typename union unsigned using " \
"virtual void volatile wchar_t while xor xor_eq";
def uriDropped():
    # Slot connected to the editor's uriDropped signal; just logs the event.
    print "uriDropped"
class Form(QDialog):
    """Demo dialog hosting a single ScintillaEdit widget.

    The constructor configures C++ lexing and styling, exercises many of the
    text-returning wrapper methods by printing their results, then
    demonstrates detaching and attaching Scintilla documents.
    """
    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.resize(460,300)
        # Create widgets
        self.edit = ScintillaEditPy.ScintillaEdit(self)
        # Wire up the editor's Qt signals to local handlers.
        self.edit.uriDropped.connect(uriDropped)
        self.edit.command.connect(self.receive_command)
        self.edit.notify.connect(self.receive_notification)
        # Basic appearance: line-number margin and horizontal scroll tracking.
        self.edit.styleClearAll()
        self.edit.setMarginWidthN(0, 35)
        self.edit.setScrollWidth(200)
        self.edit.setScrollWidthTracking(1)
        # Colour the C++ lexical classes (values are BGR integers).
        self.edit.setLexer(sci.SCLEX_CPP)
        self.edit.styleSetFore(sci.SCE_C_COMMENT, 0x008000)
        self.edit.styleSetFore(sci.SCE_C_COMMENTLINE, 0x008000)
        self.edit.styleSetFore(sci.SCE_C_COMMENTDOC, 0x008040)
        self.edit.styleSetItalic(sci.SCE_C_COMMENTDOC, 1)
        self.edit.styleSetFore(sci.SCE_C_NUMBER, 0x808000)
        self.edit.styleSetFore(sci.SCE_C_WORD, 0x800000)
        self.edit.styleSetBold(sci.SCE_C_WORD, True)
        self.edit.styleSetFore(sci.SCE_C_STRING, 0x800080)
        self.edit.styleSetFore(sci.SCE_C_PREPROCESSOR, 0x008080)
        self.edit.styleSetBold(sci.SCE_C_OPERATOR, True)
        # Multiple selection and virtual space options.
        self.edit.setMultipleSelection(1)
        self.edit.setVirtualSpaceOptions(
            sci.SCVS_RECTANGULARSELECTION | sci.SCVS_USERACCESSIBLE)
        self.edit.setAdditionalSelectionTyping(1)
        self.edit.styleSetFore(sci.STYLE_INDENTGUIDE, 0x808080)
        self.edit.setIndentationGuides(sci.SC_IV_LOOKBOTH)
        self.edit.setKeyWords(0, keywords)
        # Load the demo text and make a selection to exercise getSelText.
        self.edit.addText(len(txtInit), txtInit)
        self.edit.setSel(1,10)
        # Exercise the string-returning wrappers and print what they yield.
        retriever = str(self.edit.getLine(1))
        print(type(retriever), len(retriever))
        print('[' + retriever + ']')
        someText = str(self.edit.textRange(2,5))
        print(len(someText), '[' + someText + ']')
        someText = self.edit.getCurLine(100)
        print(len(someText), '[' + someText + ']')
        someText = self.edit.styleFont(1)
        print(len(someText), '[' + someText + ']')
        someText = self.edit.getSelText()
        print(len(someText), '[' + someText + ']')
        someText = self.edit.tag(1)
        print(len(someText), '[' + someText + ']')
        someText = self.edit.autoCCurrentText()
        print(len(someText), '[' + someText + ']')
        someText = self.edit.annotationText(1)
        print(len(someText), '[' + someText + ']')
        someText = self.edit.annotationStyles(1)
        print(len(someText), '[' + someText + ']')
        someText = self.edit.describeKeyWordSets()
        print(len(someText), '[' + someText + ']')
        someText = self.edit.propertyNames()
        print(len(someText), '[' + someText + ']')
        # Round-trip a lexer property.
        self.edit.setProperty("fold", "1")
        someText = self.edit.property("fold")
        print(len(someText), '[' + someText + ']')
        someText = self.edit.propertyExpanded("fold")
        print(len(someText), '[' + someText + ']')
        someText = self.edit.lexerLanguage()
        print(len(someText), '[' + someText + ']')
        someText = self.edit.describeProperty("styling.within.preprocessor")
        print(len(someText), '[' + someText + ']')
        xx = self.edit.findText(0, "main", 0, 25)
        print(type(xx), xx)
        print("isBold", self.edit.styleBold(sci.SCE_C_WORD))
        # Retrieve the document and write into it
        doc = self.edit.get_doc()
        doc.insert_string(40, "***")
        stars = doc.get_char_range(40,3)
        assert stars == "***"
        # Create a new independent document and attach it to the editor
        doc = ScintillaEditPy.ScintillaDocument()
        doc.insert_string(0, "/***/\nif(a)\n")
        self.edit.set_doc(doc)
        self.edit.setLexer(sci.SCLEX_CPP)
    def Call(self, message, wParam=0, lParam=0):
        """Send a raw Scintilla message to the editor and return the result."""
        return self.edit.send(message, wParam, lParam)
    def resizeEvent(self, e):
        # Keep the editor filling the whole dialog.
        self.edit.resize(e.size().width(), e.size().height())
    def receive_command(self, wParam, lParam):
        """Handle SCEN_* command notifications from the editor."""
        # Show underline at start when focussed
        notifyCode = wParam >> 16
        if (notifyCode == sci.SCEN_SETFOCUS) or (notifyCode == sci.SCEN_KILLFOCUS):
            self.edit.setIndicatorCurrent(sci.INDIC_CONTAINER);
            self.edit.indicatorClearRange(0, self.edit.length())
            if notifyCode == sci.SCEN_SETFOCUS:
                self.edit.indicatorFillRange(0, 2);
    def receive_notification(self, scn):
        """Log SCN_* notifications; painted notifications are ignored."""
        if scn.nmhdr.code == sci.SCN_CHARADDED:
            print "Char %02X" % scn.ch
        elif scn.nmhdr.code == sci.SCN_SAVEPOINTREACHED:
            print "Saved"
        elif scn.nmhdr.code == sci.SCN_SAVEPOINTLEFT:
            print "Unsaved"
        elif scn.nmhdr.code == sci.SCN_MODIFIED:
            print "Modified"
        elif scn.nmhdr.code == sci.SCN_UPDATEUI:
            print "Update UI"
        elif scn.nmhdr.code == sci.SCN_PAINTED:
            #print "Painted"
            pass
        else:
            print "Notification", scn.nmhdr.code
            pass
# Script entry point: run the ScintillaEditPy demo dialog.
if __name__ == '__main__':
    # Create the Qt Application
    app = QApplication(sys.argv)
    # Create and show the form
    form = Form()
    form.show()
    # Run the main Qt loop
    sys.exit(app.exec_())
| Python |
import distutils.sysconfig
import getopt
import glob
import os
import platform
import shutil
import subprocess
import stat
import sys
sys.path.append(os.path.join("..", "ScintillaEdit"))
import WidgetGen
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
from FileGenerator import GenerateFile
# Decide up front which platform, treat anything other than Windows or OS X as Linux
PLAT_WINDOWS = platform.system() == "Windows"
PLAT_DARWIN = platform.system() == "Darwin"
PLAT_LINUX = not (PLAT_DARWIN or PLAT_WINDOWS)
def IsFileNewer(name1, name2):
    """Return whether the file named name1 is newer than the file named name2.

    Returns a false value when name1 does not exist and a true value when
    name1 exists but name2 does not (callers only test truthiness, so the
    bool return is backward compatible with the previous 0/1 ints).
    """
    if not os.path.exists(name1):
        return False
    if not os.path.exists(name2):
        return True
    # os.path.getmtime is the idiomatic equivalent of os.stat(...)[stat.ST_MTIME].
    return os.path.getmtime(name1) > os.path.getmtime(name2)
def textFromRun(args):
    """Run *args* through the shell and return its captured standard output.

    universal_newlines=True makes communicate() return str rather than bytes
    under Python 3 (it is a no-op under Python 2); callers concatenate the
    result with other strings (e.g. building the qmake query command), which
    raised TypeError when bytes leaked out of here.
    """
    proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE,
                            universal_newlines=True)
    (stdoutdata, stderrdata) = proc.communicate()
    return stdoutdata
def runProgram(args, exitOnFailure):
    """Echo and run the command line built from *args*; report failures and
    optionally terminate the script when the command fails."""
    commandLine = " ".join(args)
    print(commandLine)
    retcode = subprocess.call(commandLine, shell=True, stderr=subprocess.STDOUT)
    if retcode:
        print("Failed in " + commandLine + " return code = " + str(retcode))
        if exitOnFailure:
            sys.exit()
def usage():
    """Print the command-line help for sepbuild.py."""
    helpLines = [
        "sepbuild.py [-h|--help][-c|--clean][-u|--underscore-names]",
        "",
        "Generate PySide wappers and build them.",
        "",
        "options:",
        "",
        "-c --clean remove all object and generated files",
        "-b --pyside-base Location of the PySide+Qt4 sandbox to use",
        "-h --help display this text",
        "-d --debug=yes|no force debug build (or non-debug build)",
        "-u --underscore-names use method_names consistent with GTK+ standards",
    ]
    print("\n".join(helpLines))
# Typesystem XML fragment templates used by printTypeSystemFile.
# modifyFunctionElement: first %s is the C++ method signature, second %s is
# the code injected into that method's generated wrapper.
modifyFunctionElement = """ <modify-function signature="%s">%s
</modify-function>"""
# Wraps generated checks so they run at the beginning of the wrapped call.
injectCode = """
<inject-code class="target" position="beginning">%s
</inject-code>"""
# Guard for C++ argument %d that rejects null string pointers from Python.
injectCheckN = """
if (!cppArg%d) {
PyErr_SetString(PyExc_ValueError, "Null string argument");
return 0;
}"""
def methodSignature(name, v, options):
    """Build the C++ signature string shiboken uses to identify a method."""
    def cppType(ifaceType):
        # Map an iface type to C++, widening a plain int to sptr_t.
        alias = WidgetGen.cppAlias(ifaceType)
        return "sptr_t" if alias == "int" else alias

    argList = []
    firstType = cppType(v["Param1Type"])
    if firstType:
        argList.append(firstType)
    secondType = cppType(v["Param2Type"])
    # A stringresult second parameter becomes the return value, not an argument.
    if secondType and v["Param2Type"] != "stringresult":
        argList.append(secondType)
    methodName = WidgetGen.normalisedName(name, options, v["FeatureType"])
    constDeclarator = " const" if v["FeatureType"] == "get" else ""
    return methodName + "(" + ", ".join(argList) + ")" + constDeclarator
def printTypeSystemFile(f, options):
    """Return typesystem <modify-function> entries that add null-pointer
    guards for every string parameter of the Scintilla API."""
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] == "Deprecated":
            continue
        if v["FeatureType"] not in ["fun", "get", "set"]:
            continue
        checks = ""
        if v["Param1Type"] == "string":
            checks += injectCheckN % 0
        if v["Param2Type"] == "string":
            # A lone second argument is generated as C++ argument 0.
            argIndex = 0 if v["Param1Type"] == "" else 1
            checks += injectCheckN % argIndex
        if checks:
            out.append(modifyFunctionElement % (
                methodSignature(name, v, options), injectCode % checks))
    return out
def doubleBackSlashes(s):
    """Double each backslash so qmake does not treat them as escapes."""
    return "\\\\".join(s.split("\\"))
class SepBuilder:
    """Discovers the build configuration and drives generation and
    compilation of the ScintillaEditPy PySide bindings."""
    def __init__(self):
        # Discover configuration parameters
        self.ScintillaEditIncludes = [".", "../ScintillaEdit", "../ScintillaEditBase", "../../include"]
        if PLAT_WINDOWS:
            self.MakeCommand = "nmake"
            self.MakeTarget = "release"
        else:
            self.MakeCommand = "make"
            self.MakeTarget = ""
        if PLAT_DARWIN:
            self.QMakeOptions = "-spec macx-g++"
        else:
            self.QMakeOptions = ""
        # Default to debug build if running in a debug build interpreter
        # (sys.getobjects only exists in debug/trace-refs interpreters).
        self.DebugBuild = hasattr(sys, 'getobjects')
        # Python
        self.PyVersion = "%d.%d" % sys.version_info[:2]
        self.PyVersionSuffix = distutils.sysconfig.get_config_var("VERSION")
        self.PyIncludes = distutils.sysconfig.get_python_inc()
        self.PyPrefix = distutils.sysconfig.get_config_var("prefix")
        self.PyLibDir = distutils.sysconfig.get_config_var(
            ("LIBDEST" if sys.platform == 'win32' else "LIBDIR"))
        # Scintilla
        # version.txt holds the version as bare digits, e.g. "335" -> 3.3.5.
        with open("../../version.txt") as f:
            version = f.read()
        self.ScintillaVersion = version[0] + '.' + version[1] + '.' + version[2]
        # Find out what qmake is called
        self.QMakeCommand = "qmake"
        if not PLAT_WINDOWS:
            # On Unix qmake may not be present but qmake-qt4 may be so check
            pathToQMake = textFromRun("which qmake-qt4 || which qmake").rstrip()
            self.QMakeCommand = os.path.basename(pathToQMake)
        # Qt default location from qmake
        self._SetQtIncludeBase(textFromRun(self.QMakeCommand + " -query QT_INSTALL_HEADERS").rstrip())
        # PySide default location
        # No standard for installing PySide development headers and libs on Windows so
        # choose /usr to be like Linux
        self._setPySideBase('\\usr' if PLAT_WINDOWS else '/usr')
        self.ProInclude = "sepbuild.pri"
        self.qtStyleInterface = True
    def _setPySideBase(self, base):
        """Record the PySide sandbox root and derive all include/lib paths."""
        self.PySideBase = base
        if PLAT_LINUX:
            # On Linux pkg-config knows where PySide and shiboken live.
            self.PySideTypeSystem = textFromRun("pkg-config --variable=typesystemdir pyside").rstrip()
            self.PySideIncludeBase = textFromRun("pkg-config --variable=includedir pyside").rstrip()
            self.ShibokenIncludeBase = textFromRun("pkg-config --variable=includedir shiboken").rstrip()
        else:
            self.PySideTypeSystem = os.path.join(self.PySideBase, "share", "PySide", "typesystems")
            self.ShibokenIncludeBase = os.path.join(self.PySideBase, "include", "shiboken")
            self.PySideIncludeBase = os.path.join(self.PySideBase, "include", "PySide")
        self.PySideIncludes = [
            self.ShibokenIncludeBase,
            self.PySideIncludeBase,
            os.path.join(self.PySideIncludeBase, "QtCore"),
            os.path.join(self.PySideIncludeBase, "QtGui")]
        self.PySideLibDir = os.path.join(self.PySideBase, "lib")
        self.AllIncludes = os.pathsep.join(self.QtIncludes + self.ScintillaEditIncludes + self.PySideIncludes)
        self.ShibokenGenerator = "shiboken"
        # Is this still needed? It doesn't work with latest shiboken sources
        #if PLAT_DARWIN:
        #    # On OS X, can not automatically find Shiboken dylib so provide a full path
        #    self.ShibokenGenerator = os.path.join(self.PySideLibDir, "generatorrunner", "shiboken")
    def generateAPI(self, args):
        """Run WidgetGen in the ScintillaEdit directory, then generate the
        typesystem XML (with null-string guards) for this build."""
        os.chdir(os.path.join("..", "ScintillaEdit"))
        if not self.qtStyleInterface:
            args.insert(0, '--underscore-names')
        WidgetGen.main(args)
        f = WidgetGen.readInterface(False)
        os.chdir(os.path.join("..", "ScintillaEditPy"))
        options = {"qtStyle": self.qtStyleInterface}
        GenerateFile("typesystem_ScintillaEdit.xml.template", "typesystem_ScintillaEdit.xml",
            "<!-- ", True, printTypeSystemFile(f, options))
    def runGenerator(self):
        """Locate shiboken/generatorrunner and invoke it to emit the wrappers.

        Exits the script when the generator fails.
        """
        generatorrunner = "shiboken"
        # Prefer an executable inside the PySide sandbox if one exists.
        for name in ('shiboken', 'generatorrunner'):
            if PLAT_WINDOWS:
                name += '.exe'
            name = os.path.join(self.PySideBase, "bin", name)
            if os.path.exists(name):
                generatorrunner = name
                break
        args = [
            generatorrunner,
            "--generator-set=" + self.ShibokenGenerator,
            "global.h ",
            "--avoid-protected-hack",
            "--enable-pyside-extensions",
            "--include-paths=" + self.AllIncludes,
            "--typesystem-paths=" + self.PySideTypeSystem,
            "--output-directory=.",
            "typesystem_ScintillaEdit.xml"]
        print(" ".join(args))
        retcode = subprocess.call(" ".join(args), shell=True, stderr=subprocess.STDOUT)
        if retcode:
            print("Failed in generatorrunner", retcode)
            sys.exit()
    def writeVariables(self):
        """Write the discovered configuration into sepbuild.pri for qmake."""
        # Write variables needed into file to be included from project so it does not have to discover much
        with open(self.ProInclude, "w") as f:
            f.write("SCINTILLA_VERSION=" + self.ScintillaVersion + "\n")
            f.write("PY_VERSION=" + self.PyVersion + "\n")
            f.write("PY_VERSION_SUFFIX=" + self.PyVersionSuffix + "\n")
            f.write("PY_PREFIX=" + doubleBackSlashes(self.PyPrefix) + "\n")
            f.write("PY_INCLUDES=" + doubleBackSlashes(self.PyIncludes) + "\n")
            f.write("PY_LIBDIR=" + doubleBackSlashes(self.PyLibDir) + "\n")
            f.write("PYSIDE_INCLUDES=" + doubleBackSlashes(self.PySideIncludeBase) + "\n")
            f.write("PYSIDE_LIB=" + doubleBackSlashes(self.PySideLibDir) + "\n")
            f.write("SHIBOKEN_INCLUDES=" + doubleBackSlashes(self.ShibokenIncludeBase) + "\n")
            if self.DebugBuild:
                f.write("CONFIG += debug\n")
            else:
                f.write("CONFIG += release\n")
    def make(self):
        """Run qmake then make/nmake; abort the script on failure."""
        runProgram([self.QMakeCommand, self.QMakeOptions], exitOnFailure=True)
        runProgram([self.MakeCommand, self.MakeTarget], exitOnFailure=True)
    def cleanEverything(self):
        """Remove every generated file, log and build directory."""
        self.generateAPI(["--clean"])
        runProgram([self.MakeCommand, "distclean"], exitOnFailure=False)
        filesToRemove = [self.ProInclude, "typesystem_ScintillaEdit.xml",
            "../../bin/ScintillaEditPy.so", "../../bin/ScintillaConstants.py"]
        for file in filesToRemove:
            try:
                os.remove(file)
            except OSError:
                # Missing files are fine when cleaning.
                pass
        for logFile in glob.glob("*.log"):
            try:
                os.remove(logFile)
            except OSError:
                pass
        shutil.rmtree("debug", ignore_errors=True)
        shutil.rmtree("release", ignore_errors=True)
        shutil.rmtree("ScintillaEditPy", ignore_errors=True)
    def buildEverything(self):
        """Parse the command line, then either clean or generate and build."""
        cleanGenerated = False
        opts, args = getopt.getopt(sys.argv[1:], "hcdub",
            ["help", "clean", "debug=",
            "underscore-names", "pyside-base="])
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                usage()
                sys.exit()
            elif opt in ("-c", "--clean"):
                cleanGenerated = True
            elif opt in ("-d", "--debug"):
                self.DebugBuild = (arg == '' or arg.lower() == 'yes')
                if self.DebugBuild and sys.platform == 'win32':
                    self.MakeTarget = 'debug'
            elif opt in ("-b", '--pyside-base'):
                # Point both Qt and PySide discovery at the given sandbox.
                self._SetQtIncludeBase(os.path.join(os.path.normpath(arg), 'include'))
                self._setPySideBase(os.path.normpath(arg))
            elif opt in ("-u", "--underscore-names"):
                self.qtStyleInterface = False
        if cleanGenerated:
            self.cleanEverything()
        else:
            self.writeVariables()
            # NOTE(review): generateAPI receives [""] here, forwarding an
            # empty first argument to WidgetGen.main — presumably harmless;
            # confirm against WidgetGen's option parsing.
            self.generateAPI([""])
            self.runGenerator()
            self.make()
            self.copyScintillaConstants()
    def copyScintillaConstants(self):
        """Copy ScintillaConstants.py into bin/ when it is out of date."""
        orig = 'ScintillaConstants.py'
        dest = '../../bin/' + orig
        if IsFileNewer(dest, orig):
            return
        f = open(orig, 'r')
        contents = f.read()
        f.close()
        f = open(dest, 'w')
        f.write(contents)
        f.close()
    def _SetQtIncludeBase(self, base):
        """Record the Qt header root and put its bin directory on PATH so the
        matching qmake is the one found later."""
        self.QtIncludeBase = base
        self.QtIncludes = [self.QtIncludeBase] + [os.path.join(self.QtIncludeBase, sub) for sub in ["QtCore", "QtGui"]]
        # Set path so correct qmake is found
        path = os.environ.get('PATH', '').split(os.pathsep)
        qt_bin_dir = os.path.join(os.path.dirname(base), 'bin')
        if qt_bin_dir not in path:
            path.insert(0, qt_bin_dir)
        os.environ['PATH'] = os.pathsep.join(path)
if __name__ == "__main__":
    # Drive a complete generate + build (or clean) when run as a script.
    sepBuild = SepBuilder()
    sepBuild.buildEverything()
| Python |
#!/usr/bin/env python
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
from FileGenerator import GenerateFile
def underscoreName(s):
    """Convert a CamelCase API name to gtkscintilla2's under_score style."""
    # Collapse multi-capital words first so each counts as a single word.
    for word in ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']:
        s = s.replace(word, word.capitalize())
    pieces = []
    for ch in s:
        if ch.isupper() and pieces:
            pieces.append("_" + ch.lower())
        elif ch.isupper():
            pieces.append(ch.lower())
        else:
            pieces.append(ch)
    return "".join(pieces)
def normalisedName(s, options, role=None):
    """Return the API name in the configured naming style."""
    if not options["qtStyle"]:
        return underscoreName(s)
    # Qt style: drop the Get prefix on getters and lowercase the first letter.
    if role == "get":
        s = s.replace("Get", "")
    return s[0].lower() + s[1:]
# Mapping from Scintilla.iface type names to their C++ equivalents.
typeAliases = {
    "position": "int",
    "colour": "int",
    "keymod": "int",
    "string": "const char *",
    "stringresult": "const char *",
    "cells": "const char *",
}

def cppAlias(s):
    """Translate an iface type name to C++, passing unknown names through."""
    return typeAliases.get(s, s)
# Iface types the generator knows how to emit.
understoodTypes = ["", "void", "int", "bool", "position",
    "colour", "keymod", "string", "stringresult", "cells"]

def checkTypes(name, v):
    """Return whether every type used by feature *v* can be generated."""
    return all(v[field] in understoodTypes
               for field in ("ReturnType", "Param1Type", "Param2Type"))
def arguments(v, stringResult, options):
    """Build the C++ parameter list (types and names) for a feature."""
    def cppType(ifaceType):
        # Map an iface type to C++, widening a plain int to sptr_t.
        alias = cppAlias(ifaceType)
        return "sptr_t" if alias == "int" else alias

    params = []
    firstType = cppType(v["Param1Type"])
    if firstType:
        params.append(firstType + " " + normalisedName(v["Param1Name"], options))
    secondType = cppType(v["Param2Type"])
    # A stringresult second parameter becomes the return value instead.
    if secondType and not stringResult:
        params.append(secondType + " " + normalisedName(v["Param2Name"], options))
    return ", ".join(params)
def printPyFile(f, options):
    """Generate ScintillaConstants.py lines: plain values plus SCN_ event
    numbers, skipping deprecated features."""
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] == "Deprecated":
            continue
        feat = v["FeatureType"]
        if feat == "val":
            out.append(name + "=" + v["Value"])
        elif feat == "evt":
            out.append("SCN_" + name.upper() + "=" + v["Value"])
    return out
def printHFile(f, options):
    """Generate the method declarations for ScintillaEdit.h."""
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            feat = v["FeatureType"]
            if feat in ["fun", "get", "set"]:
                if checkTypes(name, v):
                    # Getters are declared const.
                    constDeclarator = " const" if feat == "get" else ""
                    returnType = cppAlias(v["ReturnType"])
                    if returnType == "int":
                        returnType = "sptr_t"
                    # A stringresult parameter means the method returns the
                    # text instead of taking an output buffer argument.
                    stringResult = v["Param2Type"] == "stringresult"
                    if stringResult:
                        returnType = "QByteArray"
                    out.append("\t" + returnType + " " + normalisedName(name, options, feat) + "(" +
                        arguments(v, stringResult, options)+
                        ")" + constDeclarator + ";")
    return out
def methodNames(f, options):
    """Yield the normalised name of every generatable method in *f*."""
    for name in f.order:
        v = f.features[name]
        if (v["Category"] != "Deprecated"
                and v["FeatureType"] in ["fun", "get", "set"]
                and checkTypes(name, v)):
            yield normalisedName(name, options)
def printCPPFile(f, options):
    """Generate the method definitions for ScintillaEdit.cpp.

    Each generatable feature becomes a method that forwards to send(), or to
    TextReturner() when its second parameter is a stringresult buffer.
    """
    out = []
    for name in f.order:
        v = f.features[name]
        if v["Category"] != "Deprecated":
            feat = v["FeatureType"]
            if feat in ["fun", "get", "set"]:
                if checkTypes(name, v):
                    # Getters are defined const.
                    constDeclarator = " const" if feat == "get" else ""
                    featureDefineName = "SCI_" + name.upper()
                    returnType = cppAlias(v["ReturnType"])
                    if returnType == "int":
                        returnType = "sptr_t"
                    # stringresult methods return the text as a QByteArray.
                    stringResult = v["Param2Type"] == "stringresult"
                    if stringResult:
                        returnType = "QByteArray"
                    returnStatement = ""
                    if returnType != "void":
                        returnStatement = "return "
                    out.append(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(" +
                        arguments(v, stringResult, options) +
                        ")" + constDeclarator + " {")
                    returns = ""
                    if stringResult:
                        returns += " " + returnStatement + "TextReturner(" + featureDefineName + ", "
                        # Pointer arguments must be cast to the sptr_t taken
                        # by the underlying message call.
                        if "*" in cppAlias(v["Param1Type"]):
                            returns += "(sptr_t)"
                        if v["Param1Name"]:
                            returns += normalisedName(v["Param1Name"], options)
                        else:
                            returns += "0"
                        returns += ");"
                    else:
                        returns += " " + returnStatement + "send(" + featureDefineName + ", "
                        if "*" in cppAlias(v["Param1Type"]):
                            returns += "(sptr_t)"
                        if v["Param1Name"]:
                            returns += normalisedName(v["Param1Name"], options)
                        else:
                            returns += "0"
                        returns += ", "
                        if "*" in cppAlias(v["Param2Type"]):
                            returns += "(sptr_t)"
                        if v["Param2Name"]:
                            returns += normalisedName(v["Param2Name"], options)
                        else:
                            returns += "0"
                        returns += ");"
                    out.append(returns)
                    out.append("}")
                    out.append("")
    return out
def gtkNames():
    """Yield the method names exported by a local gtkscintilla2 checkout."""
    # The full path on my machine: should be altered for anyone else
    p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
    with open(p) as f:
        for l in f.readlines():
            if "gtk_scintilla_" in l:
                # Second token minus the 14-character "gtk_scintilla_"
                # prefix gives the bare method name.
                name = l.split()[1][14:]
                if '(' in name:
                    name = name.split('(')[0]
                yield name
def usage():
    """Print the command-line help for WidgetGen.py."""
    helpLines = [
        "WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]",
        "",
        "Generate full APIs for ScintillaEdit class and ScintillaConstants.py.",
        "",
        "options:",
        "",
        "-c --clean remove all generated code from files",
        "-h --help display this text",
        "-u --underscore-names use method_names consistent with GTK+ standards",
    ]
    print("\n".join(helpLines))
def readInterface(cleanGenerated):
    """Return a Face for Scintilla.iface; left empty when only cleaning."""
    f = Face.Face()
    if not cleanGenerated:
        f.ReadFromFile("../../include/Scintilla.iface")
    return f
def main(argv):
    """Regenerate ScintillaEdit.cpp/.h and ScintillaConstants.py from the
    iface definition.

    Options: -c/--clean removes the generated sections (and files),
    -u/--underscore-names selects GTK+-style naming, -g/--gtk-check compares
    API coverage against a local gtkscintilla2 checkout.
    """
    # Using local path for gtkscintilla2 so don't default to checking
    checkGTK = False
    cleanGenerated = False
    qtStyleInterface = True
    # The --gtk-check option checks for full coverage of the gtkscintilla2 API but
    # depends on a particular directory so is not mentioned in --help.
    opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-c", "--clean"):
            cleanGenerated = True
        elif opt in ("-g", "--gtk-check"):
            checkGTK = True
        elif opt in ("-u", "--underscore-names"):
            qtStyleInterface = False
    options = {"qtStyle": qtStyleInterface}
    f = readInterface(cleanGenerated)
    # (The previous try/except that only re-raised has been dropped: it was
    # a no-op.)
    GenerateFile("ScintillaEdit.cpp.template", "ScintillaEdit.cpp",
        "/* ", True, printCPPFile(f, options))
    GenerateFile("ScintillaEdit.h.template", "ScintillaEdit.h",
        "/* ", True, printHFile(f, options))
    GenerateFile("../ScintillaEditPy/ScintillaConstants.py.template",
        "../ScintillaEditPy/ScintillaConstants.py",
        "# ", True, printPyFile(f, options))
    if checkGTK:
        # BUG FIX: methodNames requires the options argument; calling it
        # with only f raised TypeError whenever --gtk-check was used.
        names = set(methodNames(f, options))
        namesGtk = set(gtkNames())
        for name in namesGtk:
            if name not in names:
                print(name, "not found in Qt version")
        for name in names:
            if name not in namesGtk:
                print(name, "not found in GTK+ version")
    if cleanGenerated:
        for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
            try:
                os.remove(file)
            except OSError:
                # Already absent is fine when cleaning.
                pass
if __name__ == "__main__":
    # Pass only the real arguments, not the script name.
    main(sys.argv[1:])
| Python |
#!/usr/bin/env python
# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface
# definition file.
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
# Requires Python 2.5 or later
import sys
import os
import Face
from FileGenerator import UpdateFile, Generate, Regenerate, UpdateLineInFile, lineEnd
def printLexHFile(f):
    """Generate SciLexer.h #define lines for lexer (SCLEX_) and style (SCE_)
    value constants."""
    return ["#define " + name + " " + f.features[name]["Value"]
            for name in f.order
            if f.features[name]["FeatureType"] in ["val"]
            and ("SCE_" in name or "SCLEX_" in name)]
def printHFile(f):
    """Generate Scintilla.h #define lines for messages, notifications and
    plain values, wrapping Provisional features in SCI_DISABLE_PROVISIONAL.

    BUG FIX: the previous version appended "#endif" unconditionally, which
    produced an unbalanced preprocessor guard whenever the interface held no
    Provisional features, and it never closed the guard when a Provisional
    run was followed by stable features. The guard is now tracked and closed
    exactly when needed; output is unchanged for the standard Scintilla.iface
    (whose Provisional section is last).
    """
    out = []
    previousCategory = ""
    for name in f.order:
        v = f.features[name]
        if v["Category"] == "Deprecated":
            continue
        # Open/close the provisional guard on category transitions.
        if v["Category"] == "Provisional" and previousCategory != "Provisional":
            out.append("#ifndef SCI_DISABLE_PROVISIONAL")
        elif previousCategory == "Provisional" and v["Category"] != "Provisional":
            out.append("#endif")
        previousCategory = v["Category"]
        if v["FeatureType"] in ["fun", "get", "set"]:
            out.append("#define SCI_" + name.upper() + " " + v["Value"])
        elif v["FeatureType"] in ["evt"]:
            out.append("#define SCN_" + name.upper() + " " + v["Value"])
        elif v["FeatureType"] in ["val"]:
            # Lexer/style values belong in SciLexer.h, not here.
            if not ("SCE_" in name or "SCLEX_" in name):
                out.append("#define " + name + " " + v["Value"])
    if previousCategory == "Provisional":
        out.append("#endif")
    return out
# Module entry point: read the interface definition and regenerate both
# public headers. (The previous try/except that only re-raised was a no-op
# and has been removed.)
f = Face.Face()
f.ReadFromFile("../include/Scintilla.iface")
Regenerate("../include/Scintilla.h", "/* ", printHFile(f))
Regenerate("../include/SciLexer.h", "/* ", printLexHFile(f))
# BUG FIX: compare IDs numerically. max() over the raw value strings compared
# them lexicographically (e.g. "999" > "2500") and reported a wrong maximum.
print("Maximum ID is %s" % max(int(x) for x in f.values if int(x) < 3000))
| Python |
# Script to generate CaseConvert.cxx from Python's Unicode data
# Should be run rarely when a Python with a new version of Unicode data is available.
# Should not be run with old versions of Python.
# Current best approach divides case conversions into two cases:
# simple symmetric and complex.
# Simple symmetric is where a lower and upper case pair convert to each
# other and the folded form is the same as the lower case.
# There are 1006 symmetric pairs.
# These are further divided into ranges (stored as lower, upper, range length,
# range pitch and singletons (stored as lower, upper).
# Complex is for cases that don't fit the above: where there are multiple
# characters in one of the forms or fold is different to lower or
# lower(upper(x)) or upper(lower(x)) are not x. These are represented as UTF-8
# strings with original, folded, upper, and lower separated by '|'.
# There are 126 complex cases.
import codecs, itertools, os, string, sys, unicodedata
from FileGenerator import Regenerate
def contiguousRanges(l, diff):
    """Split *l* (a list of sequences) into runs where each element's first
    item exceeds its predecessor's first item by exactly *diff*."""
    runs = [[l[0]]]
    for item in l[1:]:
        if item[0] != runs[-1][-1][0] + diff:
            runs.append([item])
        else:
            runs[-1].append(item)
    return runs
def flatten(listOfLists):
    "Flatten one level of nesting"
    # Generator form of itertools.chain.from_iterable: yields lazily.
    for inner in listOfLists:
        for item in inner:
            yield item
def conversionSets():
    # For all Unicode characters, see whether they have case conversions
    # Return 2 sets: one of simple symmetric conversion cases and another
    # with complex cases.
    complexes = []
    symmetrics = []
    for ch in range(sys.maxunicode):
        # Skip the UTF-16 surrogate code points; they are not characters.
        if ch >= 0xd800 and ch <= 0xDBFF:
            continue
        if ch >= 0xdc00 and ch <= 0xDFFF:
            continue
        uch = chr(ch)
        fold = uch.casefold()
        upper = uch.upper()
        lower = uch.lower()
        symmetric = False
        # Lower-case character: symmetric when upper() is a single character
        # that round-trips back via lower()/casefold().
        if uch != upper and len(upper) == 1 and uch == lower and uch == fold:
            lowerUpper = upper.lower()
            foldUpper = upper.casefold()
            if lowerUpper == foldUpper and lowerUpper == uch:
                symmetric = True
                # Record (lower, upper, distance); each pair is stored once,
                # from its lower-case side.
                symmetrics.append((ch, ord(upper), ch - ord(upper)))
        # Upper-case side of a symmetric pair: only the flag is set, since
        # the pair was recorded from the lower-case side above.
        if uch != lower and len(lower) == 1 and uch == upper and lower == fold:
            upperLower = lower.upper()
            if upperLower == uch:
                symmetric = True
        # Blank out identity conversions so complex entries carry only the
        # forms that actually differ from the original.
        if fold == uch:
            fold = ""
        if upper == uch:
            upper = ""
        if lower == uch:
            lower = ""
        if (fold or upper or lower) and not symmetric:
            complexes.append((uch, fold, upper, lower))
    return symmetrics, complexes
def groupRanges(symmetrics):
    """Group the symmetric case pairs into ranges where possible.

    Returns (rangeGroups, nonRanges): ranges of more than four pairs as
    (lower, upper, length, pitch) tuples, plus leftover (lower, upper)
    singletons that fit no range.
    """
    def distance(s):
        return s[2]
    groups = []
    uniquekeys = []
    # groupby relies on symmetrics arriving in code-point order, so equal
    # distances form consecutive runs.
    for k, g in itertools.groupby(symmetrics, distance):
        groups.append(list(g)) # Store group iterator as a list
        uniquekeys.append(k)
    contiguousGroups = flatten([contiguousRanges(g, 1) for g in groups])
    longGroups = [(x[0][0], x[0][1], len(x), 1) for x in contiguousGroups if len(x) > 4]
    # Pairs one apart alternate lower/upper, so they advance with pitch 2.
    oneDiffs = [s for s in symmetrics if s[2] == 1]
    contiguousOnes = flatten([contiguousRanges(g, 2) for g in [oneDiffs]])
    longOneGroups = [(x[0][0], x[0][1], len(x), 2) for x in contiguousOnes if len(x) > 4]
    rangeGroups = sorted(longGroups+longOneGroups, key=lambda s: s[0])
    # PERF FIX: use a set for coverage membership; the previous list made the
    # final filter quadratic over the ~1000 symmetric pairs.
    rangeCoverage = set(flatten([range(r[0], r[0]+r[2]*r[3], r[3]) for r in rangeGroups]))
    nonRanges = [(l, u) for l, u, d in symmetrics if l not in rangeCoverage]
    return rangeGroups, nonRanges
def escape(s):
    """Return *s* as UTF-8 text with every non-ASCII-letter byte hex-escaped."""
    pieces = []
    for byte in s.encode('utf-8'):
        asChar = chr(byte)
        pieces.append(asChar if asChar in string.ascii_letters else "\\x%x" % byte)
    return "".join(pieces)
def updateCaseConvert():
    """Recompute the case-conversion tables and rewrite CaseConvert.cxx."""
    symmetrics, complexes = conversionSets()
    rangeGroups, nonRanges = groupRanges(symmetrics)
    print(len(rangeGroups), "ranges")
    # Ranges are emitted as "lower,upper,length,pitch, " quadruples.
    rangeLines = ["%d,%d,%d,%d, " % x for x in rangeGroups]
    print(len(nonRanges), "non ranges")
    nonRangeLines = ["%d,%d, " % x for x in nonRanges]
    print(len(symmetrics), "symmetric")
    # Complex conversions are stored as UTF-8 "orig|fold|upper|lower|" runs.
    complexLines = ['"%s|%s|%s|%s|"' % tuple(escape(t) for t in x) for x in complexes]
    print(len(complexLines), "complex")
    Regenerate("../src/CaseConvert.cxx", "//", rangeLines, nonRangeLines, complexLines)

# Regenerate on import/run; this script is executed directly when new
# Unicode data is available.
updateCaseConvert()
| Python |
#!/usr/bin/env python
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the Scintilla source files that list all the lexers.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.5 or later
# Files are regenerated in place with templates stored in comments.
# The format of generation comments is documented in FileGenerator.py.
from FileGenerator import Regenerate, UpdateLineInFile, ReplaceREInFile
import ScintillaData
def UpdateVersionNumbers(sci, root):
    """Propagate the current Scintilla version and modification dates into
    the Windows resource file, qmake project files and documentation pages."""
    UpdateLineInFile(root + "win32/ScintRes.rc", "#define VERSION_SCINTILLA",
        "#define VERSION_SCINTILLA \"" + sci.versionDotted + "\"")
    UpdateLineInFile(root + "win32/ScintRes.rc", "#define VERSION_WORDS",
        "#define VERSION_WORDS " + sci.versionCommad)
    UpdateLineInFile(root + "qt/ScintillaEditBase/ScintillaEditBase.pro",
        "VERSION =",
        "VERSION = " + sci.versionDotted)
    UpdateLineInFile(root + "qt/ScintillaEdit/ScintillaEdit.pro",
        "VERSION =",
        "VERSION = " + sci.versionDotted)
    UpdateLineInFile(root + "doc/ScintillaDownload.html", " Release",
        " Release " + sci.versionDotted)
    # Rewrite download links like /scintilla/xyz123 to the new version number.
    ReplaceREInFile(root + "doc/ScintillaDownload.html",
        r"/scintilla/([a-zA-Z]+)\d\d\d",
        r"/scintilla/\g<1>" + sci.version)
    UpdateLineInFile(root + "doc/index.html",
        ' <font color="#FFCC99" size="3"> Release version',
        ' <font color="#FFCC99" size="3"> Release version ' +\
        sci.versionDotted + '<br />')
    UpdateLineInFile(root + "doc/index.html",
        ' Site last modified',
        ' Site last modified ' + sci.mdyModified + '</font>')
    UpdateLineInFile(root + "doc/ScintillaHistory.html",
        ' Released ',
        ' Released ' + sci.dmyModified + '.')
def RegenerateAll(root):
    """Regenerate the lexer catalogue and makefile lists under *root*, then
    refresh the version numbers embedded in sources and documentation."""
    sci = ScintillaData.ScintillaData(root)
    Regenerate(root + "src/Catalogue.cxx", "//", sci.lexerModules)
    Regenerate(root + "win32/scintilla.mak", "#", sci.lexFiles)
    UpdateVersionNumbers(sci, root)

if __name__=="__main__":
    # The script lives in scripts/, so the tree root is one level up.
    RegenerateAll("../")
| Python |
# Script to generate CharacterCategory.cxx from Python's Unicode data
# Should be run rarely when a Python with a new version of Unicode data is available.
# Should not be run with old versions of Python.
import codecs, os, platform, sys, unicodedata
from FileGenerator import Regenerate
def findCategories(filename):
    """Extract the ccXx category names declared in CharacterCategory.h."""
    with codecs.open(filename, "r", "UTF-8") as infile:
        ccLines = [ln.strip() for ln in infile.readlines() if "\tcc" in ln]
    values = "".join(ccLines).replace(" ", "").split(",")
    print(values)
    # Drop the two-character "cc" prefix from each enumerator.
    return [v[2:] for v in values]
def updateCharacterCategory(filename):
    """Rewrite CharacterCategory.cxx with a run-length encoded table of
    Unicode general categories.

    Each emitted value packs a range start code point (value // 32) with the
    index of its category in the module-level `categories` list (value % 32).
    """
    values = ["// Created with Python %s, Unicode %s" % (
        platform.python_version(), unicodedata.unidata_version)]
    category = unicodedata.category(chr(0))
    startRange = 0
    for ch in range(sys.maxunicode):
        uch = chr(ch)
        # Emit a table entry each time the category changes.
        if unicodedata.category(uch) != category:
            value = startRange * 32 + categories.index(category)
            values.append("%d," % value)
            category = unicodedata.category(uch)
            startRange = ch
    # Flush the final run.
    value = startRange * 32 + categories.index(category)
    values.append("%d," % value)
    Regenerate(filename, "//", values)

# `categories` must be populated before updateCharacterCategory runs: the
# category order is taken from the existing header.
categories = findCategories("../lexlib/CharacterCategory.h")
updateCharacterCategory("../lexlib/CharacterCategory.cxx")
| Python |
# Face.py - module for reading and parsing Scintilla.iface file
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Requires Python 2.5 or later
def sanitiseLine(line):
    """Strip the trailing newline, any '##' documentation comment, and
    surrounding whitespace from an iface line."""
    if line.endswith('\n'):
        line = line[:-1]
    commentStart = line.find("##")
    if commentStart != -1:
        line = line[:commentStart]
    return line.strip()
def decodeFunction(featureVal):
    """Split 'retType Name=value(param1,param2)' into its five fields."""
    retType, rest = featureVal.split(" ", 1)
    nameIdent, paramPart = rest.split("(")
    name, value = nameIdent.split("=")
    paramList, rest = paramPart.split(")")
    param1, param2 = paramList.split(",")
    return retType, name, value, param1, param2
def decodeEvent(featureVal):
    """Split 'retType Name=value(...)' into (return type, name, value);
    the parameter list of an event is ignored."""
    retType, rest = featureVal.split(" ", 1)
    nameIdent = rest.split("(")[0]
    name, value = nameIdent.split("=")
    return retType, name, value
def decodeParam(p):
	"""Decode a parameter "type name=value"; name and value are optional.

	Returns ("", "", "") when the parameter has no space (e.g. empty).
	"""
	text = p.strip()
	paramType, paramName, paramValue = "", "", ""
	if " " in text:
		paramType, nameValue = text.split(" ")
		if "=" in nameValue:
			paramName, paramValue = nameValue.split("=")
		else:
			paramName = nameValue
	return paramType, paramName, paramValue
class Face:
	"""In-memory representation of a Scintilla.iface interface definition.

	order    - feature names in the order they appeared in the file
	features - name -> dictionary describing each feature
	values   - message values already seen, used to detect duplicates
	events   - event values already seen, used to detect duplicates
	"""

	def __init__(self):
		self.order = []
		self.features = {}
		self.values = {}
		self.events = {}

	def ReadFromFile(self, name):
		"""Parse the iface file 'name' and populate the Face members.

		Raises Exception on duplicate message/event values and ValueError
		on lines that can not be decoded.
		"""
		currentCategory = ""
		currentComment = []
		currentCommentFinished = 0
		# Context manager guarantees the file is closed even if parsing
		# raises (the original leaked the handle).
		with open(name) as ifaceFile:
			for line in ifaceFile.readlines():
				line = sanitiseLine(line)
				if line:
					if line[0] == "#":
						# Comment line: start a new comment block after a
						# feature has consumed the previous one.
						if line[1] == " ":
							if currentCommentFinished:
								currentComment = []
								currentCommentFinished = 0
							currentComment.append(line[2:])
					else:
						currentCommentFinished = 1
						featureType, featureVal = line.split(" ", 1)
						if featureType in ["fun", "get", "set"]:
							try:
								retType, name, value, param1, param2 = decodeFunction(featureVal)
							except ValueError:
								print("Failed to decode %s" % line)
								raise
							p1 = decodeParam(param1)
							p2 = decodeParam(param2)
							self.features[name] = {
								"FeatureType": featureType,
								"ReturnType": retType,
								"Value": value,
								"Param1Type": p1[0], "Param1Name": p1[1], "Param1Value": p1[2],
								"Param2Type": p2[0], "Param2Name": p2[1], "Param2Value": p2[2],
								"Category": currentCategory, "Comment": currentComment
							}
							if value in self.values:
								raise Exception("Duplicate value " + value + " " + name)
							self.values[value] = 1
							self.order.append(name)
						elif featureType == "evt":
							retType, name, value = decodeEvent(featureVal)
							self.features[name] = {
								"FeatureType": featureType,
								"ReturnType": retType,
								"Value": value,
								"Category": currentCategory, "Comment": currentComment
							}
							if value in self.events:
								raise Exception("Duplicate event " + value + " " + name)
							self.events[value] = 1
							self.order.append(name)
						elif featureType == "cat":
							currentCategory = featureVal
						elif featureType == "val":
							try:
								name, value = featureVal.split("=", 1)
							except ValueError:
								print("Failure %s" % featureVal)
								raise Exception()
							self.features[name] = {
								"FeatureType": featureType,
								"Category": currentCategory,
								"Value": value }
							self.order.append(name)
						elif featureType == "enu" or featureType == "lex":
							name, value = featureVal.split("=", 1)
							self.features[name] = {
								"FeatureType": featureType,
								"Category": currentCategory,
								"Value": value }
							self.order.append(name)
| Python |
# ScintillaData.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# This file can be run to see the data it provides.
# Requires Python 2.5 or later
from __future__ import with_statement
import datetime, glob, os, textwrap
import FileGenerator
def FindModules(lexFile):
	"""Return the LexerModule names declared in the given lexer source file."""
	found = []
	with open(lexFile) as src:
		for srcLine in src.readlines():
			if srcLine.startswith("LexerModule"):
				found.append(srcLine.replace("(", " ").split()[1])
	return found
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
# NOTE(review): this list is duplicated in LexGen.py - keep the copies in sync.
knownIrregularProperties = [
	"fold",
	"styling.within.preprocessor",
	"tab.timmy.whinge.level",
	"asp.default.language",
	"html.tags.case.sensitive",
	"ps.level",
	"ps.tokenize",
	"sql.backslash.escapes",
	"nsis.uservars",
	"nsis.ignorecase"
]
def FindProperties(lexFile):
	"""Return a dict whose keys are the property names referenced in lexFile.

	Only lower-case names that start with "fold." or "lexer.", or that are
	in knownIrregularProperties, are kept.
	"""
	found = {}
	with open(lexFile) as src:
		for srcLine in src.readlines():
			if ("GetProperty" in srcLine or "DefineProperty" in srcLine) and "\"" in srcLine:
				stripped = srcLine.strip()
				if stripped.startswith("//"):	# Drop comments
					continue
				propertyName = stripped.split("\"")[1]
				# Only allow lower case property names
				if propertyName.lower() != propertyName:
					continue
				if propertyName in knownIrregularProperties or \
					propertyName.startswith("fold.") or \
					propertyName.startswith("lexer."):
					found[propertyName] = 1
	return found
def FindPropertyDocumentation(lexFile):
	"""Return {property name: documentation string} scraped from lexFile.

	Documentation is taken from "// property x" comment blocks and from
	DefineProperty(...) calls: following "//" comment lines and string
	literal lines are appended to the current property's text. Properties
	that end up with empty documentation are dropped.
	"""
	documents = {}
	with open(lexFile) as f:
		name = ""
		for l in f.readlines():
			l = l.strip()
			if "// property " in l:
				propertyName = l.split()[2]
				if propertyName.lower() == propertyName:
					# Only allow lower case property names
					name = propertyName
					documents[name] = ""
			elif "DefineProperty" in l and "\"" in l:
				propertyName = l.split("\"")[1]
				if propertyName.lower() == propertyName:
					# Only allow lower case property names
					name = propertyName
					documents[name] = ""
			elif name:
				if l.startswith("//"):
					if documents[name]:
						documents[name] += " "
					documents[name] += l[2:].strip()
				elif l.startswith("\""):
					# String literal continuation: strip quoting/punctuation.
					l = l[1:].strip()
					if l.endswith(";"):
						l = l[:-1].strip()
					if l.endswith(")"):
						l = l[:-1].strip()
					if l.endswith("\""):
						l = l[:-1]
					# Fix escaped double quotes
					l = l.replace("\\\"", "\"")
					documents[name] += l
				else:
					# Any other line ends the current property's documentation.
					name = ""
	for name in list(documents.keys()):
		if documents[name] == "":
			del documents[name]
	return documents
def ciCompare(a,b):
	"""Case-insensitive three-way comparison returning -1, 0 or 1.

	Implemented without the Python-2-only cmp() builtin so it also works
	under Python 3.
	"""
	al, bl = a.lower(), b.lower()
	return (al > bl) - (al < bl)
def ciKey(a):
	"""Sort key making string comparisons case-insensitive."""
	return a.lower()
def SortListInsensitive(l):
	"""Sort the list l in place, ignoring case."""
	try: # Try key function
		l.sort(key=ciKey)
	except TypeError: # Earlier version of Python, so use comparison function
		l.sort(ciCompare)
class ScintillaData:
	"""Collects version, modification-date and lexer information from the
	Scintilla source tree rooted at scintillaRoot (see module docstring)."""
	def __init__(self, scintillaRoot):
		# Discover version information
		with open(scintillaRoot + "version.txt") as f:
			self.version = f.read().strip()
		self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
			self.version[2]
		self.versionCommad = self.version[0] + ', ' + self.version[1] + ', ' + \
			self.version[2] + ', 0'
		# Date.Modified is read from a meta element in doc/index.html.
		with open(scintillaRoot + "doc/index.html") as f:
			self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
				[0].split('\"')[3]
			# 20130602
			# index.html, SciTE.html
		dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
		self.yearModified = self.dateModified[0:4]
		monthModified = dtModified.strftime("%B")
		dayModified = "%d" % dtModified.day
		self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
		# May 22 2013
		# index.html, SciTE.html
		self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
		# 22 May 2013
		# ScintillaHistory.html -- only first should change
		self.myModified = monthModified + " " + self.yearModified
		# Find all the lexer source code files
		lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
		SortListInsensitive(lexFilePaths)
		self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
		self.lexerModules = []
		lexerProperties = set()
		self.propertyDocuments = {}
		for lexFile in lexFilePaths:
			self.lexerModules.extend(FindModules(lexFile))
			for k in FindProperties(lexFile).keys():
				lexerProperties.add(k)
			documents = FindPropertyDocumentation(lexFile)
			for k in documents.keys():
				# First lexer to document a property wins.
				if k not in self.propertyDocuments:
					self.propertyDocuments[k] = documents[k]
		SortListInsensitive(self.lexerModules)
		self.lexerProperties = list(lexerProperties)
		SortListInsensitive(self.lexerProperties)
def printWrapped(text):
	"""Print text wrapped, indenting the continuation lines."""
	wrapped = textwrap.fill(text, subsequent_indent=" ")
	print(wrapped)
# Self-test: print all the discovered data when this module is run directly.
if __name__=="__main__":
	sci = ScintillaData("../")
	print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
	print("Date last modified %s %s %s %s %s" % (
		sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
	printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
	printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
	printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
	print("Lexer property documentation:")
	documentProperties = list(sci.propertyDocuments.keys())
	SortListInsensitive(documentProperties)
	for k in documentProperties:
		print(" " + k)
		print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
			subsequent_indent=" "))
| Python |
#!/usr/bin/env python
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the Scintilla and SciTE source files that list
# all the lexers and all the properties files.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.4 or later
# Most files are regenerated in place with templates stored in comments.
# The VS .NET project file is generated into a different file as the
# VS .NET environment will not retain comments when modifying the file.
# The files are copied to a string apart from sections between a
# ++Autogenerated comment and a --Autogenerated comment which is
# generated by the CopyWithInsertion function. After the whole
# string is instantiated, it is compared with the target file and
# if different the file is rewritten.
# Does not regenerate the Visual C++ 6 project files but does the VS .NET
# project file.
import string
import sys
import os
import glob
# EOL constants
# NATIVE is the line-end style used when files are regenerated in place.
CR = "\r"
LF = "\n"
CRLF = "\r\n"
if sys.platform == "win32":
	NATIVE = CRLF
else:
	# Yes, LF is the native EOL even on Mac OS X. CR is just for
	# Mac OS <=9 (a.k.a. "Mac Classic")
	NATIVE = LF
# Automatically generated sections contain start and end comments,
# a definition line and the results.
# The results are replaced by regenerating based on the definition line.
# The definition line is a comment prefix followed by "**".
# If there is a digit after the ** then this indicates which list to use
# and the digit and next character are not part of the definition
# Backslash is used as an escape within the definition line.
# The part between \( and \) is repeated for each item in the list.
# \* is replaced by each list item. \t, and \n are tab and newline.
def CopyWithInsertion(input, commentPrefix, retainDefs, eolType, *lists):
	"""Return 'input' with every ++Autogenerated section regenerated.

	Text outside ++Autogenerated/--Autogenerated sections is copied through
	unchanged.  Inside a section, the definition line (commentPrefix + "**")
	selects one of *lists (optional leading digit) and supplies a template;
	the part between \( and \) is expanded once per list item with \*
	replaced by the item.  When retainDefs is true the section markers and
	definition line are kept in the output (used for in-place regeneration).
	"""
	copying = 1
	listid = 0
	output = []
	for line in input.splitlines(0):
		isStartGenerated = line.startswith(commentPrefix + "++Autogenerated")
		if copying and not isStartGenerated:
			output.append(line)
		if isStartGenerated:
			if retainDefs:
				output.append(line)
			copying = 0
			definition = ""
		elif not copying and line.startswith(commentPrefix + "**"):
			if retainDefs:
				output.append(line)
			definition = line[len(commentPrefix + "**"):]
			if (commentPrefix == "<!--") and (" -->" in definition):
				definition = definition.replace(" -->", "")
			listid = 0
			if definition[0] in string.digits:
				listid = int(definition[:1])
				definition = definition[2:]
			# Hide double slashes as a control character
			definition = definition.replace("\\\\", "\001")
			# Do some normal C style transforms
			definition = definition.replace("\\n", "\n")
			definition = definition.replace("\\t", "\t")
			# Get the doubled backslashes back as single backslashes
			definition = definition.replace("\001", "\\")
			startRepeat = definition.find("\\(")
			endRepeat = definition.find("\\)")
			intro = definition[:startRepeat]
			out = ""
			if intro.endswith("\n"):
				pos = 0
			else:
				pos = len(intro)
			out += intro
			middle = definition[startRepeat+2:endRepeat]
			for i in lists[listid]:
				item = middle.replace("\\*", i)
				# Break lines that would exceed 80 columns with a backslash.
				if pos and (pos + len(item) >= 80):
					out += "\\\n"
					pos = 0
				out += item
				pos += len(item)
				if item.endswith("\n"):
					pos = 0
			outro = definition[endRepeat+2:]
			out += outro
			out = out.replace("\n", eolType) # correct EOLs in generated content
			output.append(out)
		elif line.startswith(commentPrefix + "--Autogenerated"):
			copying = 1
			if retainDefs:
				output.append(line)
	output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
	return eolType.join(output) + eolType
def UpdateFile(filename, updated):
	""" If the file is different to updated then copy updated
	into the file else leave alone so CVS and make don't treat
	it as modified.

	'updated' is a unicode string; the file is written UTF-8 encoded.
	"""
	try:
		infile = open(filename, "rb")
	except IOError: # File is not there yet
		# Create the file for the first time; 'with' closes it on all paths.
		with open(filename, "wb") as out:
			out.write(updated.encode('utf-8'))
		print("New %s" % filename)
		return
	with infile:
		original = infile.read()
	original = original.decode('utf-8')
	if updated != original:
		os.unlink(filename)
		with open(filename, "wb") as out:
			out.write(updated.encode('utf-8'))
		print("Changed %s " % filename)
	#~ else:
	#~ print "Unchanged", filename
def Generate(inpath, outpath, commentPrefix, eolType, *lists):
	"""Generate 'outpath' from 'inpath'.
	"eolType" indicates the type of EOLs to use in the generated
	file. It should be one of following constants: LF, CRLF,
	CR, or NATIVE.

	The input is decoded as UTF-8 and processed with CopyWithInsertion;
	the output is only rewritten when it differs (see UpdateFile).
	"""
	try:
		infile = open(inpath, "rb")
	except IOError:
		print("Can not open %s" % inpath)
		return
	# 'with' ensures the input handle is closed (the original leaked it).
	with infile:
		original = infile.read()
	original = original.decode('utf-8')
	updated = CopyWithInsertion(original, commentPrefix,
		inpath == outpath, eolType, *lists)
	UpdateFile(outpath, updated)
def Regenerate(filename, commentPrefix, eolType, *lists):
	"""Regenerate the given file in place (input and output are the same file).
	"eolType" indicates the type of EOLs to use in the generated
	file. It should be one of following constants: LF, CRLF,
	CR, or NATIVE.
	"""
	Generate(filename, filename, commentPrefix, eolType, *lists)
def FindModules(lexFile):
	"""Return the LexerModule names declared in the given lexer source file."""
	modules = []
	# Use a context manager so the file handle is not leaked.
	with open(lexFile) as f:
		for l in f.readlines():
			if l.startswith("LexerModule"):
				l = l.replace("(", " ")
				modules.append(l.split()[1])
	return modules
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
# NOTE(review): this list is duplicated in ScintillaData.py - keep in sync.
knownIrregularProperties = [
	"fold",
	"styling.within.preprocessor",
	"tab.timmy.whinge.level",
	"asp.default.language",
	"html.tags.case.sensitive",
	"ps.level",
	"ps.tokenize",
	"sql.backslash.escapes",
	"nsis.uservars",
	"nsis.ignorecase"
]
def FindProperties(lexFile):
	"""Return a dict keyed by the property names referenced in lexFile.

	Only lower-case names starting with "fold." or "lexer.", or listed in
	knownIrregularProperties, are kept.
	"""
	properties = {}
	# Use a context manager so the file handle is not leaked.
	with open(lexFile) as f:
		for l in f.readlines():
			if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
				l = l.strip()
				if not l.startswith("//"): # Drop comments
					propertyName = l.split("\"")[1]
					if propertyName.lower() == propertyName:
						# Only allow lower case property names
						if propertyName in knownIrregularProperties or \
							propertyName.startswith("fold.") or \
							propertyName.startswith("lexer."):
							properties[propertyName] = 1
	return properties
def FindPropertyDocumentation(lexFile):
	"""Return {property name: documentation string} scraped from lexFile.

	Documentation is taken from "// property x" comment blocks and from
	DefineProperty(...) calls: following "//" comment lines and string
	literal lines are appended to the current property's text. Properties
	with empty documentation are dropped.
	"""
	documents = {}
	# Use a context manager so the file handle is not leaked.
	with open(lexFile) as f:
		name = ""
		for l in f.readlines():
			l = l.strip()
			if "// property " in l:
				propertyName = l.split()[2]
				if propertyName.lower() == propertyName:
					# Only allow lower case property names
					name = propertyName
					documents[name] = ""
			elif "DefineProperty" in l and "\"" in l:
				propertyName = l.split("\"")[1]
				if propertyName.lower() == propertyName:
					# Only allow lower case property names
					name = propertyName
					documents[name] = ""
			elif name:
				if l.startswith("//"):
					if documents[name]:
						documents[name] += " "
					documents[name] += l[2:].strip()
				elif l.startswith("\""):
					# String literal continuation: strip quoting/punctuation.
					l = l[1:].strip()
					if l.endswith(";"):
						l = l[:-1].strip()
					if l.endswith(")"):
						l = l[:-1].strip()
					if l.endswith("\""):
						l = l[:-1]
					# Fix escaped double quotes
					l = l.replace("\\\"", "\"")
					documents[name] += l
				else:
					# Any other line ends the current property's documentation.
					name = ""
	for name in list(documents.keys()):
		if documents[name] == "":
			del documents[name]
	return documents
def ciCompare(a,b):
	"""Case-insensitive three-way comparison returning -1, 0 or 1.

	Implemented without the Python-2-only cmp() builtin so it also works
	under Python 3.
	"""
	al, bl = a.lower(), b.lower()
	return (al > bl) - (al < bl)
def ciKey(a):
	"""Sort key making string comparisons case-insensitive."""
	return a.lower()
def sortListInsensitive(l):
	"""Sort the list l in place, ignoring case."""
	try: # Try key function
		l.sort(key=ciKey)
	except TypeError: # Earlier version of Python, so use comparison function
		l.sort(ciCompare)
def UpdateLineInFile(path, linePrefix, lineReplace):
	"""Replace every line of path that starts with linePrefix by lineReplace,
	rewriting the file (via UpdateFile) with NATIVE line ends."""
	lines = []
	with open(path, "r") as f:
		for l in f.readlines():
			l = l.rstrip()
			if l.startswith(linePrefix):
				lines.append(lineReplace)
			else:
				lines.append(l)
	contents = NATIVE.join(lines) + NATIVE
	UpdateFile(path, contents)
def UpdateVersionNumbers(root):
	"""Push the version from scintilla/version.txt into the Scintilla (and,
	when present, SciTE) resource, project and documentation files."""
	with open(root + "scintilla/version.txt") as f:
		version = f.read()
	# version.txt holds digits like "335" -> "3.3.5" and "3, 3, 5, 0".
	versionDotted = version[0] + '.' + version[1] + '.' + version[2]
	versionCommad = version[0] + ', ' + version[1] + ', ' + version[2] + ', 0'
	UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_SCINTILLA",
		"#define VERSION_SCINTILLA \"" + versionDotted + "\"")
	UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_WORDS",
		"#define VERSION_WORDS " + versionCommad)
	UpdateLineInFile(root + "scintilla/qt/ScintillaEditBase/ScintillaEditBase.pro",
		"VERSION =",
		"VERSION = " + versionDotted)
	UpdateLineInFile(root + "scintilla/qt/ScintillaEdit/ScintillaEdit.pro",
		"VERSION =",
		"VERSION = " + versionDotted)
	UpdateLineInFile(root + "scintilla/doc/ScintillaDownload.html", "       Release",
		"       Release " + versionDotted)
	UpdateLineInFile(root + "scintilla/doc/index.html",
		'          <font color="#FFCC99" size="3"> Release version',
		'          <font color="#FFCC99" size="3"> Release version ' + versionDotted + '<br />')
	# SciTE lives beside Scintilla; only update it when checked out.
	if os.path.exists(root + "scite"):
		UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_SCITE",
			"#define VERSION_SCITE \"" + versionDotted + "\"")
		UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_WORDS",
			"#define VERSION_WORDS " + versionCommad)
		UpdateLineInFile(root + "scite/doc/SciTEDownload.html", "       Release",
			"       Release " + versionDotted)
		UpdateLineInFile(root + "scite/doc/SciTE.html",
			'          <font color="#FFCC99" size="3"> Release version',
			'          <font color="#FFCC99" size="3"> Release version ' + versionDotted + '<br />')
def RegenerateAll():
	"""Regenerate every generated Scintilla/SciTE file from the lexer and
	properties sources found under the "../../" tree."""
	root="../../"
	# Find all the lexer source code files
	lexFilePaths = glob.glob(root + "scintilla/lexers/Lex*.cxx")
	sortListInsensitive(lexFilePaths)
	lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
	print(lexFiles)
	lexerModules = []
	lexerProperties = {}
	propertyDocuments = {}
	for lexFile in lexFilePaths:
		lexerModules.extend(FindModules(lexFile))
		for k in FindProperties(lexFile).keys():
			lexerProperties[k] = 1
		documents = FindPropertyDocumentation(lexFile)
		for k in documents.keys():
			propertyDocuments[k] = documents[k]
	sortListInsensitive(lexerModules)
	lexerProperties = list(lexerProperties.keys())
	sortListInsensitive(lexerProperties)
	# Generate HTML to document each property
	# This is done because tags can not be safely put inside comments in HTML
	documentProperties = list(propertyDocuments.keys())
	sortListInsensitive(documentProperties)
	propertiesHTML = []
	for k in documentProperties:
		propertiesHTML.append("\t<tr id='property-%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
			(k, k, propertyDocuments[k]))
	# Find all the SciTE properties files
	otherProps = ["abbrev.properties", "Embedded.properties", "SciTEGlobal.properties", "SciTE.properties"]
	if os.path.exists(root + "scite"):
		propFilePaths = glob.glob(root + "scite/src/*.properties")
		sortListInsensitive(propFilePaths)
		propFiles = [os.path.basename(f) for f in propFilePaths if os.path.basename(f) not in otherProps]
		sortListInsensitive(propFiles)
		print(propFiles)
	# Rewrite the generated sections of each target file.
	Regenerate(root + "scintilla/src/Catalogue.cxx", "//", NATIVE, lexerModules)
	Regenerate(root + "scintilla/win32/scintilla.mak", "#", NATIVE, lexFiles)
	Regenerate(root + "scintilla/win32/scintilla_vc6.mak", "#", NATIVE, lexFiles)
	if os.path.exists(root + "scite"):
		Regenerate(root + "scite/win32/makefile", "#", NATIVE, propFiles)
		Regenerate(root + "scite/win32/scite.mak", "#", NATIVE, propFiles)
		Regenerate(root + "scite/src/SciTEProps.cxx", "//", NATIVE, lexerProperties)
		Regenerate(root + "scite/doc/SciTEDoc.html", "<!--", NATIVE, propertiesHTML)
		Generate(root + "scite/boundscheck/vcproj.gen",
			root + "scite/boundscheck/SciTE.vcproj", "#", NATIVE, lexFiles)
	UpdateVersionNumbers(root)
# Run on import: this script regenerates everything as soon as it executes.
RegenerateAll()
| Python |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import io, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
# Keyword lists handed to the hypertext lexer: HTML tags/attributes first,
# then "function" and "sub" - presumably the embedded JavaScript and
# VBScript keyword lists; confirm against the lexer's list ordering.
keywordsHTML = [
	b"b body content head href html link meta "
	b"name rel script strong title type xmlns",
	b"function",
	b"sub"
]
class TestLexers(unittest.TestCase):
	"""Runs each lexer over an example file and compares the styled result
	with a known-good <name>.styled file; a <name>.new file is written on
	any mismatch to ease updating the baseline."""

	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()

	def AsStyled(self):
		"""Return the document as bytes with {style} markers inserted at
		every position where the style number changes."""
		text = self.ed.Contents()
		data = io.BytesIO()
		prevStyle = -1
		for o in range(self.ed.Length):
			styleNow = self.ed.GetStyleAt(o)
			if styleNow != prevStyle:
				styleBuf = "{%0d}" % styleNow
				data.write(styleBuf.encode('utf-8'))
				prevStyle = styleNow
			data.write(text[o:o+1])
		return data.getvalue()

	def LexExample(self, name, lexerName, keywords=None):
		"""Style examples/<name> with lexerName and check the result matches
		the recorded styling, both for a whole-file lex and when restarting
		the lex from the start of every line."""
		if keywords is None:
			keywords = []
		self.ed.SetCodePage(65001)
		self.ed.LexerLanguage = lexerName
		bits = self.ed.StyleBitsNeeded
		# NOTE(review): this is 2 << (bits - 1); confirm the intended mask
		# is not (2 << bits) - 1 before changing anything here.
		mask = 2 << bits - 1
		self.ed.StyleBits = bits
		for i in range(len(keywords)):
			self.ed.SetKeyWords(i, keywords[i])
		nameExample = os.path.join("examples", name)
		namePrevious = nameExample +".styled"
		nameNew = nameExample +".new"
		with open(nameExample, "rb") as f:
			prog = f.read()
		# Skip a UTF-8 byte-order mark if present.
		BOM = b"\xEF\xBB\xBF"
		if prog.startswith(BOM):
			prog = prog[len(BOM):]
		lenDocument = len(prog)
		self.ed.AddText(lenDocument, prog)
		self.ed.Colourise(0, lenDocument)
		self.assertEqual(self.ed.EndStyled, lenDocument)
		try:
			with open(namePrevious, "rb") as f:
				prevStyled = f.read()
		except FileNotFoundError:
			# bytes, not str, so the comparisons below are type-consistent.
			prevStyled = b""
		progStyled = self.AsStyled()
		if progStyled != prevStyled:
			with open(nameNew, "wb") as f:
				f.write(progStyled)
			print(progStyled)
			print(prevStyled)
			self.assertEqual(progStyled, prevStyled)
			# The whole file doesn't parse like it did before so don't try line by line
			# as that is likely to fail many times.
			return
		# Try partial lexes from the start of every line which should all be identical.
		for line in range(self.ed.LineCount):
			lineStart = self.ed.PositionFromLine(line)
			self.ed.StartStyling(lineStart, mask)
			self.assertEqual(self.ed.EndStyled, lineStart)
			self.ed.Colourise(lineStart, lenDocument)
			progStyled = self.AsStyled()
			if progStyled != prevStyled:
				with open(nameNew, "wb") as f:
					f.write(progStyled)
				self.assertEqual(progStyled, prevStyled)
				# Give up after one failure
				return

	def testCXX(self):
		self.LexExample("x.cxx", b"cpp", [b"int"])

	def testPython(self):
		self.LexExample("x.py", b"python",
			[b"class def else for if import in print return while"])

	def testHTML(self):
		self.LexExample("x.html", b"hypertext", keywordsHTML)

	def testASP(self):
		self.LexExample("x.asp", b"hypertext", keywordsHTML)

	def testPHP(self):
		self.LexExample("x.php", b"hypertext", keywordsHTML)

	def testVB(self):
		self.LexExample("x.vb", b"vb", [b"as dim or string"])

	def testLua(self):
		self.LexExample("x.lua", b"lua", [b"function end"])

	def testRuby(self):
		self.LexExample("x.rb", b"ruby", [b"class def end"])

	def testPerl(self):
		self.LexExample("x.pl", b"perl", [b"printf sleep use while"])

	def testD(self):
		self.LexExample("x.d", b"d",
			[b"keyword1", b"keyword2", b"", b"keyword4", b"keyword5",
			b"keyword6", b"keyword7"])
if __name__ == '__main__':
	# Run the lexer tests inside the Xite test harness.
	Xite.main("lexTests")
| Python |
# List many windows message numbers
# Maps WM_* message name -> numeric message identifier.
msgs = {
	"WM_ACTIVATE":6,
	"WM_ACTIVATEAPP":28,
	"WM_CAPTURECHANGED":533,
	"WM_CHAR":258,
	"WM_CLOSE":16,
	"WM_CREATE":1,
	"WM_COMMAND":273,
	"WM_DESTROY":2,
	"WM_ENTERSIZEMOVE":561,
	"WM_ERASEBKGND":20,
	"WM_EXITSIZEMOVE":562,
	"WM_GETMINMAXINFO":36,
	"WM_GETTEXT":13,
	"WM_IME_SETCONTEXT":0x0281,
	"WM_IME_NOTIFY":0x0282,
	"WM_KEYDOWN":256,
	"WM_KEYUP":257,
	"WM_KILLFOCUS":8,
	"WM_LBUTTONDOWN":513,
	"WM_LBUTTONUP":514,
	"WM_MBUTTONDOWN":519,
	"WM_MBUTTONUP":520,
	"WM_MBUTTONDBLCLK":521,
	"WM_MOUSEACTIVATE":33,
	"WM_MOUSEMOVE":512,
	"WM_MOVE":3,
	"WM_MOVING":534,
	"WM_NCACTIVATE":134,
	"WM_NCCALCSIZE":131,
	"WM_NCCREATE":129,
	"WM_NCDESTROY":130,
	"WM_NCHITTEST":132,
	"WM_NCLBUTTONDBLCLK":163,
	"WM_NCLBUTTONDOWN":161,
	"WM_NCLBUTTONUP":162,
	"WM_NCMOUSEMOVE":160,
	"WM_NCPAINT":133,
	"WM_PAINT":15,
	"WM_PARENTNOTIFY":528,
	"WM_SETCURSOR":32,
	"WM_SETFOCUS":7,
	"WM_SETFONT":48,
	"WM_SETTEXT":12,
	"WM_SHOWWINDOW":24,
	"WM_SIZE":5,
	"WM_SIZING":532,
	"WM_SYNCPAINT":136,
	"WM_SYSCOMMAND":274,
	"WM_SYSKEYDOWN":260,
	"WM_TIMER":275,
	"WM_USER":1024,
	"WM_USER+1":1025,
	"WM_WINDOWPOSCHANGED":71,
	"WM_WINDOWPOSCHANGING":70,
	}
# Reverse map: numeric message value -> WM_* message name.
sgsm = {value: name for name, value in msgs.items()}
| Python |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import unicode_literals
import codecs, ctypes, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
class TestSimple(unittest.TestCase):
	def setUp(self):
		# Start each test with an empty document and a clean undo history.
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
def testLength(self):
self.assertEquals(self.ed.Length, 0)
def testAddText(self):
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.Length, 1)
self.assertEquals(self.ed.GetCharAt(0), ord("x"))
self.assertEquals(self.ed.GetStyleAt(0), 0)
self.ed.ClearAll()
self.assertEquals(self.ed.Length, 0)
def testDeleteRange(self):
self.ed.AddText(5, b"abcde")
self.assertEquals(self.ed.Length, 5)
self.ed.DeleteRange(1, 2)
self.assertEquals(self.ed.Length, 3)
self.assertEquals(self.ed.Contents(), b"ade")
def testAddStyledText(self):
self.assertEquals(self.ed.EndStyled, 0)
self.ed.AddStyledText(2, b"x\002")
self.assertEquals(self.ed.Length, 1)
self.assertEquals(self.ed.GetCharAt(0), ord("x"))
self.assertEquals(self.ed.GetStyleAt(0), 2)
self.assertEquals(self.ed.StyledTextRange(0, 1), b"x\002")
self.ed.ClearDocumentStyle()
self.assertEquals(self.ed.Length, 1)
self.assertEquals(self.ed.GetCharAt(0), ord("x"))
self.assertEquals(self.ed.GetStyleAt(0), 0)
self.assertEquals(self.ed.StyledTextRange(0, 1), b"x\0")
def testStyling(self):
self.assertEquals(self.ed.EndStyled, 0)
self.ed.AddStyledText(4, b"x\002y\003")
self.assertEquals(self.ed.StyledTextRange(0, 2), b"x\002y\003")
self.ed.StartStyling(0,0xf)
self.ed.SetStyling(1, 5)
self.assertEquals(self.ed.StyledTextRange(0, 2), b"x\005y\003")
# Set the mask so 0 bit changed but not 2 bit
self.ed.StartStyling(0,0x1)
self.ed.SetStyling(1, 0)
self.assertEquals(self.ed.StyledTextRange(0, 2), b"x\004y\003")
self.ed.StartStyling(0,0xff)
self.ed.SetStylingEx(2, b"\100\101")
self.assertEquals(self.ed.StyledTextRange(0, 2), b"x\100y\101")
def testPosition(self):
self.assertEquals(self.ed.CurrentPos, 0)
self.assertEquals(self.ed.Anchor, 0)
self.ed.AddText(1, b"x")
# Caret has automatically moved
self.assertEquals(self.ed.CurrentPos, 1)
self.assertEquals(self.ed.Anchor, 1)
self.ed.SelectAll()
self.assertEquals(self.ed.CurrentPos, 0)
self.assertEquals(self.ed.Anchor, 1)
self.ed.Anchor = 0
self.assertEquals(self.ed.Anchor, 0)
# Check line positions
self.assertEquals(self.ed.PositionFromLine(0), 0)
self.assertEquals(self.ed.GetLineEndPosition(0), 1)
self.assertEquals(self.ed.PositionFromLine(1), 1)
self.ed.CurrentPos = 1
self.assertEquals(self.ed.Anchor, 0)
self.assertEquals(self.ed.CurrentPos, 1)
def testBeyonEnd(self):
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.GetLineEndPosition(0), 1)
self.assertEquals(self.ed.GetLineEndPosition(1), 1)
self.assertEquals(self.ed.GetLineEndPosition(2), 1)
def testSelection(self):
self.assertEquals(self.ed.CurrentPos, 0)
self.assertEquals(self.ed.Anchor, 0)
self.assertEquals(self.ed.SelectionStart, 0)
self.assertEquals(self.ed.SelectionEnd, 0)
self.ed.AddText(1, b"x")
self.ed.SelectionStart = 0
self.assertEquals(self.ed.CurrentPos, 1)
self.assertEquals(self.ed.Anchor, 0)
self.assertEquals(self.ed.SelectionStart, 0)
self.assertEquals(self.ed.SelectionEnd, 1)
self.ed.SelectionStart = 1
self.assertEquals(self.ed.CurrentPos, 1)
self.assertEquals(self.ed.Anchor, 1)
self.assertEquals(self.ed.SelectionStart, 1)
self.assertEquals(self.ed.SelectionEnd, 1)
self.ed.SelectionEnd = 0
self.assertEquals(self.ed.CurrentPos, 0)
self.assertEquals(self.ed.Anchor, 0)
def testSetSelection(self):
self.ed.AddText(4, b"abcd")
self.ed.SetSel(1, 3)
self.assertEquals(self.ed.SelectionStart, 1)
self.assertEquals(self.ed.SelectionEnd, 3)
result = self.ed.GetSelText(0)
self.assertEquals(result, b"bc\0")
self.ed.ReplaceSel(0, b"1234")
self.assertEquals(self.ed.Length, 6)
self.assertEquals(self.ed.Contents(), b"a1234d")
def testReadOnly(self):
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.ReadOnly, 0)
self.assertEquals(self.ed.Contents(), b"x")
self.ed.ReadOnly = 1
self.assertEquals(self.ed.ReadOnly, 1)
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.Contents(), b"x")
self.ed.ReadOnly = 0
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.Contents(), b"xx")
self.ed.Null()
self.assertEquals(self.ed.Contents(), b"xx")
def testAddLine(self):
data = b"x" * 70 + b"\n"
for i in range(5):
self.ed.AddText(len(data), data)
self.xite.DoEvents()
self.assertEquals(self.ed.LineCount, i + 2)
self.assert_(self.ed.Length > 0)
def testInsertText(self):
data = b"xy"
self.ed.InsertText(0, data)
self.assertEquals(self.ed.Length, 2)
self.assertEquals(data, self.ed.ByteRange(0,2))
self.ed.InsertText(1, data)
# Should now be "xxyy"
self.assertEquals(self.ed.Length, 4)
self.assertEquals(b"xxyy", self.ed.ByteRange(0,4))
def testInsertNul(self):
data = b"\0"
self.ed.AddText(1, data)
self.assertEquals(self.ed.Length, 1)
self.assertEquals(data, self.ed.ByteRange(0,1))
def testUndoRedo(self):
data = b"xy"
self.assertEquals(self.ed.Modify, 0)
self.assertEquals(self.ed.UndoCollection, 1)
self.assertEquals(self.ed.CanRedo(), 0)
self.assertEquals(self.ed.CanUndo(), 0)
self.ed.InsertText(0, data)
self.assertEquals(self.ed.Length, 2)
self.assertEquals(self.ed.Modify, 1)
self.assertEquals(self.ed.CanRedo(), 0)
self.assertEquals(self.ed.CanUndo(), 1)
self.ed.Undo()
self.assertEquals(self.ed.Length, 0)
self.assertEquals(self.ed.Modify, 0)
self.assertEquals(self.ed.CanRedo(), 1)
self.assertEquals(self.ed.CanUndo(), 0)
self.ed.Redo()
self.assertEquals(self.ed.Length, 2)
self.assertEquals(self.ed.Modify, 1)
self.assertEquals(data, self.ed.Contents())
self.assertEquals(self.ed.CanRedo(), 0)
self.assertEquals(self.ed.CanUndo(), 1)
def testUndoSavePoint(self):
data = b"xy"
self.assertEquals(self.ed.Modify, 0)
self.ed.InsertText(0, data)
self.assertEquals(self.ed.Modify, 1)
self.ed.SetSavePoint()
self.assertEquals(self.ed.Modify, 0)
self.ed.InsertText(0, data)
self.assertEquals(self.ed.Modify, 1)
def testUndoCollection(self):
data = b"xy"
self.assertEquals(self.ed.UndoCollection, 1)
self.ed.UndoCollection = 0
self.assertEquals(self.ed.UndoCollection, 0)
self.ed.InsertText(0, data)
self.assertEquals(self.ed.CanRedo(), 0)
self.assertEquals(self.ed.CanUndo(), 0)
self.ed.UndoCollection = 1
def testGetColumn(self):
self.ed.AddText(1, b"x")
self.assertEquals(self.ed.GetColumn(0), 0)
self.assertEquals(self.ed.GetColumn(1), 1)
# Next line caused infinite loop in 1.71
self.assertEquals(self.ed.GetColumn(2), 1)
self.assertEquals(self.ed.GetColumn(3), 1)
def testTabWidth(self):
self.assertEquals(self.ed.TabWidth, 8)
self.ed.AddText(3, b"x\tb")
self.assertEquals(self.ed.GetColumn(0), 0)
self.assertEquals(self.ed.GetColumn(1), 1)
self.assertEquals(self.ed.GetColumn(2), 8)
for col in range(10):
if col == 0:
self.assertEquals(self.ed.FindColumn(0, col), 0)
elif col == 1:
self.assertEquals(self.ed.FindColumn(0, col), 1)
elif col == 8:
self.assertEquals(self.ed.FindColumn(0, col), 2)
elif col == 9:
self.assertEquals(self.ed.FindColumn(0, col), 3)
else:
self.assertEquals(self.ed.FindColumn(0, col), 1)
self.ed.TabWidth = 4
self.assertEquals(self.ed.TabWidth, 4)
self.assertEquals(self.ed.GetColumn(0), 0)
self.assertEquals(self.ed.GetColumn(1), 1)
self.assertEquals(self.ed.GetColumn(2), 4)
def testIndent(self):
self.assertEquals(self.ed.Indent, 0)
self.assertEquals(self.ed.UseTabs, 1)
self.ed.Indent = 8
self.ed.UseTabs = 0
self.assertEquals(self.ed.Indent, 8)
self.assertEquals(self.ed.UseTabs, 0)
self.ed.AddText(3, b"x\tb")
self.assertEquals(self.ed.GetLineIndentation(0), 0)
self.ed.InsertText(0, b" ")
self.assertEquals(self.ed.GetLineIndentation(0), 1)
self.assertEquals(self.ed.GetLineIndentPosition(0), 1)
self.assertEquals(self.ed.Contents(), b" x\tb")
self.ed.SetLineIndentation(0,2)
self.assertEquals(self.ed.Contents(), b" x\tb")
self.assertEquals(self.ed.GetLineIndentPosition(0), 2)
self.ed.UseTabs = 1
self.ed.SetLineIndentation(0,8)
self.assertEquals(self.ed.Contents(), b"\tx\tb")
self.assertEquals(self.ed.GetLineIndentPosition(0), 1)
def testGetCurLine(self):
self.ed.AddText(1, b"x")
data = ctypes.create_string_buffer(b"\0" * 100)
caret = self.ed.GetCurLine(len(data), data)
self.assertEquals(caret, 1)
self.assertEquals(data.value, b"x")
def testGetLine(self):
self.ed.AddText(1, b"x")
data = ctypes.create_string_buffer(b"\0" * 100)
self.ed.GetLine(0, data)
self.assertEquals(data.value, b"x")
	def testLineEnds(self):
		"""Line end positions/lengths, default EOL mode, and ConvertEOLs for each mode."""
		self.ed.AddText(3, b"x\ny")
		self.assertEquals(self.ed.GetLineEndPosition(0), 1)
		self.assertEquals(self.ed.GetLineEndPosition(1), 3)
		self.assertEquals(self.ed.LineLength(0), 2)
		self.assertEquals(self.ed.LineLength(1), 1)
		# Platform default EOL mode differs: CRLF on Windows, LF elsewhere.
		if sys.platform == "win32":
			self.assertEquals(self.ed.EOLMode, self.ed.SC_EOL_CRLF)
		else:
			self.assertEquals(self.ed.EOLMode, self.ed.SC_EOL_LF)
		# Indexed by mode constant; assumes SC_EOL_CRLF=0, SC_EOL_CR=1, SC_EOL_LF=2.
		lineEnds = [b"\r\n", b"\r", b"\n"]
		for lineEndType in [self.ed.SC_EOL_CR, self.ed.SC_EOL_LF, self.ed.SC_EOL_CRLF]:
			self.ed.EOLMode = lineEndType
			self.assertEquals(self.ed.EOLMode, lineEndType)
			self.ed.ConvertEOLs(lineEndType)
			self.assertEquals(self.ed.Contents(), b"x" + lineEnds[lineEndType] + b"y")
			self.assertEquals(self.ed.LineLength(0), 1 + len(lineEnds[lineEndType]))
# Several tests for unicode line ends U+2028 and U+2029
	def testUnicodeLineEnds(self):
		# Add two lines separated with U+2028 and ensure it is seen as two lines
		# Then remove U+2028 and should be just 1 lines
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		# b"\xe2\x80\xa8" is U+2028 LINE SEPARATOR in UTF-8.
		self.ed.AddText(5, b"x\xe2\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 2)
		self.assertEquals(self.ed.GetLineEndPosition(0), 1)
		self.assertEquals(self.ed.GetLineEndPosition(1), 5)
		self.assertEquals(self.ed.LineLength(0), 4)
		self.assertEquals(self.ed.LineLength(1), 1)
		# Delete the 3-byte separator via the target; lines merge back into one.
		self.ed.TargetStart = 1
		self.ed.TargetEnd = 4
		self.ed.ReplaceTarget(0, b"")
		self.assertEquals(self.ed.LineCount, 1)
		self.assertEquals(self.ed.LineLength(0), 2)
		self.assertEquals(self.ed.GetLineEndPosition(0), 2)
		self.assertEquals(self.ed.LineEndTypesSupported, 1)
	def testUnicodeLineEndsWithCodePage0(self):
		# Try the Unicode line ends when not in Unicode mode -> should remain 1 line
		self.ed.SetCodePage(0)
		self.ed.AddText(5, b"x\xe2\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 1)
		# Same for NEL (U+0085 as UTF-8 bytes).
		self.ed.AddText(4, b"x\xc2\x85y")
		self.assertEquals(self.ed.LineCount, 1)
	def testUnicodeLineEndsSwitchToUnicodeAndBack(self):
		# Add the Unicode line ends when not in Unicode mode
		self.ed.SetCodePage(0)
		self.ed.AddText(5, b"x\xe2\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 1)
		# Into UTF-8 mode - should now be interpreting as two lines
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		self.assertEquals(self.ed.LineCount, 2)
		# Back to code page 0 and 1 line
		self.ed.SetCodePage(0)
		self.assertEquals(self.ed.LineCount, 1)
	def testUFragmentedEOLCompletion(self):
		"""Completing a partial UTF-8 U+2028 line end creates a new line only once whole."""
		# Add 2 starting bytes of UTF-8 line end then complete it
		self.ed.ClearAll()
		self.ed.AddText(4, b"x\xe2\x80y")
		self.assertEquals(self.ed.LineCount, 1)
		self.assertEquals(self.ed.GetLineEndPosition(0), 4)
		self.ed.SetSel(3,3)
		self.ed.AddText(1, b"\xa8")
		self.assertEquals(self.ed.Contents(), b"x\xe2\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 2)
		# Add 1 starting bytes of UTF-8 line end then complete it
		self.ed.ClearAll()
		self.ed.AddText(3, b"x\xe2y")
		self.assertEquals(self.ed.LineCount, 1)
		self.assertEquals(self.ed.GetLineEndPosition(0), 3)
		self.ed.SetSel(2,2)
		self.ed.AddText(2, b"\x80\xa8")
		self.assertEquals(self.ed.Contents(), b"x\xe2\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 2)
	def testUFragmentedEOLStart(self):
		# Add end of UTF-8 line end then insert start
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		self.assertEquals(self.ed.LineCount, 1)
		# Trailing two bytes of U+2028 alone are not a line end.
		self.ed.AddText(4, b"x\x80\xa8y")
		self.assertEquals(self.ed.LineCount, 1)
		self.ed.SetSel(1,1)
		# Inserting the lead byte completes the separator -> second line appears.
		self.ed.AddText(1, b"\xe2")
		self.assertEquals(self.ed.LineCount, 2)
def testUBreakApartEOL(self):
# Add two lines separated by U+2029 then remove and add back each byte ensuring
# only one line after each removal of any byte in line end and 2 lines after reinsertion
self.ed.Lexer = self.ed.SCLEX_CPP
self.ed.SetCodePage(65001)
self.ed.SetLineEndTypesAllowed(1)
text = b"x\xe2\x80\xa9y";
self.ed.AddText(5, text)
self.assertEquals(self.ed.LineCount, 2)
for i in range(len(text)):
self.ed.TargetStart = i
self.ed.TargetEnd = i + 1
self.ed.ReplaceTarget(0, b"")
if i in [0, 4]:
# Removing text characters does not change number of lines
self.assertEquals(self.ed.LineCount, 2)
else:
# Removing byte from line end, removes 1 line
self.assertEquals(self.ed.LineCount, 1)
self.ed.TargetEnd = i
self.ed.ReplaceTarget(1, text[i:i+1])
self.assertEquals(self.ed.LineCount, 2)
	def testURemoveEOLFragment(self):
		# Add UTF-8 line end then delete each byte causing line end to disappear
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		# Delete each of the 3 bytes of U+2028 in turn from a fresh document.
		for i in range(3):
			self.ed.ClearAll()
			self.ed.AddText(5, b"x\xe2\x80\xa8y")
			self.assertEquals(self.ed.LineCount, 2)
			self.ed.TargetStart = i+1
			self.ed.TargetEnd = i+2
			self.ed.ReplaceTarget(0, b"")
			self.assertEquals(self.ed.LineCount, 1)
# Several tests for unicode NEL line ends U+0085
	def testNELLineEnds(self):
		# Add two lines separated with U+0085 and ensure it is seen as two lines
		# Then remove U+0085 and should be just 1 lines
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		# b"\xc2\x85" is U+0085 NEL in UTF-8.
		self.ed.AddText(4, b"x\xc2\x85y")
		self.assertEquals(self.ed.LineCount, 2)
		self.assertEquals(self.ed.GetLineEndPosition(0), 1)
		self.assertEquals(self.ed.GetLineEndPosition(1), 4)
		self.assertEquals(self.ed.LineLength(0), 3)
		self.assertEquals(self.ed.LineLength(1), 1)
		# Delete the 2-byte NEL via the target; lines merge back into one.
		self.ed.TargetStart = 1
		self.ed.TargetEnd = 3
		self.ed.ReplaceTarget(0, b"")
		self.assertEquals(self.ed.LineCount, 1)
		self.assertEquals(self.ed.LineLength(0), 2)
		self.assertEquals(self.ed.GetLineEndPosition(0), 2)
	def testNELFragmentedEOLCompletion(self):
		# Add starting byte of UTF-8 NEL then complete it
		self.ed.AddText(3, b"x\xc2y")
		self.assertEquals(self.ed.LineCount, 1)
		self.assertEquals(self.ed.GetLineEndPosition(0), 3)
		self.ed.SetSel(2,2)
		# Inserting the trail byte completes U+0085 -> second line appears.
		self.ed.AddText(1, b"\x85")
		self.assertEquals(self.ed.Contents(), b"x\xc2\x85y")
		self.assertEquals(self.ed.LineCount, 2)
	def testNELFragmentedEOLStart(self):
		# Add end of UTF-8 NEL then insert start
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.ed.SetCodePage(65001)
		self.ed.SetLineEndTypesAllowed(1)
		self.assertEquals(self.ed.LineCount, 1)
		# Trail byte of NEL alone is not a line end.
		self.ed.AddText(4, b"x\x85y")
		self.assertEquals(self.ed.LineCount, 1)
		self.ed.SetSel(1,1)
		# Inserting the lead byte completes the NEL -> second line appears.
		self.ed.AddText(1, b"\xc2")
		self.assertEquals(self.ed.LineCount, 2)
def testNELBreakApartEOL(self):
# Add two lines separated by U+0085 then remove and add back each byte ensuring
# only one line after each removal of any byte in line end and 2 lines after reinsertion
self.ed.Lexer = self.ed.SCLEX_CPP
self.ed.SetCodePage(65001)
self.ed.SetLineEndTypesAllowed(1)
text = b"x\xc2\x85y";
self.ed.AddText(4, text)
self.assertEquals(self.ed.LineCount, 2)
for i in range(len(text)):
self.ed.TargetStart = i
self.ed.TargetEnd = i + 1
self.ed.ReplaceTarget(0, b"")
if i in [0, 3]:
# Removing text characters does not change number of lines
self.assertEquals(self.ed.LineCount, 2)
else:
# Removing byte from line end, removes 1 line
self.assertEquals(self.ed.LineCount, 1)
self.ed.TargetEnd = i
self.ed.ReplaceTarget(1, text[i:i+1])
self.assertEquals(self.ed.LineCount, 2)
	def testNELRemoveEOLFragment(self):
		# Add UTF-8 NEL then delete each byte causing line end to disappear
		self.ed.SetCodePage(65001)
		# Delete each of the 2 bytes of U+0085 in turn from a fresh document.
		for i in range(2):
			self.ed.ClearAll()
			self.ed.AddText(4, b"x\xc2\x85y")
			self.assertEquals(self.ed.LineCount, 2)
			self.ed.TargetStart = i+1
			self.ed.TargetEnd = i+2
			self.ed.ReplaceTarget(0, b"")
			self.assertEquals(self.ed.LineCount, 1)
	def testGoto(self):
		"""GotoLine and GotoPos move the caret; CurrentPos reflects the move."""
		self.ed.AddText(5, b"a\nb\nc")
		self.assertEquals(self.ed.CurrentPos, 5)
		self.ed.GotoLine(1)
		self.assertEquals(self.ed.CurrentPos, 2)
		self.ed.GotoPos(4)
		self.assertEquals(self.ed.CurrentPos, 4)
	def testCutCopyPaste(self):
		"""Cut/Copy/Paste/Clear round trips through the system clipboard."""
		self.ed.AddText(5, b"a1b2c")
		self.ed.SetSel(1,3)
		self.ed.Cut()
		# Pump the message loop so the clipboard operation completes.
		self.xite.DoEvents()
		self.assertEquals(self.ed.CanPaste(), 1)
		self.ed.SetSel(0, 0)
		self.ed.Paste()
		self.assertEquals(self.ed.Contents(), b"1ba2c")
		self.ed.SetSel(4,5)
		self.ed.Copy()
		self.ed.SetSel(1,3)
		self.ed.Paste()
		self.assertEquals(self.ed.Contents(), b"1c2c")
		self.ed.SetSel(2,4)
		self.ed.Clear()
		self.assertEquals(self.ed.Contents(), b"1c")
	def testCopyAllowLine(self):
		"""CopyAllowLine with an empty selection copies the whole caret line."""
		self.xite.DoEvents()
		# Pin EOL mode to LF so the expected contents are platform-independent.
		lineEndType = self.ed.EOLMode
		self.ed.EOLMode = self.ed.SC_EOL_LF
		self.ed.AddText(5, b"a1\nb2")
		self.ed.SetSel(1,1)
		self.ed.CopyAllowLine()
		self.xite.DoEvents()
		self.assertEquals(self.ed.CanPaste(), 1)
		self.ed.SetSel(0, 0)
		self.ed.Paste()
		# Restore the saved EOL mode before asserting.
		self.ed.EOLMode = lineEndType
		self.assertEquals(self.ed.Contents(), b"a1\na1\nb2")
	def testDuplicate(self):
		"""SelectionDuplicate inserts a copy of the selected text after it."""
		self.ed.AddText(3, b"1b2")
		self.ed.SetSel(1,2)
		self.ed.SelectionDuplicate()
		self.assertEquals(self.ed.Contents(), b"1bb2")
	def testTransposeLines(self):
		"""LineTranspose swaps the caret line with the line above it."""
		self.ed.AddText(8, b"a1\nb2\nc3")
		self.ed.SetSel(3,3)
		self.ed.LineTranspose()
		self.assertEquals(self.ed.Contents(), b"b2\na1\nc3")
def testGetSet(self):
self.ed.SetContents(b"abc")
self.assertEquals(self.ed.TextLength, 3)
result = ctypes.create_string_buffer(b"\0" * 5)
length = self.ed.GetText(4, result)
self.assertEquals(result.value, b"abc")
	def testAppend(self):
		"""AppendText adds at the end without moving the selection."""
		self.ed.SetContents(b"abc")
		self.assertEquals(self.ed.SelectionStart, 0)
		self.assertEquals(self.ed.SelectionEnd, 0)
		text = b"12"
		self.ed.AppendText(len(text), text)
		# Selection stays where it was, unlike AddText/InsertText.
		self.assertEquals(self.ed.SelectionStart, 0)
		self.assertEquals(self.ed.SelectionEnd, 0)
		self.assertEquals(self.ed.Contents(), b"abc12")
def testTarget(self):
self.ed.SetContents(b"abcd")
self.ed.TargetStart = 1
self.ed.TargetEnd = 3
self.assertEquals(self.ed.TargetStart, 1)
self.assertEquals(self.ed.TargetEnd, 3)
rep = b"321"
self.ed.ReplaceTarget(len(rep), rep)
self.assertEquals(self.ed.Contents(), b"a321d")
self.ed.SearchFlags = self.ed.SCFIND_REGEXP
self.assertEquals(self.ed.SearchFlags, self.ed.SCFIND_REGEXP)
searchString = b"\([1-9]+\)"
pos = self.ed.SearchInTarget(len(searchString), searchString)
self.assertEquals(1, pos)
tagString = self.ed.GetTag(1)
self.assertEquals(tagString, b"321")
rep = b"\\1"
self.ed.TargetStart = 0
self.ed.TargetEnd = 0
self.ed.ReplaceTargetRE(len(rep), rep)
self.assertEquals(self.ed.Contents(), b"321a321d")
self.ed.SetSel(4,5)
self.ed.TargetFromSelection()
self.assertEquals(self.ed.TargetStart, 4)
self.assertEquals(self.ed.TargetEnd, 5)
	def testTargetEscape(self):
		# Checks that a literal \ can be in the replacement. Bug #2959876
		self.ed.SetContents(b"abcd")
		self.ed.TargetStart = 1
		self.ed.TargetEnd = 3
		# b"\\\\n" is the 3 bytes \\n: the RE replacer unescapes \\ to a literal backslash.
		rep = b"\\\\n"
		self.ed.ReplaceTargetRE(len(rep), rep)
		self.assertEquals(self.ed.Contents(), b"a\\nd")
	def testPointsAndPositions(self):
		"""PositionFromPoint maps pixel coordinates to document positions."""
		self.ed.AddText(1, b"x")
		# Start of text
		self.assertEquals(self.ed.PositionFromPoint(0,0), 0)
		# End of text
		self.assertEquals(self.ed.PositionFromPoint(0,100), 1)
	def testLinePositions(self):
		"""LineFromPosition for every position, including a clamped negative one."""
		text = b"ab\ncd\nef"
		nl = b"\n"
		# On Python 3 indexing bytes yields an int, so compare against ord("\n").
		if sys.version_info[0] == 3:
			nl = ord(b"\n")
		self.ed.AddText(len(text), text)
		self.assertEquals(self.ed.LineFromPosition(-1), 0)
		line = 0
		for pos in range(len(text)+1):
			self.assertEquals(self.ed.LineFromPosition(pos), line)
			if pos < len(text) and text[pos] == nl:
				line += 1
	def testWordPositions(self):
		"""WordStartPosition/WordEndPosition with onlyWordCharacters=0."""
		text = b"ab cd\tef"
		self.ed.AddText(len(text), text)
		self.assertEquals(self.ed.WordStartPosition(3, 0), 2)
		self.assertEquals(self.ed.WordStartPosition(4, 0), 3)
		self.assertEquals(self.ed.WordStartPosition(5, 0), 3)
		self.assertEquals(self.ed.WordStartPosition(6, 0), 5)
		self.assertEquals(self.ed.WordEndPosition(2, 0), 3)
		self.assertEquals(self.ed.WordEndPosition(3, 0), 5)
		self.assertEquals(self.ed.WordEndPosition(4, 0), 5)
		self.assertEquals(self.ed.WordEndPosition(5, 0), 6)
		self.assertEquals(self.ed.WordEndPosition(6, 0), 8)
# Bit flags combined by TestContainerUndo.UndoState() to summarise the
# editor's modify/undo/redo state in a single integer.
MODI = 1
UNDO = 2
REDO = 4
class TestContainerUndo(unittest.TestCase):
	"""Tests for AddUndoAction: container-owned undo actions and how they
	coalesce (or not) with surrounding insertions and deletions."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.data = b"xy"

	def UndoState(self):
		# Pack the current modify/can-undo/can-redo state into one bitmask.
		return (MODI if self.ed.Modify else 0) | \
			(UNDO if self.ed.CanUndo() else 0) | \
			(REDO if self.ed.CanRedo() else 0)

	def testContainerActNoCoalesce(self):
		# A non-coalescing container action undoes/redoes on its own,
		# leaving the inserted text untouched.
		self.ed.InsertText(0, self.data)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 0)
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO | REDO)
		self.ed.Redo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()

	def testContainerActCoalesce(self):
		# A coalescing container action is undone together with the
		# preceding insertion.
		self.ed.InsertText(0, self.data)
		self.ed.AddUndoAction(5, 1)
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)
		self.ed.Redo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)

	def testContainerMultiStage(self):
		# Multiple coalescing container actions collapse into the same
		# undo step as the insertion.
		self.ed.InsertText(0, self.data)
		self.ed.AddUndoAction(5, 1)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)
		self.ed.Redo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)

	def testContainerMultiStageNoText(self):
		# Container actions alone (no text change) still drive the
		# modify/undo/redo state.
		self.ed.AddUndoAction(5, 1)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()
		self.assertEquals(self.UndoState(), REDO)
		self.ed.Redo()
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()
		self.assertEquals(self.UndoState(), REDO)

	def testContainerActCoalesceEnd(self):
		# A container action before the insertion coalesces with the
		# insertion that follows it.
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.InsertText(0, self.data)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)
		self.ed.Redo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)

	def testContainerBetweenInsertAndInsert(self):
		self.assertEquals(self.ed.Length, 0)
		self.ed.InsertText(0, self.data)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.InsertText(2, self.data)
		self.assertEquals(self.ed.Length, 4)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		# Undoes both insertions and the containerAction in the middle
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)

	def testContainerNoCoalesceBetweenInsertAndInsert(self):
		self.assertEquals(self.ed.Length, 0)
		self.ed.InsertText(0, self.data)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 0)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.InsertText(2, self.data)
		self.assertEquals(self.ed.Length, 4)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		# Undo last insertion
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO | REDO)
		# Undo container
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO | REDO)
		# Undo first insertion
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 0)
		self.assertEquals(self.UndoState(), REDO)

	def testContainerBetweenDeleteAndDelete(self):
		self.ed.InsertText(0, self.data)
		self.ed.EmptyUndoBuffer()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), 0)
		self.ed.SetSel(2,2)
		self.ed.DeleteBack()
		self.assertEquals(self.ed.Length, 1)
		self.ed.AddUndoAction(5, 1)
		self.ed.DeleteBack()
		self.assertEquals(self.ed.Length, 0)
		# Undoes both deletions and the containerAction in the middle
		self.ed.Undo()
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), REDO)

	def testContainerBetweenInsertAndDelete(self):
		self.assertEquals(self.ed.Length, 0)
		self.ed.InsertText(0, self.data)
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.AddUndoAction(5, 1)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.SetSel(0,1)
		self.ed.Cut()
		self.assertEquals(self.ed.Length, 1)
		self.assertEquals(self.UndoState(), MODI | UNDO)
		self.ed.Undo()	# Only undoes the deletion
		self.assertEquals(self.ed.Length, 2)
		self.assertEquals(self.UndoState(), MODI | UNDO | REDO)
class TestKeyCommands(unittest.TestCase):
	""" These commands are normally assigned to keys and take no arguments """
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()

	def selRange(self):
		# Convenience: (caret, anchor) pair describing the selection.
		return self.ed.CurrentPos, self.ed.Anchor

	def testLineMove(self):
		"""Caret movement by line, with and without extending the selection."""
		self.ed.AddText(8, b"x1\ny2\nz3")
		self.ed.SetSel(0,0)
		self.ed.ChooseCaretX()
		self.ed.LineDown()
		self.ed.LineDown()
		self.assertEquals(self.selRange(), (6, 6))
		self.ed.LineUp()
		self.assertEquals(self.selRange(), (3, 3))
		self.ed.LineDownExtend()
		self.assertEquals(self.selRange(), (6, 3))
		self.ed.LineUpExtend()
		self.ed.LineUpExtend()
		self.assertEquals(self.selRange(), (0, 3))

	def testCharMove(self):
		"""Caret movement by character, with and without extending the selection."""
		self.ed.AddText(8, b"x1\ny2\nz3")
		self.ed.SetSel(0,0)
		self.ed.CharRight()
		self.ed.CharRight()
		self.assertEquals(self.selRange(), (2, 2))
		self.ed.CharLeft()
		self.assertEquals(self.selRange(), (1, 1))
		self.ed.CharRightExtend()
		self.assertEquals(self.selRange(), (2, 1))
		self.ed.CharLeftExtend()
		self.ed.CharLeftExtend()
		self.assertEquals(self.selRange(), (0, 1))

	def testWordMove(self):
		"""Caret movement by word, with and without extending the selection."""
		self.ed.AddText(10, b"a big boat")
		self.ed.SetSel(3,3)
		self.ed.WordRight()
		self.ed.WordRight()
		self.assertEquals(self.selRange(), (10, 10))
		self.ed.WordLeft()
		self.assertEquals(self.selRange(), (6, 6))
		self.ed.WordRightExtend()
		self.assertEquals(self.selRange(), (10, 6))
		self.ed.WordLeftExtend()
		self.ed.WordLeftExtend()
		self.assertEquals(self.selRange(), (2, 6))

	def testHomeEndMove(self):
		"""Home/LineEnd movement, with and without extending the selection."""
		self.ed.AddText(10, b"a big boat")
		self.ed.SetSel(3,3)
		self.ed.Home()
		self.assertEquals(self.selRange(), (0, 0))
		self.ed.LineEnd()
		self.assertEquals(self.selRange(), (10, 10))
		self.ed.SetSel(3,3)
		self.ed.HomeExtend()
		self.assertEquals(self.selRange(), (0, 3))
		self.ed.LineEndExtend()
		self.assertEquals(self.selRange(), (10, 3))

	def testStartEndMove(self):
		"""Document start/end movement, with and without extending the selection."""
		self.ed.AddText(10, b"a\nbig\nboat")
		self.ed.SetSel(3,3)
		self.ed.DocumentStart()
		self.assertEquals(self.selRange(), (0, 0))
		self.ed.DocumentEnd()
		self.assertEquals(self.selRange(), (10, 10))
		self.ed.SetSel(3,3)
		self.ed.DocumentStartExtend()
		self.assertEquals(self.selRange(), (0, 3))
		self.ed.DocumentEndExtend()
		self.assertEquals(self.selRange(), (10, 3))
class TestMarkers(unittest.TestCase):
	"""Tests for line markers: add/delete, handles, bitmasks, navigation
	and the per-line state values."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		# Three one-character lines to attach markers to.
		self.ed.AddText(5, b"x\ny\nz")

	def testMarker(self):
		handle = self.ed.MarkerAdd(1,1)
		self.assertEquals(self.ed.MarkerLineFromHandle(handle), 1)
		self.ed.MarkerDelete(1,1)
		# -1 indicates the handle no longer refers to a marker.
		self.assertEquals(self.ed.MarkerLineFromHandle(handle), -1)

	def testTwiceAddedDelete(self):
		# Adding the same marker number twice needs two deletes to clear.
		handle = self.ed.MarkerAdd(1,1)
		self.assertEquals(self.ed.MarkerGet(1), 2)
		handle2 = self.ed.MarkerAdd(1,1)
		self.assertEquals(self.ed.MarkerGet(1), 2)
		self.ed.MarkerDelete(1,1)
		self.assertEquals(self.ed.MarkerGet(1), 2)
		self.ed.MarkerDelete(1,1)
		self.assertEquals(self.ed.MarkerGet(1), 0)

	def testMarkerDeleteAll(self):
		h1 = self.ed.MarkerAdd(0,1)
		h2 = self.ed.MarkerAdd(1,2)
		self.assertEquals(self.ed.MarkerLineFromHandle(h1), 0)
		self.assertEquals(self.ed.MarkerLineFromHandle(h2), 1)
		# Delete only marker number 1, then -1 deletes every marker.
		self.ed.MarkerDeleteAll(1)
		self.assertEquals(self.ed.MarkerLineFromHandle(h1), -1)
		self.assertEquals(self.ed.MarkerLineFromHandle(h2), 1)
		self.ed.MarkerDeleteAll(-1)
		self.assertEquals(self.ed.MarkerLineFromHandle(h1), -1)
		self.assertEquals(self.ed.MarkerLineFromHandle(h2), -1)

	def testMarkerDeleteHandle(self):
		handle = self.ed.MarkerAdd(0,1)
		self.assertEquals(self.ed.MarkerLineFromHandle(handle), 0)
		self.ed.MarkerDeleteHandle(handle)
		self.assertEquals(self.ed.MarkerLineFromHandle(handle), -1)

	def testMarkerBits(self):
		# MarkerGet returns a bitmask: marker n contributes bit 1<<n.
		self.assertEquals(self.ed.MarkerGet(0), 0)
		self.ed.MarkerAdd(0,1)
		self.assertEquals(self.ed.MarkerGet(0), 2)
		self.ed.MarkerAdd(0,2)
		self.assertEquals(self.ed.MarkerGet(0), 6)

	def testMarkerAddSet(self):
		# MarkerAddSet sets markers from a bitmask in one call.
		self.assertEquals(self.ed.MarkerGet(0), 0)
		self.ed.MarkerAddSet(0,5)
		self.assertEquals(self.ed.MarkerGet(0), 5)
		self.ed.MarkerDeleteAll(-1)

	def testMarkerNext(self):
		# MarkerNext/MarkerPrevious search for lines matching a marker mask.
		self.assertEquals(self.ed.MarkerNext(0, 2), -1)
		h1 = self.ed.MarkerAdd(0,1)
		h2 = self.ed.MarkerAdd(2,1)
		self.assertEquals(self.ed.MarkerNext(0, 2), 0)
		self.assertEquals(self.ed.MarkerNext(1, 2), 2)
		self.assertEquals(self.ed.MarkerNext(2, 2), 2)
		self.assertEquals(self.ed.MarkerPrevious(0, 2), 0)
		self.assertEquals(self.ed.MarkerPrevious(1, 2), 0)
		self.assertEquals(self.ed.MarkerPrevious(2, 2), 2)

	def testMarkerNegative(self):
		self.assertEquals(self.ed.MarkerNext(-1, 2), -1)

	def testLineState(self):
		# SetLineState/GetLineState store an arbitrary integer per line.
		self.assertEquals(self.ed.MaxLineState, 0)
		self.assertEquals(self.ed.GetLineState(0), 0)
		self.assertEquals(self.ed.GetLineState(1), 0)
		self.assertEquals(self.ed.GetLineState(2), 0)
		self.ed.SetLineState(1, 100)
		self.assertNotEquals(self.ed.MaxLineState, 0)
		self.assertEquals(self.ed.GetLineState(0), 0)
		self.assertEquals(self.ed.GetLineState(1), 100)
		self.assertEquals(self.ed.GetLineState(2), 0)

	def testSymbolRetrieval(self):
		self.ed.MarkerDefine(1,3)
		self.assertEquals(self.ed.MarkerSymbolDefined(1), 3)
class TestIndicators(unittest.TestCase):
	"""Tests for indicators: style/colour configuration and range fills."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()

	def testSetIndicator(self):
		# Defaults for indicator 0, then change style and foreground colour.
		self.assertEquals(self.ed.IndicGetStyle(0), 1)
		self.assertEquals(self.ed.IndicGetFore(0), 0x007f00)
		self.ed.IndicSetStyle(0, 2)
		self.ed.IndicSetFore(0, 0xff0080)
		self.assertEquals(self.ed.IndicGetStyle(0), 2)
		self.assertEquals(self.ed.IndicGetFore(0), 0xff0080)

	def testIndicatorFill(self):
		# Fill one character with indicator 3 and check the run boundaries
		# reported by IndicatorStart/IndicatorEnd around it.
		self.ed.InsertText(0, b"abc")
		self.ed.IndicatorCurrent = 3
		self.ed.IndicatorFillRange(1,1)
		self.assertEquals(self.ed.IndicatorValueAt(3, 0), 0)
		self.assertEquals(self.ed.IndicatorValueAt(3, 1), 1)
		self.assertEquals(self.ed.IndicatorValueAt(3, 2), 0)
		self.assertEquals(self.ed.IndicatorStart(3, 0), 0)
		self.assertEquals(self.ed.IndicatorEnd(3, 0), 1)
		self.assertEquals(self.ed.IndicatorStart(3, 1), 1)
		self.assertEquals(self.ed.IndicatorEnd(3, 1), 2)
		self.assertEquals(self.ed.IndicatorStart(3, 2), 2)
		self.assertEquals(self.ed.IndicatorEnd(3, 2), 3)

	def testIndicatorAtEnd(self):
		self.ed.InsertText(0, b"ab")
		self.ed.IndicatorCurrent = 3
		self.ed.IndicatorFillRange(1,1)
		self.assertEquals(self.ed.IndicatorValueAt(3, 0), 0)
		self.assertEquals(self.ed.IndicatorValueAt(3, 1), 1)
		self.assertEquals(self.ed.IndicatorStart(3, 0), 0)
		self.assertEquals(self.ed.IndicatorEnd(3, 0), 1)
		self.assertEquals(self.ed.IndicatorStart(3, 1), 1)
		self.assertEquals(self.ed.IndicatorEnd(3, 1), 2)
		self.ed.DeleteRange(1, 1)
		# Now only one character left and does not have indicator so indicator 3 is null
		self.assertEquals(self.ed.IndicatorValueAt(3, 0), 0)
		# Since null, remaining calls return 0
		self.assertEquals(self.ed.IndicatorStart(3, 0), 0)
		self.assertEquals(self.ed.IndicatorEnd(3, 0), 0)
		self.assertEquals(self.ed.IndicatorStart(3, 1), 0)
		self.assertEquals(self.ed.IndicatorEnd(3, 1), 0)
class TestScrolling(unittest.TestCase):
	"""Tests for vertical and horizontal scrolling of the view."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		# 150 should be enough lines
		self.ed.InsertText(0, b"a" * 150 + b"\n" * 150)

	def testTop(self):
		self.ed.GotoLine(0)
		self.assertEquals(self.ed.FirstVisibleLine, 0)

	def testLineScroll(self):
		self.ed.GotoLine(0)
		# Vertical scrolling is in whole lines.
		self.ed.LineScroll(0, 3)
		self.assertEquals(self.ed.FirstVisibleLine, 3)
		self.ed.LineScroll(0, -2)
		self.assertEquals(self.ed.FirstVisibleLine, 1)
		self.assertEquals(self.ed.XOffset, 0)
		# Horizontal scrolling is in pixels; derive the per-column width
		# from a 10-column scroll, then check a 2-column scroll back.
		self.ed.LineScroll(10, 0)
		self.assertGreater(self.ed.XOffset, 0)
		scroll_width = float(self.ed.XOffset) / 10
		self.ed.LineScroll(-2, 0)
		self.assertEquals(self.ed.XOffset, scroll_width * 8)

	def testVisibleLine(self):
		self.ed.FirstVisibleLine = 7
		self.assertEquals(self.ed.FirstVisibleLine, 7)
class TestSearch(unittest.TestCase):
	"""Tests for FindBytes with plain, case-sensitive, whole-word, word-start
	and regular-expression search flags.

	Regex patterns use doubled backslashes (b"\\\\(") instead of the raw
	b"\\(" form: the bytes are identical but this avoids Python's
	invalid-escape-sequence warnings. Valid Python escapes such as b"\\t"
	and b"\\x62" are left as-is since the original test passed their
	resolved bytes to the search engine.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.ed.InsertText(0, b"a\tbig boat\t")

	def testFind(self):
		pos = self.ed.FindBytes(0, self.ed.Length, b"zzz", 0)
		self.assertEquals(pos, -1)
		pos = self.ed.FindBytes(0, self.ed.Length, b"big", 0)
		self.assertEquals(pos, 2)

	def testFindEmpty(self):
		# Empty pattern matches at the start of the range.
		pos = self.ed.FindBytes(0, self.ed.Length, b"", 0)
		self.assertEquals(pos, 0)

	def testCaseFind(self):
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"big", 0), 2)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bIg", 0), 2)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bIg",
			self.ed.SCFIND_MATCHCASE), -1)

	def testWordFind(self):
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bi", 0), 2)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bi",
			self.ed.SCFIND_WHOLEWORD), -1)

	def testWordStartFind(self):
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bi", 0), 2)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"bi",
			self.ed.SCFIND_WORDSTART), 2)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"ig", 0), 3)
		self.assertEquals(self.ed.FindBytes(0, self.ed.Length, b"ig",
			self.ed.SCFIND_WORDSTART), -1)

	def testREFind(self):
		flags = self.ed.SCFIND_REGEXP
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"b.g", 0))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"b.g", flags))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"\\<b.g\\>", flags))
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"b[A-Z]g",
			flags | self.ed.SCFIND_MATCHCASE))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"b[a-z]g", flags))
		self.assertEquals(6, self.ed.FindBytes(0, self.ed.Length, b"b[a-z]*t", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"^a", flags))
		self.assertEquals(10, self.ed.FindBytes(0, self.ed.Length, b"\t$", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"\\([a]\\).*\0", flags))

	def testPosixREFind(self):
		# POSIX mode: groups use bare parentheses instead of escaped ones.
		flags = self.ed.SCFIND_REGEXP | self.ed.SCFIND_POSIX
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"b.g", 0))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"b.g", flags))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"\\<b.g\\>", flags))
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"b[A-Z]g",
			flags | self.ed.SCFIND_MATCHCASE))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"b[a-z]g", flags))
		self.assertEquals(6, self.ed.FindBytes(0, self.ed.Length, b"b[a-z]*t", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"^a", flags))
		self.assertEquals(10, self.ed.FindBytes(0, self.ed.Length, b"\t$", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"([a]).*\0", flags))

	def testPhilippeREFind(self):
		# Requires 1.,72
		flags = self.ed.SCFIND_REGEXP
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"\\w", flags))
		self.assertEquals(1, self.ed.FindBytes(0, self.ed.Length, b"\\W", flags))
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"\\d", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"\\D", flags))
		self.assertEquals(1, self.ed.FindBytes(0, self.ed.Length, b"\\s", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"\\S", flags))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"\x62", flags))

	def testRENonASCII(self):
		# \xNN escapes in the pattern must match single bytes, not characters.
		self.ed.InsertText(0, b"\xAD")
		flags = self.ed.SCFIND_REGEXP
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"\\x10", flags))
		self.assertEquals(2, self.ed.FindBytes(0, self.ed.Length, b"\\x09", flags))
		self.assertEquals(-1, self.ed.FindBytes(0, self.ed.Length, b"\\xAB", flags))
		self.assertEquals(0, self.ed.FindBytes(0, self.ed.Length, b"\\xAD", flags))
class TestRepresentations(unittest.TestCase):
	"""Tests for character representations: get, clear and set custom blobs."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()

	def testGetControl(self):
		# Control characters have default mnemonic representations.
		result = self.ed.GetRepresentation(b"\001")
		self.assertEquals(result, b"SOH")

	def testClearControl(self):
		result = self.ed.GetRepresentation(b"\002")
		self.assertEquals(result, b"STX")
		self.ed.ClearRepresentation(b"\002")
		result = self.ed.GetRepresentation(b"\002")
		self.assertEquals(result, b"")

	def testSetOhm(self):
		# Set a custom representation for a multi-byte UTF-8 character.
		ohmSign = b"\xe2\x84\xa6"
		ohmExplained = b"U+2126 \xe2\x84\xa6"
		self.ed.SetRepresentation(ohmSign, ohmExplained)
		result = self.ed.GetRepresentation(ohmSign)
		self.assertEquals(result, ohmExplained)
class TestProperties(unittest.TestCase):
	"""Tests for lexer properties: set, get as int/string, and $() expansion."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()

	def testSet(self):
		self.ed.SetProperty(b"test", b"12")
		self.assertEquals(self.ed.GetPropertyInt(b"test"), 12)
		result = self.ed.GetProperty(b"test")
		self.assertEquals(result, b"12")
		# GetPropertyExpanded substitutes $(name) references.
		self.ed.SetProperty(b"test.plus", b"[$(test)]")
		result = self.ed.GetPropertyExpanded(b"test.plus")
		self.assertEquals(result, b"[12]")
class TestTextMargin(unittest.TestCase):
	"""Tests for margin text, its styling, and extra line spacing."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.txt = b"abcd"
		self.ed.AddText(1, b"x")

	def testAscent(self):
		# ExtraAscent/ExtraDescent add pixels above/below each line.
		lineHeight = self.ed.TextHeight(0)
		self.assertEquals(self.ed.ExtraAscent, 0)
		self.assertEquals(self.ed.ExtraDescent, 0)
		self.ed.ExtraAscent = 1
		self.assertEquals(self.ed.ExtraAscent, 1)
		self.ed.ExtraDescent = 2
		self.assertEquals(self.ed.ExtraDescent, 2)
		# Allow line height to recalculate
		self.xite.DoEvents()
		lineHeightIncreased = self.ed.TextHeight(0)
		self.assertEquals(lineHeightIncreased, lineHeight + 2 + 1)

	def testTextMargin(self):
		self.ed.MarginSetText(0, self.txt)
		result = self.ed.MarginGetText(0)
		self.assertEquals(result, self.txt)
		self.ed.MarginTextClearAll()

	def testTextMarginStyle(self):
		# A single style number for the whole margin text of a line.
		self.ed.MarginSetText(0, self.txt)
		self.ed.MarginSetStyle(0, 33)
		self.assertEquals(self.ed.MarginGetStyle(0), 33)
		self.ed.MarginTextClearAll()

	def testTextMarginStyles(self):
		# One style byte per character of margin text.
		styles = b"\001\002\003\004"
		self.ed.MarginSetText(0, self.txt)
		self.ed.MarginSetStyles(0, styles)
		result = self.ed.MarginGetStyles(0)
		self.assertEquals(result, styles)
		self.ed.MarginTextClearAll()

	def testTextMarginStyleOffset(self):
		self.ed.MarginSetStyleOffset(300)
		self.assertEquals(self.ed.MarginGetStyleOffset(), 300)
class TestAnnotation(unittest.TestCase):
	"""Tests for line annotations, their styling, visibility, and the
	allocation of extended style ranges."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.txt = b"abcd"
		self.ed.AddText(1, b"x")

	def testTextAnnotation(self):
		self.assertEquals(self.ed.AnnotationGetLines(), 0)
		self.ed.AnnotationSetText(0, self.txt)
		self.assertEquals(self.ed.AnnotationGetLines(), 1)
		result = self.ed.AnnotationGetText(0)
		self.assertEquals(len(result), 4)
		self.assertEquals(result, self.txt)
		self.ed.AnnotationClearAll()

	def testTextAnnotationStyle(self):
		# A single style number for the whole annotation of a line.
		self.ed.AnnotationSetText(0, self.txt)
		self.ed.AnnotationSetStyle(0, 33)
		self.assertEquals(self.ed.AnnotationGetStyle(0), 33)
		self.ed.AnnotationClearAll()

	def testTextAnnotationStyles(self):
		# One style byte per character of annotation text.
		styles = b"\001\002\003\004"
		self.ed.AnnotationSetText(0, self.txt)
		self.ed.AnnotationSetStyles(0, styles)
		result = self.ed.AnnotationGetStyles(0)
		self.assertEquals(result, styles)
		self.ed.AnnotationClearAll()

	def testExtendedStyles(self):
		# Extended styles are allocated sequentially starting at 256.
		start0 = self.ed.AllocateExtendedStyles(0)
		self.assertEquals(start0, 256)
		start1 = self.ed.AllocateExtendedStyles(10)
		self.assertEquals(start1, 256)
		start2 = self.ed.AllocateExtendedStyles(20)
		self.assertEquals(start2, start1 + 10)
		# Reset by changing lexer
		self.ed.ReleaseAllExtendedStyles()
		start0 = self.ed.AllocateExtendedStyles(0)
		self.assertEquals(start0, 256)

	def testTextAnnotationStyleOffset(self):
		self.ed.AnnotationSetStyleOffset(300)
		self.assertEquals(self.ed.AnnotationGetStyleOffset(), 300)

	def testTextAnnotationVisible(self):
		self.assertEquals(self.ed.AnnotationGetVisible(), 0)
		self.ed.AnnotationSetVisible(2)
		self.assertEquals(self.ed.AnnotationGetVisible(), 2)
		self.ed.AnnotationSetVisible(0)
class TestMultiSelection(unittest.TestCase):
	"""Tests for multiple selection, rectangular selection and virtual space.

	assertEquals (deprecated unittest alias) replaced with assertEqual.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		# 3 lines of 3 characters
		t = b"xxx\nxxx\nxxx"
		self.ed.AddText(len(t), t)
	def testSelectionCleared(self):
		self.ed.ClearSelections()
		self.assertEqual(self.ed.Selections, 1)
		self.assertEqual(self.ed.MainSelection, 0)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 0)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 0)
	def test1Selection(self):
		self.ed.SetSelection(1, 2)
		self.assertEqual(self.ed.Selections, 1)
		self.assertEqual(self.ed.MainSelection, 0)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 1)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 2)
		self.assertEqual(self.ed.GetSelectionNStart(0), 1)
		self.assertEqual(self.ed.GetSelectionNEnd(0), 2)
		self.ed.SwapMainAnchorCaret()
		self.assertEqual(self.ed.Selections, 1)
		self.assertEqual(self.ed.MainSelection, 0)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 2)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 1)
	def test1SelectionReversed(self):
		self.ed.SetSelection(2, 1)
		self.assertEqual(self.ed.Selections, 1)
		self.assertEqual(self.ed.MainSelection, 0)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 2)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 1)
		self.assertEqual(self.ed.GetSelectionNStart(0), 1)
		self.assertEqual(self.ed.GetSelectionNEnd(0), 2)
	def test1SelectionByStartEnd(self):
		self.ed.SetSelectionNStart(0, 2)
		self.ed.SetSelectionNEnd(0, 3)
		self.assertEqual(self.ed.Selections, 1)
		self.assertEqual(self.ed.MainSelection, 0)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 2)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 3)
		self.assertEqual(self.ed.GetSelectionNStart(0), 2)
		self.assertEqual(self.ed.GetSelectionNEnd(0), 3)
	def test2Selections(self):
		self.ed.SetSelection(1, 2)
		self.ed.AddSelection(4, 5)
		self.assertEqual(self.ed.Selections, 2)
		self.assertEqual(self.ed.MainSelection, 1)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 1)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 2)
		self.assertEqual(self.ed.GetSelectionNCaret(1), 4)
		self.assertEqual(self.ed.GetSelectionNAnchor(1), 5)
		self.assertEqual(self.ed.GetSelectionNStart(0), 1)
		self.assertEqual(self.ed.GetSelectionNEnd(0), 2)
		self.ed.MainSelection = 0
		self.assertEqual(self.ed.MainSelection, 0)
		self.ed.RotateSelection()
		self.assertEqual(self.ed.MainSelection, 1)
	def testRectangularSelection(self):
		# A 2-column rectangle over all 3 lines yields 3 selections.
		self.ed.RectangularSelectionAnchor = 1
		self.assertEqual(self.ed.RectangularSelectionAnchor, 1)
		self.ed.RectangularSelectionCaret = 10
		self.assertEqual(self.ed.RectangularSelectionCaret, 10)
		self.assertEqual(self.ed.Selections, 3)
		self.assertEqual(self.ed.MainSelection, 2)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 1)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 2)
		self.assertEqual(self.ed.GetSelectionNAnchor(1), 5)
		self.assertEqual(self.ed.GetSelectionNCaret(1), 6)
		self.assertEqual(self.ed.GetSelectionNAnchor(2), 9)
		self.assertEqual(self.ed.GetSelectionNCaret(2), 10)
	def testVirtualSpace(self):
		self.ed.SetSelection(3, 7)
		self.ed.SetSelectionNCaretVirtualSpace(0, 3)
		self.assertEqual(self.ed.GetSelectionNCaretVirtualSpace(0), 3)
		self.ed.SetSelectionNAnchorVirtualSpace(0, 2)
		self.assertEqual(self.ed.GetSelectionNAnchorVirtualSpace(0), 2)
		# Does not check that virtual space is valid by being at end of line
		self.ed.SetSelection(1, 1)
		self.ed.SetSelectionNCaretVirtualSpace(0, 3)
		self.assertEqual(self.ed.GetSelectionNCaretVirtualSpace(0), 3)
	def testRectangularVirtualSpace(self):
		self.ed.VirtualSpaceOptions=1
		self.ed.RectangularSelectionAnchor = 3
		self.assertEqual(self.ed.RectangularSelectionAnchor, 3)
		self.ed.RectangularSelectionCaret = 7
		self.assertEqual(self.ed.RectangularSelectionCaret, 7)
		self.ed.RectangularSelectionAnchorVirtualSpace = 1
		self.assertEqual(self.ed.RectangularSelectionAnchorVirtualSpace, 1)
		self.ed.RectangularSelectionCaretVirtualSpace = 10
		self.assertEqual(self.ed.RectangularSelectionCaretVirtualSpace, 10)
		self.assertEqual(self.ed.Selections, 2)
		self.assertEqual(self.ed.MainSelection, 1)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 3)
		self.assertEqual(self.ed.GetSelectionNAnchorVirtualSpace(0), 1)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 3)
		self.assertEqual(self.ed.GetSelectionNCaretVirtualSpace(0), 10)
	def testRectangularVirtualSpaceOptionOff(self):
		# Same as previous test but virtual space option off so no virtual space in result
		self.ed.VirtualSpaceOptions=0
		self.ed.RectangularSelectionAnchor = 3
		self.assertEqual(self.ed.RectangularSelectionAnchor, 3)
		self.ed.RectangularSelectionCaret = 7
		self.assertEqual(self.ed.RectangularSelectionCaret, 7)
		self.ed.RectangularSelectionAnchorVirtualSpace = 1
		self.assertEqual(self.ed.RectangularSelectionAnchorVirtualSpace, 1)
		self.ed.RectangularSelectionCaretVirtualSpace = 10
		self.assertEqual(self.ed.RectangularSelectionCaretVirtualSpace, 10)
		self.assertEqual(self.ed.Selections, 2)
		self.assertEqual(self.ed.MainSelection, 1)
		self.assertEqual(self.ed.GetSelectionNAnchor(0), 3)
		self.assertEqual(self.ed.GetSelectionNAnchorVirtualSpace(0), 0)
		self.assertEqual(self.ed.GetSelectionNCaret(0), 3)
		self.assertEqual(self.ed.GetSelectionNCaretVirtualSpace(0), 0)
class TestCharacterNavigation(unittest.TestCase):
	"""Tests for character-wise movement over multi-byte UTF-8 text.

	Replaces the deprecated unittest aliases assert_ / assertEquals with
	assertTrue / assertEqual.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.ed.SetCodePage(65001)
	def tearDown(self):
		self.ed.SetCodePage(0)
	def testBeforeAfter(self):
		t = "aåflﬔ-"
		tv = t.encode("UTF-8")
		self.ed.SetContents(tv)
		pos = 0
		for i in range(len(t)-1):
			after = self.ed.PositionAfter(pos)
			self.assertTrue(after > i)
			# Stepping back from 'after' must return to the starting byte position.
			back = self.ed.PositionBefore(after)
			self.assertEqual(pos, back)
			pos = after
	def testRelative(self):
		# \x61 \xc3\xa5 \xef\xac\x82 \xef\xac\x94 \x2d
		t = "aåflﬔ-"
		tv = t.encode("UTF-8")
		self.ed.SetContents(tv)
		self.assertEqual(self.ed.PositionRelative(1, 2), 6)
		self.assertEqual(self.ed.PositionRelative(6, -2), 1)
		pos = 0
		previous = 0
		for i in range(1, len(t)):
			after = self.ed.PositionRelative(pos, i)
			self.assertTrue(after > pos)
			self.assertTrue(after > previous)
			previous = after
		pos = len(t)
		previous = pos
		for i in range(1, len(t)-1):
			after = self.ed.PositionRelative(pos, -i)
			self.assertTrue(after < pos)
			self.assertTrue(after < previous)
			previous = after
class TestCaseMapping(unittest.TestCase):
	"""Tests for UpperCase/LowerCase across code pages, including length-changing
	UTF-8 case conversions.  Deprecated assertEquals replaced with assertEqual."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def tearDown(self):
		self.ed.SetCodePage(0)
		self.ed.StyleSetCharacterSet(self.ed.STYLE_DEFAULT, self.ed.SC_CHARSET_DEFAULT)
	def testEmpty(self):
		# Trying to upper case an empty string caused a crash at one stage
		# (no selection is made, so the conversion input is empty).
		t = b"x"
		self.ed.SetContents(t)
		self.ed.UpperCase()
		self.assertEqual(self.ed.Contents(), b"x")
	def testASCII(self):
		t = b"x"
		self.ed.SetContents(t)
		self.ed.SetSel(0,1)
		self.ed.UpperCase()
		self.assertEqual(self.ed.Contents(), b"X")
	def testLatin1(self):
		t = "å".encode("Latin-1")
		r = "Å".encode("Latin-1")
		self.ed.SetContents(t)
		self.ed.SetSel(0,1)
		self.ed.UpperCase()
		self.assertEqual(self.ed.Contents(), r)
	def testRussian(self):
		if sys.platform == "win32":
			self.ed.StyleSetCharacterSet(self.ed.STYLE_DEFAULT, self.ed.SC_CHARSET_RUSSIAN)
		else:
			self.ed.StyleSetCharacterSet(self.ed.STYLE_DEFAULT, self.ed.SC_CHARSET_CYRILLIC)
		t = "Б".encode("Windows-1251")
		r = "б".encode("Windows-1251")
		self.ed.SetContents(t)
		self.ed.SetSel(0,1)
		self.ed.LowerCase()
		self.assertEqual(self.ed.Contents(), r)
	def testUTF(self):
		self.ed.SetCodePage(65001)
		t = "å".encode("UTF-8")
		r = "Å".encode("UTF-8")
		self.ed.SetContents(t)
		self.ed.SetSel(0,2)
		self.ed.UpperCase()
		self.assertEqual(self.ed.Contents(), r)
	def testUTFDifferentLength(self):
		self.ed.SetCodePage(65001)
		# Dotless i (2 bytes) upper cases to plain I (1 byte).
		t = "ı".encode("UTF-8")
		r = "I".encode("UTF-8")
		self.ed.SetContents(t)
		self.assertEqual(self.ed.Length, 2)
		self.ed.SetSel(0,2)
		self.ed.UpperCase()
		self.assertEqual(self.ed.Length, 1)
		self.assertEqual(self.ed.Contents(), r)
	def testUTFGrows(self):
		# This crashed at one point in debug builds due to looking past end of shorter string
		self.ed.SetCodePage(65001)
		# ﬖ is a single character ligature taking 3 bytes in UTF8: EF AC 96
		t = 'ﬖﬖ'.encode("UTF-8")
		self.ed.SetContents(t)
		self.assertEqual(self.ed.Length, 6)
		self.ed.SetSel(0,self.ed.Length)
		self.ed.UpperCase()
		# To convert to upper case the ligature is separated into վ and ն then uppercased to Վ and Ն
		# each of which takes 2 bytes in UTF-8: D5 8E D5 86
		r = 'ՎՆՎՆ'.encode("UTF-8")
		self.assertEqual(self.ed.Length, 8)
		self.assertEqual(self.ed.Contents(), r)
		self.assertEqual(self.ed.SelectionEnd, self.ed.Length)
	def testUTFShrinks(self):
		self.ed.SetCodePage(65001)
		# fi is a single character ligature taking 3 bytes in UTF8: EF AC 81
		t = 'fifi'.encode("UTF-8")
		self.ed.SetContents(t)
		self.assertEqual(self.ed.Length, 6)
		self.ed.SetSel(0,self.ed.Length)
		self.ed.UpperCase()
		# To convert to upper case the ligature is separated into f and i then uppercased to F and I
		# each of which takes 1 byte in UTF-8: 46 49
		r = 'FIFI'.encode("UTF-8")
		self.assertEqual(self.ed.Length, 4)
		self.assertEqual(self.ed.Contents(), r)
		self.assertEqual(self.ed.SelectionEnd, self.ed.Length)
class TestCaseInsensitiveSearch(unittest.TestCase):
	"""Tests for SearchInTarget with SearchFlags == 0 (case-insensitive match)
	across code pages.  Deprecated assertEquals replaced with assertEqual."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def tearDown(self):
		self.ed.SetCodePage(0)
		self.ed.StyleSetCharacterSet(self.ed.STYLE_DEFAULT, self.ed.SC_CHARSET_DEFAULT)
	def testEmpty(self):
		# An empty search string matches at the start of the target.
		text = b" x X"
		searchString = b""
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(0, pos)
	def testASCII(self):
		text = b" x X"
		searchString = b"X"
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(1, pos)
	def testLatin1(self):
		text = "Frånd Åå".encode("Latin-1")
		searchString = "Å".encode("Latin-1")
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(2, pos)
	def testRussian(self):
		self.ed.StyleSetCharacterSet(self.ed.STYLE_DEFAULT, self.ed.SC_CHARSET_RUSSIAN)
		text = "=(Б tex б)".encode("Windows-1251")
		searchString = "б".encode("Windows-1251")
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(2, pos)
	def testUTF(self):
		self.ed.SetCodePage(65001)
		text = "Frånd Åå".encode("UTF-8")
		searchString = "Å".encode("UTF-8")
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(2, pos)
	def testUTFDifferentLength(self):
		# Searching for a two byte string finds a single byte
		self.ed.SetCodePage(65001)
		# two byte string "ſ" single byte "s"
		text = "Frånds Ååſ $".encode("UTF-8")
		searchString = "ſ".encode("UTF-8")
		firstPosition = len("Frånd".encode("UTF-8"))
		self.assertEqual(len(searchString), 2)
		self.ed.SetContents(text)
		self.ed.TargetStart = 0
		self.ed.TargetEnd = self.ed.Length-1
		self.ed.SearchFlags = 0
		pos = self.ed.SearchInTarget(len(searchString), searchString)
		self.assertEqual(firstPosition, pos)
		self.assertEqual(firstPosition+1, self.ed.TargetEnd)
class TestLexer(unittest.TestCase):
	"""Tests for lexer selection and lexer property/keyword introspection.

	Deprecated assertEquals/assertNotEquals replaced with
	assertEqual/assertNotEqual.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def testLexerNumber(self):
		self.ed.Lexer = self.ed.SCLEX_CPP
		self.assertEqual(self.ed.GetLexer(), self.ed.SCLEX_CPP)
	def testLexerName(self):
		self.ed.LexerLanguage = b"cpp"
		self.assertEqual(self.ed.GetLexer(), self.ed.SCLEX_CPP)
		name = self.ed.GetLexerLanguage(0)
		self.assertEqual(name, b"cpp")
	def testPropertyNames(self):
		propertyNames = self.ed.PropertyNames()
		self.assertNotEqual(propertyNames, b"")
		# The cpp lexer has a boolean property named lexer.cpp.allow.dollars
		propNameDollars = b"lexer.cpp.allow.dollars"
		propertyType = self.ed.PropertyType(propNameDollars)
		self.assertEqual(propertyType, self.ed.SC_TYPE_BOOLEAN)
		propertyDescription = self.ed.DescribeProperty(propNameDollars)
		self.assertNotEqual(propertyDescription, b"")
	def testWordListDescriptions(self):
		wordSet = self.ed.DescribeKeyWordSets()
		self.assertNotEqual(wordSet, b"")
class TestSubStyles(unittest.TestCase):
	''' These tests include knowledge of the current implementation in the cpp lexer
	and may have to change when that implementation changes.
	Currently supports subStyles for IDENTIFIER 11 and COMMENTDOCKEYWORD 17.
	Deprecated assertEquals replaced with assertEqual. '''
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def testInfo(self):
		self.ed.Lexer = self.ed.SCLEX_CPP
		bases = self.ed.GetSubStyleBases()
		self.assertEqual(bases, b"\x0b\x11") # 11, 17
		self.assertEqual(self.ed.DistanceToSecondaryStyles(), 0x40)
	def testAllocate(self):
		firstSubStyle = 0x80 # Current implementation
		self.ed.Lexer = self.ed.SCLEX_CPP
		# Before allocation a substyle number maps to itself.
		self.assertEqual(self.ed.GetStyleFromSubStyle(firstSubStyle), firstSubStyle)
		self.assertEqual(self.ed.GetSubStylesStart(self.ed.SCE_C_IDENTIFIER), 0)
		self.assertEqual(self.ed.GetSubStylesLength(self.ed.SCE_C_IDENTIFIER), 0)
		numSubStyles = 5
		subs = self.ed.AllocateSubStyles(self.ed.SCE_C_IDENTIFIER, numSubStyles)
		self.assertEqual(subs, firstSubStyle)
		self.assertEqual(self.ed.GetSubStylesStart(self.ed.SCE_C_IDENTIFIER), firstSubStyle)
		self.assertEqual(self.ed.GetSubStylesLength(self.ed.SCE_C_IDENTIFIER), numSubStyles)
		self.assertEqual(self.ed.GetStyleFromSubStyle(subs), self.ed.SCE_C_IDENTIFIER)
		self.assertEqual(self.ed.GetStyleFromSubStyle(subs+numSubStyles-1), self.ed.SCE_C_IDENTIFIER)
		self.assertEqual(self.ed.GetStyleFromSubStyle(self.ed.SCE_C_IDENTIFIER), self.ed.SCE_C_IDENTIFIER)
		# Now free and check same as start
		self.ed.FreeSubStyles()
		self.assertEqual(self.ed.GetStyleFromSubStyle(subs), subs)
		self.assertEqual(self.ed.GetSubStylesStart(self.ed.SCE_C_IDENTIFIER), 0)
		self.assertEqual(self.ed.GetSubStylesLength(self.ed.SCE_C_IDENTIFIER), 0)
	def testInactive(self):
		firstSubStyle = 0x80 # Current implementation
		inactiveDistance = self.ed.DistanceToSecondaryStyles()
		self.ed.Lexer = self.ed.SCLEX_CPP
		numSubStyles = 5
		subs = self.ed.AllocateSubStyles(self.ed.SCE_C_IDENTIFIER, numSubStyles)
		self.assertEqual(subs, firstSubStyle)
		self.assertEqual(self.ed.GetStyleFromSubStyle(subs), self.ed.SCE_C_IDENTIFIER)
		self.assertEqual(self.ed.GetStyleFromSubStyle(subs+inactiveDistance), self.ed.SCE_C_IDENTIFIER+inactiveDistance)
		self.ed.FreeSubStyles()
	def testSecondary(self):
		inactiveDistance = self.ed.DistanceToSecondaryStyles()
		self.assertEqual(self.ed.GetPrimaryStyleFromStyle(self.ed.SCE_C_IDENTIFIER+inactiveDistance), self.ed.SCE_C_IDENTIFIER)
class TestAutoComplete(unittest.TestCase):
	"""Tests for the autocompletion list: defaults, configuration and selection.

	Deprecated assertEquals replaced with assertEqual.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		# 1 line of 3 characters
		t = b"xxx\n"
		self.ed.AddText(len(t), t)
	def testDefaults(self):
		self.assertEqual(self.ed.AutoCGetSeparator(), ord(' '))
		self.assertEqual(self.ed.AutoCGetMaxHeight(), 5)
		self.assertEqual(self.ed.AutoCGetMaxWidth(), 0)
		self.assertEqual(self.ed.AutoCGetTypeSeparator(), ord('?'))
		self.assertEqual(self.ed.AutoCGetIgnoreCase(), 0)
		self.assertEqual(self.ed.AutoCGetAutoHide(), 1)
		self.assertEqual(self.ed.AutoCGetDropRestOfWord(), 0)
	def testChangeDefaults(self):
		# Each setting is changed, verified, then restored to its default.
		self.ed.AutoCSetSeparator(ord('-'))
		self.assertEqual(self.ed.AutoCGetSeparator(), ord('-'))
		self.ed.AutoCSetSeparator(ord(' '))
		self.ed.AutoCSetMaxHeight(100)
		self.assertEqual(self.ed.AutoCGetMaxHeight(), 100)
		self.ed.AutoCSetMaxHeight(5)
		self.ed.AutoCSetMaxWidth(100)
		self.assertEqual(self.ed.AutoCGetMaxWidth(), 100)
		self.ed.AutoCSetMaxWidth(0)
		self.ed.AutoCSetTypeSeparator(ord('@'))
		self.assertEqual(self.ed.AutoCGetTypeSeparator(), ord('@'))
		self.ed.AutoCSetTypeSeparator(ord('?'))
		self.ed.AutoCSetIgnoreCase(1)
		self.assertEqual(self.ed.AutoCGetIgnoreCase(), 1)
		self.ed.AutoCSetIgnoreCase(0)
		self.ed.AutoCSetAutoHide(0)
		self.assertEqual(self.ed.AutoCGetAutoHide(), 0)
		self.ed.AutoCSetAutoHide(1)
		self.ed.AutoCSetDropRestOfWord(1)
		self.assertEqual(self.ed.AutoCGetDropRestOfWord(), 1)
		self.ed.AutoCSetDropRestOfWord(0)
	def testAutoShow(self):
		self.assertEqual(self.ed.AutoCActive(), 0)
		self.ed.SetSel(0, 0)
		self.ed.AutoCShow(0, b"za defn ghi")
		self.assertEqual(self.ed.AutoCActive(), 1)
		#~ time.sleep(2)
		self.assertEqual(self.ed.AutoCPosStart(), 0)
		self.assertEqual(self.ed.AutoCGetCurrent(), 0)
		t = self.ed.AutoCGetCurrentText(5)
		#~ self.assertEqual(l, 3)
		self.assertEqual(t, b"za")
		self.ed.AutoCCancel()
		self.assertEqual(self.ed.AutoCActive(), 0)
	def testAutoShowComplete(self):
		self.assertEqual(self.ed.AutoCActive(), 0)
		self.ed.SetSel(0, 0)
		self.ed.AutoCShow(0, b"za defn ghi")
		self.ed.AutoCComplete()
		self.assertEqual(self.ed.Contents(), b"zaxxx\n")
		self.assertEqual(self.ed.AutoCActive(), 0)
	def testAutoShowSelect(self):
		self.assertEqual(self.ed.AutoCActive(), 0)
		self.ed.SetSel(0, 0)
		self.ed.AutoCShow(0, b"za defn ghi")
		self.ed.AutoCSelect(0, b"d")
		self.ed.AutoCComplete()
		self.assertEqual(self.ed.Contents(), b"defnxxx\n")
		self.assertEqual(self.ed.AutoCActive(), 0)
class TestDirectAccess(unittest.TestCase):
	"""Tests for direct buffer access: gap position and raw character pointers.

	Deprecated assertEquals replaced with assertEqual.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def testGapPosition(self):
		text = b"abcd"
		self.ed.SetContents(text)
		self.assertEqual(self.ed.GapPosition, 4)
		self.ed.TargetStart = 1
		self.ed.TargetEnd = 1
		rep = b"-"
		self.ed.ReplaceTarget(len(rep), rep)
		self.assertEqual(self.ed.GapPosition, 2)
	def testCharacterPointerAndRangePointer(self):
		text = b"abcd"
		self.ed.SetContents(text)
		characterPointer = self.ed.CharacterPointer
		rangePointer = self.ed.GetRangePointer(0,3)
		self.assertEqual(characterPointer, rangePointer)
		cpBuffer = ctypes.c_char_p(characterPointer)
		self.assertEqual(cpBuffer.value, text)
		# Gap will not be moved as already moved for CharacterPointer call
		rangePointer = self.ed.GetRangePointer(1,3)
		cpBuffer = ctypes.c_char_p(rangePointer)
		self.assertEqual(cpBuffer.value, text[1:])
class TestWordChars(unittest.TestCase):
	"""Tests for the word/whitespace/punctuation character class tables.

	Changes from the original: the deprecated assertEquals alias is replaced
	with assertEqual and an unused 'import string' was removed from
	testDefaultWhitespaceChars.
	"""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def tearDown(self):
		self.ed.SetCharsDefault()
	def _setChars(self, charClass, chars):
		""" Wrapper to call self.ed.Set*Chars with the right type
		@param charClass {str} the character class, "word", "space", etc.
		@param chars {iterable of int} characters to set
		"""
		if sys.version_info.major == 2:
			# Python 2, use latin-1 encoded str
			unichars = (unichr(x) for x in chars if x != 0)
			# can't use literal u"", that's a syntax error in Py3k
			# uncode() doesn't exist in Py3k, but we never run it there
			result = unicode("").join(unichars).encode("latin-1")
		else:
			# Python 3, use bytes()
			result = bytes(x for x in chars if x != 0)
		meth = getattr(self.ed, "Set%sChars" % (charClass.capitalize()))
		return meth(None, result)
	def assertCharSetsEqual(self, first, second, *args, **kwargs):
		""" Assert that the two character sets are equal.
		If either set are an iterable of numbers, convert them to chars
		first. """
		first_set = set()
		for c in first:
			first_set.add(chr(c) if isinstance(c, int) else c)
		second_set = set()
		for c in second:
			second_set.add(chr(c) if isinstance(c, int) else c)
		return self.assertEqual(first_set, second_set, *args, **kwargs)
	def testDefaultWordChars(self):
		# check that the default word chars are as expected
		import string
		data = self.ed.GetWordChars(None)
		expected = set(string.digits + string.ascii_letters + '_') | \
			set(chr(x) for x in range(0x80, 0x100))
		self.assertCharSetsEqual(data, expected)
	def testDefaultWhitespaceChars(self):
		# check that the default whitespace chars are as expected
		data = self.ed.GetWhitespaceChars(None)
		expected = (set(chr(x) for x in (range(0, 0x20))) | set(' ')) - \
			set(['\r', '\n'])
		self.assertCharSetsEqual(data, expected)
	def testDefaultPunctuationChars(self):
		# check that the default punctuation chars are as expected
		import string
		data = self.ed.GetPunctuationChars(None)
		expected = set(chr(x) for x in range(0x20, 0x80)) - \
			set(string.ascii_letters + string.digits + "\r\n_ ")
		self.assertCharSetsEqual(data, expected)
	def testCustomWordChars(self):
		# check that setting things to whitespace chars makes them not words
		self._setChars("whitespace", range(1, 0x100))
		data = self.ed.GetWordChars(None)
		expected = set()
		self.assertCharSetsEqual(data, expected)
		# and now set something to make sure that works too
		expected = set(range(1, 0x100, 2))
		self._setChars("word", expected)
		data = self.ed.GetWordChars(None)
		self.assertCharSetsEqual(data, expected)
	def testCustomWhitespaceChars(self):
		# check setting whitespace chars to non-default values
		self._setChars("word", range(1, 0x100))
		# we can't change chr(0) from being anything but whitespace
		expected = set([0])
		data = self.ed.GetWhitespaceChars(None)
		self.assertCharSetsEqual(data, expected)
		# now try to set it to something custom
		expected = set(range(1, 0x100, 2)) | set([0])
		self._setChars("whitespace", expected)
		data = self.ed.GetWhitespaceChars(None)
		self.assertCharSetsEqual(data, expected)
	def testCustomPunctuationChars(self):
		# check setting punctuation chars to non-default values
		self._setChars("word", range(1, 0x100))
		expected = set()
		data = self.ed.GetPunctuationChars(0)
		self.assertEqual(set(data), expected)
		# now try to set it to something custom
		expected = set(range(1, 0x100, 1))
		self._setChars("punctuation", expected)
		data = self.ed.GetPunctuationChars(None)
		self.assertCharSetsEqual(data, expected)
if __name__ == '__main__':
	# Run the suite through the Xite harness; it returns per-message usage data.
	uu = Xite.main("simpleTests")
	#~ for x in sorted(uu.keys()):
	#~ print(x, uu[x])
	#~ print()
| Python |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import unicode_literals
import os, sys, unittest
import ctypes
from ctypes import wintypes
from ctypes import c_int, c_ulong, c_char_p, c_wchar_p, c_ushort, c_uint, c_long
from ctypes.wintypes import HWND, WPARAM, LPARAM, HANDLE, HBRUSH, LPCWSTR
user32=ctypes.windll.user32
gdi32=ctypes.windll.gdi32
kernel32=ctypes.windll.kernel32
from MessageNumbers import msgs, sgsm
import ScintillaCallable
import XiteMenu
scintillaDirectory = ".."
scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include")
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
scintillaBinDirectory = os.path.join(scintillaDirectory, "bin")
os.environ['PATH'] = os.environ['PATH'] + ";" + scintillaBinDirectory
#print(os.environ['PATH'])
WFUNC = ctypes.WINFUNCTYPE(c_int, HWND, c_uint, WPARAM, LPARAM)
WS_CHILD = 0x40000000
WS_CLIPCHILDREN = 0x2000000
WS_OVERLAPPEDWINDOW = 0xcf0000
WS_VISIBLE = 0x10000000
WS_HSCROLL = 0x100000
WS_VSCROLL = 0x200000
WA_INACTIVE = 0
MF_POPUP = 16
MF_SEPARATOR = 0x800
IDYES = 6
OFN_HIDEREADONLY = 4
MB_OK = 0
MB_YESNOCANCEL = 3
MF_CHECKED = 8
MF_UNCHECKED = 0
SW_SHOW = 5
PM_REMOVE = 1
VK_SHIFT = 16
VK_CONTROL = 17
VK_MENU = 18
class OPENFILENAME(ctypes.Structure):
	"""ctypes mirror of the Win32 OPENFILENAME structure used with
	GetOpenFileNameW / GetSaveFileNameW.  Field order and types must match
	the C layout exactly."""
	_fields_ = (("lStructSize", c_int),
		("hwndOwner", c_int),
		("hInstance", c_int),
		("lpstrFilter", c_wchar_p),
		("lpstrCustomFilter", c_char_p),
		("nMaxCustFilter", c_int),
		("nFilterIndex", c_int),
		("lpstrFile", c_wchar_p),
		("nMaxFile", c_int),
		("lpstrFileTitle", c_wchar_p),
		("nMaxFileTitle", c_int),
		("lpstrInitialDir", c_wchar_p),
		("lpstrTitle", c_wchar_p),
		("flags", c_int),
		("nFileOffset", c_ushort),
		("nFileExtension", c_ushort),
		("lpstrDefExt", c_char_p),
		("lCustData", c_int),
		("lpfnHook", c_char_p),
		("lpTemplateName", c_char_p),
		("pvReserved", c_char_p),
		("dwReserved", c_int),
		("flagsEx", c_int))
	def __init__(self, win, title):
		"""Initialise size, owner window, dialog title and default flags."""
		ctypes.Structure.__init__(self)
		self.lStructSize = ctypes.sizeof(OPENFILENAME)
		self.nMaxFile = 1024
		self.hwndOwner = win
		self.lpstrTitle = title
		# Bug fix: the structure field is named "flags" (lower case); the old
		# code assigned "self.Flags", which creates an ordinary Python
		# attribute, so OFN_HIDEREADONLY never reached the Win32 API.
		self.flags = OFN_HIDEREADONLY
trace = False	# module-wide debug switch: when True, messages and keys are printed
#~ trace = True
def WindowSize(w):
	"""Return (width, height) of window w's client area."""
	rect = ctypes.wintypes.RECT()
	user32.GetClientRect(w, ctypes.byref(rect))
	width = rect.right - rect.left
	height = rect.bottom - rect.top
	return width, height
def IsKeyDown(key):
	"""Return True when the given virtual key is currently held down."""
	state = user32.GetKeyState(key)
	return bool(state & 0x8000)
def KeyTranslate(w):
	"""Return a readable name for virtual-key code w (Tab/Enter/Esc, letters, F-keys)."""
	named = { 9: "Tab", 0xD: "Enter", 0x1B: "Esc" }
	name = named.get(w)
	if name is not None:
		return name
	if ord("A") <= w <= ord("Z"):
		return chr(w)
	if 0x70 <= w <= 0x7b:
		return "F" + str(w - 0x70 + 1)
	return "Unknown_" + hex(w)
class WNDCLASS(ctypes.Structure):
	"""ctypes mirror of the Win32 WNDCLASS structure passed to RegisterClassW.

	Field order and types must match the C layout exactly; do not reorder.
	"""
	_fields_= (\
		('style', c_int),
		('lpfnWndProc', WFUNC),
		('cls_extra', c_int),
		('wnd_extra', c_int),
		('hInst', HANDLE),
		('hIcon', HANDLE),
		('hCursor', HANDLE),
		('hbrBackground', HBRUSH),
		('menu_name', LPCWSTR),
		('lpzClassName', LPCWSTR),
	)
hinst = ctypes.windll.kernel32.GetModuleHandleW(0)	# HINSTANCE of this process, used for window creation
def RegisterClass(name, func, background = 0):
	"""Register a top-level window class called 'name' whose window procedure is 'func'."""
	wndClass = WNDCLASS()
	wndClass.style = 0
	wndClass.lpfnWndProc = func
	wndClass.cls_extra = 0
	wndClass.wnd_extra = 0
	wndClass.hInst = hinst
	wndClass.hIcon = 0
	wndClass.hCursor = 0
	wndClass.hbrBackground = background
	wndClass.menu_name = None
	wndClass.lpzClassName = name
	user32.RegisterClassW(ctypes.byref(wndClass))
class XiteWin():
	def __init__(self, test=""):
		"""Create the main window and its menus; when 'test' is set, queue a test run."""
		self.face = Face.Face()
		self.face.ReadFromFile(os.path.join(scintillaIncludeDirectory, "Scintilla.iface"))
		self.titleDirty = True
		self.fullPath = ""
		self.test = test
		self.appName = "xite"
		self.cmds = {}
		self.windowName = "XiteWindow"
		# Keep a reference to the callback so it is not garbage collected while
		# Windows still holds the function pointer.
		self.wfunc = WFUNC(self.WndProc)
		RegisterClass(self.windowName, self.wfunc)
		user32.CreateWindowExW(0, self.windowName, self.appName, \
			WS_VISIBLE | WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN, \
			0, 0, 500, 700, 0, 0, hinst, 0)
		args = sys.argv[1:]
		self.SetMenus()
		if args:
			self.GrabFile(args[0])
			self.FocusOnEditor()
			self.ed.GotoPos(self.ed.Length)
		if self.test:
			print(self.test)
			# Post the Test command so the tests run once the message loop starts.
			for k in self.cmds:
				if self.cmds[k] == "Test":
					user32.PostMessageW(self.win, msgs["WM_COMMAND"], k, 0)
	def FocusOnEditor(self):
		"""Give keyboard focus to the Scintilla child window."""
		user32.SetFocus(self.sciHwnd)
	def OnSize(self):
		"""Resize the editor to fill the client area and force a repaint."""
		width, height = WindowSize(self.win)
		user32.SetWindowPos(self.sciHwnd, 0, 0, 0, width, height, 0)
		user32.InvalidateRect(self.win, 0, 0)
	def OnCreate(self, hwnd):
		"""WM_CREATE handler: create the Scintilla child window and set up direct access."""
		self.win = hwnd
		# Side effect: loads the DLL
		x = ctypes.windll.SciLexer.Scintilla_DirectFunction
		self.sciHwnd = user32.CreateWindowExW(0,
			"Scintilla", "Source",
			WS_CHILD | WS_VSCROLL | WS_HSCROLL | WS_CLIPCHILDREN,
			0, 0, 100, 100, self.win, 0, hinst, 0)
		user32.ShowWindow(self.sciHwnd, SW_SHOW)
		user32.SendMessageW.restype = WPARAM
		# Retrieve Scintilla's direct-call function and pointer so the editor can
		# be driven without the Windows message queue.
		scifn = user32.SendMessageW(self.sciHwnd,
			int(self.face.features["GetDirectFunction"]["Value"], 0), 0,0)
		sciptr = c_char_p(user32.SendMessageW(self.sciHwnd,
			int(self.face.features["GetDirectPointer"]["Value"], 0), 0,0))
		self.ed = ScintillaCallable.ScintillaCallable(self.face, scifn, sciptr)
		self.FocusOnEditor()
	def Invalidate(self):
		"""Request a repaint of the whole main window."""
		user32.InvalidateRect(self.win, 0, 0)
	def WndProc(self, h, m, w, l):
		"""Main window procedure: handles create, size, command, activate and close."""
		user32.DefWindowProcW.argtypes = [HWND, c_uint, WPARAM, LPARAM]
		ms = sgsm.get(m, "XXX")
		if trace:
			print("%s %s %s %s" % (hex(h)[2:],ms,w,l))
		if ms == "WM_CLOSE":
			user32.PostQuitMessage(0)
		elif ms == "WM_CREATE":
			self.OnCreate(h)
			return 0
		elif ms == "WM_SIZE":
			# Work out size
			# NOTE(review): w == 1 appears to be SIZE_MINIMIZED, in which case
			# resizing is skipped — confirm against the Win32 WM_SIZE docs.
			if w != 1:
				self.OnSize()
			return 0
		elif ms == "WM_COMMAND":
			cmdCode = w & 0xffff
			if cmdCode in self.cmds:
				self.Command(self.cmds[cmdCode])
			return 0
		elif ms == "WM_ACTIVATE":
			if w != WA_INACTIVE:
				self.FocusOnEditor()
			return 0
		else:
			return user32.DefWindowProcW(h, m, w, l)
		return 0
def Command(self, name):
name = name.replace(" ", "")
method = "Cmd" + name
cmd = None
try:
cmd = getattr(self, method)
except AttributeError:
return
if cmd:
cmd()
	def KeyDown(self, w, prefix = ""):
		"""Translate a key-down into a binding name and run the bound Cmd method.

		Returns True when the key was handled, False otherwise.
		"""
		keyName = prefix
		if IsKeyDown(VK_CONTROL):
			keyName += "<control>"
		if IsKeyDown(VK_SHIFT):
			keyName += "<shift>"
		keyName += KeyTranslate(w)
		if trace:
			print("Key:", keyName)
		if keyName in self.keys:
			method = "Cmd" + self.keys[keyName]
			getattr(self, method)()
			return True
		#~ print("UKey:", keyName)
		return False
	def Accelerator(self, msg):
		"""Handle keyboard accelerators before normal dispatch; True if consumed."""
		ms = sgsm.get(msg.message, "XXX")
		if ms == "WM_KEYDOWN":
			return self.KeyDown(msg.wParam)
		elif ms == "WM_SYSKEYDOWN":
			# System key messages carry the Alt modifier.
			return self.KeyDown(msg.wParam, "<alt>")
		return False
	def AppLoop(self):
		"""Run the blocking Windows message loop until WM_QUIT."""
		msg = ctypes.wintypes.MSG()
		lpmsg = ctypes.byref(msg)
		while user32.GetMessageW(lpmsg, 0, 0, 0):
			if trace and msg.message != msgs["WM_TIMER"]:
				print('mm', hex(msg.hWnd)[2:],sgsm.get(msg.message, "XXX"))
			if not self.Accelerator(msg):
				user32.TranslateMessage(lpmsg)
				user32.DispatchMessageW(lpmsg)
	def DoEvents(self):
		"""Pump all currently pending messages without blocking."""
		msg = ctypes.wintypes.MSG()
		lpmsg = ctypes.byref(msg)
		cont = True
		while cont:
			cont = user32.PeekMessageW(lpmsg, 0, 0, 0, PM_REMOVE)
			if cont:
				if not self.Accelerator(msg):
					user32.TranslateMessage(lpmsg)
					user32.DispatchMessageW(lpmsg)
	def SetTitle(self, changePath):
		"""Refresh the caption: " * " marks a modified buffer, " - " a saved one.

		Only rewrites the title when the path changed or the modified state flipped.
		"""
		if changePath or self.titleDirty != self.ed.Modify:
			self.titleDirty = self.ed.Modify
			self.title = self.fullPath
			if self.titleDirty:
				self.title += " * "
			else:
				self.title += " - "
			self.title += self.appName
			if self.win:
				user32.SetWindowTextW(self.win, self.title)
	def Open(self):
		"""Show the Open dialog, load the chosen file and apply Python demo styling."""
		ofx = OPENFILENAME(self.win, "Open File")
		# Preallocate the buffer GetOpenFileNameW writes the chosen path into.
		opath = "\0" * 1024
		ofx.lpstrFile = opath
		filters = ["Python (.py;.pyw)|*.py;*.pyw|All|*.*"]
		filterText = "\0".join([f.replace("|", "\0") for f in filters])+"\0\0"
		ofx.lpstrFilter = filterText
		if ctypes.windll.comdlg32.GetOpenFileNameW(ctypes.byref(ofx)):
			absPath = opath.replace("\0", "")
			self.GrabFile(absPath)
			self.FocusOnEditor()
			# Demonstration styling: Python lexer with per-style fonts and colours.
			self.ed.LexerLanguage = "python"
			self.ed.Lexer = self.ed.SCLEX_PYTHON
			self.ed.SetKeyWords(0, b"class def else for from if import print return while")
			for style in [k for k in self.ed.k if k.startswith("SCE_P_")]:
				self.ed.StyleSetFont(self.ed.k[style], b"Verdana")
				if "COMMENT" in style:
					self.ed.StyleSetFore(self.ed.k[style], 127 * 256)
					self.ed.StyleSetFont(self.ed.k[style], b"Comic Sans MS")
				elif "OPERATOR" in style:
					self.ed.StyleSetBold(self.ed.k[style], 1)
					self.ed.StyleSetFore(self.ed.k[style], 127 * 256 * 256)
				elif "WORD" in style:
					self.ed.StyleSetItalic(self.ed.k[style], 255)
					self.ed.StyleSetFore(self.ed.k[style], 255 * 256 * 256)
				elif "TRIPLE" in style:
					self.ed.StyleSetFore(self.ed.k[style], 0xA0A0)
				elif "STRING" in style or "CHARACTER" in style:
					self.ed.StyleSetFore(self.ed.k[style], 0xA000A0)
				else:
					self.ed.StyleSetFore(self.ed.k[style], 0)
	def SaveAs(self):
		"""Show the Save dialog, then save to the chosen path and refresh the title."""
		ofx = OPENFILENAME(self.win, "Save File")
		opath = "\0" * 1024
		ofx.lpstrFile = opath
		if ctypes.windll.comdlg32.GetSaveFileNameW(ctypes.byref(ofx)):
			self.fullPath = opath.replace("\0", "")
			self.Save()
			self.SetTitle(1)
			self.FocusOnEditor()
	def SetMenus(self):
		"""Build the menu bar from XiteMenu.MenuStructure, mapping ids to Cmd names and keys."""
		ui = XiteMenu.MenuStructure
		self.cmds = {}
		self.keys = {}
		cmdId = 0
		self.menuBar = user32.CreateMenu()
		for name, contents in ui:
			cmdId += 1
			menu = user32.CreateMenu()
			for item in contents:
				text, key = item
				# Strip menu decorations to derive the Cmd* handler name.
				cmdText = text.replace("&", "")
				cmdText = cmdText.replace("...", "")
				cmdText = cmdText.replace(" ", "")
				cmdId += 1
				if key:
					keyText = key.replace("<control>", "Ctrl+")
					keyText = keyText.replace("<shift>", "Shift+")
					text += "\t" + keyText
				if text == "-":
					user32.AppendMenuW(menu, MF_SEPARATOR, cmdId, text)
				else:
					user32.AppendMenuW(menu, 0, cmdId, text)
				self.cmds[cmdId] = cmdText
				self.keys[key] = cmdText
				#~ print(cmdId, item)
			user32.AppendMenuW(self.menuBar, MF_POPUP, menu, name)
		user32.SetMenu(self.win, self.menuBar)
		self.CheckMenuItem("Wrap", True)
		user32.ShowWindow(self.win, SW_SHOW)
	def CheckMenuItem(self, name, val):
		"""Check (val truthy) or uncheck the menu entry whose command name is 'name'."""
		#~ print(name, val)
		if self.cmds:
			for k,v in self.cmds.items():
				if v == name:
					#~ print(name, k)
					user32.CheckMenuItem(user32.GetMenu(self.win), \
						k, [MF_UNCHECKED, MF_CHECKED][val])
	def Exit(self):
		"""Terminate the application."""
		sys.exit(0)
	def DisplayMessage(self, msg, ask):
		"""Show a message box; with 'ask' truthy offer Yes/No/Cancel and return True on Yes."""
		return IDYES == user32.MessageBoxW(self.win, \
			msg, self.appName, [MB_OK, MB_YESNOCANCEL][ask])
	def NewDocument(self):
		"""Empty the buffer, clear undo history and mark it unmodified."""
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
		self.ed.SetSavePoint()
def SaveIfUnsure(self):
if self.ed.Modify:
msg = "Save changes to \"" + self.fullPath + "\"?"
print(msg)
decision = self.DisplayMessage(msg, True)
if decision:
self.CmdSave()
return decision
return True
	def New(self):
		# Start a fresh untitled document, offering to save pending changes.
		if self.SaveIfUnsure():
			self.fullPath = ""
			self.overrideMode = None
			self.NewDocument()
			self.SetTitle(1)
		self.Invalidate()
	def CheckMenus(self):
		# Hook for refreshing menu check states; nothing to do in this shell.
		pass
	def MoveSelection(self, caret, anchor=-1):
		# Move the caret (and optionally the anchor) then scroll it into view.
		# anchor=-1 collapses the selection to the caret position.
		if anchor == -1:
			anchor = caret
		self.ed.SetSelectionStart(caret)
		self.ed.SetSelectionEnd(anchor)
		self.ed.ScrollCaret()
		self.Invalidate()
def GrabFile(self, name):
self.fullPath = name
self.overrideMode = None
self.NewDocument()
fsr = open(name, "rb")
data = fsr.read()
fsr.close()
self.ed.AddText(len(data), data)
self.ed.EmptyUndoBuffer()
self.MoveSelection(0)
self.SetTitle(1)
def Save(self):
fos = open(self.fullPath, "wb")
blockSize = 1024
length = self.ed.Length
i = 0
while i < length:
grabSize = length - i
if grabSize > blockSize:
grabSize = blockSize
#~ print(i, grabSize, length)
data = self.ed.ByteRange(i, i + grabSize)
fos.write(data)
i += grabSize
fos.close()
self.ed.SetSavePoint()
self.SetTitle(0)
# Command handlers are called by menu actions
def CmdNew(self):
self.New()
def CmdOpen(self):
self.Open()
def CmdSave(self):
if (self.fullPath == None) or (len(self.fullPath) == 0):
self.SaveAs()
else:
self.Save()
def CmdSaveAs(self):
self.SaveAs()
	def CmdTest(self):
		# Run the unit-test suite named by self.test (or the default
		# "simpleTests") against this live editor instance.
		runner = unittest.TextTestRunner()
		if self.test:
			tests = unittest.defaultTestLoader.loadTestsFromName(self.test)
		else:
			tests = unittest.defaultTestLoader.loadTestsFromName("simpleTests")
		results = runner.run(tests)
		#~ print(results)
		if self.test:
			# Scripted runs quit the message loop once the suite finishes.
			user32.PostQuitMessage(0)
def CmdExercised(self):
print()
unused = sorted(self.ed.all.difference(self.ed.used))
print("Unused", len(unused))
print()
print("\n".join(unused))
print()
print("Used", len(self.ed.used))
print()
print("\n".join(sorted(self.ed.used)))
def Uncalled(self):
print("")
unused = sorted(self.ed.all.difference(self.ed.used))
uu = {}
for u in unused:
v = self.ed.getvalue(u)
if v > 2000:
uu[v] = u
#~ for x in sorted(uu.keys())[150:]:
return uu
	def CmdExit(self):
		# Menu: File > Exit.
		self.Exit()
	# Edit-menu handlers: thin delegations to the Scintilla editor.
	def CmdUndo(self):
		self.ed.Undo()
	def CmdRedo(self):
		self.ed.Redo()
	def CmdCut(self):
		self.ed.Cut()
	def CmdCopy(self):
		self.ed.Copy()
	def CmdPaste(self):
		self.ed.Paste()
	def CmdDelete(self):
		self.ed.Clear()
# Module-level handle to the single application window.
xiteFrame = None
def main(test):
	# Create the editor shell, run its message loop, and afterwards report
	# which API features were never exercised by the given test suite.
	global xiteFrame
	xiteFrame = XiteWin(test)
	xiteFrame.AppLoop()
	#~ xiteFrame.CmdExercised()
	return xiteFrame.Uncalled()
| Python |
# Keep only the letters 'a'-'e'; every other character becomes a space.
def depunctuate(s):
	'''Return *s* with each character outside 'abcde' replaced by a space.'''
	return "".join(ch if ch in 'abcde' else " " for ch in s)
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ctypes, os, sys
from ctypes import c_int, c_ulong, c_char_p, c_wchar_p, c_ushort, c_uint, c_long
class TEXTRANGE(ctypes.Structure):
	"""ctypes mirror of Scintilla's Sci_TextRange struct."""
	_fields_ = [
		("cpMin", c_long),
		("cpMax", c_long),
		("lpstrText", ctypes.POINTER(ctypes.c_char)),
	]
class FINDTEXT(ctypes.Structure):
	"""ctypes mirror of Scintilla's Sci_TextToFind struct."""
	_fields_ = [
		("cpMin", c_long),
		("cpMax", c_long),
		("lpstrText", c_char_p),
		("cpMinText", c_long),
		("cpMaxText", c_long),
	]
class SciCall:
	# Callable wrapper for one Scintilla message: calling the instance sends
	# the message through the direct function with the given arguments.
	def __init__(self, fn, ptr, msg, stringResult=False):
		self._fn = fn
		self._ptr = ptr
		self._msg = msg
		# stringResult marks messages that fill a caller-provided buffer.
		self._stringResult = stringResult
	def __call__(self, w=0, l=0):
		ww = ctypes.cast(w, c_char_p)
		if self._stringResult:
			# Two-phase protocol: first call with a NULL buffer to get the
			# length, then again with an allocated buffer to fetch the bytes.
			lengthBytes = self._fn(self._ptr, self._msg, ww, None)
			if lengthBytes == 0:
				return bytearray()
			result = (ctypes.c_byte * lengthBytes)(0)
			lengthBytes2 = self._fn(self._ptr, self._msg, ww, ctypes.cast(result, c_char_p))
			assert lengthBytes == lengthBytes2
			return bytearray(result)[:lengthBytes]
		else:
			ll = ctypes.cast(l, c_char_p)
			return self._fn(self._ptr, self._msg, ww, ll)
# Prototype of Scintilla's direct-access function:
# long fn(void *ptr, int msg, void *wParam, void *lParam).
sciFX = ctypes.CFUNCTYPE(c_long, c_char_p, c_int, c_char_p, c_char_p)
class ScintillaCallable:
	# Dynamic proxy over the Scintilla API described by a Face definition:
	# attribute access resolves message names/constants at runtime and tracks
	# which features have been exercised (in 'used') versus declared ('all').
	def __init__(self, face, scifn, sciptr):
		# Assign through __dict__ to avoid triggering our own __setattr__.
		self.__dict__["face"] = face
		self.__dict__["used"] = set()
		self.__dict__["all"] = set()
		# The k member is for accessing constants as a dictionary
		self.__dict__["k"] = {}
		for f in face.features:
			self.all.add(f)
			if face.features[f]["FeatureType"] == "val":
				self.k[f] = int(self.face.features[f]["Value"], 0)
			elif face.features[f]["FeatureType"] == "evt":
				self.k["SCN_"+f] = int(self.face.features[f]["Value"], 0)
		scifn = sciFX(scifn)
		self.__dict__["_scifn"] = scifn
		self.__dict__["_sciptr"] = sciptr
	def __getattr__(self, name):
		# Resolve 'name' against the Face: a constant value, a message
		# (returned as a SciCall), a getter-backed property, or a
		# notification constant ("SCN_*").
		if name in self.face.features:
			self.used.add(name)
			feature = self.face.features[name]
			value = int(feature["Value"], 0)
			#~ print("Feature", name, feature)
			if feature["FeatureType"] == "val":
				# Cache constants so later lookups skip __getattr__.
				self.__dict__[name] = value
				return value
			else:
				if feature["Param2Type"] == "stringresult" and \
					name not in ["GetText", "GetLine", "GetCurLine"]:
					return SciCall(self._scifn, self._sciptr, value, True)
				else:
					return SciCall(self._scifn, self._sciptr, value)
		elif ("Get" + name) in self.face.features:
			self.used.add("Get" + name)
			feature = self.face.features["Get" + name]
			value = int(feature["Value"], 0)
			# Only argument-less scalar getters behave as plain properties.
			if feature["FeatureType"] == "get" and \
				not name.startswith("Get") and \
				not feature["Param1Type"] and \
				not feature["Param2Type"] and \
				feature["ReturnType"] in ["bool", "int", "position"]:
				#~ print("property", feature)
				return self._scifn(self._sciptr, value, None, None)
		elif name.startswith("SCN_") and name in self.k:
			self.used.add(name)
			feature = self.face.features[name[4:]]
			value = int(feature["Value"], 0)
			#~ print("Feature", name, feature)
			if feature["FeatureType"] == "val":
				return value
		raise AttributeError(name)
	def __setattr__(self, name, val):
		# Property assignment: forward "x.Foo = v" to the SetFoo message when
		# the Face declares one; any other assignment is rejected.
		if ("Set" + name) in self.face.features:
			self.used.add("Set" + name)
			feature = self.face.features["Set" + name]
			value = int(feature["Value"], 0)
			#~ print("setproperty", feature)
			if feature["FeatureType"] == "set" and not name.startswith("Set"):
				if feature["Param1Type"] in ["bool", "int", "position"]:
					return self._scifn(self._sciptr, value, c_char_p(val), None)
				elif feature["Param2Type"] in ["string"]:
					return self._scifn(self._sciptr, value, None, c_char_p(val))
				raise AttributeError(name)
		raise AttributeError(name)
	def getvalue(self, name):
		# Return the numeric message/constant value for 'name', or -1 when
		# unknown, an event, or not numeric.
		if name in self.face.features:
			feature = self.face.features[name]
			if feature["FeatureType"] != "evt":
				try:
					return int(feature["Value"], 0)
				except ValueError:
					return -1
		return -1
	def ByteRange(self, start, end):
		# Fetch document bytes [start, end) via SCI_GETTEXTRANGE, padding with
		# NULs to the requested length.
		tr = TEXTRANGE()
		tr.cpMin = start
		tr.cpMax = end
		length = end - start
		tr.lpstrText = ctypes.create_string_buffer(length + 1)
		self.GetTextRange(0, ctypes.byref(tr))
		text = tr.lpstrText[:length]
		text += b"\0" * (length - len(text))
		return text
	def StyledTextRange(self, start, end):
		# Fetch interleaved (char, style) byte pairs via SCI_GETSTYLEDTEXT,
		# hence the doubled buffer length.
		tr = TEXTRANGE()
		tr.cpMin = start
		tr.cpMax = end
		length = 2 * (end - start)
		tr.lpstrText = ctypes.create_string_buffer(length + 2)
		self.GetStyledText(0, ctypes.byref(tr))
		styledText = tr.lpstrText[:length]
		styledText += b"\0" * (length - len(styledText))
		return styledText
	def FindBytes(self, start, end, s, flags):
		# SCI_FINDTEXT wrapper: returns the match position or -1.
		ft = FINDTEXT()
		ft.cpMin = start
		ft.cpMax = end
		ft.lpstrText = s
		ft.cpMinText = 0
		ft.cpMaxText = 0
		pos = self.FindText(flags, ctypes.byref(ft))
		#~ print(start, end, ft.cpMinText, ft.cpMaxText)
		return pos
	def Contents(self):
		# Whole document as bytes.
		return self.ByteRange(0, self.Length)
	def SetContents(self, s):
		# Replace the whole document with 's' via the target API.
		self.TargetStart = 0
		self.TargetEnd = self.Length
		self.ReplaceTarget(len(s), s)
| Python |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import unicode_literals
import os, string, sys, time, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
class TestPerformance(unittest.TestCase):
	"""Timing smoke tests against a live editor: each test prints the elapsed
	seconds for its scenario and asserts only basic sanity of the result.

	Uses assertEqual/assertTrue instead of the deprecated assertEquals/assert_
	aliases, which were removed in Python 3.12."""
	def setUp(self):
		self.xite = Xite.xiteFrame
		self.ed = self.xite.ed
		self.ed.ClearAll()
		self.ed.EmptyUndoBuffer()
	def testAddLine(self):
		# Append 1000 single-line chunks, verifying the line count grows.
		data = (string.ascii_letters + string.digits + "\n").encode('utf-8')
		start = time.time()
		for i in range(1000):
			self.ed.AddText(len(data), data)
			self.assertEqual(self.ed.LineCount, i + 2)
		end = time.time()
		duration = end - start
		print("%6.3f testAddLine" % duration)
		self.xite.DoEvents()
		self.assertTrue(self.ed.Length > 0)
	def testAddLineMiddle(self):
		# NOTE(review): this is identical to testAddLine — it appends at the
		# end and never actually inserts mid-document; confirm the intent.
		data = (string.ascii_letters + string.digits + "\n").encode('utf-8')
		start = time.time()
		for i in range(1000):
			self.ed.AddText(len(data), data)
			self.assertEqual(self.ed.LineCount, i + 2)
		end = time.time()
		duration = end - start
		print("%6.3f testAddLineMiddle" % duration)
		self.xite.DoEvents()
		self.assertTrue(self.ed.Length > 0)
	def testHuge(self):
		# One ~6MB AddText call.
		data = (string.ascii_letters + string.digits + "\n").encode('utf-8')
		data = data * 100000
		start = time.time()
		self.ed.AddText(len(data), data)
		end = time.time()
		duration = end - start
		print("%6.3f testHuge" % duration)
		self.xite.DoEvents()
		self.assertTrue(self.ed.Length > 0)
	def testHugeInserts(self):
		# 1000 insertions at position 0 of a huge document (worst case for
		# the gap buffer).
		data = (string.ascii_letters + string.digits + "\n").encode('utf-8')
		data = data * 100000
		insert = (string.digits + "\n").encode('utf-8')
		self.ed.AddText(len(data), data)
		start = time.time()
		for i in range(1000):
			self.ed.InsertText(0, insert)
		end = time.time()
		duration = end - start
		print("%6.3f testHugeInserts" % duration)
		self.xite.DoEvents()
		self.assertTrue(self.ed.Length > 0)
	def testHugeReplace(self):
		# 1000 target replacements walking forward through a huge document.
		oneLine = (string.ascii_letters + string.digits + "\n").encode('utf-8')
		data = oneLine * 100000
		insert = (string.digits + "\n").encode('utf-8')
		self.ed.AddText(len(data), data)
		start = time.time()
		for i in range(1000):
			self.ed.TargetStart = i * len(insert)
			self.ed.TargetEnd = self.ed.TargetStart + len(oneLine)
			self.ed.ReplaceTarget(len(insert), insert)
		end = time.time()
		duration = end - start
		print("%6.3f testHugeReplace" % duration)
		self.xite.DoEvents()
		self.assertTrue(self.ed.Length > 0)
	def testUTF8CaseSearches(self):
		# Case-sensitive search for a multi-byte character at the very end of
		# a large UTF-8 document.
		self.ed.SetCodePage(65001)
		oneLine = "Fold Margin=折りたたみ表示用の余白(&F)\n".encode('utf-8')
		manyLines = oneLine * 100000
		manyLines = manyLines + "φ\n".encode('utf-8')
		self.ed.AddText(len(manyLines), manyLines)
		searchString = "φ".encode('utf-8')
		start = time.time()
		for i in range(10):
			self.ed.TargetStart = 0
			self.ed.TargetEnd = self.ed.Length-1
			self.ed.SearchFlags = self.ed.SCFIND_MATCHCASE
			pos = self.ed.SearchInTarget(len(searchString), searchString)
			self.assertTrue(pos > 0)
		end = time.time()
		duration = end - start
		print("%6.3f testUTF8CaseSearches" % duration)
		self.xite.DoEvents()
	def testUTF8Searches(self):
		# Same as above but case-insensitive (SearchFlags = 0).
		self.ed.SetCodePage(65001)
		oneLine = "Fold Margin=折りたたみ表示用の余白(&F)\n".encode('utf-8')
		manyLines = oneLine * 100000
		manyLines = manyLines + "φ\n".encode('utf-8')
		self.ed.AddText(len(manyLines), manyLines)
		searchString = "φ".encode('utf-8')
		start = time.time()
		for i in range(10):
			self.ed.TargetStart = 0
			self.ed.TargetEnd = self.ed.Length-1
			self.ed.SearchFlags = 0
			pos = self.ed.SearchInTarget(len(searchString), searchString)
			self.assertTrue(pos > 0)
		end = time.time()
		duration = end - start
		print("%6.3f testUTF8Searches" % duration)
		self.xite.DoEvents()
if __name__ == '__main__':
	# Launch the host editor shell and run this performance suite inside it.
	Xite.main("performanceTests")
| Python |
# -*- coding: utf-8 -*-
import ctypes, os, sys, unittest
from PySide.QtCore import *
from PySide.QtGui import *
import ScintillaCallable
sys.path.append("..")
from bin import ScintillaEditPy
scintillaDirectory = ".."
scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include")
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
class Form(QDialog):
	# Qt dialog hosting a single ScintillaEdit control for the test harness.
	def __init__(self, parent=None):
		super(Form, self).__init__(parent)
		self.resize(460,300)
		# Create widget
		self.edit = ScintillaEditPy.ScintillaEdit(self)
class XiteWin():
	# Qt counterpart of the Win32 XiteWin shell: wires a ScintillaEdit widget
	# to a ScintillaCallable proxy and runs the named unit-test suite.
	def __init__(self, test=""):
		self.face = Face.Face()
		self.face.ReadFromFile(os.path.join(scintillaIncludeDirectory, "Scintilla.iface"))
		self.test = test
		self.form = Form()
		# Obtain Scintilla's direct function/pointer pair so messages bypass
		# the Qt signal layer.
		scifn = self.form.edit.send(int(self.face.features["GetDirectFunction"]["Value"]), 0, 0)
		sciptr = ctypes.c_char_p(self.form.edit.send(
			int(self.face.features["GetDirectPointer"]["Value"]), 0,0))
		self.ed = ScintillaCallable.ScintillaCallable(self.face, scifn, sciptr)
		self.form.show()
	def DoStuff(self):
		# Entry point called by main(): announce and run the test suite.
		print(self.test)
		self.CmdTest()
	def DoEvents(self):
		# Pump pending Qt events (the Win32 version pumps the message loop).
		QApplication.processEvents()
	def CmdTest(self):
		# Run the configured suite, print the result, and exit the process.
		runner = unittest.TextTestRunner()
		tests = unittest.defaultTestLoader.loadTestsFromName(self.test)
		results = runner.run(tests)
		print(results)
		sys.exit(0)
# Module-level handle to the single application window.
xiteFrame = None
def main(test):
	# Create the Qt application and shell, run the given suite, then enter
	# the Qt event loop (CmdTest normally exits before exec_ returns).
	global xiteFrame
	app = QApplication(sys.argv)
	xiteFrame = XiteWin(test)
	xiteFrame.DoStuff()
	sys.exit(app.exec_())
| Python |
# -*- coding: utf-8 -*-
import XiteWin
if __name__ == "__main__":
	# Launch the editor shell interactively (no test suite selected).
	XiteWin.main("")
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Define the menu structure used by the Pentacle applications """
# Each top-level entry is [menu title, [[item text, accelerator], ...]].
# "&" marks the keyboard mnemonic, "-" produces a separator, and the
# "<control>"/"<shift>" accelerator prefixes are translated by the shell
# (e.g. XiteWin.SetMenus) into platform key names.
MenuStructure = [
	["&File", [
		["&New", "<control>N"],
		["&Open...", "<control>O"],
		["&Save", "<control>S"],
		["Save &As...", "<control><shift>S"],
		["Test", ""],
		["Exercised", ""],
		["Uncalled", ""],
		["-", ""],
		["&Exit", ""]]],
	[ "&Edit", [
		["&Undo", "<control>Z"],
		["&Redo", "<control>Y"],
		["-", ""],
		["Cu&t", "<control>X"],
		["&Copy", "<control>C"],
		["&Paste", "<control>V"],
		["&Delete", "Del"],
		["Select &All", "<control>A"],
		]],
	]
| Python |
class RewiteLocalForwardedRequest:
    """
    Middleware that makes a reverse-proxied request look like it arrived at
    the forwarded host by copying X-Forwarded-Host into HTTP_HOST.
    (The "Rewite" typo in the class name is kept for backward compatibility.)
    """
    def process_request(self, request):
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if 'HTTP_X_FORWARDED_HOST' in request.META:
            request.META['HTTP_HOST'] = request.META['HTTP_X_FORWARDED_HOST']
            #request.META['SERVER_NAME'] = request.META['HTTP_X_FORWARDED_HOST']
| Python |
from westom import settings
def default(request):
    """
    Context processor: returns the default template variables
    (MEDIA_URL and URL_HOST) taken from the project settings.
    """
    context = {}
    context['MEDIA_URL'] = settings.MEDIA_URL
    context['URL_HOST'] = settings.URL_HOST
    return context
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from westom.settings import FEEDS_DIR
from westom.feednut.utils import misc as misc_utils
import os
import cPickle as pickle
#Set up File locks in a system-independent manner
try:
    from fcntl import lockf as lockFile
    from fcntl import LOCK_EX, LOCK_SH
except ImportError:
    # Windows has no fcntl; fall back to msvcrt's equivalents. Catching only
    # ImportError (not a bare except) lets real errors surface.
    from msvcrt import locking as lockFile
    from msvcrt import LK_RLCK as LOCK_EX
    from msvcrt import LK_NBLCK as LOCK_SH
class Feed(models.Model):
    """
    This represents an individual RSS/etc. Feed in the system
    """
    # NOTE(review): verify_exists/maxlength are pre-Django-1.0 field options,
    # kept as-is to match the project's Django version.
    xml_url = models.URLField(verify_exists=False, unique=True, db_index=True)
    channel_link = models.URLField(verify_exists=False, null=True)
    title = models.CharField(maxlength=128, null=False, db_index=True)
    subtitle = models.TextField(null=True, blank=True)
    icon_url = models.URLField(verify_exists=False, null=True)
    updated_date = models.DateTimeField(null=True, blank=True)
    encoding = models.CharField(maxlength=64, default='us-ascii')
    #These next two deal with the HTTP headers of the feed
    last_modified = models.DateTimeField(null=True)
    etag = models.CharField(maxlength=128, null=True, blank=True)
    create_date = models.DateTimeField(auto_now_add=True)
    touch_date = models.DateTimeField(auto_now=True)
    suggested_tags = models.CharField(maxlength=255, db_index=True, default='')
    default_feed = models.BooleanField(default=False) #True if this should be a feed included in a new users first view
    system_feed = models.BooleanField(default=False)
    class Admin:
        list_display = ('xml_url', 'create_date', 'last_modified', 'touch_date', 'default_feed', 'system_feed')
        list_filter = ('default_feed', 'system_feed')
        search_fields = ('xml_url', 'title')
    def __str__(self):
        return self.xml_url
    def get_data_path(self):
        """ returns the full path to the data file """
        # Shard the on-disk cache into 100 directories keyed by feed id.
        name = misc_utils.clean_string(self.xml_url)
        return os.path.abspath(os.path.join(FEEDS_DIR, 'feeds_%d/%s' % (self.id % 100, name)))
    def set_data(self, data):
        """ Set the feed data, which gets pickled and stored """
        path = self.get_data_path()
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError:
                # Another process may have created it between the check and
                # the call; a truly unwritable directory fails at open() below.
                pass
        f = open(path, 'wb')
        try:
            try:
                #lock the file and write it (best effort: locking may be
                #unsupported on some platforms/filesystems)
                lockFile(f.fileno(), LOCK_EX, 0)
            except (IOError, OSError):
                pass
            pickle.dump(data, f)
        finally:
            # Always release the handle (and with it the lock).
            f.close()
    def get_data(self):
        """ Unpickles the data and returns the stored object """
        f = open(self.get_data_path(), 'rb')
        try:
            try:
                #lock the file and read it (best effort, as in set_data)
                lockFile(f.fileno(), LOCK_SH, 0)
            except (IOError, OSError):
                pass
            data = pickle.load(f)
        finally:
            f.close()
        return data
    def get_xml_data(self):
        """ Returns the stored XML data """
        data = self.get_data()
        return data.get('xml_data', None)
    def get_entries(self, limit=10):
        """ returns a list of entries associated with the feed """
        # return self.feedentry_set.all()[:limit]
        #get the feed data that got pickled on disk
        return self.get_data()['entries'][:limit]
    def get_suggested_tags(self):
        """ returns list of the suggested tags """
        return self.suggested_tags.split()
class FeedEntry(models.Model):
    """ An Entry for a Feed """
    # entry_id holds the feed-supplied GUID for the entry.
    entry_id = models.CharField(maxlength=255)
    title = models.TextField()
    link = models.URLField(verify_exists=False, null=True, blank=True)
    summary = models.TextField(null=True, blank=True)
    updated_date = models.DateTimeField(null=True, blank=True)
    feed = models.ForeignKey(Feed)
    create_date = models.DateTimeField(auto_now_add=True)
    touch_date = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name_plural = "Feed entries"
        # Newest-touched entries first.
        ordering = ['-touch_date', '-id']
    class Admin:
        list_display = ('entry_id', 'title', 'link', 'updated_date',)
        search_fields = ('title', 'link')
    def __str__(self):
        return self.entry_id
class UserFeed(models.Model):
    """Each item is a feed for the specified user, with its rating """
    class Meta:
        # A user may subscribe to a given feed only once.
        unique_together = (("user", "feed"),)
    user = models.ForeignKey(User)
    feed = models.ForeignKey(Feed)
    create_date = models.DateTimeField(auto_now_add=True)
    # Ordering position of the feed in the user's page.
    position = models.IntegerField(default=0, db_index=True)
    is_anchored = models.BooleanField(default=False)
    access_count = models.IntegerField(default=0)
    is_public = models.BooleanField(default=True)
    is_hidden = models.BooleanField(default=False)
    # How many entries to show for this subscription.
    num_items = models.IntegerField(default=10)
    permanent_feed = models.BooleanField(default=False)
    class Admin:
        list_display = ('user', 'feed', 'create_date')
        list_filter = ('is_public', 'permanent_feed')
    def __str__(self):
        return '%s, %s' % (self.user.username, self.feed.xml_url)
    def get_tags_string(self):
        # Space-joined tags with a trailing space when non-empty.
        st = ' '.join(self.get_tags())
        if len(st) > 0:
            st += ' '
        return st
    def get_tags(self):
        # Tag names attached to this subscription.
        items = self.userfeedtag_set.all()
        return [item.tag.tag for item in items]
    def get_feed(self):
        return self.feed
    def get_entries(self):
        # Entries limited by this user's per-feed item count.
        return self.feed.get_entries(limit=self.num_items)
class Tag(models.Model):
    """ Tags """
    # Canonical tag text, unique across the system.
    tag = models.CharField(maxlength=32, unique=True, db_index=True)
    class Admin:
        search_fields = ('tag',)
    def __str__(self):
        return self.tag
class Entry(models.Model):
    """ An Entry for a Feed"""
    feed = models.ForeignKey(Feed)
    title = models.CharField(maxlength=128, null=False)
    link = models.URLField(verify_exists=False, null=False)
    description = models.TextField(default='')
    #the xml_url of the originating feed where the article came from
    xml_url = models.URLField(verify_exists=False, null=True)
    create_date = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name_plural = "Entries"
    class Admin:
        list_display = ('title', 'link', 'create_date')
    def __str__(self):
        return self.link
class FeedTag(models.Model):
    """ A tag for a Feed as a whole. Entries could be 'suggested tags' or something of that sort. """
    create_date = models.DateTimeField(auto_now_add=True)
    feed = models.ForeignKey(Feed)
    tag = models.ForeignKey(Tag)
    class Meta:
        # A given tag may be attached to a feed only once.
        unique_together = (("feed", "tag"),)
    class Admin:
        pass
    def __str__(self):
        return '%s, %s' % (self.feed, self.tag.tag)
class UserFeedTag(models.Model):
    """ A user's tag for one of their subscribed feeds """
    create_date = models.DateTimeField(auto_now_add=True)
    user_feed = models.ForeignKey(UserFeed)
    tag = models.ForeignKey(Tag)
    class Meta:
        # A subscription carries each tag at most once.
        unique_together = (("user_feed", "tag"),)
    class Admin:
        list_display = ('user_feed', 'tag', 'create_date')
    def __str__(self):
        return '%s, %s' % (self.user_feed, self.tag.tag)
class UserReadEntry(models.Model):
    """ Articles read by users """
    user = models.ForeignKey(User)
    title = models.CharField(maxlength=128, null=False)
    link = models.URLField(verify_exists=False)
    description = models.TextField(default='')
    # auto_now: re-reading an article refreshes its read timestamp.
    read_date = models.DateTimeField(auto_now=True)
    xml_url = models.URLField(verify_exists=False, null=True)
    class Admin:
        list_display = ('title', 'user', 'read_date')
    def __str__(self):
        return self.title
class UserBuddy(models.Model):
    """ maps members to a User's network """
    user = models.ForeignKey(User, related_name='user')
    buddy = models.ForeignKey(User, related_name='buddy')
    create_date = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Each (user, buddy) pairing exists at most once.
        unique_together = (("user", "buddy"),)
        ordering = ['buddy']
        verbose_name_plural = "User Buddies"
    class Admin:
        list_display = ('user', 'buddy', 'create_date')
class ForgotPassword(models.Model):
    """ keeps track of requests to reset a password """
    request_date = models.DateTimeField(auto_now_add=True)
    email = models.EmailField(blank=True)
    # NOTE(review): 'hash' shadows the builtin; kept as the model field name.
    hash = models.CharField(maxlength=32, unique=True)
    #set expired to true whenever they request the URL... maybe?
    expired = models.BooleanField(default=False)
    class Admin:
        pass
# Extend the stock User admin listing with signup/last-login timestamps.
User._meta.admin.list_display = User._meta.admin.list_display + ('date_joined', 'last_login',)
#class Blacklist(models.Model):
# """ maps members to a User's network """
# ipaddress = models.IPAdressField(_('ipaddress'), blank=False, null=False)
# create_date = models.DateTimeField(auto_now_add=True)
# touch_date = models.DateTimeField(auto_now=True)
#
# class Meta:
# verbose_name = _('blacklist')
# verbose_name_plural = _('blacklists')
## ordering = ('-touch_date',)
# class Admin:
# #fields = (
## # (None, {'fields': ('content_type', 'object_id', 'site')}),
# # ('Content', {'fields': ('user', 'headline', 'comment')}),
## # ('Ratings', {'fields': ('rating1', 'rating2', 'rating3', 'rating4', 'rating5', 'rating6', 'rating7', 'rating8', 'valid_rating')}),
## # ('Meta', {'fields': ('is_public', 'is_removed', 'ip_address')}),
# #)
## list_display = ('ipaddress', 'create_date', 'touch_date')
# list_filter = ('create_date', 'touch_date')
# date_hierarchy = 'create_date'
# search_fields = ('ipaddress',)
#
# def __repr__(self):
# return "%s: %s %s" % (self.ipaddress, self.create_date, self.touch_date)
| Python |
from django.contrib.syndication.feeds import Feed as FEED
from django.contrib.syndication.feeds import ObjectDoesNotExist
from django.contrib.syndication.views import feed as feed_func
from django.views.decorators.cache import cache_page
from westom.feednut.utils import user as user_utils
from westom.feednut.utils import feed_accomplice
from westom.feednut.models import *
from westom.settings import URL_HOST
class HottestFeed(FEED):
    """
    The Hottest Feeds on FeedNut
    Returns the most popular, both site-wide, and user-based
    """
    def get_object(self, bits):
        # /hottest/ -> site-wide (None); /hottest/<username>/ -> that user.
        if len(bits) > 1:
            raise ObjectDoesNotExist
        if len(bits) == 1:
            return User.objects.get(username__iexact=bits[0].lower())
        return None
    def title(self, obj):
        if obj:
            return "FeedNut.com | %s's Hottest Feeds" % obj.username
        else:
            return "FeedNut.com | Hottest Feeds"
    def link(self, obj):
        if obj:
            return "http://feednut.com/%s/" % obj.username
        else:
            return "http://feednut.com/"
    def description(self, obj):
        if obj:
            return "%s's Hottest Feeds at feednut.com" % obj.username
        else:
            return "The most popular feeds at feednut.com"
    def items(self, obj):
        """
        Returns the top 25 most popular feeds
        Popularity is based on how many people are subscribed to the feeds
        OR
        the position of the feed in the users list
        """
        if obj:
            # NOTE(review): obj.id is interpolated into raw SQL; it is an
            # integer primary key so not attacker-controlled, but consider
            # parameterizing anyway.
            where = ['feednut_userfeed.user_id=' + str(obj.id), 'feednut_feed.id=feednut_userfeed.feed_id']
            tables = ['feednut_userfeed']
            select = {'uf_pos' : 'select position from feednut_userfeed where feed_id=feednut_feed.id and feednut_userfeed.user_id=' + str(obj.id)}
            return Feed.objects.extra(where=where, tables=tables, select=select).order_by('uf_pos')[:25]
        else:
            # Site-wide: rank by subscriber count via a correlated subquery.
            return Feed.objects.extra(select={'feed_count': 'SELECT COUNT(*) FROM feednut_userfeed WHERE feed_id = feednut_feed.id'}).order_by('-feed_count', '-create_date')[:25]
    def item_link(self, item):
        return item.xml_url
class RecentRead(FEED):
    """
    Returns the user's or system's most recently read articles
    """
    def get_object(self, bits):
        # No bits -> site-wide feed; one bit -> per-user feed.
        if len(bits) > 1:
            raise ObjectDoesNotExist
        if len(bits) == 1:
            return User.objects.get(username__iexact=bits[0].lower())
        return None
    def title(self, obj):
        if obj:
            return "FeedNut.com | %s's Recent Reads" % obj.username
        else:
            return "FeedNut.com | Recent Reads"
    def link(self, obj):
        if obj:
            return "http://feednut.com/%s/" % obj.username
        else:
            return "http://feednut.com/"
    def description(self, obj):
        if obj:
            return "%s's recent reads at feednut.com" % obj.username
        else:
            return "Recent reads at feednut.com"
    def items(self, obj):
        # The "latest" data is itself stored as a system feed and re-read here.
        if obj:
            #return user_utils.get_latest_read_entries(obj)
            feed = feed_accomplice.get_system_feed("%s/%s/latest.rss" % (URL_HOST, obj.username))
        else:
            feed = feed_accomplice.get_system_feed("%s/latest.rss" % URL_HOST)
        return feed_accomplice.get_entries(feed)
    def item_link(self, item):
        return item.link
class ReadLater(FEED):
    """
    Returns the entries the user's has flagged to read later
    """
    def get_object(self, bits):
        if len(bits) > 1:
            raise ObjectDoesNotExist
        if len(bits) == 1:
            return User.objects.get(username__iexact=bits[0].lower())
        return None
    # NOTE(review): unlike the other feed classes, the methods below return
    # None when obj is None — confirm the framework tolerates a site-wide
    # request for this feed.
    def title(self, obj):
        if obj:
            return "FeedNut.com | %s's Read Later List" % obj.username
    def link(self, obj):
        if obj:
            return "http://feednut.com/%s/" % obj.username
    def description(self, obj):
        if obj:
            return "%s's entries to read later on FeedNut.com" % obj.username
    def items(self, obj):
        if obj:
            feed = feed_accomplice.get_system_feed("%s/%s/readlater.rss" % (URL_HOST, obj.username))
            return feed_accomplice.get_entries(feed)
    def item_link(self, item):
        return item.link
#copied from the contrib area, so we can change the response
#might want to move this into the views.py file
def feed(request, url, feed_dict=None):
    # Delegate to Django's syndication view, forcing a text/xml content type.
    response = feed_func(request, url, feed_dict)
    response['Content-Type'] = 'text/xml'
    return response
#let's cache it for 2 minutes
feed = cache_page(feed, 120)
def user_feed(request, username, url, feed_dict=None):
    # Per-user variant: the username becomes part of the syndication URL.
    return feed(request, '%s/%s' % (url, username), feed_dict=feed_dict)
| Python |
from UserDict import DictMixin
class Properties(DictMixin):
    """ Takes a java like properties file seperated by new lines and stores it as a dictionary object"""
    def __init__(self, prefString):
        # prefString: "key=value" pairs, one per line.
        self.prefString = prefString
        self.data = self.parse()
        # Cached serialized form; recomputed lazily after mutation.
        self.properties = ""
    def parse(self):
        """Return a dict parsed from self.prefString; blank input yields {}."""
        parsed = {}  # renamed from 'dict' to stop shadowing the builtin
        text = self.prefString.strip()
        if len(text) == 0:
            return parsed
        for keyvalue in text.split('\n'):
            # Split on the FIRST '=' only so values may themselves contain
            # '=' (the original split('=') asserted exactly two parts).
            key, sep, value = keyvalue.partition('=')
            if not sep:
                raise ValueError('malformed property line: %r' % keyvalue)
            parsed[key.strip()] = value.strip()
        return parsed
    def __getitem__(self, key):
        return self.data[key]
    def get(self, key, default=""):
        # Defaults to '' (not None) to match the properties-file convention.
        return self.data.get(key, default)
    def set(self, key, value):
        self.data[key] = value
        #reset the cached properties value
        self.properties = ""
    def pop(self, key):
        # dict.pop raises KeyError itself; no need for a pre-check
        # (the original used the Python-2-only has_key()).
        return self.data.pop(key)
    def has_key(self, key):
        # Kept for existing callers; prefer "key in props".
        return key in self.data
    def keys(self):
        return self.data.keys()
    def convertToPropertiesFile(self):
        """ Takes the key values and converts them to a java like properties file seperated by new line characters"""
        if len(self.properties) > 0:
            return self.properties
        # Iteration order matches self.data.keys(), as before.
        self.properties = "\n".join(
            "%s=%s" % (key, self.data[key]) for key in self.data.keys())
        return self.properties
import urllib, re
from westom.feednut.libs import syndic8
from westom.feednut.utils import feed_accomplice
URL_TECHNORATI = 'http://www.technorati.com/pop/blogs/?faves=1'
def scrape_technorati_top100():
    """ Returns a list of URLs representing the top 100 blogs """
    page = urllib.urlopen(URL_TECHNORATI).read()
    urls = []
    for rawLine in page.split('\n'):
        # Blog links live on the lines containing an <h2><a href...> anchor.
        if re.match(r'.*h2><a href.*', rawLine):
            stripped = rawLine.strip()
            begin = stripped.find('http')
            end = stripped.find('"', begin)
            urls.append(stripped[begin:end])
    return urls
def load_technorati_top100():
    # Resolve each scraped blog URL to its feed via syndic8 and persist any
    # newly discovered feeds. (Python 2 print statements — py2-only module.)
    urls = scrape_technorati_top100()
    sites = []
    for url in urls:
        ids = syndic8.findsites('%s' % url)
        sites += syndic8.getfeedinfo(ids, ['dataurl'])
    for site in sites:
        print site['dataurl']
        newfeed = feed_accomplice.get_feed(site['dataurl'])
        if newfeed:
            print 'Saved Feed!: %s' % newfeed.title
| Python |
import unittest
from properties import Properties
class PropertiesTestCase(unittest.TestCase):
    """Exercises Properties parsing, get/set/pop and serialization."""
    # setUp/tearDown: corrected capitalization so unittest actually calls the
    # hooks (the original 'setup'/'teardown' were never invoked).
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def runTest(self):
        """Test blank"""
        prefString = ""
        prefs = Properties(prefString)
        prefString = "default.name=Bilbo Baggins\nhomepage.feeds=42,24"
        prefs = Properties(prefString)
        # Direct equality replaces the Python-2-only cmp() builtin.
        assert prefs.get('default.name') == 'Bilbo Baggins', 'Error retrieving value from key'
        assert prefs.get('jeff') == '', "Missing keys should default to blank string"
        prefs.set('Hulk', 'Hogan')
        assert prefs.get('Hulk') == 'Hogan', 'Error retrieving set key/value'
        stringValue = prefs.convertToPropertiesFile()
        #this last test needs to be fixed, can fail if keys are not brought back in same order
        assert prefString + "\nHulk=Hogan" == stringValue, 'Error converting to properties'
        prefString = "jeff=1"
        prefs = Properties(prefString)
        assert prefs.get('jeff') == '1'
        assert prefs.pop('jeff') == '1'
        try:
            prefs.pop('badkey')
            # self.fail, not unittest.fail (which does not exist).
            self.fail('A KeyError exception should have prevented this')
        except KeyError:
            pass
        assert prefs.has_key('jeff') == False
# Ad-hoc driver: runs the single test at import time rather than via
# unittest's runner.
t = PropertiesTestCase()
t.runTest()
| Python |
"""
Feed utility functions
"""
from django.db import connection
from westom.feednut.models import *
from westom.feednut.libs import feedparser
import time
from datetime import datetime
from westom.feednut.utils import misc as misc_utils
from westom.feednut.utils import feed_utils
from westom.settings import URL_HOST
import re, random
import logging
def get_feeds(xml_url=None, user=None, tags=None, only_public=None, default_feed=None, permanent_feed=None, limit=None):
    """
    Returns a list of Feed objects matching the criteria in the parameters.
    If a feed cannot be found and the xml_url is given, try to download that feed and return it.
    """
    if tags and user and 'fn:untagged' in tags:
        # Pseudo-tag: this user's feeds that carry no tags at all.
        cursor = connection.cursor()
        # Bind user.id as a query parameter instead of interpolating it
        # into the SQL string.
        cursor.execute("""
            select feednut_feed.* from
            (feednut_userfeed uf left join feednut_userfeedtag uft on uf.id=uft.user_feed_id)
            left join feednut_feed on uf.feed_id = feednut_feed.id
            where uf.user_id=%s
            and uft.user_feed_id is null""", [user.id])
        feeds = [Feed(*row) for row in cursor.fetchall()]
        return feeds
    elif tags and 'fn:all' in tags:
        tags = None
    elif tags and 'fn:private' in tags:
        only_public = False
    feeds = []
    where = []
    tables = []
    params = []
    if xml_url:
        xml_url = xml_url.strip().lower()
        if not xml_url.startswith('http'):
            xml_url = 'http://%s' % xml_url
        where.append('feednut_feed.xml_url=%s')
        params.append(xml_url)
    #if you are filtering with user, tag, only_public or permanent_feed then you will need the userfeed table
    if user or tags or only_public or permanent_feed:
        where.append('feednut_feed.id=feednut_userfeed.feed_id')
        tables.append('feednut_userfeed')
    if user:
        where.append('feednut_userfeed.user_id=%s')
        params.append(user.id)
    if tags:
        # One placeholder per tag: the original appended the whole
        # comma-joined string as a single bound value, so "tag in (%s)"
        # matched nothing when more than one tag was given.
        placeholders = ','.join(['%s'] * len(tags))
        where.extend(['feednut_tag.tag in (%s)' % placeholders,
                      'feednut_userfeedtag.user_feed_id=feednut_userfeed.id',
                      'feednut_userfeedtag.tag_id=feednut_tag.id'])
        tables.extend(['feednut_tag', 'feednut_userfeedtag'])
        params.extend(tags)
    if only_public is not None:
        where.append('feednut_userfeed.is_public=%s')
        params.append(only_public)
    if permanent_feed is not None:
        where.append('feednut_userfeed.permanent_feed=%s')
        params.append(permanent_feed)
    if default_feed is not None:
        where.append('feednut_feed.default_feed=%s')
        params.append(default_feed)
    feeds = Feed.objects.extra(where=where, tables=tables, params=params).distinct()[:limit]
    if not feeds and xml_url:
        # Fixed NameError: the original referenced an undefined 'url' here.
        feeds = get_feed(xml_url)
    return feeds
def get_feed(url):
    """
    Gets the Feed for the given URL.
    If it doesn't exist yet, the feed is downloaded and added to the Feeds table.
    The new Feed is returned, or None if there was an error.
    """
    #first, see if it already exists
    url = url.strip().lower()
    if not url.startswith('http'):
        url = 'http://%s' % url
    try:
        feed = Feed.objects.get(xml_url__iexact=url)
    except Feed.DoesNotExist:
        # Narrowed from a bare except: only "not stored yet" triggers a
        # download; other errors (e.g. database failures) now surface.
        return updatefeed(url)
    else:
        return feed
def get_system_feed(url):
    """
    Gets the special system Feed for the given URL.

    If it doesn't exist yet, it is created via add_system_feed().
    """
    # Normalize the URL the same way feeds are stored.
    url = url.strip().lower()
    if not url.startswith('http'):
        url = 'http://%s' % url
    try:
        feed = Feed.objects.get(xml_url__iexact=url)
    except Feed.DoesNotExist:
        # was a bare `except:` which also hid unrelated database errors
        return add_system_feed(url)
    else:
        return feed
def add_system_feed(xml_url):
    """
    Create a special system feed. These are the feeds generated by feednut.com.

    Returns the Feed row as re-read through get_feed().
    """
    # Same normalization used everywhere feeds are stored.
    xml_url = xml_url.strip().lower()
    if not xml_url.startswith('http'):
        xml_url = 'http://%s' % xml_url
    new_feed = Feed(xml_url=xml_url, title='',
                    create_date=datetime.now(), last_modified=datetime.now())
    new_feed.save()
    # hand the caller the canonical row via the standard lookup path
    return get_feed(xml_url)
def updatefeed(url):
    """
    Download the feed at the given URL and either update the existing Feed
    row or create a new one.

    `url` may be a URL string or an existing Feed instance.
    Returns the Feed, or None.
    """
    # isinstance(basestring) instead of type() == str: unicode URLs used to
    # fall through and be treated as Feed objects, raising AttributeError.
    if isinstance(url, basestring):
        return feed_utils.get_feed(url, index=True, update=True)
    feed = url
    return feed_utils.get_feed(feed.xml_url,
                               last_modified=(feed.last_modified or None),
                               index=True, update=True)
def add_userfeed(user_or_id, feed_or_id, tags=None, permanent_feed=False):
    """
    Subscribe a user to a feed.

    `user_or_id` / `feed_or_id` may be model instances or ids.
    `tags` is an optional list of tag names applied to the new subscription.
    Returns the (new or already-existing) UserFeed, or None if no feed.
    """
    user = user_or_id
    # accept ids as well as instances (was type() == str, which missed
    # unicode strings and integer primary keys)
    if isinstance(user, (basestring, int, long)):
        user = User.objects.get(id=user)
    feed = feed_or_id
    if isinstance(feed, (basestring, int, long)):
        feed = Feed.objects.get(pk=feed)
    userfeed = None
    if feed:
        #make sure it doesn't already exist
        try:
            userfeed = UserFeed.objects.get(user__id__exact=user.id, feed__id__exact=feed.id)
        except UserFeed.DoesNotExist:
            #create it!
            feed = get_feed(feed.xml_url)
            userfeed = UserFeed(user=user, feed=feed, create_date=datetime.now())
            userfeed.save()
            if tags:
                tag_feed(userfeed, tags)
            if permanent_feed:
                userfeed.permanent_feed = True
                userfeed.save()
    return userfeed
#VALID_TAG = re.compile(r'^\w+$')
def tag_feed(userfeed_or_id, tagnames=None):
    """
    Takes a userfeed (or its id) and a list of tag names and applies those
    tags to the userfeed, creating Tag rows as needed.

    Note: any tags the user already has for this feed that are NOT in
    `tagnames` are removed. Tag names are lowercased and truncated to 20
    characters; 'fn:untagged' is ignored; 'fn:private' marks the userfeed
    non-public.
    """
    userfeed = userfeed_or_id
    if isinstance(userfeed, basestring):
        userfeed = UserFeed.objects.get(id=userfeed)
    # users current tags, keyed by lowercase tag name
    curtags = dict((userfeedtag.tag.tag.lower(), userfeedtag)
                   for userfeedtag in userfeed.userfeedtag_set.all())
    # loop over new tagnames (None default instead of a mutable [] default)
    for name in (tagnames or []):
        name = name.strip().lower()[:20]
        if len(name) == 0:
            continue
        if name == 'fn:untagged':
            continue
        if name == 'fn:private':
            userfeed.is_public = False
            userfeed.save()
        if name not in curtags:
            # see if we need to add a new tag
            try:
                tag = Tag.objects.get(tag__iexact=name)
            except Tag.DoesNotExist:
                tag = Tag(tag=name)
                tag.save()
            userfeedtag = UserFeedTag(user_feed=userfeed, tag=tag)
            try:
                userfeedtag.save()
            except Exception:
                pass  # already tagged with this tag
        else:
            # otherwise, we already have it, so pop it so we dont delete it
            curtags.pop(name, None)
    # now, delete the old tags the user no longer has
    for userfeedtag in curtags.values():
        userfeedtag.delete()
def update_feeds():
    """
    Run through every Feed that has at least one subscriber and refresh it:
    download the latest feed if it has been updated and update the database.
    """
    feed_ids = [f['feed'] for f in UserFeed.objects.select_related().values('feed').distinct()]
    for feed_id in feed_ids:  # renamed from `id` (shadowed builtin)
        try:
            feed = Feed.objects.get(id=feed_id)
            updatefeed(feed)
        except Exception:
            # was `except:{}` (bare except with a dict-literal no-op):
            # best effort - one broken feed must not stop the whole run
            continue
def add_permanent_feeds(user):
    """Subscribe `user` to the built-in feednut system feeds, each tagged
    with its fn:* placement tags."""
    permanent_feeds = [
        ('%s/latest.rss' % URL_HOST, ['fn:home', 'fn:recentread']),
        ('%s/hottest.rss' % URL_HOST, ['fn:home', 'fn:hottestfeed']),
        ('%s/%s/latest.rss' % (URL_HOST, user.username), ['fn:recentread']),
        ('%s/%s/hottest.rss' % (URL_HOST, user.username), ['fn:hottestfeed']),
        ('%s/%s/readlater.rss' % (URL_HOST, user.username), ['fn:readlater']),
    ]
    # tuple unpacking instead of positional indexing
    for feed_url, feed_tags in permanent_feeds:
        feed = get_system_feed(feed_url)
        feed.system_feed = True
        feed.save()
        add_userfeed(user, feed, tags=feed_tags, permanent_feed=True)
#def get_default_feeds():
# return get_feeds(user=user, default_feed=True)
##### Entry functions for Feeds ####
def push_entry(feed_or_id, title=None, link=None, description=None, xml_url=None):
    """ Add a new Entry to this Feed, deleting the oldest Entry if necessary. """
    feed = feed_or_id
    # isinstance(basestring) instead of type() == str (unicode ids failed)
    if isinstance(feed, basestring):
        feed = Feed.objects.get(id=feed)
    # fixed-size queue: drop the oldest entries so that after the push at
    # most max_entries remain
    entries = get_entries(feed)
    max_entries = 25
    for entry in entries[max_entries - 1:]:
        entry.delete()
    entry = Entry(feed_id=feed.id, title=title, link=link,
                  description=description, xml_url=xml_url)
    entry.save()
def get_entries(feed_or_id):
    """ Get all the Entry's for this Feed, newest first. """
    feed = feed_or_id
    # isinstance(basestring) instead of type() == str (unicode ids failed)
    if isinstance(feed, basestring):
        feed = Feed.objects.get(id=feed)
    return feed.entry_set.order_by('-create_date')
| Python |
import os, logging
from PyLucene import FSDirectory, IndexReader, IndexModifier, StandardAnalyzer, \
IndexSearcher, Document, Term, Field, Sort, MultiFieldQueryParser, \
QueryParser
from dateutil import parser
import cPickle as pickle
from westom.settings import DOCUMENT_ROOT
class IndexSupport:
    """Manages shared handles (directory, reader, modifier, searcher) for one
    Lucene index located at `indexPath`.

    The four class-level dicts are intentionally shared by all instances so
    that every IndexSupport pointing at the same path reuses the same handles.
    """
    indexDirectories = {}
    readers = {}
    modifiers = {}
    searchers = {}

    def __init__(self, indexPath, batchMode=False, analyzer=None):
        # batchMode suppresses reader refreshes (see getIndexReader)
        self.batchMode = batchMode
        self.indexPath = indexPath
        self.analyzer = analyzer or StandardAnalyzer()

    def getIndexDirectory(self):
        # lazily create the on-disk directory and cache its FSDirectory handle
        if not IndexSupport.indexDirectories.has_key(self.indexPath):
            if not os.path.exists(self.indexPath):
                os.makedirs(self.indexPath)
            IndexSupport.indexDirectories[self.indexPath] = FSDirectory.getDirectory(self.indexPath, False)
        return IndexSupport.indexDirectories[self.indexPath]

    def getIndexReader(self):
        # reopen a stale cached reader unless batch mode is on
        if IndexSupport.readers.has_key(self.indexPath):
            if not IndexSupport.readers[self.indexPath].isCurrent() and not self.batchMode:
                logging.debug('Refreshing reader...')
                IndexSupport.readers[self.indexPath] = IndexReader.open(self.indexPath)
        else:
            logging.debug('Creating reader...')
            IndexSupport.readers[self.indexPath] = IndexReader.open(self.indexPath)
        return IndexSupport.readers[self.indexPath]

    def getIndexModifier(self):
        # reuse the cached modifier when one exists for this path
        if IndexSupport.modifiers.has_key(self.indexPath):
            return IndexSupport.modifiers[self.indexPath]
        logging.debug("Creating an IndexModifier...")
        try:
            IndexSupport.modifiers[self.indexPath] = IndexModifier(self.indexPath, self.analyzer, False)
        except:
            # a failure opening a non-existent index causes it to be locked anyway
            logging.debug('unlocking indexreader and creating new index')
            IndexReader.unlock(self.getIndexDirectory())
            IndexSupport.modifiers[self.indexPath] = self.createIndex()
        return IndexSupport.modifiers[self.indexPath]

    def getIndexSearcher(self):
        # always builds a fresh searcher over the (possibly refreshed) reader;
        # a cached-searcher variant is left disabled below
        # if not IndexSupport.searchers.has_key(self.indexPath) or not IndexSupport.searchers[self.indexPath].getIndexReader().isCurrent():
        # IndexSupport.searchers[self.indexPath] = IndexSearcher(self.getIndexReader())
        # return IndexSupport.searchers[self.indexPath]
        return IndexSearcher(self.getIndexReader())

    def createIndex(self):
        # (re)create the index from scratch, closing any cached modifier first
        logging.debug('Creating index: %s' % self.indexPath)
        if IndexSupport.modifiers.has_key(self.indexPath):
            IndexSupport.modifiers[self.indexPath].close()
        IndexSupport.modifiers[self.indexPath] = IndexModifier(self.indexPath, self.analyzer, True)
        return IndexSupport.modifiers[self.indexPath]

    def optimizeIndex(self):
        logging.debug('optimizing index: %s' % self.indexPath)
        self.getIndexModifier().optimize()

    def close(self):
        # optimize, then drop this path's cached reader and modifier
        #TODO flush if batch
        self.optimizeIndex()
        if IndexSupport.readers.has_key(self.indexPath):
            IndexSupport.readers[self.indexPath].close()
            del IndexSupport.readers[self.indexPath]
        if IndexSupport.modifiers.has_key(self.indexPath):
            IndexSupport.modifiers[self.indexPath].close()
            del IndexSupport.modifiers[self.indexPath]
# def __del__(self):
# try:
# self.close()
# except:{}
#
STORE_DIR = os.path.join(DOCUMENT_ROOT, '../index')
FEED_FIELDS = ['url', 'link', 'title', 'subtitle']
FEED_ENTRY_FIELDS = ['feed_url', 'link', 'title', 'summary', 'updated']
class FeedIndexer:
def __init__(self, store_dir=STORE_DIR, destroy=False, analyzer=None):
self.feedSupport = IndexSupport(os.path.join(store_dir, 'feeds'), analyzer=analyzer)
self.entrySupport = IndexSupport(os.path.join(store_dir, 'entries'), analyzer=analyzer)
def delete_existing_feed_docs(self, docs=None):
""" deletes existing documents relating to the given feed """
numDeleted = 0
try:
reader = self.feedSupport.getIndexReader()
for doc in docs or []:
try:
numDeleted += reader.deleteDocuments(Term('url', doc.get('url')))
except:{}
reader.close()
except Exception, e:
logging.error(e)
logging.debug('deleted %d existing feed documents' % numDeleted)
def delete_existing_entry_docs(self, docs=None):
numDeleted = 0
try:
reader = self.entrySupport.getIndexReader()
for doc in docs or []:
try:
numDeleted += reader.deleteDocuments(Term('id', doc.get('id')))
except:{}
reader.close()
except Exception, e:
logging.error(e)
logging.debug('deleted %d feed entry documents' % numDeleted)
def create_feed_document(self, feed):
doc = Document()
doc.add(Field('id', str(feed.id), Field.Store.YES, Field.Index.UN_TOKENIZED))
doc.add(Field('url', feed.xml_url, Field.Store.YES, Field.Index.UN_TOKENIZED))
if feed.channel_link:
doc.add(Field('link', feed.channel_link, Field.Store.YES, Field.Index.UN_TOKENIZED))
if feed.title:
doc.add(Field('title', feed.title, Field.Store.YES, Field.Index.TOKENIZED))
if feed.subtitle:
doc.add(Field('subtitle', feed.subtitle, Field.Store.YES, Field.Index.TOKENIZED))
return doc
def create_entry_documents(self, feed):
docs = []
for entry in feed.get_entries():
try:
doc = Document()
id = '%s:%s' % (feed.xml_url, entry.get('id', None))
doc.add(Field('id', id, Field.Store.YES, Field.Index.UN_TOKENIZED))
doc.add(Field('feed_url', feed.xml_url, Field.Store.YES, Field.Index.UN_TOKENIZED))
if entry.get('title', None):
doc.add(Field('title', entry['title'], Field.Store.YES, Field.Index.TOKENIZED))
if entry.get('summary', None):
doc.add(Field('summary', entry['summary'], Field.Store.YES, Field.Index.TOKENIZED))
if entry.get('link', None):
doc.add(Field('link', entry['link'], Field.Store.YES, Field.Index.UN_TOKENIZED))
try:
updated = parser.parse(entry.get('updated', None), ignoretz=True)
doc.add(Field('updated', updated.isoformat(' '), Field.Store.YES, Field.Index.NO))
except:{}
try:
doc.add(Field('pickle', pickle.dumps(entry), Field.Store.YES, Field.Index.NO))
except Exception, e:
logging.error('Unable to store pickled entry: %s' % e)
docs.append(doc)
except Exception, e:
logging.error(e)
return docs
def index_feed(self, feed):
""" Indexes the given feed """
logging.debug('Attempting to index feed: %s' % feed.xml_url)
self.index_feeds([feed,])
def index_feeds(self, feeds=None):
if not feeds:
return
feed_docs = []
entry_docs = []
for feed in feeds:
doc = self.create_feed_document(feed)
if doc:
feed_docs.append(doc)
docs = self.create_entry_documents(feed)
if docs:
entry_docs.extend(docs)
if len(feed_docs):
self.delete_existing_feed_docs(feed_docs)
for doc in feed_docs:
try:
modifier = self.feedSupport.getIndexModifier()
modifier.addDocument(doc)
logging.debug('Indexed Feed: %s' % doc.get('url'))
modifier.flush()
except Exception, e:
logging.error(e)
if len(entry_docs):
self.delete_existing_entry_docs(entry_docs)
for doc in entry_docs:
try:
modifier = self.entrySupport.getIndexModifier()
modifier.addDocument(doc)
logging.debug('Indexed Feed Entry: %s' % doc.get('title') or doc.get('id'))
modifier.flush()
except Exception, e:
logging.error(e)
class FeedSearcher:
    """Runs Lucene queries against the feed and entry indexes."""

    def __init__(self, store_dir=STORE_DIR, analyzer=None):
        self.feedSupport = IndexSupport(os.path.join(store_dir, 'feeds'), analyzer=analyzer)
        self.entrySupport = IndexSupport(os.path.join(store_dir, 'entries'), analyzer=analyzer)

    def search(self, query, fields=FEED_ENTRY_FIELDS, analyzer=None, support=None):
        """Parse `query` over `fields` and return a HitHolder sorted by
        relevance, or None for a blank query / empty field list."""
        if not query or len(query.strip()) == 0 or len(fields) == 0:
            return None
        if not support:
            support = self.entrySupport
        analyzer = analyzer or support.analyzer
        # single-field queries get the simpler parser
        if len(fields) == 1:
            query_parser = QueryParser(fields[0], analyzer)
        else:
            query_parser = MultiFieldQueryParser(fields, analyzer)
        parsed = query_parser.parse(query)
        searcher = support.getIndexSearcher()
        return HitHolder(searcher.search(parsed, Sort.RELEVANCE), searcher)

    def search_entries(self, query, fields=FEED_ENTRY_FIELDS, analyzer=None):
        """Search the entry index."""
        return self.search(query, fields=fields, analyzer=analyzer, support=self.entrySupport)

    def search_feeds(self, query, fields=FEED_FIELDS, analyzer=None):
        """Search the feed index."""
        return self.search(query, fields=fields, analyzer=analyzer, support=self.feedSupport)
class HitHolder:
    """Wraps Lucene Hits plus the searcher that produced them, exposing a
    list-like interface that yields plain {field: value} dicts."""

    def __init__(self, hits, searcher):
        self.hits = hits
        self.searcher = searcher

    def doc(self, index):
        """ raw Lucene Document at `index` """
        return self.hits.doc(index)

    def doc_dict(self, index):
        """ Document at `index` converted to a {field_name: unicode value} dict """
        holder = {}
        doc = self.doc(index)
        fields = doc.fields()
        while fields.hasMoreElements():
            field = fields.nextElement()
            holder[field.name()] = unicode(field.stringValue())
        return holder

    def __len__(self):
        return self.hits.length()

    def close(self):
        try:
            logging.debug('Closing Searcher handle...')
            self.searcher.close()
        except Exception:
            # was `except:{}` - closing is best effort
            pass

    def __getitem__(self, index):
        if isinstance(index, slice):
            # tolerate open-ended slices (h[:5], h[3:]) whose bounds are None;
            # the original passed None to max()/min() and crashed
            start = index.start if index.start is not None else 0
            stop = index.stop if index.stop is not None else self.hits.length()
            return [self.doc_dict(i)
                    for i in range(max(start, 0), min(stop, self.hits.length()))]
        else:
            return self.doc_dict(index)

    def __iter__(self):
        return iter([self.doc_dict(i) for i in range(self.hits.length())])
| Python |
from westom.feednut.cache import Cache, ExpiredError
from westom.settings import FEEDS_DIR
import os, cPickle
class FeedCache(Cache):
def get_full_path(self, key):
""" returns the full path to the data file """
return os.path.abspath(os.path.join(FEEDS_DIR, key))
def get_feed_data(self, key):
""" Unpickles the data and returns the stored object """
# print 'HAD TO FETCH DATA FROM DISK!!!!!'
#TODO read-lock the file?
obj = None
try:
file = open(self.get_full_path(key), 'r')
obj = cPickle.load(file)
except Exception, e:
print e
else:
file.close()
return obj
def __getitem__(self, key):
try:
item = Cache.__getitem__(self, key)
except KeyError, e:
#need to try to fetch it and update the cache
try:
item = self.get_feed_data(key)
except:
raise KeyError
self[key] = item
return item
# Module-level singleton cache of parsed feed data; entries expire after 5 minutes.
FEED_CACHE = FeedCache(age='5m')
| Python |
import string, types, datetime
from django.db.models.base import ModelBase
from django.db import models
from westom.feednut.models import *
from westom.feednut.libs import json
from django.db.models.query import QuerySet
class DjangoJsonWriter(json.JsonWriter):
    """JsonWriter that also knows how to serialize Django QuerySets, model
    instances (all concrete fields except `id`) and datetimes (ISO-8601)."""

    def write(self, obj, escaped_forward_slash=False):
        """Serialize `obj` and return the JSON string."""
        self._escaped_forward_slash = escaped_forward_slash
        self._results = []
        self._write(obj)
        return "".join(self._results)

    def _write(self, obj):
        if isinstance(obj, QuerySet):
            # list(obj) instead of the redundant generator wrapper
            self._write(list(obj))
        elif isinstance(obj, models.Model):
            # serialize every concrete field except the primary key
            field_values = {}
            for field in obj._meta.fields:
                if field.attname != 'id':
                    # getattr() instead of obj.__getattribute__()
                    field_values[field.attname] = getattr(obj, field.attname)
            self._write(field_values)
        elif isinstance(obj, datetime.datetime):
            self._write(obj.isoformat())
        else:
            json.JsonWriter._write(self, obj)
def write(obj, escaped_forward_slash=False):
    """Module-level convenience wrapper: serialize `obj` with a fresh
    DjangoJsonWriter and return the JSON string."""
    writer = DjangoJsonWriter()
    return writer.write(obj, escaped_forward_slash)
| Python |
from HTMLParser import HTMLParser
from westom.feednut.libs.openanything import fetch
MOZILLA_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.4) Gecko/20060508 Firefox/1.5.0.4'
class FeedFinder(HTMLParser):
    """Parses an HTML page and collects the URLs of RSS/Atom feeds advertised
    via <link type="application/rss+xml"> / <link type="application/atom+xml">."""

    # MIME types recognized as feed links
    FEED_TYPES = ('application/rss+xml', 'application/atom+xml')

    def reset(self):
        self.links = []
        self.result = None  # dict containing data, url, status after feed_url()
        HTMLParser.reset(self)

    def feed_url(self, url):
        """ feed it a URL to download data and parse """
        result = fetch(url, agent=MOZILLA_AGENT)
        self.result = result
        return self.feed(result['data'])

    def handle_starttag(self, tag, attrs):
        if tag != 'link':
            return
        d_attrs = self._attrs_to_dict(attrs)
        # .get() + membership test instead of has_key() and a verbose or-chain
        if d_attrs.get('type') in self.FEED_TYPES and 'href' in d_attrs:
            self.links.append(d_attrs['href'])

    def _attrs_to_dict(self, attrs):
        """ just converts the html parser's attribute array of tuples to a dict """
        return dict(attrs)
if __name__ == '__main__':
    # Ad-hoc smoke test: scrape msnbc.com and dump what was found.
    p = FeedFinder()
    # p.feed(file('tom.html').read())
    p.feed_url('http://www.msnbc.com')
    print p.links
    print p.result['status'], p.result['url']
    print p.result['data']
    # p.reset()
    # p.feed_url('http://www.slashdot.org')
    # print p.links
    # print p.url
| Python |
#!/usr/bin/env python
'''
- requires Python 2.4, cElementTree, elementtree, and elementtidy.
- also uses Mark Pilgrim's openanything lib
- uses PyXML
see http://effbot.org/downloads/ to download the element* packages
Example usage:
url = 'http://www.rssgov.com/rssparsers.html'
dom = HtmlDom(url)
print dom.evaluate('/html:html/html:head/html:title/text()')
dom = HtmlDom('<html><head></head><body><div class="test">some text</div></body></html>')
print dom.evaluate('//html:div/text()')
'''
from westom.feednut.libs.openanything import fetch
from elementtidy import TidyHTMLTreeBuilder as tidy
import cElementTree as _etree
from xml.dom.ext.reader import PyExpat
import sys, xml.xpath
MOZILLA_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.4) Gecko/20060508 Firefox/1.5.0.4'
XHTML_NAMESPACE = u'http://www.w3.org/1999/xhtml'
class HtmlDom:
    """Loads an HTML document (from a local file path or a URL), tidies it
    into XHTML via elementtidy, and exposes XPath queries over the DOM.

    XPath expressions use the `html:` prefix bound to the XHTML namespace.
    """

    def __init__(self, url):
        # `url` may be a local file path ...
        try:
            f = file(url)
            data = f.read()
            f.close()
        except IOError, e:
            # ... or a remote URL, fetched with a browser user agent
            try:
                result = fetch(url, agent=MOZILLA_AGENT)
                data = result['data']
            except:
                raise IOError, 'invalid URL'
        # create parser: tidy the (possibly malformed) HTML into XHTML
        parser = tidy.TreeBuilder()
        parser.feed(data)
        xmlText = _etree.tostring(parser.close())
        #create the DOM
        reader = PyExpat.Reader()
        self.dom = reader.fromString(xmlText)
        self.nss = {u'html': XHTML_NAMESPACE}
        self.context = xml.xpath.Context.Context(self.dom, processorNss=self.nss)

    def evaluate(self, expression, node=None):
        ''' evaluates the given xpath expression and returns the nodes '''
        if not node:
            return xml.xpath.Evaluate(expression, context=self.context)
        else:
            # evaluate relative to the given node instead of the document root
            cxt = xml.xpath.Context.Context(node, processorNss=self.nss)
            return xml.xpath.Evaluate(expression, context=cxt)
def escapeHTML(s):
    ''' adapted from MochiKit: escape &, ", < and > for HTML output '''
    # '&' must be escaped first so later entities aren't double-escaped
    for raw, entity in (('&', '&amp;'), ('"', '&quot;'),
                        ('<', '&lt;'), ('>', '&gt;')):
        s = s.replace(raw, entity)
    return s
def toHTML(dom):
    ''' adapted from MochiKit: render a DOM node as an HTML string '''
    fragments = emitHTML(dom)
    return ''.join(fragments)
def emitHTML(dom):
    ''' adapted from MochiKit: serialize a DOM node into a list of HTML
    fragments, walking the tree iteratively '''
    lst = []
    # queue is the call stack, we're doing this non-recursively
    queue = [dom]
    while queue:
        dom = queue.pop()
        if not hasattr(dom, 'nodeType'):
            # pre-rendered text (e.g. a close tag pushed below)
            lst.append(dom)
        elif dom.nodeType == 1:  # element node
            lst.append('<' + dom.nodeName.lower())
            attributes = []
            for i in range(dom.attributes.length):
                attr = dom.attributes.item(i)
                attributes.append(' %s="%s"' % (attr.name, escapeHTML(attr.value)))
            attributes.sort()
            lst.extend(attributes)
            if dom.hasChildNodes():
                lst.append(">")
                # queue is the FILO call stack, so the close tag goes on first
                queue.append("</" + dom.nodeName.lower() + ">")
                # reversed copy: the original called childNodes.reverse(),
                # which mutated the document's child list as a side effect
                queue.extend(reversed(list(dom.childNodes)))
            else:
                lst.append('/>')
        elif dom.nodeType == 3:  # text node
            lst.append(escapeHTML(dom.nodeValue))
    return lst
if __name__ == '__main__':
    # Smoke test: pretty-print the tidied DOM of each path/URL argument.
    for arg in sys.argv[1:]:
        htmldom = HtmlDom(arg)
        from xml.dom.ext import PrettyPrint
        PrettyPrint(htmldom.dom)
| Python |
from westom.feednut.libs.ScrapeNFeed import ScrapedFeed
from westom.feednut.libs.PyRSS2Gen import RSSItem, Guid
class SimpleFeed(ScrapedFeed):
    """A ScrapedFeed fed from a pre-built list of RSSItems rather than a
    scraped HTML page.

    NOTE(review): assumes ScrapedFeed.__init__ accepts (title, url,
    description, rssFile, pickleFile, ...) positionally - confirm against
    ScrapeNFeed.
    """
    def __init__(self, title, url, description, rssItems,
                 rssFile=None, pickleFile=None, maxItems=20, **kwargs):
        ScrapedFeed.__init__(self,
                             title,
                             url,
                             description,
                             rssFile,
                             pickleFile,
                             maxItems=maxItems,
                             generator=None,
                             docs=None,
                             **kwargs)
        # items supplied by the caller; emitted verbatim by HTML2RSS()
        self.rssItems = rssItems

    def HTML2RSS(self, headers, body):
        # headers/body are ignored: the items were supplied up front
        self.addRSSItems(self.rssItems)

    def refresh(self):
        ''' override the parent implementation '''
        # no download step needed - just (re)add the canned items
        self.HTML2RSS(None, None)
        #if self.rssFile:
        # self.writeRSS()
        #if self.pickleFile:
        # self.pickle()
def main():
    """Demo: build SimpleFeeds by XPath-scraping two example sites and print
    the resulting RSS XML."""
    from westom.feednut.utils.HtmlDom import HtmlDom, toHTML
    import sys
    #another example... python job board
    job_board = 'http://www.python.org/community/jobs/'
    dom = HtmlDom(job_board)
    rssItems = []
    title = dom.evaluate("/html:html/html:head/html:title/text()")[0].nodeValue
    description = 'Feed generated for %s by FeedNut' % job_board
    # each job posting is a <div class="section" id=...> after the first two
    job_ops = dom.evaluate("//html:div[@class='section'][@id][position()>2]")
    for i, job_op in zip(range(len(job_ops)), job_ops):
        # a posting without an extractable title is skipped entirely
        try: itemTitle = dom.evaluate("html:h2/html:a[@class='reference']/text()", node=job_op)[0].nodeValue
        except: continue
        try: link = dom.evaluate("html:h2/html:a[@class='reference']/@href", node=job_op)[0].nodeValue
        except: link = None
        try: itemDesc = toHTML(job_op).replace('html:', '')
        except: itemDesc = None
        item = RSSItem(title=itemTitle, description=itemDesc, link=link, guid=Guid(link and ('%s#%s' % (link, i)) or itemTitle))
        rssItems.append(item)
    feed = SimpleFeed(title, job_board, description, rssItems)
    feed.refresh()
    print feed.to_xml()
    #one example... top40 dance hits
    top40 = 'http://www.bbc.co.uk/radio1/chart/top40.shtml'
    dom = HtmlDom(top40)
    rssItems = []
    title = dom.evaluate("/html:html/html:head/html:title/text()")[0].nodeValue
    description = 'Feed generated for %s by FeedNut' % top40
    songs = dom.evaluate("//html:td[@class='col4']")
    for song in songs:
        # artist and track are both required; link and image are optional
        try: artist = dom.evaluate("html:h4/text()", node=song)[0].nodeValue
        except: continue
        try: track = dom.evaluate("html:h5/text()", node=song)[0].nodeValue
        except: continue
        try: link = dom.evaluate("html:a/@href", node=song)[0].nodeValue
        except: link = None
        try: img = dom.evaluate("html:img/@src", node=song)[0].nodeValue
        except: img = None
        itemTitle = '%s - %s' % (artist, track)
        itemDesc = '<p>%s</p>%s' % (itemTitle, img and ('<img src="%s"/>' % img) or '')
        item = RSSItem(title=itemTitle, description=itemDesc, link=link, guid=Guid(link or itemTitle))
        rssItems.append(item)
    feed = SimpleFeed(title, top40, description, rssItems)
    feed.refresh()
    print feed.to_xml()

if __name__ == '__main__':
    main()
| Python |
from string import letters, digits
import re
VALID_CHARS = letters + digits

def clean_string(s):
    """ cleans a string, replacing any non-letter/digit character with '_'
    (the original docstring said "removing", which was inaccurate) """
    # conditional expression instead of the fragile `and/or` trick
    return ''.join(c if c in VALID_CHARS else '_' for c in s)
class Singleton(type):
    """Metaclass making every class that uses it a singleton: the first call
    constructs the instance, later calls return the same object (stored in
    the class attribute ``instance``)."""

    def __init__(cls, name, bases, namespace):
        super(Singleton, cls).__init__(name, bases, namespace)
        cls.instance = None

    def __call__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
#class Singleton(type):
# """ Base class for creating a singleton """
# def __init__(self, *args):
# type.__init__(self, *args)
# self._instances = {}
#
# def __call__(self, *args):
# if not args in self._instances:
# self._instances[args] = type.__call__(self, *args)
# return self._instances[args]
def split_seq(seq, p):
    """ Splits a sequence into `p` contiguous parts of as equal length as
    possible; earlier parts absorb the remainder, so lengths differ by at
    most one. Returns a list of `p` slices (some empty when p > len(seq)). """
    newseq = []
    # explicit floor division: plain `/` yields a float under Python 3 (or
    # `from __future__ import division`) and breaks the slice indices below
    n = len(seq) // p   # min items per subsequence
    r = len(seq) % p    # remaindered items
    b, e = 0, n + min(1, r)  # first split
    for _ in range(p):
        newseq.append(seq[b:e])
        r = max(0, r - 1)             # use up remainders
        b, e = e, e + n + min(1, r)   # min(1,r) is always 0 or 1
    return newseq
def split_every_other(seq):
    """ Returns an array of size two, composed of arrays containing the items, interleaved every other """
    evens, odds = [], []
    # enumerate instead of zip(range(len(seq)), seq)
    for index, item in enumerate(seq):
        if index % 2:
            odds.append(item)
        else:
            evens.append(item)
    return [evens, odds]
def clean_string(s):
    """ cleans a string, replacing any non-letter/digit character with '_'

    NOTE(review): this redefines (and shadows) the clean_string declared
    earlier in this module; the duplicates should be consolidated.
    """
    valid_chars = letters + digits
    # join over a generator instead of quadratic string concatenation
    return ''.join(c if c in valid_chars else '_' for c in s)
#copied from the django dev release
STRIP_RE = re.compile(r'>\s+<')

def strip_spaces_between_tags(value):
    """Returns the given HTML with whitespace runs between adjacent tags
    collapsed to a single space."""
    normalized = STRIP_RE.sub('> <', value)
    return normalized
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    instead of `obj['foo']`. Create one by doing `storage({'a':1})`.
    Copied from web.py
    """
    def __getattr__(self, key):
        # EAFP: one lookup instead of has_key() followed by indexing
        try:
            return self[key]
        except KeyError:
            # call form instead of the Python-2-only `raise Exc, arg` statement
            raise AttributeError(repr(key))

    def __setattr__(self, key, value):
        self[key] = value

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'

# conventional lowercase alias used as a constructor
storage = Storage
"""
Searchp utility functions
"""
from westom.feednut.models import *
from westom.feednut.libs import feedparser
import time
from datetime import datetime
from westom.feednut.utils import misc as misc_utils
from westom.feednut.libs import yahoo_terms
from westom.feednut.libs.BeautifulSoup import BeautifulSoup
from westom.settings import URL_HOST
import re, random
import urllib2
import logging
#unit test
#text="<html><head><link rel='alternate' type='application/rss+xml' href='http://example.com/feed.rss'></link></head></html>"
#links = get_html_alternate_links(text=text, types=['application/rss+xml'])
#assert(links[0] == 'http://example.com/feed.rss')
def get_html_alternate_links(html=None, types=None):
    """
    Returns the list of feed URLs advertised by <link rel="alternate"> tags
    in `html`, restricted to the given MIME `types` (RSS and Atom by
    default). Returns [] on any parse error.
    """
    # None default instead of a mutable [] default
    if not types:
        types = ['application/rss+xml', 'application/atom+xml']
    links = []
    try:
        # parse once, not once per MIME type as the original did
        soup = BeautifulSoup(html)
        for mime_type in types:  # renamed from `type` (shadowed builtin)
            for alternate in soup.findAll('link', attrs={'rel': "alternate", 'type': mime_type}):
                links.append(alternate['href'])
        return links
    except Exception:
        return []
def scrape_alternates(html=None, types=[]):
    """
    Returns a list of xml_url links

    NOTE(review): this definition is immediately shadowed by an identical
    re-definition of scrape_alternates below; the duplicate should be removed.
    """
    links = []
    if not types:
        types = ['application/rss+xml', 'application/atom+xml']
    try:
        # NOTE(review): re-parses the whole document once per MIME type
        for type in types:
            soup = BeautifulSoup(html)
            for alternate in soup.findAll('link', attrs={'rel': "alternate", 'type': type}):
                links.append(alternate['href'])
        return links
    except Exception:
        return []
def scrape_alternates(html=None, types=None):
    """
    Returns the feed URLs advertised by <link rel="alternate"> tags in
    `html`, restricted to the given MIME `types` (RSS and Atom by default).
    Returns [] on any parse error.
    """
    # None default instead of a mutable [] default
    if not types:
        types = ['application/rss+xml', 'application/atom+xml']
    links = []
    try:
        # parse once, not once per MIME type as the original did
        soup = BeautifulSoup(html)
        for mime_type in types:  # renamed from `type` (shadowed builtin)
            for alternate in soup.findAll('link', attrs={'rel': "alternate", 'type': mime_type}):
                links.append(alternate['href'])
        return links
    except Exception:
        return []
def scrape_anchors(html=None, extensions=('.rss', '.xml', '.atom')):
    """
    Returns the hrefs of <a> tags in `html` whose target ends with one of
    `extensions` (feed-looking file suffixes). Parse errors are re-raised.
    """
    links = []
    try:
        soup = BeautifulSoup(html)
        # honor the `extensions` parameter: the original ignored it and
        # always matched against the hard-coded default list
        suffixes = tuple(extensions)
        for anchor in soup.findAll('a'):
            if anchor['href'].endswith(suffixes):
                links.append(anchor['href'])
        return links
    except Exception:
        # bare `raise` preserves the original traceback (`raise e` did not)
        # return []
        raise
#def scrape_alternates(url=None):
# page = urllib2.urlopen(url)
# return get_html_alternate_links(page.read())
def scrape_for_feeds(url=None):
html = urllib2.urlopen(url).read()
url_host = urllib2.Request(url).get_host()
try:
links = scrape_alternates(html)
links.extend(scrape_anchors(html))
for link in links:
try:
if link.startswith('/'):
link = url_host + link
except:
pass
return links
except Exception, e:
#return []
raise e
def generate_feed(context=None, seed=None):
    """Split `context` into tokens using the strings in `seed` as ordered
    delimiters: for each seedling, the text before its occurrence becomes a
    token and scanning resumes after it. Returns the token list."""
    logging.debug("seed: %s" % seed)
    tokens = []
    for seedling in seed:
        #logging.debug("seedling: %s" % seedling)
        #logging.debug("context: %s" % context)
        # NOTE(review): find() returns -1 when `seedling` is absent, which
        # silently truncates `token` by one character and corrupts the rest
        # of the scan - confirm seedlings are guaranteed to be present.
        index = context.find(seedling)
        #logging.debug("index: %s" % index)
        token = context[:index]
        #logging.debug("token: %s" % token)
        tokens.append(token)
        context = context[index+len(seedling):]
    logging.debug("final tokens: %s" % tokens)
    for token in tokens:
        # NOTE(review): leftover debug output to stdout
        print token
    return tokens
#return a list of Feed objects
#query('http://cnn.com')
#query('http://del.icio.us/rss/wbornor/')
#query('health')
#query('health cnn')
#query('tags:entertainment')
#query('title:health cnn')
#query('description:News for geeks')
def query(query=None):
    """Search for feeds matching `query` (a page URL, a feed URL, or plain
    text).

    NOTE(review): unimplemented stub; the intended algorithm is sketched in
    the comments below.
    """
    #
    #search first from our database
    # if its a normal url, search through all the channel links
    #
    # if its a xml_url, search through all the xml_urls
    #
    # if its plain text, search through the titles, descriptions, and tags
    #
    #if not in our database, and its a url, pull it from the actual page
    # if its an xml_url pull the actual feed into the database
    #
    # if its a normal url use get_feed_urls_from_html() and pull those feeds into the database
    pass
| Python |
# -*- coding: utf-8 -*-
from django.db.models.base import ModelBase
from django.db import models
import types
def _filter_bases(bases, filter_key):
""" Remove all classes descendants of ``filter_key`` from ``bases`` """
new_bases = tuple([base for base in bases \
if ((not (base is filter_key)) and (not issubclass(base, filter_key)))])
# ensure that we don't end up with an orphan class - it must be
# parented by at least models.Model
if len(new_bases) == 0: new_bases = (models.Model,)
return new_bases
class Hooks(object):
    """Module-level registry of save/delete callbacks declared by
    ModelMiddleware subclasses, keyed by the declaring class's name.

    The dicts are class attributes and therefore intentionally shared.
    """
    _pre_saves = {}
    _post_saves = {}
    _pre_deletes = {}
    _post_deletes = {}
class MetaModelMiddleware(ModelBase):
    """Metaclass that weaves the pre/post save and delete hooks declared on
    ModelMiddleware subclasses into the models that inherit from them, while
    hiding the middleware classes from Django's model inheritance."""

    def __new__(cls, name, bases, attrs):
        # whether base classes should be filtered
        cls.hide_bases = False
        # only filter bases if this wasn't invoked by the ModelMiddleware
        # class, which is a super class for all custom middleware, and the
        # one we are using as a filter key
        if not (name == 'ModelMiddleware'):
            if not (ModelMiddleware in bases):
                cls.hide_bases = True
        if cls.hide_bases:
            # replace the original bases with filtered ones to fool Django's inheritance
            bases = _filter_bases(bases, ModelMiddleware)
        # set the middleware options under Klass._middle
        if 'Middle' in attrs:
            midopts = attrs['Middle']
            assert type(midopts) == types.ClassType, "Middle attribute of %s model must be a class, not a %s object" % (name, type(midopts))
            opts = {}
            opts.update([(k, v) for k, v in midopts.__dict__.items() if not k.startswith('_')])
            attrs["_middle"] = opts
            attrs.pop('Middle')
        return ModelBase.__new__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        # provide a wrapper func for save()
        def new_save(func):
            def wrapper(*args, **kwargs):
                if hasattr(cls, 'pre_saves'):
                    [pre(args[0]) for pre in cls.pre_saves]
                func(*args, **kwargs)
                if hasattr(cls, 'post_saves'):
                    [post(args[0]) for post in cls.post_saves]
            return wrapper
        # provide a wrapper func for delete()
        def new_delete(func):
            def wrapper(*args, **kwargs):
                if hasattr(cls, 'pre_deletes'):
                    [pre(args[0]) for pre in cls.pre_deletes]
                func(*args, **kwargs)
                if hasattr(cls, 'post_deletes'):
                    [post(args[0]) for post in cls.post_deletes]
            return wrapper
        # if this is a descendant of ModelMiddleware, but not ModelMiddleware itself
        if name != 'ModelMiddleware':
            # if this class inherits directly from ModelMiddleware then save its hooks
            if ModelMiddleware in bases:
                if 'pre_save' in attrs:
                    Hooks._pre_saves[name] = attrs['pre_save']
                if 'post_save' in attrs:
                    Hooks._post_saves[name] = attrs['post_save']
                if 'pre_delete' in attrs:
                    Hooks._pre_deletes[name] = attrs['pre_delete']
                if 'post_delete' in attrs:
                    Hooks._post_deletes[name] = attrs['post_delete']
            # if this is NOT a direct descendant of ModelMiddleware - not a holder of callbacks
            if ModelMiddleware not in bases:
                orig_save = cls.save
                orig_delete = cls.delete
                for base in bases:
                    base_pre_save = Hooks._pre_saves.get(base.__name__, False)
                    if base_pre_save:
                        if not hasattr(cls, 'pre_saves'):
                            cls.pre_saves = []
                        cls.pre_saves.append(base_pre_save)
                    base_post_save = Hooks._post_saves.get(base.__name__, False)
                    if base_post_save:
                        if not hasattr(cls, 'post_saves'):
                            cls.post_saves = []
                        # was `base_post_saves` - a NameError whenever a base
                        # declared a post_save hook
                        cls.post_saves.append(base_post_save)
                    base_pre_delete = Hooks._pre_deletes.get(base.__name__, False)
                    if base_pre_delete:
                        if not hasattr(cls, 'pre_deletes'):
                            cls.pre_deletes = []
                        # was `base_pre_deletes` - NameError
                        cls.pre_deletes.append(base_pre_delete)
                    base_post_delete = Hooks._post_deletes.get(base.__name__, False)
                    if base_post_delete:
                        if not hasattr(cls, 'post_deletes'):
                            cls.post_deletes = []
                        # was `base_post_deletes` - NameError
                        cls.post_deletes.append(base_post_delete)
                cls.save = new_save(orig_save)
                cls.delete = new_delete(orig_delete)
        # replace original bases with filtered ones
        bases = _filter_bases(bases, ModelMiddleware)
        new_class = super(ModelBase, cls).__init__(name, bases, attrs)
        return new_class
class ModelMiddleware(models.Model):
    """
    Custom model middleware components should subclass this and never
    use the MetaModelMiddleware metaclass directly.
    """
    # Python 2 metaclass declaration; all hook registration happens there.
    __metaclass__ = MetaModelMiddleware
class ReSTMiddleware(ModelMiddleware):
    """Middleware that renders ReST source fields to HTML before save().

    Options are read from the model's ``_middle["ReST"]`` sequence of dicts;
    each option names the source ``field`` and the ``save_body``/``save_toc``
    attributes that receive the rendered output.
    """
    def pre_save(self):
        try:
            # individual options are saved in a dict on the model class
            opts = self.__class__._middle["ReST"]
        except (AttributeError, KeyError):
            return  # no ReST options configured for this model; nothing to do
        # parse for as many fields as we have options for
        for opt in opts:
            # lets be nice to ourselves and provide a default value for the initial header level
            if "init_header" not in opt:
                opt["init_header"] = 1
            try:
                cont = getattr(self, opt["field"]).decode("utf_8")
                parts = build_document(cont, initial_header_level=opt["init_header"])
                setattr(self, opt["save_body"], parts["html_body"].encode('utf_8'))
                setattr(self, opt["save_toc"], parts["toc"].encode('utf_8'))
            except Exception:
                # swallow render errors so a bad field never blocks save();
                # NOTE(review): silent failure hides problems — consider logging
                pass
from datetime import datetime
class TimestampMiddleware(ModelMiddleware):
    """
    This class can record a timestamp (down to one second precision) into any fields you specify.
    There are two types of timestamps: 'always' and 'once'. 'always' means that record must be
    made on every save(), while 'once' fields will be timestamped once on the first save() of this
    object.
    A default set of options (used if none are provided by the model) is provided, which presume
    the existance of 'pub_date' and 'last_modified' fields. The 'pub_date' field is of type "once",
    and 'last_modified' is of type "always". This lets you timestamp the object's creation and
    modification times.
    Example options (also the default ones):
    class Middle:
        Timestamp = ({'field' : 'pub_date', 'type' : 'once'},
                     {'field' : 'last_modified', 'type' : 'always'})
    """
    def pre_save(self):
        try:
            opts = self.__class__._middle["Timestamp"]
        except (AttributeError, KeyError):
            # model supplied no options: stamp creation once, modification always
            opts = ({'field' : 'pub_date', 'type' : 'once'},
                    {'field' : 'last_modified', 'type' : 'always'})
        for opt in opts:
            if 'type' not in opt:
                opt['type'] = 'always'
            d = datetime.now()
            # BUG FIX: the original omitted d.second, truncating to whole
            # minutes despite the documented one-second precision
            pdate = datetime(d.year, d.month, d.day, d.hour, d.minute, d.second)
            # if this is a "set once" type of field, then we check whether
            # it's been filled in and if not - do so
            if opt['type'] == 'once':
                if getattr(self, opt['field']) is None:
                    setattr(self, opt['field'], pdate)
            elif opt['type'] == 'always':
                setattr(self, opt['field'], pdate)
| Python |
import sys, os, re, logging, time, shutil, threading
import cPickle as pickle
from string import letters, digits
from datetime import datetime
from dateutil import parser
import PyLucene as lucene
from django.db.models import Q
from westom.feednut.libs import feedparser
from westom.feednut.libs import uuid
from westom.feednut.models import *
from westom.settings import DOCUMENT_ROOT
from westom.feednut.libs import yahoo_terms
from westom.feednut.utils.misc import clean_string, Singleton
def salvage_feed(feed):
    """
    Pass in a feedparser result here for feeds that had problems.

    A feed is salvageable when it has a titled channel and at least one
    entry: in that case the bozo_exception is replaced with its string
    form (the bozo flag is kept) and the feed is returned.
    If not salvageable, return None.
    """
    ex = feed.get('bozo_exception', None)
    # removed unused `bozo` local from the original
    if 'feed' in feed and 'title' in feed['feed'] and feed.get('entries', []):
        if ex:
            # exception objects don't serialize well; keep only the message
            feed['bozo_exception'] = str(ex)
        return feed
    return None
class FeedDownloader:
    """
    Downloads feeds with feedparser. A disk cache can be enabled per call
    (intended for testing only).
    """
    CACHE_DIR = os.path.join(DOCUMENT_ROOT, '../feedcache')

    def __init__(self):
        # in-memory cache, populated only on the use_cache code path
        self._feed_cache = {}

    def get_feeds(self, urls, **kwargs):
        """Fetch several urls; returns {url: feed} for the ones that worked."""
        feeds = {}
        for url in urls:
            feed = self.get_feed(url, **kwargs)
            if feed:
                feeds[url] = feed
        return feeds

    def get_cached_feeds(self):
        return self._feed_cache

    @staticmethod
    def fetch_feed(url, last_modified=None, etag=None):
        """ Does the actual fetching """
        logging.debug('Fetching %s' % url)
        result = feedparser.parse(url, etag=etag, modified=last_modified, agent=None, referrer=None, handlers=[])
        if result and result.get('bozo', 0) == 1:
            # broken feed: try to keep it if it still has usable content
            result = salvage_feed(result)
        if not result:
            raise Exception('Invalid result from feedparser')
        return result

    def get_feed(self, url, last_modified=None, etag=None, use_cache=False):
        """
        Fetches the feed by url, and returns the FeedParser result object.
        If the feed has problems, we try to salvage it if we can. Otherwise,
        None is returned.
        @param last_modified specify a last_modified date
        @param etag specify an etag
        @param use_cache if this is True, will use the disk cache (only use for testing purposes)
        """
        if not use_cache:
            try:
                return self.fetch_feed(url, last_modified, etag)
            except Exception as e:
                logging.error(e)
                return None
        # cached path (testing only)
        if url in self._feed_cache:
            # BUG FIX: original returned self.feeds[url] — an attribute that
            # does not exist — raising AttributeError on every cache hit
            return self._feed_cache[url]
        save_path = os.path.join(self.CACHE_DIR, clean_string(url))
        cache_dir = os.path.dirname(save_path)
        if not os.path.exists(cache_dir):
            try:
                os.makedirs(cache_dir)
            except OSError:
                pass  # best effort; the open() below will report real failures
        try:
            if os.path.exists(save_path):
                f = open(save_path, 'rb')
                result = pickle.load(f)
                f.close()
            else:
                result = self.fetch_feed(url, last_modified, etag)
                # only cache full 200 responses (not 304 etc.)
                if result.get('status', 200) == 200:
                    f = open(save_path, 'wb')
                    pickle.dump(result, f)
                    f.close()
        except Exception as e:
            logging.error('Problem with %s, %s' % (url, e))
            # drop any partially written cache file
            try:
                f.close()
                os.unlink(save_path)
            except Exception:
                pass
            return None
        else:
            self._feed_cache[url] = result
            return result
def encode(val, encoding):
    """Encode *val* with the given codec; falsy values pass through unchanged."""
    return val.encode(encoding) if val else val
def parse_date(date_str):
    """Parse *date_str* into a datetime (timezone discarded); None on failure."""
    if date_str:
        try:
            # BUG FIX: the original parsed `entry.get('updated', None)` — a
            # NameError (no `entry` in scope) that the bare except swallowed,
            # so this function silently returned None for every input
            return parser.parse(date_str, ignoretz=True)
        except (ValueError, TypeError, OverflowError):
            pass
    return None
def make_entryid(link=None):
    """Derive a unique entry id: the link (truncated to 255 chars) when
    available, otherwise a random UUID string.

    NOTE(review): assumes the project's libs.uuid module exposes uuid4 —
    confirm, since libs/uuid.py only shows a plain uuid() function.
    """
    if not link:
        return str(uuid.uuid4())
    return link[:255]
def store_feed(feed, url=None, update=True):
    """
    This stores a FeedParser feed object into the database
    Also, stores the feed entries in the database
    If update is set to True, will update existing feeds and entries
    Returns a database Feed object (None when the parse result is unusable)
    """
    #get the encoding
    encoding = feed.get('encoding', 'utf-8')
    # flatten the nested feedparser structure into Feed model fields;
    # each `x and y` guard yields None when the parent section is missing
    kwargs = {
        'xml_url' : url or feed.get('href', None),
        'channel_link' : feed.get('feed', None) and feed['feed'].get('link', None),
        'title' : feed.get('feed', None) and encode(feed['feed'].get('title', None), encoding),
        'subtitle' : feed.get('feed', None) and encode(feed['feed'].get('subtitle', None), encoding),
        'icon_url' : feed.get('feed', None) and feed['feed'].get('image', None) and feed['feed']['image'].get('href', None),
        'etag' : feed.get('headers', None) and encode(feed['headers'].get('etag', None), encoding),
        'updated_date' : feed.get('feed', None) and parse_date(feed['feed'].get('updated', None)),
        'last_modified' : feed.get('headers', None) and parse_date(feed['headers'].get('last-modified', None)),
        'encoding' : encoding,
    }
    #TODO do some error checking here to punt if certain fields dont exist (href, title)
    if type(kwargs['title']) is not str:
        logging.error('Invalid feedparser response')
        return None
    # see if the feed already exists
    try:
        db_feed = Feed.objects.get(xml_url__iexact=kwargs['xml_url'])
    except:
        logging.debug('creating new feed: %s', kwargs['xml_url'])
        # it doesn't exist, so create a new one
        try:
            try:
                # best-effort tag suggestions from the title/subtitle text
                suggest_text = '%s %s ' % (kwargs['title'], kwargs['subtitle'])
                kwargs['suggested_tags'] = ' '.join(yahoo_terms.extract_terms(suggest_text, limit=10))
            except:{}
            db_feed = Feed(**kwargs)
            db_feed.save()
        except Exception, e:
            logging.error('Error Creating Feed: %s' % e)
            return None
    else:
        if update:
            # loop through the args and update the feed item...
            logging.debug('updating feed: %s', db_feed.xml_url)
            for arg, val in kwargs.iteritems():
                setattr(db_feed, arg, val)
            try:
                db_feed.save()
            except Exception, e:
                logging.error('Could not update feed: %s, %s' % (kwargs['xml_url'], e))
                # NOTE(review): reconstructed indentation places this return in
                # the except block (bail out when the update cannot be saved)
                return db_feed
    #loop through and make sure each entry has an ID field
    #this way, we can more easily lookup by ID later on if we need
    entries = feed.get('entries', [])
    for entry in entries:
        if not entry.has_key('id') or entry['id'] is None:
            entry['id'] = make_entryid(entry.get('link', None))
    #TODO should probably put this in a try block
    db_feed.set_data(feed)
    ################################################################
    ################################################################
    # Anything dealing with storing entries in the database is for
    # pure testing purposes. For now, we will just return the feed
    # without storing feed entries in the database. This is fine as
    # long as the Feed.get_entries() method returns the entries from
    # disk
    ################################################################
    ################################################################
    return db_feed
    # --- everything below is unreachable; kept deliberately per the banner ---
    #was going to reverse the entries in order to make the newest ones have the newest timestamp
    # entries.reverse()
    #was going to reverse the entries in order to make the newest ones have the newest timestamp
    # entries.reverse()
    for entry in entries:
        kwargs = {
            'entry_id' : encode(entry.get('id', None), encoding),
            'title' : encode(entry.get('title', None), encoding),
            'link' : entry.get('link', None),
            'summary' : encode(entry.get('summary', None), encoding),
            'updated_date' : parse_date(entry.get('updated', None)),
        }
        #TODO check to see if this already exists in the database
        # if it does, then just touch it
        try:
            q = Q(entry_id=kwargs['entry_id']) | Q(title=kwargs['title'])
            if kwargs['link']:
                q |= Q(link=kwargs['link'])
            q = (q) & Q(feed__id__exact=feed.id)
            db_entry = FeedEntry.objects.get(q)
        except:{}
        else:
            # entry already exists: update it in place and skip the create path
            if update:
                logging.debug('updating feed entry: %s', db_entry.entry_id)
                for arg, val in kwargs.iteritems():
                    setattr(db_entry, arg, val)
                try:
                    db_entry.save()
                except Exception, e:
                    print 'Could not update an Entry', kwargs['entry_id'], e
            continue
        #try to save the NEW entry
        # NOTE(review): db_entry is referenced in the debug call below before
        # being assigned on this (create) path — latent NameError in dead code
        try:
            logging.debug('creating feed entry: %s', db_entry.entry_id)
            db_entry = FeedEntry(feed=db_feed, **kwargs)
            db_entry.save()
        except Exception, e:
            logging.error('Error creating FeedEntry: %s, %s', (kwargs.get('entry_id', None), e))
    return db_feed
# Lucene index location plus the document fields we index and search over.
STORE_DIR = os.path.join(DOCUMENT_ROOT, '../index')
FEED_FIELDS = ['url', 'link', 'title', 'subtitle']
FEED_ENTRY_FIELDS = ['feed_url', 'link', 'title', 'summary', 'updated']
class IndexModifier:
    """Thin wrapper around a Lucene FSDirectory store.

    Creates the store directory (and an initial empty segment) when needed,
    and hands out IndexWriter/IndexReader instances, retrying while another
    process appears to hold the index lock.
    """
    def __init__(self, store_dir=STORE_DIR, destroy=False, analyzer=None):
        self.store_dir = store_dir
        self.create = False
        # destroy=True wipes any existing index on disk before reopening
        if os.path.exists(self.store_dir) and destroy:
            shutil.rmtree(self.store_dir)
        if not os.path.exists(self.store_dir):
            try:
                os.makedirs(self.store_dir)
            except:{}
            self.create = True
        self.store = lucene.FSDirectory.getDirectory(self.store_dir, self.create)
        self.analyzer = analyzer or lucene.StandardAnalyzer()
        if self.create:
            self.get_writer(self.create).close() #this inits the segment
    def get_writer(self, create=False):
        """Open an IndexWriter, retrying until the index lock is free."""
        writer = None
        while writer is None:
            try:
                writer = lucene.IndexWriter(self.store, self.analyzer, create)
                # allow very large documents to be indexed
                writer.setMaxFieldLength(1048576)
            except Exception, e:
                # NOTE(review): retries forever with a 100ms sleep — an
                # unrecoverable error would spin here indefinitely
                print e
                time.sleep(.1)
        return writer
    def get_reader(self):
        """Open an IndexReader, retrying until it succeeds (see note above)."""
        reader = None
        while reader is None:
            try:
                reader = lucene.IndexReader.open(self.store)
            except Exception, e:
                print e
                time.sleep(.1)
        return reader
class HitHolder:
    """Wraps Lucene Hits plus the Searcher that produced them, exposing a
    list-like interface of plain dicts (field name -> unicode value)."""
    def __init__(self, hits, searcher):
        self.hits = hits
        self.searcher = searcher
    def doc(self, index):
        # raw Lucene Document at *index*
        return self.hits.doc(index)
    def doc_dict(self, index):
        # flatten a Document into {field-name: field-value}
        holder = {}
        doc = self.doc(index)
        fields = doc.fields()
        while fields.hasMoreElements():
            field = fields.nextElement()
            holder[field.name()] = unicode(field.stringValue())
        return holder
    def __len__(self):
        return self.hits.length()
    def __del__(self):
        # close the owned searcher when the holder is garbage collected
        try:
            self.searcher.close()
        except:{}
    def __getitem__(self, index):
        # supports both holder[i] and holder[a:b]; slices are clamped in range
        if type(index) == slice:
            return [self.doc_dict(i) for i in range(max(index.start, 0), min(index.stop, self.hits.length()))]
        else:
            return self.doc_dict(index)
    def __iter__(self):
        return [self.doc_dict(i) for i in range(self.hits.length())].__iter__()
class FeedIndexModifier:
    """ This needs help -- should do something like the searchable code at
    http://mojodna.net/searchable/

    Maintains two Lucene indexes (one for feeds, one for their entries)
    and provides delete/index/search operations over them.
    """
    def __init__(self, store_dir=STORE_DIR, destroy=False, analyzer=None):
        self.store_dir = store_dir
        self.analyzer = analyzer or lucene.StandardAnalyzer()
        # separate on-disk indexes for feed-level and entry-level documents
        self.feed_modifier = IndexModifier(store_dir=os.path.join(store_dir, 'feeds'), destroy=destroy, analyzer=analyzer)
        self.entry_modifier = IndexModifier(store_dir=os.path.join(store_dir, 'entries'), destroy=destroy, analyzer=analyzer)
    def delete_existing_feed_docs(self, feed):
        """ deletes existing documents relating to the given feed """
        reader = lucene.IndexReader.open(self.feed_modifier.store)
        numDeleted = reader.deleteDocuments(lucene.Term('url', feed.xml_url))
        logging.info('deleted %d existing index documents' % numDeleted)
        reader.close()
        reader = lucene.IndexReader.open(self.entry_modifier.store)
        for entry in feed.get_entries():
            try:
                # entry documents are keyed by "<feed url>:<entry id>"
                id = '%s:%s' % (feed.xml_url, entry.get('id', None))
                numDeleted = reader.deleteDocuments(lucene.Term('id', id))
                if numDeleted:
                    logging.info('deleted %d feed entry docyments' % numDeleted)
            except:{}
        reader.close()
    def index_feed(self, feed, feed_data=None):
        """ Indexes the given feed """
        #remove any existing entries for this feed
        self.delete_existing_feed_docs(feed)
        writer = self.feed_modifier.get_writer()
        doc = lucene.Document()
        # identifier fields are stored untokenized; text fields are tokenized
        doc.add(lucene.Field('id', str(feed.id), lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
        doc.add(lucene.Field('url', feed.xml_url, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
        if feed.channel_link:
            doc.add(lucene.Field('link', feed.channel_link, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
        if feed.title:
            doc.add(lucene.Field('title', feed.title, lucene.Field.Store.YES, lucene.Field.Index.TOKENIZED))
        if feed.subtitle:
            doc.add(lucene.Field('subtitle', feed.subtitle, lucene.Field.Store.YES, lucene.Field.Index.TOKENIZED))
        writer.addDocument(doc)
        writer.close()
        logging.info('Indexed Feed: %s' % feed.xml_url)
        writer = self.entry_modifier.get_writer()
        for entry in feed.get_entries():
            try:
                doc = lucene.Document()
                id = '%s:%s' % (feed.xml_url, entry.get('id', None))
                doc.add(lucene.Field('id', id, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
                doc.add(lucene.Field('feed_url', feed.xml_url, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
                if entry.get('title', None):
                    doc.add(lucene.Field('title', entry['title'], lucene.Field.Store.YES, lucene.Field.Index.TOKENIZED))
                if entry.get('summary', None):
                    doc.add(lucene.Field('summary', entry['summary'], lucene.Field.Store.YES, lucene.Field.Index.TOKENIZED))
                if entry.get('link', None):
                    doc.add(lucene.Field('link', entry['link'], lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
                updated = parse_date(entry.get('updated', None))
                if updated:
                    doc.add(lucene.Field('updated', updated.isoformat(' '), lucene.Field.Store.YES, lucene.Field.Index.NO))
                # full entry is pickled into the doc so hits can be rehydrated
                doc.add(lucene.Field('pickle', pickle.dumps(entry), lucene.Field.Store.YES, lucene.Field.Index.NO))
                writer.addDocument(doc)
                logging.info('Indexed Feed Entry: %s' % entry.get('title', None) or id)
            except:{}
        writer.close()
    def search(self, query, fields=FEED_ENTRY_FIELDS, analyzer=None, store=None):
        """Run *query* over *fields*; returns a HitHolder, or None for an
        empty query/field list. Defaults to searching the entry index."""
        if not query or len(query.strip()) == 0 or len(fields) == 0:
            return None
        analyzer = analyzer or self.analyzer
        if store is None:
            store = self.entry_modifier.store
        if len(fields) > 1:
            qp = lucene.MultiFieldQueryParser(fields, analyzer)
        else:
            qp = lucene.QueryParser(fields[0], analyzer)
        q = qp.parse(query)
        searcher = lucene.IndexSearcher(store)
        hits = searcher.search(q, lucene.Sort.RELEVANCE)
        # HitHolder takes ownership of the searcher and closes it on GC
        return HitHolder(hits, searcher)
    def search_entries(self, query, fields=FEED_ENTRY_FIELDS, analyzer=None):
        return self.search(query, fields=fields, analyzer=analyzer, store=self.entry_modifier.store)
    def search_feeds(self, query, fields=FEED_FIELDS, analyzer=None):
        return self.search(query, fields=fields, analyzer=analyzer, store=self.feed_modifier.store)
# Backwards-compatible alias; other modules refer to FeedIndex().
FeedIndex = FeedIndexModifier
from westom.feednut.utils.lucene_utils import FeedIndexer
def get_feed(url, last_modified=None, etag=None, index=True, update=False):
    """Fetch *url*, store it in the database, and return the Feed row.

    An existing row is returned untouched unless *update* is requested.
    Indexing is currently disabled (see the commented-out call).
    """
    try:
        db_feed = Feed.objects.get(xml_url__iexact=url)
    except:
        db_feed = None
    if db_feed and not update:
        return db_feed
    feed_data = FeedDownloader().get_feed(url, last_modified, etag, use_cache=False)
    if feed_data:
        db_feed = store_feed(feed_data, url=url, update=update)
    # if index and db_feed:
    #     FeedIndex().index_feed(db_feed)
    return db_feed
def update_feeds(urls, index=True):
    """Re-download every url and store the results; returns the Feed rows."""
    downloaded = FeedDownloader().get_feeds(urls, use_cache=False)
    stored = [store_feed(data, url=u, update=True)
              for u, data in downloaded.iteritems()]
    # if index:
    #     FeedIndexer().index_feeds(stored)
    return stored
#def get_feeds(urls, index=True, update=False):
# """ returns dict of (url, db_feed) of all feeds that were fetched and stored """
# feeds = {}
# for url in urls:
# feed = FeedDownloader().get_feed(url, use_cache=False)
# if feed:
# db_feed = store_feed(feed, update=update)
# feeds[url] = db_feed
# .index_feed(feed)
# indexer = FeedIndexer()
# return feeds
| Python |
import time, random, md5, socket
def uuid(*args):
    """
    Generates a 32-character universally unique ID.
    This does not follow the UUID spec.
    Any arguments only create more randomness.
    """
    # local import: the deprecated top-level `md5` module (removed in Py3)
    # is replaced by hashlib without touching this module's import line
    import hashlib
    # int() replaces the Py3-invalid long()/L-suffix literals; Python 2
    # promotes to long automatically, so behavior is unchanged there
    t = int(time.time() * 1000)
    r = int(random.random() * 100000000000000000)
    try:
        a = socket.gethostbyname(socket.gethostname())
    except Exception:
        # if we can't get a network address, just imagine one
        a = random.random() * 100000000000000000
    data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
    # md5 hexdigest is exactly 32 hex characters
    return hashlib.md5(data.encode('utf-8')).hexdigest()[:32]
#!/usr/bin/env python
'''
- requires Python 2.4, cElementTree, elementtree, and elementtidy.
- also uses Mark Pilgrim's openanything lib
- uses PyXML
see http://effbot.org/downloads/ to download the element* packages
Example usage:
url = 'http://www.rssgov.com/rssparsers.html'
dom = HtmlDom(url)
print dom.evaluate('/html:html/html:head/html:title/text()')
dom = HtmlDom('<html><head></head><body><div class="test">some text</div></body></html>')
print dom.evaluate('//html:div/text()')
'''
from westom.feednut.libs.openanything import fetch
from elementtidy import TidyHTMLTreeBuilder as tidy
import cElementTree as _etree
from xml.dom.ext.reader import PyExpat
import sys, xml.xpath
# User agent passed to fetch(); some sites refuse the default Python agent.
MOZILLA_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.4) Gecko/20060508 Firefox/1.5.0.4'
# Namespace bound to the 'html' XPath prefix (see HtmlDom.nss).
XHTML_NAMESPACE = u'http://www.w3.org/1999/xhtml'
class HtmlDom:
    '''Loads an HTML document (local file path or URL), tidies it into
    XHTML, and exposes XPath evaluation over the resulting DOM with the
    'html' prefix bound to the XHTML namespace.'''
    def __init__(self, url):
        # try a local file first; fall back to an HTTP fetch
        try:
            f = file(url)
            data = f.read()
            f.close()
        except IOError, e:
            try:
                result = fetch(url, agent=MOZILLA_AGENT)
                data = result['data']
            except:
                raise IOError, 'invalid URL'
        # create parser (tidy turns real-world HTML into well-formed XHTML)
        parser = tidy.TreeBuilder()
        parser.feed(data)
        xmlText = _etree.tostring(parser.close())
        #create the DOM
        reader = PyExpat.Reader()
        self.dom = reader.fromString(xmlText)
        self.nss = {u'html': XHTML_NAMESPACE}
        self.context = xml.xpath.Context.Context(self.dom, processorNss=self.nss)
    def evaluate(self, expression, node=None):
        ''' evaluates the given xpath expression and returns the nodes '''
        if not node:
            return xml.xpath.Evaluate(expression, context=self.context)
        else:
            # evaluate relative to *node* instead of the document root
            cxt = xml.xpath.Context.Context(node, processorNss=self.nss)
            return xml.xpath.Evaluate(expression, context=cxt)
def escapeHTML(s):
    '''Escape &, ", < and > for HTML output (adapted from MochiKit).'''
    # '&' must be replaced first so entity ampersands are not re-escaped
    for ch, entity in (('&', '&amp;'), ('"', '&quot;'), ('<', '&lt;'), ('>', '&gt;')):
        s = s.replace(ch, entity)
    return s
def toHTML(dom):
    '''Serialize *dom* to an HTML string (adapted from MochiKit).'''
    pieces = emitHTML(dom)
    return ''.join(pieces)
def emitHTML(dom):
    ''' adapted from MochiKit

    Serializes *dom* iteratively; returns a list of string fragments
    (join them to obtain the HTML text).
    '''
    lst = [];
    # queue is the call stack, we're doing this non-recursively
    queue = [dom];
    while len(queue) > 0:
        dom = queue.pop();
        if not hasattr(dom, 'nodeType'):
            # plain string (e.g. a close tag pushed below) — emit verbatim
            lst.append(dom)
        elif dom.nodeType == 1:
            # element node: open tag, sorted attributes, children, close tag
            lst.append('<' + dom.nodeName.lower())
            attributes = []
            for i in range(dom.attributes.length):
                attr = dom.attributes.item(i)
                attributes.append(' %s="%s"' % (attr.name, escapeHTML(attr.value)))
            attributes.sort()
            for attr in attributes:
                lst.append(attr)
            if dom.hasChildNodes():
                lst.append(">")
                # queue is the FILO call stack, so we put the close tag on first
                queue.append("</" + dom.nodeName.lower() + ">")
                cnodes = dom.childNodes
                cnodes.reverse()
                queue += cnodes
            else:
                lst.append('/>')
        elif dom.nodeType == 3:
            # text node
            lst.append(escapeHTML(dom.nodeValue))
    return lst
if __name__ == '__main__':
    # CLI: pretty-print the tidied DOM of each URL/file given as an argument
    for arg in sys.argv[1:]:
        htmldom = HtmlDom(arg)
        from xml.dom.ext import PrettyPrint
        PrettyPrint(htmldom.dom)
| Python |
from westom.feednut.utils import feed_accomplice
import time, threading
class Task(threading.Thread):
    """
    A Scheduler task: runs *action* every *loopdelay* seconds on its own
    thread, after an optional *initdelay*.
    """
    def __init__(self, action, loopdelay, initdelay):
        self._action = action
        self._loopdelay = loopdelay
        self._initdelay = initdelay
        self._running = 1
        threading.Thread.__init__(self)

    def __repr__(self):
        return '%s %s %s' % (
            self._action, self._loopdelay, self._initdelay)

    def run(self):
        if self._initdelay:
            time.sleep(self._initdelay)
        # _runtime tracks the scheduled (not actual) start of each iteration
        self._runtime = time.time()
        while self._running:
            start = time.time()
            self._action()
            self._runtime += self._loopdelay
            # BUG FIX: if the action ran longer than loopdelay the sleep
            # argument went negative, which raises an error; clamp at zero.
            # (also removed the leftover 'about to run task' debug print)
            time.sleep(max(0, self._runtime - start))

    def stop(self):
        """Ask run() to exit after the current iteration."""
        self._running = 0
class Scheduler:
"""
A Multi-Purpose Scheduler
"""
def __init__(self):
self._tasks = []
def __repr__(self):
rep = ''
for task in self._tasks:
rep += '%s\n' % task
return rep
def addtask(self, action, loopdelay, initdelay = 0):
task = Task(action, loopdelay, initdelay)
self._tasks.append(task)
def startall(self):
print 'Starting tasks'
for task in self._tasks:
task.start()
def stopall(self):
for task in self._tasks:
print 'Stopping task', task
task.stop()
task.join()
print 'Stopped'
class FeedUpdateScheduler(Scheduler):
    """ Scheduler for updating the feeds """
    def __init__(self):
        Scheduler.__init__(self)
        #wait 5 minutes between updates
        # NOTE(review): the comment above says 5 minutes but the loopdelay
        # actually passed is 15 seconds — confirm which value is intended.
        self.addtask(feed_accomplice.update_feeds, 15, 0)
        print 'HERE!'
    def __del__(self):
        # best-effort shutdown of the worker threads when the object is GC'd
        self.stopall()
# Module-level singleton: importing this module constructs the scheduler.
feed_update_scheduler = FeedUpdateScheduler()
from westom.feednut.models import *
from django.db.models import Q
from westom.feednut.libs.pybloglines import pybloglines
from westom.feednut.libs.pyblogger import blogger
import re
def get_user_blogs(user, type=None):
    """
    This returns a list of tuples (size 2).
    Each tuple consists of the Account (see the models), and
    a list of blogs for that account.
    """
    query = Q(type__exact='blog')
    if type:
        query &= Q(source__exact=type)
    result = []
    for account in user.get_account_list(query):
        account_blogs = blogger.listBlogs(account.username, account.password)
        result.append((account, account_blogs))
    return result
#list any system names here, so people can't use them
INVALID_NAMES = ['static', 'register', 'feed', 'login', 'logout', 'help', 'captcha', 'check', 'gpalert',
                 'tag', 'tags', 'feeds', 'feednut', 'root', 'admin', 'api', 'readentry', 'feeds', 'read', 'form']
# BUG FIX: the original wrote INVALID_NAME_DICT['val'] = val, so the dict
# only ever contained the literal key 'val' and the reserved-name check in
# is_valid_username never matched any real system name.
INVALID_NAME_DICT = {}
for val in INVALID_NAMES:
    INVALID_NAME_DICT[val] = val
def is_valid_username(username):
    """
    Checks if the input username is valid by:
    -seeing if it is long enough
    -seeing if it is too long
    -seeing if it is a system name
    -seeing if someone already has it
    Returns None if nothing is wrong, otherwise an error message string.
    """
    message = None
    if username is not None:
        # normalize: trim whitespace and a trailing slash from URL-style input
        username = username.strip().rstrip('/')
        if len(username) < 3:
            message = 'Username too short. Must be at least 3 characters'
        elif len(username) > 20:
            message = 'Username too long. Must be at at most 20 characters'
        elif not re.match('^\w+$', username):
            message = 'Username can only be comprised of letters, digits, or the underscore.'
        elif username in INVALID_NAME_DICT:
            # reserved system name (see INVALID_NAMES above)
            # NOTE(review): verify INVALID_NAME_DICT is keyed by the names
            # themselves — see how it is built at module load.
            message = 'Username already in use'
        else:
            try:
                user = User.objects.get(username__iexact=username)
            except:
                # lookup failed -> no such user -> the name is free
                pass
            else:
                message = "Username already exists"
    return message
# Anchored pattern: the whole string must be local-part@domain (or IPv4).
EMAIL_RE = re.compile(r"^([0-9a-zA-Z_&.+-]+!)*[0-9a-zA-Z_&.+-]+@(([0-9a-z]([0-9a-z-]*[0-9a-z])?\.)+[a-z]{2,6}|([0-9]{1,3}\.){3}[0-9]{1,3})$")

def is_valid_email(email):
    """ checks if the email is valid """
    # None when the address is valid, otherwise an error message.
    # An anchored sub() leaves an empty string iff the whole input matched.
    if email is None or len(email.strip()) < 3:
        return 'Invalid email format'
    if len(EMAIL_RE.sub('', email)) != 0:
        return 'Invalid email format'
    return None
def get_user_feeds(user, only_public=True, limit=-1):
    """ Returns a list of UserFeed objects owned by user """
    query = Q(user__id__exact=user.id)
    if only_public:
        query &= Q(is_public=1)
    base = UserFeed.objects.select_related()
    if limit >= 0:
        return base.filter(query, limit=limit).order_by('position')
    return base.filter(query).order_by('position')
def get_feeds(user, only_public=True):
    """ Returns a list of Feed objects owned by user """
    return [uf.get_feed() for uf in get_user_feeds(user, only_public=only_public)]
def get_tags(user, only_public=None):
    """
    Returns a list of tags for this user
    Also returns a count as part of each tag (extra 'count' attribute)
    """
    # hand-written joins: Tag -> UserFeedTag -> UserFeed restricted to this
    # user (user.id is expected to be an integer primary key — TODO confirm,
    # since it is interpolated directly into the SQL below)
    where=['feednut_userfeed.user_id=' + str(user.id), 'feednut_userfeedtag.tag_id=feednut_tag.id', 'feednut_userfeedtag.user_feed_id=feednut_userfeed.id']
    if(only_public):
        # hide the private marker tag and tags attached to non-public feeds
        where.append('feednut_tag.tag <> "fn:private"')
        where.append('feednut_userfeed.is_public = True')
    tables=['feednut_userfeed', 'feednut_userfeedtag']
    # correlated subquery computing each tag's usage count for this user
    select={'count' : 'select count(*) from feednut_userfeedtag, feednut_userfeed where feednut_userfeedtag.tag_id=feednut_tag.id and feednut_userfeed.id=feednut_userfeedtag.user_feed_id and feednut_userfeed.user_id=' + str(user.id)}
    tags = Tag.objects.extra(where=where, tables=tables, select=select).distinct().order_by('tag')
    # usertags = UserFeedTag.objects.filter(user_feed__user__exact=user.id).values('tag').distinct()
    # usertags = list(tag['tag'] for tag in usertags)
    # tags = Tag.objects.in_bulk(usertags).values()
    # tags.sort(lambda x, y:cmp(x.tag,(y.tag)))
    return tags
def get_accounts(user, type):
    """ returns a list of accounts of a specific type """
    query = Q(type=type) if type else Q()
    return user.account_set.filter(query)
def read_this_entry(user, url, title, description, xml_url):
    """ keep a list of the last 50 articles that the user read """
    limit = 50
    # BUG FIX: the original counted ALL users' read entries, so one user's
    # retained history depended on everyone else's activity; count only
    # this user's rows when enforcing the per-user limit.
    count = UserReadEntry.objects.filter(user=user.id).count()
    if count >= limit:
        # trim the oldest entries beyond the limit (newest-first ordering)
        for entry in UserReadEntry.objects.filter(user=user.id).order_by('-read_date')[limit - 1:count]:
            entry.delete()
    ure = UserReadEntry(user=user, link=url, title=title, description=description, xml_url=xml_url)
    ure.save()
    return ure
    # uf = UserFeed.objects.get(id=userFeedId)
    # uf.touch()
#def bookmark(user, url, title, description):
# """ keep a list of the last 50 articles that the user read """
# try:
# bookmark = Bookmark.objects.get(link__iexact=url)
# except:
# bookmark = Bookmark(user=user, link=url, title=title, description=description)
# bookmark.save()
# return bookmark
# else:
# return None
def get_latest_read_entries(user):
    """ return a list of UserReadEntry objects of latest read entries """
    entries = UserReadEntry.objects.filter(user__id__exact=user.id)
    return entries.order_by('-read_date')
def has_feed(user, feed):
    """ returns True if the user has a userfeed for the given feed, otherwise false """
    matches = UserFeed.objects.filter(user__id__exact=user.id, feed__id__exact=feed.id)
    return matches.count() > 0
def get_user_buddies(user):
    """Return the distinct Users listed as buddies of *user*."""
    join_tables = ['feednut_userbuddy']
    conditions = ['feednut_userbuddy.user_id=%s' % user.id,
                  'feednut_userbuddy.buddy_id=auth_user.id']
    return User.objects.extra(where=conditions, tables=join_tables).distinct()
| Python |
from django.template import Library, Node
from django import template
from westom.feednut.utils import user as user_utils
from westom.feednut.utils import misc as misc_utils
from westom.feednut.utils import feed_accomplice
from westom.feednut.models import *
# Template-tag registry; Django picks this up via {% load %}.
register = Library()
def get_entries(parser, token):
try:
# Splitting by None == splitting by spaces.
tag_name, feed = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires an argument" % token.contents[0]
return GetEntriesNode(feed)
class GetEntriesNode(template.Node):
    """Resolves the feed variable and stores its entries in the context."""
    def __init__(self, feed):
        self.feed = feed
    def render(self, context):
        resolved = template.resolve_variable(self.feed, context)
        context['entries'] = resolved.get_entries()
        return ''
register.tag('get_entries', get_entries)
def get_user_tag_tuples(parser, token):
""" This assumes you have 'page_user' in the current context"""
try:
# Splitting by None == splitting by spaces.
tag_name, var = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires an argument" % token.contents[0]
return GetUserTagTuplesNode(var)
class GetUserTagTuplesNode(template.Node):
    """Stores the page_user's tag list in the context variable named *var*.

    Private tags are included only when the viewer is the page's owner.
    """
    def __init__(self, var):
        self.var = var
    def render(self, context):
        # show private tags only when the logged-in user IS the page's owner
        if context.has_key('user') and context['page_user'] == context['user']:
            only_public = False
        else:
            only_public = True
        context[self.var] = user_utils.get_tags(context['page_user'], only_public=only_public)
        return ''
register.tag('get_user_tag_tuples', get_user_tag_tuples)
def get_user_buddies(parser, token):
try:
# Splitting by None == splitting by spaces.
tag_name, var = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires an argument" % token.contents[0]
return GetUserBuddies(var)
class GetUserBuddies(template.Node):
    """Puts the page_user's buddy list into the named context variable."""
    def __init__(self, var):
        self.var = var
    def render(self, context):
        owner = context['page_user']
        buddies = user_utils.get_user_buddies(owner)
        context[self.var] = buddies
        return ''
register.tag('get_user_buddies', get_user_buddies)
def get_permanent_feeds(parser, token):
try:
# Splitting by None == splitting by spaces.
tag_name, var = token.contents.split(None, 1)
print "in here"
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires an argument" % token.contents[0]
return GetPermanentFeeds(var)
class GetPermanentFeeds(template.Node):
    """Stores the page_user's permanent feeds in the named context variable."""
    def __init__(self, var):
        self.var = var
    def render(self, context):
        page_user = context['page_user']
        # NOTE(review): called with a permanent_feed kwarg that the visible
        # definition of feed_accomplice.get_feeds does not accept — confirm.
        context[self.var] = feed_accomplice.get_feeds(user=page_user, permanent_feed=True)
        # removed leftover debug `print` of the context variable
        return ''
register.tag('get_permanent_feeds', get_permanent_feeds)
def ifisme(parser, token):
    """{% ifisme %}...{% else %}...{% endifisme %} block tag."""
    nodelist_true = parser.parse(('else', 'endifisme'))
    token = parser.next_token()
    if token.contents != 'else':
        return IfIsMeNode(nodelist_true, template.NodeList())
    nodelist_false = parser.parse(('endifisme',))
    parser.delete_first_token()
    return IfIsMeNode(nodelist_true, nodelist_false)
class IfIsMeNode(template.Node):
    """Renders the true branch when the logged-in user is the page's owner."""
    def __init__(self, nodelist_true, nodelist_false):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
    def render(self, context):
        if context.has_key('page_user'):
            page_user = context['page_user']
            if context.has_key('user'):
                user = context['user']
                if user and page_user and page_user.id == user.id:
                    return self.nodelist_true.render(context)
        # any missing context entry or id mismatch falls through to else
        return self.nodelist_false.render(context)
register.tag('ifisme', ifisme)
def ifisnotme(parser, token):
    """{% ifisnotme %}...{% else %}...{% endifisnotme %} block tag."""
    nodelist_true = parser.parse(('else', 'endifisnotme'))
    token = parser.next_token()
    if token.contents != 'else':
        return IfIsNotMeNode(nodelist_true, template.NodeList())
    nodelist_false = parser.parse(('endifisnotme',))
    parser.delete_first_token()
    return IfIsNotMeNode(nodelist_true, nodelist_false)
class IfIsNotMeNode(template.Node):
    """Renders the true branch when a logged-in user views someone else's page."""
    def __init__(self, nodelist_true, nodelist_false):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
    def render(self, context):
        if context.has_key('page_user'):
            page_user = context['page_user']
            if context.has_key('user'):
                user = context['user']
                if user and page_user and page_user.id != user.id:
                    return self.nodelist_true.render(context)
        # missing context entries or matching ids fall through to else
        return self.nodelist_false.render(context)
register.tag('ifisnotme', ifisnotme)
def if_user_or_page_user(parser, token):
    """{% ifuserorpageuser %}...{% else %}...{% endifuserorpageuser %} block tag."""
    nodelist_true = parser.parse(('else', 'endifuserorpageuser'))
    token = parser.next_token()
    if token.contents != 'else':
        return IfUserOrPageUserNode(nodelist_true, template.NodeList())
    nodelist_false = parser.parse(('endifuserorpageuser',))
    parser.delete_first_token()
    return IfUserOrPageUserNode(nodelist_true, nodelist_false)
class IfUserOrPageUserNode(template.Node):
    """True branch when the context has a usable page_user, or — only when
    'page_user' is absent entirely — a usable logged-in user."""
    def __init__(self, nodelist_true, nodelist_false):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
    def render(self, context):
        ok = False
        if context.has_key('page_user'):
            page_user = context['page_user']
            if page_user and page_user.id:
                ok = True
        elif context.has_key('user'):
            # NOTE(review): because of the elif, a present-but-empty
            # 'page_user' prevents 'user' from ever being checked — confirm
            # that this short-circuit is intended.
            user = context['user']
            if user and user.id:
                ok = True
        if ok:
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
register.tag('ifuserorpageuser', if_user_or_page_user)
def ifishottestfeed(parser, token):
    """Compile {% ifishottestfeed %}...{% else %}...{% endifishottestfeed %}."""
    true_nodes = parser.parse(('else', 'endifishottestfeed'))
    branch = parser.next_token()
    if branch.contents != 'else':
        false_nodes = template.NodeList()
    else:
        false_nodes = parser.parse(('endifishottestfeed',))
        parser.delete_first_token()
    return IfIsHottestFeedNode(true_nodes, false_nodes)
class IfIsHottestFeedNode(template.Node):
    """True branch only when the context's feed is the special system
    "hottest feeds" feed (recognised by its exact title)."""
    # Magic title identifying the system feed; must match the stored row.
    HOTTEST_TITLE = "FeedNut.com - Hottest Feeds"
    def __init__(self, nodelist_true, nodelist_false):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
    def render(self, context):
        # 'in' replaces the py2-only has_key(); named constant replaces the
        # inline magic string.
        if 'feed' in context:
            feed = context['feed']
            if feed.title == self.HOTTEST_TITLE:
                return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
register.tag('ifishottestfeed', ifishottestfeed)
def ifhasfeed(parser, token):
    """Compile {% ifhasfeed %}...{% else %}...{% endifhasfeed %}."""
    true_nodes = parser.parse(('else', 'endifhasfeed'))
    branch = parser.next_token()
    if branch.contents != 'else':
        false_nodes = template.NodeList()
    else:
        false_nodes = parser.parse(('endifhasfeed',))
        parser.delete_first_token()
    return IfHasFeedNode(true_nodes, false_nodes)
class IfHasFeedNode(template.Node):
    """True branch when the context's user is subscribed to the context's
    feed (or search 'result', which arrives as a plain dict)."""
    def __init__(self, nodelist_true, nodelist_false):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
    def render(self, context):
        if ('feed' in context or 'result' in context) and 'user' in context:
            feed = context.get('feed', None) or context.get('result', None)
            user = context['user']
            if isinstance(feed, dict):
                # Search results are dicts; resolve to the real Feed row.
                try:
                    feed = Feed.objects.get(id=feed['id'])
                except (Feed.DoesNotExist, KeyError):
                    # The old bare `except:{}` left `feed` as the dict and
                    # still called has_feed() with it; an unresolvable result
                    # now simply counts as "not subscribed".
                    return self.nodelist_false.render(context)
            if user_utils.has_feed(user, feed):
                return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
register.tag('ifhasfeed', ifhasfeed)
def first_not_none(parser, token):
    """{% first_not_none a b c %} -- render the first argument that resolves
    to a truthy value (empty string if none do)."""
    parts = token.contents.split()
    # token.contents starts with the tag name; everything after it is a
    # variable to try.  The old code caught ValueError -- which split() never
    # raises (a whitespace-only token raised IndexError instead) -- and its
    # message printed token.contents[0], the first *character*.
    if len(parts) < 2:
        raise template.TemplateSyntaxError("%r tag requires an argument" % parts[0])
    return FirstNotNoneNode(parts[1:])
class FirstNotNoneNode(template.Node):
    """Resolves each stored variable name in order against the context and
    renders the first truthy value; '' when none resolve truthy."""
    def __init__(self, parts):
        self.parts = parts
    def render(self, context):
        for part in self.parts:
            try:
                value = template.resolve_variable(part, context)
            except template.VariableDoesNotExist:
                # Unresolvable names are skipped (was a bare `except: {}`
                # that also swallowed programming errors).
                continue
            if value:
                return value
        return ''
register.tag('first_not_none', first_not_none)
# ---------------------------------------------------------------------------
# Trivial inclusion tags.  Each load_*_page helper re-renders its template
# with the caller's full context (takes_context=True) and adds no variables
# of its own; only the template name differs between them.
# ---------------------------------------------------------------------------
def load_welcome_page(context):
    return context
load_welcome_page = register.inclusion_tag('welcome.html', takes_context=True)(load_welcome_page)
def load_account_settings_page(context):
    return context
load_account_settings_page = register.inclusion_tag('account_settings.html', takes_context=True)(load_account_settings_page)
def load_search_results_nav_page(context):
    return context
load_search_results_nav_page = register.inclusion_tag('search_results_nav.html', takes_context=True)(load_search_results_nav_page)
def load_feed_hdr_page(context):
    return context
load_feed_hdr_page = register.inclusion_tag('feed_hdr.html', takes_context=True)(load_feed_hdr_page)
def load_feed_options_page(context):
    return context
load_feed_options_page = register.inclusion_tag('feed_options.html', takes_context=True)(load_feed_options_page)
def load_noscript_page(context):
    return context
load_noscript_page = register.inclusion_tag('noscript.html', takes_context=True)(load_noscript_page)
def load_addsearch_page(context):
    return context
load_addsearch_page = register.inclusion_tag('add_search_feeds.html', takes_context=True)(load_addsearch_page)
def load_options_page(context):
    return context
load_options_page = register.inclusion_tag('options.html', takes_context=True)(load_options_page)
def load_login_page(context):
    return context
load_login_page = register.inclusion_tag('login.html', takes_context=True)(load_login_page)
def load_footer_page(context):
    return context
load_footer_page = register.inclusion_tag('footer.html', takes_context=True)(load_footer_page)
def load_main_page(context):
    return context
load_main_page = register.inclusion_tag('main.html', takes_context=True)(load_main_page)
def load_feeds_page(context):
    return context
load_feeds_page = register.inclusion_tag('feeds.html', takes_context=True)(load_feeds_page)
def load_tags_page(context):
    return context
load_tags_page = register.inclusion_tag('tags.html', takes_context=True)(load_tags_page)
def load_community_page(context):
    return context
load_community_page = register.inclusion_tag('communityTab.html', takes_context=True)(load_community_page)
def load_toolbar_page(context):
    return context
load_toolbar_page = register.inclusion_tag('toolbar.html', takes_context=True)(load_toolbar_page)
def load_rss_feeds_page(context):
    return context
load_rss_feeds_page = register.inclusion_tag('rss_feeds.html', takes_context=True)(load_rss_feeds_page)
def load_blogs_page(context):
    return context
load_blogs_page = register.inclusion_tag('blogs.html', takes_context=True)(load_blogs_page)
def load_tag_nav(context):
    return context
load_tag_nav = register.inclusion_tag('tag_nav.html', takes_context=True)(load_tag_nav)
def load_feed_nav(context):
    return context
load_feed_nav = register.inclusion_tag('feed_nav.html', takes_context=True)(load_feed_nav)
def load_bookmark_nav(context):
    return context
load_bookmark_nav = register.inclusion_tag('bookmark_nav.html', takes_context=True)(load_bookmark_nav)
def load_syndication_nav(context):
    return context
load_syndication_nav = register.inclusion_tag('syndication_nav.html', takes_context=True)(load_syndication_nav)
def load_header_page(context):
    return context
load_header_page = register.inclusion_tag('header.html', takes_context=True)(load_header_page)
def load_tabs_page(context):
    return context
load_tabs_page = register.inclusion_tag('tabs.html', takes_context=True)(load_tabs_page)
def load_search_display_page(context):
    return context
load_search_display_page = register.inclusion_tag('searchDisplay.html', takes_context=True)(load_search_display_page)
def load_feed_entry_page(context, feed_id, id):
    # Render feed_entry.html for one entry.  Note: plain item assignment on
    # the Context is deliberate (Context.update() would push a new dict onto
    # the context stack instead of setting keys in place).
    context['feed_id'] = feed_id
    context['index'] = id
    return context
load_feed_entry_page = register.inclusion_tag('feed_entry.html', takes_context=True)(load_feed_entry_page)
@register.inclusion_tag('feed.html', takes_context=True)
def load_feed_page(context):
    # Look up the page owner's UserFeed row for the feed being rendered so
    # the template can show per-user settings.  Assumes 'page_user' and
    # 'feed' are present and the user is subscribed; otherwise this raises
    # (DoesNotExist) -- TODO confirm callers guarantee the subscription.
    context['userfeed'] = UserFeed.objects.get(user__id__exact=context['page_user'].id, feed__id__exact=context['feed'].id)
    return context
@register.inclusion_tag('form_subscribe.html', takes_context=True)
def load_form_subscribe_page(context):
    # Pass-through renderer for the subscribe form.
    return context
#copied from the django dev release
class SpacelessNode(Node):
    """Renders the wrapped node list, then collapses whitespace that sits
    between HTML tags (see the spaceless tag below)."""
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        rendered = self.nodelist.render(context).strip()
        return misc_utils.strip_spaces_between_tags(rendered)
def spaceless(parser, token):
    """
    {% spaceless %} ... {% endspaceless %}

    Normalizes whitespace (spaces, tabs, newlines) found *between* HTML tags
    in the enclosed output down to a single space, e.g.::

        {% spaceless %}<p>
            <a href="foo/">Foo</a>
        </p>{% endspaceless %}

    renders as ``<p> <a href="foo/">Foo</a> </p>``.  Whitespace between a
    tag and surrounding text is left untouched.  (Copied from the Django
    development release.)
    """
    inner = parser.parse(('endspaceless',))
    parser.delete_first_token()
    return SpacelessNode(inner)
spaceless = register.tag(spaceless)
| Python |
from django.template import Library
from westom.feednut.utils import misc as misc_utils
import re
register = Library()
def split_seq(list, num):
    """
    Template filter: split the given list into ``num`` sub-lists
    (num arrives as a string from the template and is coerced to int).
    """
    pieces = int(num)
    return misc_utils.split_seq(list, pieces)
register.filter(split_seq)
def feedid(prefix, args):
    """
    Build a DOM id string: prefix + feedId + itemId.

    ``args`` is a whitespace-separated string; the first token is the feed
    id, and the item id is the first truthy token after it (split() drops
    empty tokens, so in practice that is always the second token).
    """
    tokens = args.split()
    item = tokens[1]
    if len(tokens) > 2:
        for idx in range(1, len(tokens)):
            if tokens[idx]:
                item = tokens[idx]
                break
    return '%s_%s_%s' % (prefix, tokens[0], item)
# expose feedid as a template filter
register.filter(feedid)
# Non-greedy, one anchor at a time; tolerates attributes and newlines.
ANCHOR_RE = re.compile(r'<a\b[^>]*>(.*?)</a>', re.IGNORECASE | re.DOTALL)
def strip_anchors_and_images(html):
    """Replace every <a ...>text</a> in ``html`` with just its inner text.

    The previous greedy pattern collapsed everything between the first
    ``<a`` and the last ``</a>`` when the input held several links.
    (Despite the name, <img> tags are not touched -- kept for
    compatibility with existing templates.)
    """
    return ANCHOR_RE.sub(r'\1', html)
# expose strip_anchors_and_images as a template filter
register.filter(strip_anchors_and_images)
def page_owner(name1, name2):
    """
    Takes in two usernames. If equal, returns 'my', otherwise
    it returns the second user's name in possessive form; an empty or
    None name2 is returned unchanged.
    """
    # == replaces the Python-2-only cmp() builtin; identical result.
    if name1 == name2 and name2 is not None:
        return 'my'
    elif name2:
        return "%s's" % name2
    else:
        return name2
# expose page_owner as a template filter
register.filter(page_owner)
def stripws(value):
    """Template filter: strip surrounding whitespace from the stringified
    value; falsy values pass through untouched."""
    if not value:
        return value
    return str(value).strip()
# expose stripws as a template filter
register.filter(stripws)
def split_word(word, limit):
    """Break one word into space-separated chunks of at most ``limit``
    characters (limit may arrive as a string from the template)."""
    limit = int(limit)
    if len(word) <= limit:
        return word
    # chop into fixed-size slices and rejoin with spaces
    chunks = [word[i:i + limit] for i in range(0, len(word), limit)]
    return ' '.join(chunks)
def split_phrase(phrase, limit):
    """Ensure no word in ``phrase`` is longer than ``limit`` characters."""
    pieces = [split_word(w, limit) for w in phrase.split(" ")]
    return ' '.join(pieces)
# only split_phrase is exposed as a filter; split_word is a helper
register.filter(split_phrase)
def range_list(end, start=0, step=1):
    """Template filter: list of ints from start (inclusive) to end
    (exclusive) by step; all arguments are coerced with int()."""
    # list() keeps the py2 list-returning behaviour under py3 as well,
    # where range() is lazy.
    return list(range(int(start), int(end), int(step)))
# expose range_list as a template filter
register.filter(range_list)
| Python |
from django import template
from westom.settings import SITE_ID
import Captcha
from Captcha.Visual import Tests
import tempfile
def _getFactory(id):
    """Per-site persistent captcha factory stored under the temp dir."""
    store = "%s/pycaptcha_%d" % (tempfile.gettempdir(), id)
    return Captcha.PersistentFactory(store)
class captcha_class(template.Node):
    """ generate a captcha image and specify the related input box parameters.
    Basically you have the following context variables:
    captcha_image: the url to the captcha image
    captcha_input_name: the input box name to submit the captcha word
    captcha_hidden_input_name: the hidden input box name to submit the captcha id
    captcha_id: captcha id, for the hidden input box's content
    You will need to submit both the captcha word and the captcha id
    in your form for validation. Sample code:
    <div id='captcha'>
    {% trans "Please enter the word you see in the picture" %}
    <input type="text" name="{{captcha_input_name}}"/>
    <input type="hidden" name="{{captcha_hidden_input_name}}"
    value="{{captcha_id}}"/><input type="submit" name="submit" value="{%
    trans 'submit' %}" /><br />
    <img src={{captcha_image}} width=150 height=60 />
    </div>
    """
    def __init__(self, anon_users_only):
        # When True, the captcha is only shown to anonymous visitors.
        self.anon_users_only = anon_users_only
    def render(self, context):
        if self.anon_users_only:
            user = context.get('user', None)
            # BUG FIX: the old test was inverted ("if not
            # user.is_authenticated(): return ''"), which hid the captcha
            # from the anonymous users it was meant for, and crashed with an
            # AttributeError when no user was in the context.  Skip the
            # captcha for logged-in users instead.
            if user is not None and user.is_authenticated():
                return ""
        name = Tests.__all__[0]
        test = _getFactory(SITE_ID).new(getattr(Tests, name))
        context['captcha_image'] = '/captcha/i/?id=%s' % test.id
        context['captcha_input_name'] = "captcha_word"
        context['captcha_hidden_input_name'] = "captcha_id"
        context['captcha_id'] = test.id
        return ''
def captcha(parser, token):
    """
    {% captcha %}
    """
    # Always render a captcha, regardless of authentication state.
    return captcha_class(False)
def captcha_anon(parser, token):
    """
    {% captcha_anon %}
    """
    # Render a captcha for anonymous visitors only.
    return captcha_class(True)
# Wire both tags into a fresh template library for this module.
register = template.Library()
register.tag('captcha', captcha)
register.tag('captcha_anon', captcha_anon)
| Python |
from westom.feednut.models import *
from westom.feednut.libs import OPMLSubscription as opml
from westom.feednut.utils import feed_accomplice
from westom.feednut.utils import user as user_utils
from westom.feednut.utils.lucene_utils import FeedSearcher
from westom.feednut.utils import djangojson
from westom.feednut import mail
from westom.settings import URL_HOST
from django.shortcuts import render_to_response, get_object_or_404
from django.http import *
from django.template import loader, RequestContext
from django.db.models.query import Q
from django.db import connection
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.views.decorators.cache import cache_page
import urllib
import StringIO
import datetime
import md5
import random
import logging
def get_user_or_404(username):
    """Look up a User by trimmed, case-insensitive username; 404 if absent."""
    name = username.strip()
    return get_object_or_404(User, username__iexact=name)
def forgot_password(request):
    """Step one of password recovery.

    With an 'email' request parameter: record a ForgotPassword row keyed by
    a generated hash and mail the reset link.  Without it: show the form.
    """
    if request.has_key('email'):
        email = request['email'].strip()
        try:
            user = User.objects.get(email__iexact=email)
        except User.DoesNotExist:
            # narrowed from a bare except that also hid database errors
            return HttpResponseNotFound('Email does not exist for any users.')
        else:
            # NOTE(review): md5(email+timestamp+randint(0,1000)) is a fairly
            # guessable token; a cryptographically random value would be
            # safer -- TODO.
            hash = md5.new('%s-%s-%s' % (email, datetime.datetime.now(), random.randint(0, 1000))).hexdigest()
            fpass = ForgotPassword(email=email, hash=hash)
            fpass.save()
            mail.send_password_reset(email, hash)
            return HttpResponse('An email has been sent to \'%s\'.' % email)
    else:
        return render_to_response('forgot_password.html', RequestContext(request))
def reset_password(request):
    """Two-phase password reset.

    Phase 1 ('id' param): validate the emailed hash and show the reset form.
    Phase 2 (new_pass1/new_pass2/hash/email/username params): verify
    everything, set the new password, log the user in, and delete all
    outstanding reset requests for that email.  Anything else redirects home.
    """
    if request.has_key('id'):
        id = request['id']
        # NOTE(review): leftover debug print -- should be removed.
        print id
        try:
            fpass = ForgotPassword.objects.get(hash=id)
        except:
            # unknown/expired hash (bare except also hides DB errors)
            return HttpResponseRedirect("/")
        else:
            context = RequestContext(request)
            context['email'] = fpass.email
            context['hash'] = fpass.hash
            try:
                context['username'] = User.objects.get(email__iexact=fpass.email)
            except:
                return HttpResponseRedirect("/")
            return render_to_response('reset_password.html', context)
    elif request.has_key('new_pass1') and request.has_key('new_pass2') and \
        request.has_key('hash') and request.has_key('email') and request.has_key('username'):
        new_pass1 = request['new_pass1']
        new_pass2 = request['new_pass2']
        hash = request['hash']
        email = request['email']
        username = request['username']
        error = None
        #make sure the user exists
        try:
            user = User.objects.get(username__iexact=username)
        except:
            error = 'Invalid request - no username "%s"' % username
        else:
            #make sure the forgot password request exists
            try:
                forgotPass = ForgotPassword.objects.get(email=email, hash=hash)
            except:
                error = 'Invalid request - this password reset request is no longer valid'
            else:
                #make sure the two new passwords are ok
                if len(new_pass1) < 5:
                    error = 'Password too short. Must be at least 5 characters'
                elif new_pass1 != new_pass2:
                    error = 'Passwords do not match'
                else:
                    #if all is OK, then set the password and save!
                    user.set_password(new_pass1)
                    user.save()
                    #might as well just send us to the login view then take us home
                    # (login() reads username/password from request.POST, so
                    # the POST dict is copied and patched in place here)
                    post = request.POST.copy()
                    post['username'] = username
                    post['password'] = new_pass1
                    request.POST = post
                    login(request)
                    #delete all forgot password requests for this email
                    ForgotPassword.objects.filter(email__iexact=email).delete()
                    return HttpResponseRedirect('/%s/' % username)
        # fall through with the validation error re-rendering the form
        context = RequestContext(request)
        context['error'] = error
        context['username'] = username
        context['hash'] = hash
        context['email'] = email
        return render_to_response('reset_password.html', context)
    return HttpResponseRedirect("/")
def index(request):
    """Front door: first detect javascript support, then route by login."""
    context = RequestContext(request)
    script_seen = request.session.get('script', False)
    if not script_seen:
        # First visit without the ?script marker: serve the detection page.
        if not request.GET.has_key('script'):
            return render_to_response('base.html', context)
        # Marker present: remember it in the session, bounce to a clean URL.
        request.session['script'] = True
        return HttpResponseRedirect('/')
    if request.user.is_authenticated():
        return HttpResponseRedirect('/%s/' % request.user.username)
    return HttpResponseRedirect('/guest/')
def login(request):
    """Handle the login form.

    POST with username/password: authenticate and return a JSON dict with
    'ok'/'bad_user' (and optional 'next').  GET with 'lb': lightbox login
    form.  Any other GET: the full index page.
    """
    retvals = {}
    user = None
    context = RequestContext(request)
    if 'next' in request.GET:
        context['next'] = request.GET['next']
    if request.POST.has_key('username') and request.POST.has_key('password'):
        username = request.POST.get('username').strip()
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user is not None:
            auth_login(request, user)
            # record the login time explicitly
            user.last_login = datetime.datetime.now()
            user.save()
            # Redirect to a success page.
        else:
            retvals['bad_user'] = 'Invalid username or password'
    elif 'lb' in request.GET:
        return render_to_response('login.html', context)
    else:
        #have an actual login page
        return render_to_response('index.html', context)
    #check to see if somebody else was already logged in. if so, we need to log them out
    if not user:
        logout(request)
    else:
        retvals['ok'] = True
        if 'next' in request.POST:
            retvals['next'] = urllib.unquote(request.POST['next'])
    return HttpResponse(djangojson.write(retvals), 'text/javascript')
def unauthenticated(request):
    """Route a request that requires login to /login/, preserving 'next'."""
    context = RequestContext(request)
    if 'next' not in request.GET:
        return HttpResponseRedirect("/login/")
    target = request.GET['next']
    if 'async' in target or 'json' in target:
        # Ajax callers only get the 403; the client is responsible for
        # redirecting to the login page itself.
        return HttpResponseForbidden("Error - You must be logged in to do that.")
    return HttpResponseRedirect("/login/?next=%s" % urllib.quote(target))
def logout(request):
    """ Logs the user out, and removes cookies if necessary """
    response = HttpResponseRedirect("/")
    auth_logout(request)
    try:
        for key in request.session.keys():
            del request.session[key]
    except Exception:
        # A failed session cleanup should never block the logout redirect;
        # was a silent bare `except:{}` -- at least log it now.
        logging.exception('error clearing session during logout')
    # NOTE(review): the session was just emptied above, so this flag read
    # likely always misses -- TODO confirm whether the cookie sweep is dead.
    if request.session.get('cookiesAllowed', '') == 'true':
        for key in request.COOKIES.keys():
            # (debug print removed) expire every cookie on the response
            response.delete_cookie(key)
        request.COOKIES = {}
        for key in request.session.__dict__['_session_cache'].keys():
            del request.session[key]
    return response
def pass_through(request, page, require_user=False):
    """Render the named template with the request context; 404 anonymous
    visitors when require_user is set."""
    if require_user and not request.user.is_authenticated():
        raise Http404
    user = request.user
    context = RequestContext(request)
    return render_to_response(page, context)
def register(request, captcha_error):
    """Create a new account from new_username/new_password/new_email POST.

    Returns a JSON dict of per-field errors ('bad_user', 'bad_email',
    'bad_pass', 'bad_captcha') or {'ok': True} on success; on success the
    user is logged in, welcomed by email, and given the default feeds.
    """
    user = None
    retvals = {}
    if len(captcha_error) > 0:
        retvals['bad_captcha'] = 'Invalid security word'
    if request.POST.has_key('new_username') and request.POST.has_key('new_password') and request.POST.has_key('new_email'):
        username = request.POST['new_username'].strip()
        password = request.POST['new_password']
        email = request.POST['new_email'].strip()
        user_msg = user_utils.is_valid_username(username)
        if user_msg:
            retvals['bad_user'] = user_msg
        email_msg = user_utils.is_valid_email(email)
        if email_msg:
            retvals['bad_email'] = email_msg
        if len(password) < 5:
            retvals['bad_pass'] = 'Password too short. Must be at least 5 characters'
        #see if the user already exists
        # (inverted EAFP: the except branch is the *success* path -- the
        # username is free only when the lookup raises)
        try:
            user = User.objects.get(username__iexact=username)
        except:
            if len(retvals) == 0:
                try:
                    user = User.objects.create_user(username, email, password)
                    user = authenticate(username=username, password=password)
                    auth_login(request, user)
                    retvals['ok'] = True
                    #send the welcome email
                    mail.send_welcome_email(user)
                    # seed the account with the site's default feeds
                    for feed in feed_accomplice.get_feeds(default_feed=True):
                        userfeed = feed_accomplice.add_userfeed(user, feed, ['fn:home'])
                    feed_accomplice.add_permanent_feeds(user)
                    if request.has_key('next'):
                        retvals['next'] = urllib.unquote(request['next'])
                except Exception, e:
                    logging.error(e)
                    retvals['bad_user'] = 'Unable to create account'
        else:
            retvals['bad_user'] = 'Username already exists'
    else:
        raise Http404
    return HttpResponse(djangojson.write(retvals), 'text/javascript')
def get_user_page(request, username, tags=None):
    """Render a user's feed page, optionally filtered by '+'-joined tags;
    only the owner sees private feeds."""
    context = RequestContext(request)
    page_user = get_object_or_404(User, username__iexact=username.strip())
    context['page_user'] = page_user
    # owners see private feeds too (only_public=None means "no filter")
    if request.user == page_user:
        only_public = None
    else:
        only_public = True
    if tags:
        tag_list = tags.split('+')
    else:
        tag_list = ['fn:home']
    context['feeds'] = feed_accomplice.get_feeds(user=page_user, tags=tag_list, only_public=only_public)
    if 'async' in request.GET:
        return render_to_response('feeds.html', context)
    return render_to_response('user_page.html', context)
#do this so that IE doesn't load the same page again... protects against logouts/logins
#user_page = cache_page(user_page, 1)
#@cache_page(60 * 1)
def get_feed(request, id):
    """Serve one feed: as a JSON bundle of rendered entries when ?json is
    present, otherwise as cached XML (or a redirect to the origin URL)."""
    feed = get_object_or_404(Feed, id=id)
    context = RequestContext(request)
    context['feed_id'] = id
    context['json'] = True
    if request.GET.has_key('json'):
        feedobj = {'description':feed.subtitle, 'channel_link':feed.channel_link, 'icon_url':feed.icon_url, 'title':feed.title, 'id':feed.id}
        obj = {'feed': feedobj}
        entrylist = []
        entries = feed.get_entries()
        # each entry is rendered to an HTML fragment via feed_entry.html
        for i, entry in zip(range(len(entries)), entries):
            context['entry'] = entry
            context['index'] = i
            entrylist.append(loader.render_to_string('feed_entry.html', context))
        obj['entries'] = entrylist
        obj['suggest_tags'] = feed.get_suggested_tags()
        return HttpResponse(djangojson.write(obj), 'text/javascript')
    else:
        data = feed.get_xml_data()
        if data is not None:
            response = HttpResponse(mimetype='text/xml')
            response['Content-Type'] = 'text/xml'
            response.write(data)
            return response
        else:
            # no cached XML: send the client to the feed's own URL
            return HttpResponseRedirect(feed.xml_url)
#user_page = cache_page(user_page, 1)
@login_required
def form_subscribe(request):
    """Show the subscribe form (or full subscribe page) for the feed named
    by the 'url' request parameter; 404 without it."""
    context = RequestContext(request)
    if 'calling_link' in request.GET:
        context['calling_link'] = request.GET['calling_link'] #for hiding the add_feed button after subscribing
    if 'page_user_id' in request.GET:
        context['on_my_page'] = int(request.GET['page_user_id']) == int(request.user.id)
    # request.has_key / request.REQUEST check GET and POST together
    if request.has_key('url'):
        context['feed'] = feed_accomplice.get_feed(url=request.REQUEST['url'])
    else:
        raise Http404
    context['tags'] = user_utils.get_tags(user=request.user)
    if 'async' in request.GET:
        context['async'] = True
        return render_to_response('form_subscribe.html', context)
    else:
        return render_to_response('subscribe.html', context)
@login_required
def subscribe(request):
    """Subscribe the current user to a feed given by 'feed_id' or 'url',
    with optional whitespace-separated 'tags' (default fn:home), and return
    the rendered feed block (empty response if already subscribed)."""
    if 'feed_id' in request.POST:
        feed_id = request.POST['feed_id']
    elif request.has_key('url'):
        feed_id = feed_accomplice.get_feed(request.REQUEST['url']).id
        if not feed_id:
            raise Http404
    else:
        raise Http404
    tags = []
    if 'tags' in request.POST:
        tags = request.POST['tags'].strip().split()
    else:
        tags = ['fn:home']
    #see if the user already has this feed
    # NOTE(review): bare except doubles as the "not subscribed yet" branch,
    # so real DB errors would also trigger a subscription attempt.
    try:
        userfeed = UserFeed.objects.get(user__id__exact=request.user.id, feed__id__exact=feed_id)
        return HttpResponse('') #already has userfeed, return empty string
    except:
        userfeed = feed_accomplice.add_userfeed(request.user, feed_id, tags=tags)
    context = RequestContext(request)
    context['page_user'] = request.user
    context['userfeed'] = userfeed
    context['feed'] = userfeed.feed
    #render just this new feed page and return it
    return render_to_response('feed.html', context)
def search_feeds(request):
    """Search feeds by title/description (default), tag ('tags:' prefix) or
    url ('url:' prefix), with 'o'/'l' offset/limit paging, and render
    search_results.html.  404 without a 'query' parameter."""
    if 'query' in request.GET:
        query = request.GET['query'].strip()
        feeds = []
        offset = 0
        limit = 10
        total = 0
        if 'o' in request.GET:
            try:
                offset = int(request.GET['o'])
            except ValueError:
                pass  # malformed offset: keep the default
            if offset < 0:
                offset = 0
        if 'l' in request.GET:
            try:
                limit = int(request.GET['l'])
            except ValueError:
                pass  # malformed limit: keep the default
            if limit < 0:
                limit = 10
        if len(query) < 2:
            results = []
        else:
            # results = FeedSearcher().search_feeds(query)
            type = 'desc'
            if query.startswith('tags:'):
                # BUG FIX: the code stripped 'tag:' while testing for
                # 'tags:', so the literal prefix stayed in the search text.
                query = query[len('tags:'):]
                type = 'tag'
            elif query.startswith('url:'):
                query = query.replace('url:', '')
                type = 'url'
            if len(query) < 2:
                # BUG FIX: this branch used to set `feedlist`, leaving
                # `results` unbound and raising NameError further down.
                results = []
            elif type == 'tag':
                # Parameterised WHERE clause -- the old code concatenated the
                # raw query string into the SQL (injection risk).
                where = ['feednut_userfeed.feed_id=feednut_feed.id',
                         'feednut_userfeedtag.user_feed_id=feednut_userfeed.id',
                         'feednut_userfeedtag.tag_id=feednut_tag.id',
                         'feednut_tag.tag=%s']
                params = [query]
                tables=['feednut_tag', 'feednut_userfeed', 'feednut_userfeedtag']
                total = Feed.objects.extra(where=where, params=params, tables=tables).distinct().count()
                results = Feed.objects.extra(where=where, params=params, tables=tables).distinct()[offset:offset+limit]
            elif type == 'url':
                q = (Q(xml_url__icontains=query) | Q(channel_link__icontains=query))
                filter = Feed.objects.filter(q)
                total = filter.count()
                results = filter[offset:offset+limit]
            else:
                q = (Q(title__icontains=query) | Q(subtitle__icontains=query))
                total = Feed.objects.filter(q).count()
                results = Feed.objects.filter(q)[offset:offset+limit]
        context = RequestContext(request)
        context['results'] = results #[offset:offset+limit]
        context['query'] = query
        context['total'] = total #len(results)
        context['prevoffset'] = max(0, offset - limit)
        context['offset'] = offset + 1
        context['endoffset'] = offset + len(results)
        context['limit'] = limit
        try:
            context['page_user'] = User.objects.get(username__iexact=request.GET['page_user'])
        except (KeyError, User.DoesNotExist):
            pass  # page_user is optional
        return render_to_response('search_results.html', context)
    raise Http404
@login_required
def tag_feed(request):
    """POST {id, tags}: replace the tag set on one of the current user's
    feeds and return the new tag string as JSON; 404 without an id."""
    if request.POST.has_key('id'):
        id = request.POST['id'].strip()
        try:
            userfeed = UserFeed.objects.get(id=id, user__id__exact=request.user.id)
        except UserFeed.DoesNotExist:
            # BUG FIX: this used `raise HttpResponseServerError()` -- HTTP
            # responses must be *returned*; raising one is itself an error.
            return HttpResponseServerError()
        tags = []
        if 'tags' in request.POST:
            tags = request.POST['tags'].strip().split()
        feed_accomplice.tag_feed(userfeed, tagnames=tags)
        return HttpResponse(djangojson.write(userfeed.get_tags_string()), 'text/javascript')
    raise Http404
@login_required
def remove_feed(request):
    """POST {id}: unsubscribe the current user from a feed; permanent feeds
    cannot be removed.  404 when there is nothing to remove."""
    if request.POST.has_key('id'):
        id = request.POST['id'].strip()
        try:
            userfeed = UserFeed.objects.get(feed__id__exact=id, user__id__exact=request.user.id)
        except UserFeed.DoesNotExist:
            pass  # no such subscription: fall through to the 404 below
        else:
            if userfeed.permanent_feed:
                # BUG FIX: was `HttpServerError`, an undefined name that
                # would have raised NameError instead of answering 500.
                return HttpResponseServerError("Cannot remove a permanent feed.")
            else:
                userfeed.delete()
                return HttpResponse('')
    raise Http404
def read_article(request):
    """Log a click-through: push the entry onto the sitewide 'latest' feed
    (and the user's own, when logged in) then redirect to the article."""
    if request.has_key('url') and request.has_key('title') and request.has_key('feed'):
        if request.has_key('summary'):
            summary = request['summary']
        else:
            summary = 'Read full article for more details'
        link = request['url'].strip()
        xml_url = request['feed'].strip()
        title = request['title'].strip()
        if request.user.is_authenticated():
            # mirror the click into the user's personal "latest" feed
            users_feed = feed_accomplice.get_system_feed('%s/%s/latest.rss' % (URL_HOST, request.user.username))
            feed_accomplice.push_entry(users_feed, title=title, link=link, description=summary, xml_url=xml_url)
        system_feed = feed_accomplice.get_system_feed('%s/latest.rss' % URL_HOST)
        feed_accomplice.push_entry(system_feed, title=title, link=link, description=summary, xml_url=xml_url)
        if request.user.is_authenticated():
            uname = request.user.username
        else:
            uname = 'Anonymous User'
        logging.info("%s is reading this entry's link: %s" % (uname, link))
        return HttpResponseRedirect(link)
    raise Http404
@login_required
def read_later(request):
    """POST {url, title, summary, xml_url}: push an entry onto the user's
    personal read-later feed."""
    if 'url' in request.POST and 'title' in request.POST and 'summary' in request.POST and 'xml_url' in request.POST:
        users_feed = feed_accomplice.get_system_feed('%s/%s/readlater.rss' % (URL_HOST, request.user.username))
        feed_accomplice.push_entry(users_feed, title=request.POST['title'], link=request.POST['url'].strip(), description=request.POST['summary'], xml_url=request.POST['xml_url'])
        return HttpResponse("")
    # BUG FIX: the view previously fell off the end and returned None (a
    # server error in Django); answer missing params with a 404 like the
    # sibling views do.
    raise Http404
def new_captcha(request):
    """Create a fresh captcha for a logged-in user and return its id and
    image URL as JSON; 404 for anonymous callers."""
    user = request.user
    if not request.user.is_authenticated():
        raise Http404
    # local imports kept as in the original module
    from westom.settings import SITE_ID
    from Captcha.Visual import Tests
    import Captcha
    import tempfile
    test_name = Tests.__all__[0]
    store = tempfile.gettempdir() + "/pycaptcha_%d" % SITE_ID
    factory = Captcha.PersistentFactory(store)
    test = factory.new(getattr(Tests, test_name))
    vals = {'captcha_image': '/captcha/i/?id=%s' % test.id, 'captcha_id' : test.id}
    return HttpResponse(djangojson.write(vals), 'text/javascript')
def get_user_tags(request, username):
    """JSON list of (tag, count) pairs for a user's feeds; private tags are
    included only when the requester is the owner."""
    page_user = get_user_or_404(username)
    only_public = page_user != request.user
    tags = user_utils.get_tags(page_user, only_public=only_public)
    pairs = [(tag.tag, tag.count) for tag in tags]
    return HttpResponse(djangojson.write(pairs), 'text/javascript')
def page_not_found(request):
    """Custom 404 handler: render 404.html with a 404 status code."""
    response = render_to_response('404.html', RequestContext(request))
    response.status_code = 404
    return response
@login_required
def import_subscriptions(request, username):
    """ Request to import a list of feeds in OPML Subscription format"""
    #this is causing firefox to crash when an un-authenticated user trys to import subs. Should redirect to error page. Not sure why.
    # NOTE(review): FILES['opmlfile']['content'] is the pre-Django-1.0
    # upload API -- confirm before any framework upgrade.
    try:
        xmlData= StringIO.StringIO(request.FILES['opmlfile']['content'])
        subscriptions = opml.parseOpml(xmlData)
    except:
        return HttpResponseServerError('An error occurred parsing the input file')
    for sub in subscriptions:
        url = sub.xmlUrl.strip().lower()
        # best-effort: a feed that fails to resolve is silently skipped
        try:
            feed = feed_accomplice.get_feed(url)
            feed_accomplice.add_userfeed(request.user, feed)
        except:{}
    return HttpResponseRedirect("/%s/" % request.user.username)
def export_subscriptions(request, username):
    """Export the users list of feeds in OPML Subscription format """
    context = RequestContext(request)
    #load the user, if they are logged in
    username = username.strip()
    page_user = get_object_or_404(User, username__iexact=username)
    # anonymous requesters (no .id on request.user) only get public feeds
    try:
        only_public = (request.user.id != page_user.id)
    except:
        user = None
        only_public = True
    context['page_user'] = page_user
    #context['feeds'] = page_user.get_feeds(only_public=only_public)
    #context['feeds'] = user_utils.get_user_feeds(page_user, only_public=only_public)
    context['feeds'] = user_utils.get_feeds(page_user, only_public=only_public)
    # serve as an XML attachment named after the user
    response = render_to_response("export_opml.html", context)
    response['Content-Type'] = 'text/xml'
    response['Content-Disposition'] = 'attachment; filename=%s_feeds.opml' % page_user.username
    return response
@login_required
def add_user_buddy(request, username):
    """Add the user named by the 'buddy' request parameter to the current
    user's buddy list; the URL username must match the logged-in user."""
    if request.user.username.lower() != username.lower():
        return HttpResponseServerError()
    if request.has_key('buddy'):
        buddy_name = request['buddy']
        buddy = get_object_or_404(User, username__iexact=buddy_name)
        try:
            UserBuddy.objects.get(user__id__exact=request.user.id, buddy__id__exact=buddy.id)
        except UserBuddy.DoesNotExist:
            # narrowed from a bare except: only "not yet a buddy" creates one
            userbuddy = UserBuddy(user=request.user, buddy=buddy)
            userbuddy.save()
            return HttpResponse('')
        else:
            return HttpResponse('Already have buddy!')
    raise Http404
@login_required
def remove_user_buddy(request, username):
    """Remove the 'buddy' named in the request from the current user's buddy
    list; always answers 200 (fails silently) apart from the owner check."""
    if request.user.username.lower() != username.lower():
        # BUG FIX: was `raise HttpResponseServerError()` -- responses are
        # returned, not raised.
        return HttpResponseServerError()
    if request.has_key('buddy'):
        buddy_name = request['buddy']
        # (debug print removed)
        try:
            buddy = UserBuddy.objects.get(user__id__exact=request.user.id, buddy__username__iexact=buddy_name)
        except UserBuddy.DoesNotExist:
            pass  # nothing to remove; fail silently
        else:
            buddy.delete()
    #for now, we just always return a valid http response... basically, failing silently
    return HttpResponse('')
def get_user_buddies(request, username):
    """JSON array of the usernames of the given user's buddies."""
    #TODO only select the username to make it quicker
    user = get_object_or_404(User, username__iexact=username)
    names = [buddy.username for buddy in user_utils.get_user_buddies(user)]
    return HttpResponse(djangojson.write(names), 'text/javascript')
def generate_feed(request):
    ''' having a little fun generating feeds... so might as well let users see them

    Scrapes one of two hard-coded pages (the python.org job board or the
    BBC top 40 chart), builds an RSS feed from the scraped items, and
    returns it as XML.  404 for any other url.  Response is cached for
    120 seconds (see the cache_page wrap below).
    '''
    from westom.feednut.utils.HtmlDom import HtmlDom, toHTML
    from westom.feednut.libs.ScrapeNFeed import ScrapedFeed
    from westom.feednut.libs.PyRSS2Gen import RSSItem, Guid
    from westom.feednut.utils.rssgen import SimpleFeed
    if request.has_key('url'):
        url = request['url'].lower()
        job_board = 'http://www.python.org/community/jobs/'
        top40 = 'http://www.bbc.co.uk/radio1/chart/top40.shtml'
        feed = None
        if url == job_board.lower():
            dom = HtmlDom(job_board)
            rssItems = []
            title = dom.evaluate("/html:html/html:head/html:title/text()")[0].nodeValue
            description = 'Feed generated for %s by FeedNut' % job_board
            # each job posting is a div.section with an id attribute
            job_ops = dom.evaluate("//html:div[@class='section'][@id][position()>0]")
            for i, job_op in zip(range(len(job_ops)), job_ops):
                # a posting without a title link is skipped entirely
                try: itemTitle = dom.evaluate("html:h2/html:a[@class='reference']/text()", node=job_op)[0].nodeValue
                except: continue
                try: link = dom.evaluate("html:h2/html:a[@class='reference']/@href", node=job_op)[0].nodeValue
                except: link = None
                try: itemDesc = toHTML(job_op).replace('html:', '')
                except: itemDesc = None
                # guid: link plus index, or the title when there is no link
                item = RSSItem(title=itemTitle, description=itemDesc, link=link, guid=Guid(link and ('%s#%s' % (link, i)) or itemTitle))
                rssItems.append(item)
            feed = SimpleFeed(title, job_board, description, rssItems)
            feed.refresh()
        elif url == top40.lower():
            dom = HtmlDom(top40)
            rssItems = []
            title = dom.evaluate("/html:html/html:head/html:title/text()")[0].nodeValue
            description = 'Feed generated for %s by FeedNut' % top40
            songs = dom.evaluate("//html:td[@class='col4']")
            for song in songs:
                # artist (h4) and track (h5) are both required; link/image
                # are optional extras
                try: artist = dom.evaluate("html:h4/text()", node=song)[0].nodeValue
                except: continue
                try: track = dom.evaluate("html:h5/text()", node=song)[0].nodeValue
                except: continue
                try: link = dom.evaluate("html:a/@href", node=song)[0].nodeValue
                except: link = None
                try: img = dom.evaluate("html:img/@src", node=song)[0].nodeValue
                except: img = None
                itemTitle = '%s - %s' % (artist, track)
                itemDesc = '<p>%s</p>%s' % (itemTitle, img and ('<img src="%s"/>' % img) or '')
                item = RSSItem(title=itemTitle, description=itemDesc, link=link, guid=Guid(link or itemTitle))
                rssItems.append(item)
            feed = SimpleFeed(title, top40, description, rssItems)
            feed.refresh()
        if feed is not None:
            response = HttpResponse(mimetype='text/xml')
            response['Content-Type'] = 'text/xml'
            response.write(feed.to_xml())
            return response
    raise Http404
# cache generated feeds for two minutes
generate_feed = cache_page(generate_feed, 120)
def userfeed_action(request, username, id):
    """Handle AJAX actions (the 'a' request parameter) on a user's feed.

    Supported actions:
      'items'   -- owner only, POST: set how many entries of the feed to show
      'entries' -- render the feed's entries as HTML snippets and return them
                   as JSON with feed metadata and suggested tags

    Returns HttpResponseServerError for unknown subscriptions, non-owners,
    bad input and unrecognized actions.
    """
    try:
        # filter()[0] raises IndexError when nothing matches; an unparsable id
        # can raise ValueError.  Catch only those instead of a bare except so
        # genuine programming errors still surface.
        userfeed = UserFeed.objects.filter(feed__id__exact=id, user__username__iexact=username)[0]
    except (IndexError, ValueError):
        return HttpResponseServerError('Invalid UserFeed')
    if request.REQUEST.has_key('a'):
        action = request.REQUEST['a'].strip()
        if action == 'items' and request.POST.has_key('numitems'):
            # make sure the user requesting is the owner
            if username.lower() != request.user.username.lower():
                return HttpResponseServerError('Invalid user')
            try:
                # clamp to >= 0; non-numeric input used to escape as an
                # unhandled 500 traceback instead of a clean error response
                numitems = max(int(request.POST.get('numitems')), 0)
            except (TypeError, ValueError):
                return HttpResponseServerError('Invalid numitems')
            userfeed.num_items = numitems
            userfeed.save()
            return HttpResponse('')
        elif action == 'entries':
            context = RequestContext(request)
            feed = userfeed.feed
            context['feed_id'] = feed.id
            context['json'] = True
            feedobj = {'description':feed.subtitle, 'channel_link':feed.channel_link, 'icon_url':feed.icon_url, 'title':feed.title, 'id':feed.id}
            obj = {'feed': feedobj}
            entrylist = []
            entries = userfeed.get_entries()
            # enumerate() instead of zip(range(len(...)), ...)
            for i, entry in enumerate(entries):
                context['entry'] = entry
                context['index'] = i
                entrylist.append(loader.render_to_string('feed_entry.html', context))
            obj['entries'] = entrylist
            obj['suggest_tags'] = feed.get_suggested_tags()
            return HttpResponse(djangojson.write(obj), 'text/javascript')
    return HttpResponseServerError('No valid action requested')
| Python |
from django.core.exceptions import ObjectDoesNotExist, ViewDoesNotExist
from westom.settings import SITE_ID
from django.core import urlresolvers
from django.http import Http404, HttpResponse, HttpResponseRedirect
from Captcha.Visual import Tests
import Captcha
import tempfile
def _getFactory( id ):
    """Return the per-site persistent captcha factory, stored in a temp file."""
    store_path = "%s/pycaptcha_%d" % (tempfile.gettempdir(), id)
    return Captcha.PersistentFactory(store_path)
def image( request ):
    """
    Generate the image to show users with the magic word embedded inside

    Returns the rendered captcha as image/jpeg, or raises Http404 when the
    id parameter is missing or unknown.
    """
    if request.GET:
        # .get() instead of ["id"]: a request without the parameter used to
        # die with an unhandled KeyError instead of a clean 404
        captcha_id = request.GET.get("id")
        if captcha_id is not None:
            test = _getFactory(SITE_ID).get(captcha_id)
            if test is not None:
                response = HttpResponse(mimetype="image/jpeg")
                test.render().save(response, "JPEG")
                return response
    raise Http404("not found")
def verify( request, forward_to, *arguments, **keywords):
"""
verify the captcha and then forward the request
TBD: redirect to the original form with a validation error
"""
captcha_error = []
if request.POST:
id = request.POST["captcha_id"]
word = request.POST["captcha_word"]
test = _getFactory(SITE_ID).get(id)
if not test:
captcha_error.append('Invalid captcha id.')
if not test.valid:
captcha_error.append('Test invalidated, try again.')
elif not test.testSolutions([word]):
captcha_error.append('Invalid word.')
mod_name, func_name = urlresolvers.get_mod_func(forward_to)
try:
func, ignore = getattr(__import__(mod_name, '', '', ['']), func_name), {}
return func(request, captcha_error, *arguments, **keywords)
except (ImportError, AttributeError), e:
raise ViewDoesNotExist, "Tried %s. Error was: %s" % (forward_to, str(e))
def verify_anon( request, forward_to, *arguments, **keywords):
"""
verify the captcha and then forward the request for anonymous users only
TBD: redirect to the original form with a validation error
"""
captcha_error = []
if request.POST and request.user.is_authenticated():
id = request.POST["captcha_id"]
word = request.POST["captcha_word"]
test = _getFactory(SITE_ID).get(id)
if not test:
captcha_error.append('Invalid captcha id.')
if not test.valid:
captcha_error.append('Test invalidated, try again.')
elif not test.testSolutions([word]):
captcha_error.append('Invalid word.')
mod_name, func_name = urlresolvers.get_mod_func(forward_to)
try:
func, ignore = getattr(__import__(mod_name, '', '', ['']), func_name), {}
return func( request, captcha_error, *arguments, **keywords)
except (ImportError, AttributeError), e:
raise ViewDoesNotExist, "Tried %s. Error was: %s" % (forward_to, str(e))
| Python |
from django.core.mail import send_mail
def send_welcome_email(user):
    """Mail the standard FeedNut welcome message to a newly created user."""
    username = user.username
    welcome_subject = 'Welcome to FeedNut'
    welcome_body = """Thanks for signing up for a free account at www.feednut.com!
Now you can quickly organize and read the news YOU want to read by subscribing to news feeds that interest you!
At account creation, we signed you up for some of the top feeds on FeedNut, just to get you started. Feel free to unsubscribe from them if you wish.
Since FeedNut is a community system, you can also see what other people are reading, share articles with friends, and link into several other social sites, such as www.del.icio.us and www.reddit.com.
Have fun staying on top of the news!
Your username: %s
Your homepage: http://www.feednut.com/%s/
Thanks again!
The FeedNut Team
(Tom and Wes)""" % (username, username)
    return send_mail(welcome_subject, welcome_body, None, [user.email], fail_silently=False)
def send_suggested_link(from_user, to_email, url, title=None, message=None):
    """ the from_user wants to send a link/article to an email

    from_user -- the User suggesting the link
    to_email  -- recipient address
    url       -- the link being shared
    title     -- optional article title, appended to the subject line
    message   -- optional personal note from the sender.  BUGFIX: this
                 argument was previously accepted but silently discarded
                 (the local variable was immediately overwritten); it is
                 now prepended to the email body when provided.
    """
    subject = 'FeedNut.com link%s' % (title and ': %s' % title or '')
    body = """A user at www.feednut.com (%s), saw this link and thought you might like it:
%s
After reading the article, check out www.feednut.com for yourself! FeedNut is FREE to use, and lets you stay on top of the news you want to read!
- or -
Check out %s's FeedNut homepage at www.feednut.com/%s/
The FeedNut Team
(Tom and Wes)""" % (from_user.username, url, from_user.username, from_user.username)
    if message:
        body = "%s wrote:\n%s\n\n%s" % (from_user.username, message, body)
    return send_mail(subject, body, None, [to_email], fail_silently=False)
def send_password_reset(email, hash):
    """Mail *email* a one-time password-reset link keyed by *hash*."""
    reset_subject = 'FeedNut.com Password Reset Request'
    reset_body = """Oh No! You forgot your password! It's ok though. We know you've got a lot on your mind.
Please visit the URL below in order to reset your password.
http://www.feednut.com/login/reset/?id=%s
The FeedNut Team
(Tom and Wes)""" % (hash,)
    return send_mail(reset_subject, reset_body, None, [email], fail_silently=False)
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.1 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
# set to 1 for verbose diagnostic tracing on stderr
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
# internal exception hierarchy; ThingsNobodyCaresAboutButMe is the base class
# for recoverable character-encoding conditions raised while parsing
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# loosen sgmllib's token patterns: allow ':' and '.' in tag names (namespaced
# elements), treat only '<!' as special, and recognize hex character references
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
# human-readable labels for every feed format/version this parser can detect
SUPPORTED_VERSIONS = {
    '': 'unknown',
    'rss090': 'RSS 0.90',
    'rss091n': 'RSS 0.91 (Netscape)',
    'rss091u': 'RSS 0.91 (Userland)',
    'rss092': 'RSS 0.92',
    'rss093': 'RSS 0.93',
    'rss094': 'RSS 0.94',
    'rss20': 'RSS 2.0',
    'rss10': 'RSS 1.0',
    'rss': 'RSS (unknown version)',
    'atom01': 'Atom 0.1',
    'atom02': 'Atom 0.2',
    'atom03': 'Atom 0.3',
    'atom10': 'Atom 1.0',
    'atom': 'Atom (unknown version)',
    'cdf': 'CDF',
    'hotrss': 'Hot RSS',
}
try:
    # on Python 2.2+ UserDict is just the builtin dict type
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    # minimal dict() replacement: builds a dict from a list of (key, value) pairs
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary with attribute-style access and legacy key aliasing.

    Lets result['channel'] and result.feed refer to the same data by mapping
    old RSS/Atom terminology onto the normalized names via keymap.
    """
    # old name -> normalized name; a list value means "first alias present wins"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category'/'categories' are synthesized from the stored 'tags' list
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            # several candidate aliases: return the first one actually present
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        if UserDict.has_key(self, key):
            # an explicitly stored key wins over its mapped alias
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # store under the normalized name so lookups via either name agree
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # reimplemented so aliased lookups (via our __getitem__) work with get()
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # attributes count as keys too, mirroring __getattr__ below
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # fall back to item access so result.feed works like result['feed'];
        # underscored names are never treated as items
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # internal/underscored attributes live in __dict__ ('data' is the
        # UserDict storage slot); everything else is stored as an item
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Replace FeedParserDict with a plain-dict factory (for use under Zope).

    After calling this, parse results are ordinary dicts: attribute-style
    access and legacy key aliasing are lost.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc
# lazily-built translation table for converting EBCDIC bytes to ASCII/latin-1
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate the EBCDIC-encoded string *s* to its ASCII equivalent."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point -> ASCII/latin-1 code point, indexed 0..255
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# matches 'scheme://' followed by any run of extra slashes that must be dropped
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Like urlparse.urljoin, but tolerant of extra slashes after the scheme."""
    return urlparse.urljoin(base, _urifixer.sub(r'\1\3', uri))
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Reset all feed-level, entry-level and SGML parser state.

        baseuri  -- seed for xml:base resolution of relative links
        baselang -- default xml:lang inherited from the transport layer
        encoding -- character encoding used when decoding element text
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # build the lowercased-URI lookup once; this mutates the
            # class-level dict, so all later instances share it
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        """SGML callback for every start tag: track xml:base/xml:lang,
        namespaces and inline content, then dispatch to a _start_* handler."""
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs: lowercase names; lowercase rel/type values
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
            self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """SGML callback for every end tag: dispatch to a _end_* handler and
        unwind the xml:base / xml:lang stacks."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # re-emit the closing tag as escaped text inside the content
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        """Append a named entity reference (e.g. 'copy' for &copy;) to the
        currently open element's text, resolving it to UTF-8 when known."""
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # XML-significant entities stay escaped
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            # unknown entities are left escaped verbatim
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data, so they are deliberately ignored
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions are deliberately ignored
        pass
    def handle_decl(self, text):
        # SGML declarations (e.g. DOCTYPE) are deliberately ignored
        pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a declared namespace, inferring the feed version from
        well-known namespace URIs when it is not yet known."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: map the feed's prefix to our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses to decode entities within embedded markup;
        # the base implementation passes data through untouched
        return data
    def push(self, element, expectingText):
        # open a new element frame: [name, whether text content is expected,
        # list of accumulated text pieces]
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the element frame opened by push(): join its text pieces,
        post-process them (base64, relative URIs, entities, sanitizing,
        unicode) and store the result in the feed or current entry.

        Returns the processed text, or None if the stack is empty/mismatched.
        """
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                # entries can have multiple content blocks; append them all
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            # feed-level data, but only outside <textinput>/<image> subtrees
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Enter a content-bearing element, recording its type/language/base.

        NOTE: self.contentparams must be assigned before calling _isBase64,
        because _isBase64 inspects self.contentparams['type'].
        """
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        """Leave a content-bearing element; returns its accumulated value."""
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
    def _getAttribute(self, attrsD, name):
        # look up an attribute by its namespace-normalized name
        return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value):
        # store value in the current context, but never overwrite an existing one
        context = self._getContext()
        context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # <dlhottitles> identifies the "Hot RSS" format
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # <channel> opens feed-level data (RSS); CDF uses <feedinfo> instead
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries channel metadata in attributes; synthesize the
        # equivalent start/text/end handler calls for each one
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
    def _end_channel(self):
        # leaving feed-level data
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # enter <image>; the inimage flag keeps its title/url/link children
        # from being stored as feed-level elements (see pop())
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
    def _end_image(self):
        # leave <image>
        self.pop('image')
        self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # Author/contributor state flags steer where child <name>/<email>/<url>
        # values are routed by the corresponding _end_* handlers below.
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # Each contributor gets its own dict appended to context['contributors'].
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries the name directly as element text.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # Route the name to whichever construct is currently open
        # (publisher > author > contributor > textinput, in that priority).
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
    def _start_height(self, attrsD):
        # Collect image height text; _end_height converts it to an int.
        self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        # Route the URL to whichever construct is currently open.
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # Route the e-mail address to publisher, author, or contributor.
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
    def _getContext(self):
        # The dict currently receiving parsed data: <source> metadata, the
        # most recent entry, or the feed itself, in that order of precedence.
        if self.insource:
            context = self.sourcedata
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context
    def _save_author(self, key, value, prefix='author'):
        # Store one detail (name/email/href) under '<prefix>_detail', then
        # resynchronize the flat '<prefix>' string from the detail dict.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # Store one detail on the most recently opened contributor dict.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        # Keep the flat string form ('Name (email)') and the structured
        # '<key>_detail' dict consistent, whichever direction has data.
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            # detail dict -> flat string
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            # flat string -> detail dict: pull an e-mail address out of the
            # free-form author string, the remainder becomes the name
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Open a new entry; subsequent data targets this entry via _getContext.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        # RSS 1.0 (RDF) carries the entry id in rdf:about
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        # Parsed dates are stored alongside the raw string as 9-tuples in GMT.
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # Creative Commons license URL arrives as an rdf:resource attribute,
        # so the element is pushed and popped within the start handler.
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        # Atom/RSS category: term plus optional scheme (or RSS 'domain') and label.
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # iTunes keywords are whitespace-separated; each becomes its own tag.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        # Element text fills in the term of the attribute-only tag just added,
        # or becomes a new tag of its own.
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Record every link in context['links']; an alternate HTML link also
        # becomes the canonical context['link'].
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            # href present: the element carries no useful text content
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        # If a summary already exists, treat this description as full content.
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # Generator attributes (url/uri normalized to href) become
        # generator_detail; the element text later fills in the name.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        # A second summary for the same entry is reinterpreted as content.
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                # fall back to the enclosure URL as the entry id
                context['id'] = href
    def _start_source(self, attrsD):
        self.insource = 1
    def _end_source(self):
        # Deep-copy so later source elements don't mutate this entry's copy.
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text or HTML content doubles as the description.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        # SAX-based parser used when a real XML parser is available; falls back
        # to the loose sgmllib-based parser (elsewhere) on any XML error.
        # NOTE: Python 2 only ('<>' operator and 'raise X, msg' syntax below).
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # recoverable parse error: mark the result as "bozo" but continue
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """Pass-through HTML processor: parses markup with sgmllib and regenerates
    it verbatim into self.pieces. Subclasses override the handle_*/unknown_*
    methods to filter or rewrite individual pieces."""
    # void elements that are re-emitted self-closed with no end tag
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        # fresh output buffer; called by sgmllib on init and explicitly
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # expand XML-style <tag/> into HTML: self-closed for void elements,
        # an explicit empty start/end pair otherwise
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def feed(self, data):
        # Neutralize bogus <! declarations (anything except DOCTYPE, comments,
        # or CDATA sections) by escaping the opening angle bracket.
        # NOTE: the replacement strings below had lost their HTML escaping in
        # this copy of the file (r'<!\1', ''' etc.), which made them no-ops or
        # outright syntax errors; restored to the canonical feedparser forms.
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
    def normalize_attrs(self, attrs):
        # utility method to be called by descendants: lowercase attribute
        # names, and lowercase the values of 'rel' and 'type'
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # more tolerant replacement for sgmllib's declaration-name scanner
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Tolerant, sgmllib-based feed parser used when strict XML parsing fails."""
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
    def decodeEntities(self, element, data):
        # Normalize decimal and hex character references for the five XML
        # special characters to their named-entity forms; for non-XML content
        # types, fully decode the named entities to literal characters.
        # NOTE: these replacement strings had lost their HTML escaping in this
        # copy of the file (e.g. replace('<', '<'), replace(''', ...)), turning
        # them into no-ops and syntax errors; restored to the canonical
        # feedparser forms.
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    """Rewrites relative URIs in HTML attribute values against a base URI."""
    # (tag, attribute) pairs whose values may contain URIs needing resolution
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]
    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        # join against the stored base; absolute URIs pass through unchanged
        return _urljoin(self.baseuri, uri)
    def unknown_starttag(self, tag, attrs):
        # resolve only the attributes known to carry URIs for this tag
        resolved = []
        for name, value in self.normalize_attrs(attrs):
            if (tag, name) in self.relative_uris:
                value = self.resolveURI(value)
            resolved.append((name, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    # Run the HTML through a _RelativeURIResolver and return the rewritten markup.
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Strips untrusted markup from HTML, passing through only whitelisted
    elements and attributes; the text inside script/applet is dropped too."""
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']
    # elements whose entire content must be suppressed, not just the tags
    unacceptable_elements_with_end_tag = ['script', 'applet']
    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # nesting depth of currently-open suppressed elements
        self.unacceptablestack = 0
    def unknown_starttag(self, tag, attrs):
        if tag not in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        clean = [(key, value)
                 for key, value in self.normalize_attrs(attrs)
                 if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean)
    def unknown_endtag(self, tag):
        if tag not in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # declarations are dropped entirely
        pass
    def handle_data(self, text):
        # text inside a suppressed element (script/applet) is discarded
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    # Whitelist-filter the HTML, then (if enabled) clean it up further with
    # whichever Tidy binding is installed, and trim the result to <body>.
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # deliberate best-effort: a missing Tidy binding just falls
                # through to the next candidate interface
                pass
        if _tidy:
            # Tidy works on byte strings; round-trip unicode through UTF-8
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # strip everything outside the <body> element Tidy produced
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # urllib2 handler that follows redirects, records the final HTTP status on
    # the returned file-like object, and upgrades basic auth to digest auth
    # when the server demands it.
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            # any 3xx other than 304 is treated as a redirect
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # follow the redirect only if a Location header is actually present
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure in the digest-retry dance falls back to reporting
            # the original 401 as-is
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: use it directly
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL and send them as a
                    # Basic Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise whichever compressed encodings this Python can decode
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        # deliberate fallthrough: not a readable file, treat as raw data below
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
# Registry of custom date handlers; parse attempts run newest-first.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # prepend so the most recently registered handler is tried first
    _date_handlers[:0] = [func]
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
# Template legend: YYYY/YY = year, MM = month, DD = day, OOO = ordinal day
# of year, CC = century; '-?' marks an optional separator.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a named-group regular expression; every template
# also accepts an optional time-of-day plus timezone suffix.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# NOTE(review): these dels rely on Python 2 list-comprehension variable
# leakage to tidy the module namespace; they would raise NameError on Python 3.
del tmpl
# Pre-bind the bound .match methods so _parse_date_iso8601 can iterate fast.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.

    Tries each pre-compiled template regex in order (greedy: longest
    templates first) and returns a 9-tuple time, or None on failure.
    '''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    # The empty-string template matches everything with a zero-width span;
    # treat that as "no real match".
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # Year omitted: default to the current (UTC) year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            # A date part was given without a day: default to the 1st.
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    # 'ordinal' sits in the struct_time tm_yday slot (index 7); mktime is
    # expected to ignore it and recompute from year/month/day.
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward UTC and
    # let mktime normalize any overflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    # NOTE(review): localtime() here makes the result depend on the local
    # timezone even though the docstring family promises GMT - historic
    # feedparser behavior, confirm before changing.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if not match:
        return
    # Convert the 12-hour clock to 24-hour: the afternoon marker adds 12.
    hour = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    # Rebuild as W3DTF with the fixed KST offset (+09:00) and delegate.
    w3dtfdate = '%s-%s-%sT%s:%s:%s%s' % (match.group(1), match.group(2),
                                         match.group(3), hour,
                                         match.group(6), match.group(7),
                                         '+09:00')
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# Optional fractional seconds are matched (group 7) but discarded below.
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')

def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    match = _mssql_date_re.match(dateString)
    if not match:
        return
    # NOTE(review): the fixed +09:00 offset mirrors the Korean handlers
    # above; presumably targets Korean MS SQL-backed feeds - confirm.
    w3dtfdate = '%s-%s-%sT%s:%s:%s%s' % (match.group(1), match.group(2),
                                         match.group(3), match.group(4),
                                         match.group(5), match.group(6),
                                         '+09:00')
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
# Maps Greek month/weekday names (as they appear in feeds) to the English
# abbreviations understood by the RFC 822 parser.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# "Weekday, DD Month YYYY HH:MM:SS TZ" with Greek weekday/month names.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.

    Translates the Greek weekday and month names to their English
    abbreviations via the lookup tables above, then delegates to the
    RFC 822 parser.  Returns None when the string does not match or
    contains an unknown weekday/month name.
    '''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # Narrowed from a bare except: only an unrecognized name can fail
        # here, and a bare except would also have hidden real bugs.
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
        {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
         'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
         'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.

    Maps the Hungarian month name to its number, zero-pads day and hour,
    and delegates to the W3DTF parser.  Returns None when the string does
    not match or uses an unknown month name.
    '''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
    except KeyError:
        # Narrowed from a bare except: only the table lookup can fail; the
        # zero-padding below cannot raise and is moved out of the try.
        return
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
        {'year': m.group(1), 'month': month, 'day': day,\
         'hour': hour, 'minute': m.group(5),\
         'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3DTF (subset of ISO 8601) date string into a UTC 9-tuple.

    Returns None if the string does not match in its entirety.
    '''
    def __extract_date(m):
        # Return (year, month, day); (0, 0, 0) signals "no usable date".
        year = int(m.group('year'))
        if year < 100:
            # Two-digit year: assume the current century.
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Ordinal (day-of-year) date: start from a rough month/day guess
            # and iterate, letting mktime/gmtime tell us the real day of
            # year, until the guess converges on the requested ordinal.
            # NOTE(review): '/' is integer division under Python 2; this
            # loop would misbehave on Python 3 without '//'.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                # Month without day: default to the 1st.
                day = 1
        return year, month, day
    def __extract_time(m):
        # Return (hours, minutes, seconds); zeros when no time was given.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # Sign is inverted: a '+hh:mm' zone is ahead of UTC, so we must
        # subtract the offset to normalize the timestamp to UTC.
        if tzd[0] == '+':
            return -offset
        return offset
    # Date part: year, then either an ordinal day-of-year or month[/day],
    # with a consistent (possibly empty) separator.
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # Require the whole string to match, not just a prefix.
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets the tuple in local time; adding the tz designator
    # offset and subtracting time.timezone converts the result back to UTC.
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date.

    Normalizes some common irregularities before delegating to the
    Python 2 rfc822 module; returns a UTC 9-tuple or None.
    '''
    data = dateString.split()
    # Drop a leading weekday name (with or without trailing ',' or '.').
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # Four fields usually means "day month year time[+zone]": split a
        # timezone glued onto the time with '+', or append an empty zone.
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        # Date without a time component: default to midnight GMT.
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
# NOTE(review): this mutates the stdlib rfc822 module's private table at
# import time - intentional in feedparser, but it is a global side effect.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT.

    Tries every registered handler (most recently registered first, since
    registerDateHandler prepends) and returns the first valid 9-tuple,
    or None if no handler succeeds.
    '''
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # Validates that every field is int-coercible; any bad field
            # raises and we fall through to the next handler.
            # NOTE(review): relies on Python 2's eager map() - on Python 3
            # this would be a lazy no-op and validate nothing.
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # A failing handler is not an error - just try the next one.
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/

    Returns (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # Now that the data is ASCII-compatible, look for the encoding
        # attribute of the XML declaration.
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # A multi-byte family name in the declaration (e.g. 'utf-16') is
        # less specific than what we sniffed; prefer the sniffed variant.
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        # application/*: charset header wins, then XML declaration, then utf-8.
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        # text/*+xml: per RFC 3023 the XML declaration is ignored entirely.
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Strips any byte order mark (overriding *encoding* if the BOM disagrees),
    decodes, rewrites/inserts the XML declaration to say utf-8, and returns
    UTF-8 encoded bytes.  Raises if *data* cannot be decoded as *encoding*.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        # The BOM is authoritative: override the caller's encoding.
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # Replace an existing XML declaration with one that says utf-8, or
    # prepend one if the document had none.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
# NOTE(review): 'handlers' has a mutable default; safe only as long as it is
# never mutated in place (it is only concatenated below and in _open_resource).
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string.

    etag/modified are sent as conditional-GET headers, agent as User-Agent,
    referrer as Referer; handlers is a list of urllib2 handlers.  Returns a
    FeedParserDict with (at least) 'feed', 'entries', 'bozo', 'encoding',
    'version' and, for HTTP sources, 'status'/'href'/'headers'/'etag'/'modified'.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # Allow a single handler instance to be passed instead of a list.
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        # Download failures are reported via the bozo flag, not raised.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # Keep the raw (pre-decode) document around for callers.
    result['xml_data'] = data
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): 'documented declared as' in the message below looks
        # like a typo for 'document declared as' (user-visible string, left
        # unchanged here).
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            # Fall back to the loose parser below.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
if __name__ == '__main__':
    # Command-line mode: parse each URL given as an argument and
    # pretty-print the resulting dictionary.
    if not sys.argv[1:]:
        # No URLs given: show the module docstring as usage text.
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # NOTE(review): defined elsewhere in this file; presumably installs
    # Zope-compatible aliases - confirm before removing.
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| Python |
"""PyRSS2Gen - A Python library for generating RSS 2.0 feeds."""
__name__ = "PyRSS2Gen"
__version__ = (1, 0, 0)
__author__ = "Andrew Dalke <dalke@dalkescientific.com>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
    """Mixin that adds XML serialization on top of a 'publish' method.

    Classes mixing this in must define publish(handler), which emits
    their content through a SAX ContentHandler.
    """

    def write_xml(self, outfile, encoding = "iso-8859-1"):
        """Serialize this object as a complete XML document to outfile."""
        from xml.sax import saxutils
        generator = saxutils.XMLGenerator(outfile, encoding)
        generator.startDocument()
        self.publish(generator)
        generator.endDocument()

    def to_xml(self, encoding = "iso-8859-1"):
        """Return the XML serialization of this object as a string."""
        # Python 2: prefer the C implementation when it is available.
        try:
            import cStringIO as StringIO
        except ImportError:
            import StringIO
        buffer = StringIO.StringIO()
        self.write_xml(buffer, encoding)
        return buffer.getvalue()
def _element(handler, name, obj, d = {}):
    # Emit one XML element named 'name' through the SAX handler, with
    # attribute dict 'd'.  Strings and None are written directly; any
    # other object is expected to publish itself.
    # NOTE(review): 'd' is a mutable default argument, but it is never
    # mutated here or by any caller in this file, so the shared-default
    # pitfall does not apply; left unchanged to keep behavior identical.
    # NOTE: 'basestring' makes this function Python 2 only.
    if isinstance(obj, basestring) or obj is None:
        # special-case handling to make the API easier
        # to use for the common case.
        handler.startElement(name, d)
        if obj is not None:
            handler.characters(obj)
        handler.endElement(name)
    else:
        # It better know how to emit the correct XML.
        obj.publish(handler)
def _opt_element(handler, name, obj):
if obj is None:
return
_element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
    """implements the 'publish' API for integers

    Takes the tag name and the integer value to publish.
    (Could be used for anything which uses str() to be published
    to text for XML.)
    """
    element_attrs = {}

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def publish(self, handler):
        # Integers carry no attributes: emit <name>str(val)</name>.
        text = str(self.val)
        handler.startElement(self.name, self.element_attrs)
        handler.characters(text)
        handler.endElement(self.name)
class DateElement:
    """implements the 'publish' API for a datetime.datetime

    Takes the tag name and the datetime to publish; the datetime is
    rendered as an RFC 2822 timestamp (4-digit year) when published.
    """

    def __init__(self, name, dt):
        self.name = name
        self.dt = dt

    def publish(self, handler):
        # Formatting happens lazily, at publish time.
        _element(handler, self.name, _format_date(self.dt))
####
class Category:
    """Publish a category element"""

    def __init__(self, category, domain = None):
        self.category = category
        self.domain = domain

    def publish(self, handler):
        # The optional 'domain' rides along as an attribute.
        attrs = {}
        if self.domain is not None:
            attrs["domain"] = self.domain
        _element(handler, "category", self.category, attrs)
class Cloud:
    """Publish a cloud"""

    def __init__(self, domain, port, path,
                 registerProcedure, protocol):
        self.domain = domain
        self.port = port
        self.path = path
        self.registerProcedure = registerProcedure
        self.protocol = protocol

    def publish(self, handler):
        # All cloud data travels as attributes of an empty element.
        attrs = {"domain": self.domain,
                 "port": str(self.port),
                 "path": self.path,
                 "registerProcedure": self.registerProcedure,
                 "protocol": self.protocol}
        _element(handler, "cloud", None, attrs)
class Image:
    """Publish a channel Image"""
    element_attrs = {}

    def __init__(self, url, title, link,
                 width = None, height = None, description = None):
        self.url = url
        self.title = title
        self.link = link
        self.width = width
        self.height = height
        self.description = description

    def publish(self, handler):
        handler.startElement("image", self.element_attrs)
        # Required sub-elements, in order.
        _element(handler, "url", self.url)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)

        # Plain ints must be wrapped so they publish as text.
        def wrap_int(tag, value):
            return IntElement(tag, value) if isinstance(value, int) else value

        _opt_element(handler, "width", wrap_int("width", self.width))
        _opt_element(handler, "height", wrap_int("height", self.height))
        _opt_element(handler, "description", self.description)
        handler.endElement("image")
class Guid:
    """Publish a guid

    Defaults to being a permalink, which is the assumption if it's
    omitted.  Hence strings are always permalinks.
    """

    def __init__(self, guid, isPermaLink = 1):
        self.guid = guid
        self.isPermaLink = isPermaLink

    def publish(self, handler):
        # RSS wants the literal strings "true"/"false" here.
        flag = "true" if self.isPermaLink else "false"
        _element(handler, "guid", self.guid, {"isPermaLink": flag})
class TextInput:
    """Publish a textInput

    Apparently this is rarely used.
    """
    element_attrs = {}

    def __init__(self, title, description, name, link):
        self.title = title
        self.description = description
        self.name = name
        self.link = link

    def publish(self, handler):
        handler.startElement("textInput", self.element_attrs)
        # All four sub-elements are required by the RSS spec.
        for tag, value in (("title", self.title),
                           ("description", self.description),
                           ("name", self.name),
                           ("link", self.link)):
            _element(handler, tag, value)
        handler.endElement("textInput")
class Enclosure:
    """Publish an enclosure"""

    def __init__(self, url, length, type):
        self.url = url
        self.length = length
        self.type = type

    def publish(self, handler):
        # An enclosure is an empty element carrying three attributes.
        attrs = {"url": self.url,
                 "length": str(self.length),
                 "type": self.type}
        _element(handler, "enclosure", None, attrs)
class Source:
    """Publish the item's original source, used by aggregators"""

    def __init__(self, name, url):
        self.name = name
        self.url = url

    def publish(self, handler):
        # Source name is the element text; its URL is an attribute.
        _element(handler, "source", self.name, {"url": self.url})
class SkipHours:
    """Publish the skipHours

    This takes a list of hours, as integers.
    """
    element_attrs = {}

    def __init__(self, hours):
        self.hours = hours

    def publish(self, handler):
        # An empty hour list means the whole element is omitted.
        if not self.hours:
            return
        handler.startElement("skipHours", self.element_attrs)
        for hour in self.hours:
            _element(handler, "hour", str(hour))
        handler.endElement("skipHours")
class SkipDays:
    """Publish the skipDays

    This takes a list of days as strings.
    """
    element_attrs = {}

    def __init__(self, days):
        self.days = days

    def publish(self, handler):
        # An empty day list means the whole element is omitted.
        if not self.days:
            return
        handler.startElement("skipDays", self.element_attrs)
        for day in self.days:
            _element(handler, "day", day)
        handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """

    rss_attrs = {"version": "2.0"}
    element_attrs = {}

    def __init__(self,
                 title,
                 link,
                 description,

                 language = None,
                 copyright = None,
                 managingEditor = None,
                 webMaster = None,
                 pubDate = None,          # a datetime, *in* *GMT*
                 lastBuildDate = None,    # a datetime
                 categories = None,       # list of strings or Category
                 generator = _generator_name,
                 docs = "http://blogs.law.harvard.edu/tech/rss",
                 cloud = None,            # a Cloud
                 ttl = None,              # integer number of minutes
                 image = None,            # an Image
                 rating = None,           # a string; I don't know how it's used
                 textInput = None,        # a TextInput
                 skipHours = None,        # a SkipHours with a list of integers
                 skipDays = None,         # a SkipDays with a list of strings
                 items = None,            # list of RSSItems
                 ):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate

        # Give each instance its own list when none is supplied; a
        # mutable default argument would be shared across instances.
        if categories is None:
            categories = []
        self.categories = categories

        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays

        if items is None:
            items = []
        self.items = items

    def publish(self, handler):
        """Emit the full <rss><channel>...</channel></rss> tree through
        the given SAX handler."""
        handler.startElement("rss", self.rss_attrs)
        handler.startElement("channel", self.element_attrs)
        # The three required channel elements, in spec order.
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        _element(handler, "description", self.description)

        self.publish_extensions(handler)

        _opt_element(handler, "language", self.language)
        _opt_element(handler, "copyright", self.copyright)
        _opt_element(handler, "managingEditor", self.managingEditor)
        _opt_element(handler, "webMaster", self.webMaster)

        # datetime values are wrapped so they publish as RFC 822 dates.
        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)

        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
        _opt_element(handler, "lastBuildDate", lastBuildDate)

        for category in self.categories:
            # Promote bare strings to Category objects on the fly.
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "generator", self.generator)
        _opt_element(handler, "docs", self.docs)

        if self.cloud is not None:
            self.cloud.publish(handler)

        ttl = self.ttl
        if isinstance(ttl, int):
            ttl = IntElement("ttl", ttl)
        # BUG FIX: this element was emitted as "tt"; the RSS 2.0 spec
        # names it "ttl" (same fix as upstream PyRSS2Gen 1.1).
        _opt_element(handler, "ttl", ttl)

        if self.image is not None:
            self.image.publish(handler)

        _opt_element(handler, "rating", self.rating)

        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)

        for item in self.items:
            item.publish(handler)

        handler.endElement("channel")
        handler.endElement("rss")

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the three required fields.
        pass
class RSSItem(WriteXmlMixin):
    """Publish an RSS Item"""
    element_attrs = {}

    def __init__(self,
                 title = None, # string
                 link = None, # url as string
                 description = None, # string
                 author = None, # email address as string
                 categories = None, # list of string or Category
                 comments = None, # url as string
                 enclosure = None, # an Enclosure
                 guid = None, # a unique string
                 pubDate = None, # a datetime
                 source = None, # a Source
                 ):
        # The RSS 2.0 spec requires an item to carry a title or a
        # description (or both).
        if title is None and description is None:
            raise TypeError(
                "must define at least one of 'title' or 'description'")
        self.title = title
        self.link = link
        self.description = description
        self.author = author
        self.categories = [] if categories is None else categories
        self.comments = comments
        self.enclosure = enclosure
        self.guid = guid
        self.pubDate = pubDate
        self.source = source

    def publish(self, handler):
        handler.startElement("item", self.element_attrs)
        _opt_element(handler, "title", self.title)
        _opt_element(handler, "link", self.link)
        self.publish_extensions(handler)
        _opt_element(handler, "description", self.description)
        _opt_element(handler, "author", self.author)

        for category in self.categories:
            # Bare strings are promoted to Category objects on the fly.
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "comments", self.comments)
        if self.enclosure is not None:
            self.enclosure.publish(handler)
        _opt_element(handler, "guid", self.guid)

        # datetime values are wrapped so they publish as RFC 822 dates.
        pub_date = self.pubDate
        if isinstance(pub_date, datetime.datetime):
            pub_date = DateElement("pubDate", pub_date)
        _opt_element(handler, "pubDate", pub_date)

        if self.source is not None:
            self.source.publish(handler)
        handler.endElement("item")

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the title and link elements
        pass
| Python |
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
This module works with Python 2.3 or higher."""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
# Date/revision are extracted from CVS keyword strings.
__date__ = '$Date: 2006/06/12 23:15:40 $'.split()[1].replace('/', '-')
__version__ = '$Revision: 1.30 $'.split()[1]
# Human-readable descriptions of the four UUID variant families,
# returned by the UUID.variant property below.
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
    'reserved for NCS compatibility', 'specified in RFC 4122',
    'reserved for Microsoft compatibility', 'reserved for future definition']
# NOTE(review): this whole class is Python 2 only: it relies on 'long',
# 'L'-suffixed integer literals, and __cmp__.
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'.  The UUID constructor accepts
    four possible forms: a similar string of hexadecimal digits, or a
    string of 16 raw bytes as an argument named 'bytes', or a tuple of
    six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a single
    128-bit integer as an argument named 'int'.
    UUIDs have these read-only attributes:
        bytes       the UUID as a 16-byte string
        fields      a tuple of the six integer fields of the UUID,
                    which are also available as six individual attributes
                    and two derived attributes:
            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID
            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number
        hex         the UUID as a 32-character hexadecimal string
        int         the UUID as a 128-bit integer
        urn         the UUID as a URN as specified in RFC 4122
        variant     the UUID variant (one of the constants RESERVED_NCS,
                    RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
        version     the UUID version number (1 through 5, meaningful only
                    when the variant is RFC_4122)
    """

    def __init__(self, hex=None, bytes=None, fields=None, int=None,
                       version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:
        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)
        Exactly one of 'hex', 'bytes', 'fields', or 'int' must be given.
        The 'version' argument is optional; if given, the resulting UUID
        will have its variant and version number set according to RFC 4122,
        overriding bits in the given 'hex', 'bytes', 'fields', or 'int'.
        """
        # NOTE: 'hex', 'bytes', and 'int' deliberately shadow builtins so
        # the keyword arguments read naturally at call sites.
        if [hex, bytes, fields, int].count(None) != 3:
            raise TypeError('need just one of hex, bytes, fields, or int')
        # Each accepted input form below is normalized into the single
        # canonical 128-bit 'int' representation.
        if hex is not None:
            # Braces, hyphens, and a 'urn:uuid:' prefix are all optional.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes is not None:
            # 16 raw bytes, big-endian (most significant byte first).
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            # Range-check each field against its bit width.
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into the 128-bit layout of RFC 4122.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Bypass the immutability guard in __setattr__ below.
        self.__dict__['int'] = int

    def __cmp__(self, other):
        # Python 2 comparison protocol; UUIDs order by their 128-bit value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented

    def __hash__(self):
        return hash(self.int)

    def __int__(self):
        return self.int

    def __repr__(self):
        return 'UUID(%r)' % str(self)

    def __setattr__(self, name, value):
        # UUIDs are immutable: any attribute assignment is rejected.
        raise TypeError('UUID objects are immutable')

    def __str__(self):
        # Canonical 8-4-4-4-12 hyphenated form.
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])

    def get_bytes(self):
        # Big-endian: most significant byte first.
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes

    bytes = property(get_bytes)

    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    fields = property(get_fields)

    def get_time_low(self):
        return self.int >> 96L

    time_low = property(get_time_low)

    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff

    time_mid = property(get_time_mid)

    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff

    time_hi_version = property(get_time_hi_version)

    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff

    clock_seq_hi_variant = property(get_clock_seq_hi_variant)

    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff

    clock_seq_low = property(get_clock_seq_low)

    def get_time(self):
        # 60-bit timestamp: version bits are masked out of time_hi_version.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)

    time = property(get_time)

    def get_clock_seq(self):
        # 14-bit sequence number: variant bits are masked out.
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)

    clock_seq = property(get_clock_seq)

    def get_node(self):
        return self.int & 0xffffffffffff

    node = property(get_node)

    def get_hex(self):
        return '%032x' % self.int

    hex = property(get_hex)

    def get_urn(self):
        return 'urn:uuid:' + str(self)

    urn = property(get_urn)

    def get_variant(self):
        # The variant is encoded in the top bits of octet 8, tested from
        # the most significant bit down (RFC 4122 section 4.1.1).
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE

    variant = property(get_variant)

    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)

    version = property(get_version)
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""
    import os
    # Try a bare 'ifconfig' (PATH lookup) first, then the usual sbin
    # locations where it lives on systems that keep it off PATH.
    for prefix in ['', '/sbin/', '/usr/sbin']:
        try:
            pipe = os.popen(os.path.join(prefix, 'ifconfig'))
        except IOError:
            continue
        # Scan the output for a 'HWaddr'/'ether' token; the MAC address
        # is the token immediately after it.
        for line in pipe:
            tokens = line.lower().split()
            for pos, token in enumerate(tokens):
                if token in ('hwaddr', 'ether'):
                    return int(tokens[pos + 1].replace(':', ''), 16)
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        # Prefer the real system directory when ctypes can report it.
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except Exception:
        # Best effort: fall back to the hard-coded locations.  Narrowed
        # from a bare 'except:' so KeyboardInterrupt/SystemExit escape.
        pass
    # Hoisted out of the loop: matches a MAC like 00-11-22-33-44-55.
    mac_re = re.compile('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]')
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        # Each "Physical Address. . . : XX-XX-..." line ends with the MAC.
        for line in pipe:
            value = line.split(':')[-1].strip().lower()
            if mac_re.match(value):
                return int(value.replace('-', ''), 16)
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    # Requires the third-party pywin32 package (win32wnet, netbios); an
    # ImportError here is caught by getnode()'s fallback loop.
    import win32wnet, netbios
    # NCBENUM: enumerate the LAN adapter numbers.
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        return
    adapters._unpack()
    for i in range(adapters.length):
        # NCBRESET: reset the adapter before querying it.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        # NCBASTAT with name '*': adapter status; its first field is the
        # 6-byte MAC address.
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        bytes = map(ord, status.adapter_address)
        # Pack the six octets into one 48-bit integer (Python 2 'L'
        # long literals).
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
# Holders for C-level UUID routines discovered below (None when absent).
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util
    # Shared 16-byte output buffer reused by the C routines via the
    # _unixdll_getnode/_windll_getnode helpers.
    _buffer = ctypes.create_string_buffer(16)

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            # NOTE(review): bare except is deliberate best-effort here --
            # find_library may return None and CDLL can raise; kept as-is.
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        # Not on Windows: ctypes has no 'windll' attribute there.
        lib = None
    # getattr on lib=None safely yields None for both names.
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes.

    Relies on the libuuid binding set up at import time:
    uuid_generate_time() writes a time-based UUID into the shared
    _buffer, and that UUID's node field is the hardware address.
    """
    _uuid_generate_time(_buffer)
    generated = UUID(bytes=_buffer.raw)
    return generated.node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    # UuidCreate/UuidCreateSequential returns 0 (RPC_S_OK) on success;
    # on any other status we fall through and return None.
    status = _UuidCreate(_buffer)
    if status == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None  # cached result of getnode()

def getnode():
    """Get the hardware address as a 48-bit integer.  The first time this
    runs, it may launch a separate program, which could be quite slow.  If
    all attempts to obtain the hardware address fail, we choose a random
    48-bit number with its eighth bit set to 1 as recommended in RFC 4122."""
    global _node
    if _node is not None:
        return _node

    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]

    # Try each strategy in turn; _random_getnode always succeeds, so the
    # loop is guaranteed to terminate with a value.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except Exception:
            # A getter blowing up (missing library, missing command,
            # pipe error) just means we move on to the next strategy.
            # (Was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt.)
            continue
        if _node is not None:
            return _node
def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.
    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen.

    (The obsolete 'L' long-literal suffixes were dropped throughout; all
    values are unchanged.)
    """
    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)

    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds/100) + 0x01b21dd213814000
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1 << 14) # instead of stable storage
    # Split the 60-bit timestamp and 14-bit clock sequence into the
    # RFC 4122 field layout.
    time_low = timestamp & 0xffffffff
    time_mid = (timestamp >> 32) & 0xffff
    time_hi_version = (timestamp >> 48) & 0x0fff
    clock_seq_low = clock_seq & 0xff
    clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    # hashlib superseded the standalone md5 module in Python 2.5 (the md5
    # module is deprecated/removed later); fall back for old interpreters.
    try:
        from hashlib import md5
    except ImportError:
        from md5 import md5
    hash = md5(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=3)
def uuid4():
    """Generate a random UUID."""
    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)

    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except NotImplementedError:
        # os.urandom is unavailable on this platform; fall back to the
        # (non-cryptographic) random module.  (Was a bare 'except:',
        # which hid every other failure mode too.)
        import random
        rand_bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=rand_bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    # hashlib superseded the standalone sha module in Python 2.5; fall
    # back for old interpreters.
    try:
        from hashlib import sha1 as sha
    except ImportError:
        from sha import sha
    hash = sha(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
# These are the predefined namespace IDs from RFC 4122 Appendix C.
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-structured XML/HTML document yields a well-behaved data
structure. An ill-structured XML/HTML document yields a
correspondingly ill-behaved data structure. If your document is only
locally well-structured, you can use this library to find and process
the well-structured part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed
Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
"""
from __future__ import generators
__author__ = "Leonard Richardson (crummy.com)"
__contributors__ = ["Sam Ruby (intertwingly.net)",
"the unwitting Mark Pilgrim (diveintomark.org)",
"http://www.crummy.com/software/BeautifulSoup/AUTHORS.html"]
__version__ = "3.0.3"
__copyright__ = "Copyright (c) 2004-2006 Leonard Richardson"
__license__ = "PSF"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import types
import re
import sgmllib
from htmlentitydefs import name2codepoint
# This RE makes Beautiful Soup able to parse XML with namespaces.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')

# This RE makes Beautiful Soup capable of recognizing numeric character
# references that use hexadecimal.
# (Raw string avoids the invalid-escape-sequence warning that a bare
# '\d' in an ordinary string literal triggers on newer interpreters;
# the compiled pattern is unchanged.)
sgmllib.charref = re.compile(r'&#(\d+|x[0-9a-fA-F]+);')

# Encoding used by __str__ and friends when none is given explicitly.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replaces this element in the tree with the given element,
        preserving its position among its old parent's contents."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Inserts newChild at the given position among this element's
        contents, rewiring all next/previous/sibling pointers."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                # NOTE(review): self.find() runs a tree search and returns
                # an element, not a list index; this looks like it was
                # meant to be self.contents.index(newChild). Confirm
                # before changing, since Py2 happily compares the result
                # to an int below.
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        # BUG FIX: **kwargs was accepted but not forwarded, so keyword
        # attribute criteria (e.g. findAllNext(id='x')) were ignored.
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        # BUG FIX: forward **kwargs (they were silently dropped before).
        l = self.findParents(name, attrs, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        """Runs a findAll-style method with limit=1 and unwraps the result."""
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    #(Note: each generator yields a trailing None when it runs off the
    #end of the document; _findAll skips falsy items.)
    def nextGenerator(self):
        """Yields each element parsed after this one, in parse order."""
        i = self
        while i:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        """Yields each later sibling of this element, in order."""
        i = self
        while i:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        """Yields each element parsed before this one, in reverse order."""
        i = self
        while i:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        """Yields each earlier sibling of this element, in reverse order."""
        i = self
        while i:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        """Yields each ancestor of this element, innermost first."""
        i = self
        while i:
            i = i.parent
            yield i

    # Utility methods

    def substituteEncoding(self, str, encoding=None):
        """Replaces the %SOUP-ENCODING% placeholder with the given
        encoding name (default 'utf-8')."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s  = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A plain string that also knows its place in the parse tree."""

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))

    def __unicode__(self):
        # BUG FIX: was ``return __str__(self, None)``, which raised
        # NameError because __str__ is not a module-level name.
        return self.__str__(None)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Returns the string encoded to 'encoding', or the Unicode
        value itself when encoding is None."""
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section, rendered with its <![CDATA[...]]> wrapper."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<![CDATA[%s]]>" % inner
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered inside <?...?>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        # A PI may carry the %SOUP-ENCODING% placeholder; substitute the
        # actual output encoding before rendering.
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?%s?>" % self.toEncoding(text, encoding)
class Comment(NavigableString):
    """A comment, rendered inside <!--...-->."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<!--%s-->" % inner
class Declaration(NavigableString):
    """A declaration (e.g. DOCTYPE), rendered inside <!...>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        inner = NavigableString.__str__(self, encoding)
        return "<!%s>" % inner
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    # The five entities predefined by XML, mapped to the characters
    # they represent.
    XML_ENTITIES_TO_CHARS = { 'apos' : "'",
                              "quot" : '"',
                              "amp" : "&",
                              "lt" : "<",
                              "gt" : ">"
                              }

    # An RE for finding ampersands that aren't the start of a
    # numeric entity.
    BARE_AMPERSAND = re.compile(r"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.name = name
        if attrs is None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def has_key(self, key):
        """Returns true if the tag has an attribute named 'key'."""
        return key in self._getAttrMap()

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        # Keep the ordered (possibly duplicated) attrs list in sync with
        # the attrMap cache.
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # BUG FIX: iterate over a copy of the list.  Removing items from
        # the list being iterated skipped adjacent duplicate attributes.
        for item in self.attrs[:]:
            if item[0] == key:
                self.attrs.remove(item)
                # We don't break because bad HTML can define the same
                # attribute multiple times.
        self._getAttrMap()
        if key in self.attrMap:
            del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        # (apply() is deprecated; argument unpacking is identical.)
        return self.findAll(*args, **kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        # BUG FIX: previously fell off the end and silently returned None
        # for dunder lookups (confusing copy/pickle); raise as expected.
        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__, tag))

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if (not hasattr(other, 'name') or not hasattr(other, 'attrs')
            or not hasattr(other, 'contents') or self.name != other.name
            or self.attrs != other.attrs or len(self) != len(other)):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        return self.__str__(None)

    def _convertEntities(self, match):
        """re.sub callback: converts one '&name;' entity reference."""
        x = match.group(1)
        if x in name2codepoint:
            # A known HTML entity: replace it with the character itself.
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_CHARS:
            # One of the five predefined XML entities: keep the reference.
            # NOTE(review): the original tested '"&" + x + ";"' against the
            # map, which can never match its bare-name keys.
            return '&%s;' % x
        else:
            # Unknown entity: escape the ampersand so output stays
            # well-formed.  (BUG FIX: the original returned the same text
            # as the branch above — an entity-mangling artifact.)
            return '&amp;%s;' % x

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isString(val):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. We escape the embedded double
                    #   quotes as entities.
                    if '"' in val:
                        if "'" in val:
                            # BUG FIX: restored the '&quot;' replacement
                            # string (it had been mangled by entity
                            # unescaping into a stray quote character,
                            # which was a syntax error).
                            val = val.replace('"', '&quot;')
                        else:
                            fmt = "%s='%s'"

                    # Optionally convert any HTML entities
                    if self.convertHTMLEntities:
                        val = re.sub(r"&(\w+);", self._convertEntities, val)

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    # BUG FIX: restored '&lt;'/'&gt;'/'&amp;' replacement
                    # strings (the mangled code replaced each character
                    # with itself, a no-op).
                    val = val.replace("<", "&lt;").replace(">", "&gt;")
                    val = self.BARE_AMPERSAND.sub("&amp;", val)
                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)

        if self.hidden:
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag pretty-printed (one element per line)."""
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        """Pre-3.x compatibility: find all matching text elements."""
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        """Pre-3.x compatibility: find the first matching text element."""
        return self.find(text=text, recursive=recursive)

    #Utility methods

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.contents.append(tag)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # BUG FIX: the original tested ``not getattr(self, 'attrMap')``,
        # which on first use routed through __getattr__ and searched the
        # whole subtree for a child tag named 'attrmap'.  Consult the
        # instance dict directly instead.
        if 'attrMap' not in self.__dict__:
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def childGenerator(self):
        """Yields this tag's direct children, in document order."""
        # (The explicit trailing 'raise StopIteration' was removed: a
        # generator raises it implicitly on return, and PEP 479 makes the
        # explicit form an error on modern interpreters.)
        for i in range(0, len(self.contents)):
            yield self.contents[i]

    def recursiveChildGenerator(self):
        """Yields every element beneath this tag, depth-first, in
        document order, using an explicit stack of (tag, resume-index)
        pairs instead of recursion."""
        stack = [(self, 0)]
        while stack:
            tag, start = stack.pop()
            if isinstance(tag, Tag):
                for i in range(start, len(tag.contents)):
                    a = tag.contents[i]
                    yield a
                    if isinstance(a, Tag) and tag.contents:
                        if i < len(tag.contents) - 1:
                            stack.append((tag, i+1))
                        stack.append((a, 0))
                        break
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # 'name', 'attrs' values, and 'text' may each be a string, a
        # regexp object, a list, a dict, True, or a callable -- see
        # _matches() for the accepted forms.
        self.name = name
        # A bare string in the 'attrs' slot is shorthand for matching
        # the 'class' attribute.
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def searchTag(self, markupName=None, markupAttrs={}):
        # 'markupName' may be an actual Tag (whose attributes are then
        # read from the Tag itself) or a bare tag name with a separate
        # attribute collection in 'markupAttrs'.
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name criterion gets (name, attrs) -- but only when
        # we were given raw tag data, not a built Tag.
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Lazily build a dict view of the attributes only when
                # there are attribute criteria to check.
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found
    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        # Dispatches on the kind of 'markup': list, Tag, or string.
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found
    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        # The literal True (checked to be a real bool, not just truthy)
        # matches any non-None value.
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string here, and strings
                # have no has_key(), so this branch raises AttributeError
                # whenever a dict criterion reaches it.  It looks like it
                # was meant to be matchAgainst.has_key(markup) -- confirm
                # intended semantics before changing.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # BUG FIX: the original called list.__init__([]), which
        # initialized a throwaway list instead of this instance.
        list.__init__(self)
        self.source = source  # the SoupStrainer that produced these results
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # Newer 2.x: anything with an iterator protocol counts as listlike.
    if hasattr(l, '__iter__'):
        return True
    # Older 2.x fallback: check the concrete list/tuple types.
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # BUG FIX: the second call was misspelled 'isintance', raising a
        # NameError that the handler below silently masked.
        return isinstance(s, unicode) or isinstance(s, basestring)
    except NameError:
        # Very old (or very new) interpreters without unicode/basestring.
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    result = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its entries wholesale.
            for key, value in portion.items():
                result[key] = value
        elif isList(portion):
            # A list: every element maps to the default value.
            for key in portion:
                result[key] = default
        else:
            # A scalar: it alone maps to the default value.
            result[portion] = default
    return result
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
ALL_ENTITIES = [HTML_ENTITIES, XML_ENTITIES]
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
if convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if isList(convertEntities):
self.convertHTMLEntities = self.HTML_ENTITIES in convertEntities
self.convertXMLEntities = self.XML_ENTITIES in convertEntities
else:
self.convertHTMLEntities = self.HTML_ENTITIES == convertEntities
self.convertXMLEntities = self.XML_ENTITIES == convertEntities
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed()
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def _feed(self, inDocumentEncoding=None):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
self.reset()
SGMLParser.feed(self, markup or "")
SGMLParser.close(self)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = ''.join(self.currentData)
if currentData.endswith('<') and self.convertHTMLEntities:
currentData = currentData[:-1] + '<'
if not currentData.strip():
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<p>Foo<table>Bar<p> should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurance.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.currentData.append('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.currentData.append('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
if self.convertHTMLEntities:
if data[0] == '&':
data = self.BARE_AMPERSAND.sub("&",data)
else:
data = data.replace('&','&') \
.replace('<','<') \
.replace('>','>')
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = "xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if ref[0] == 'x':
data = unichr(int(ref[1:],16))
else:
data = unichr(int(ref))
if u'\x80' <= data <= u'\x9F':
data = UnicodeDammit.subMSChar(chr(ord(data)), self.smartQuotesTo)
elif not self.convertHTMLEntities and not self.convertXMLEntities:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML entity references to the corresponding Unicode
characters."""
replaceWithXMLEntity = self.convertXMLEntities and \
self.XML_ENTITIES_TO_CHARS.has_key(ref)
if self.convertHTMLEntities or replaceWithXMLEntity:
try:
data = unichr(name2codepoint[ref])
except KeyError:
if replaceWithXMLEntity:
data = self.XML_ENTITIES_TO_CHARS.get(ref)
else:
data="&%s" % ref
else:
data = '&%s;' % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:
    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.
    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.
    * Tag nesting rules:
      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.
       <p>Para1<p>Para2
      should be transformed into:
       <p>Para1</p><p>Para2
      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.
       Alice said: <blockquote>Bob said: <blockquote>Blah
      should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.
       <table><tr>Blah<tr>Blah
      should be transformed into:
       <table><tr>Blah</tr><tr>Blah
      but,
       <tr>Blah<table><tr>Blah
      should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah
    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""
    def __init__(self, *args, **kwargs):
        # HTML (unlike XML) defaults to converting MS smart quotes to
        # HTML entities.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        BeautifulStoneSoup.__init__(self, *args, **kwargs)
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])
    # The contents of <script> are treated as literal text, not markup.
    QUOTE_TAGS = {'script': None}
    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']
    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }
    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }
    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)
    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)")
    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i
        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                # NOTE(review): getattr without a default raises
                # AttributeError when declaredHTMLEncoding was never
                # assigned (it is only set below, on the first pass when
                # a new charset is found) -- confirm whether
                # getattr(self, 'declaredHTMLEncoding', None) was
                # intended.
                if getattr(self, 'declaredHTMLEncoding') or \
                       (self.originalEncoding == self.fromEncoding):
                    # This is our second pass through the document, or
                    # else an encoding was specified explicitly and it
                    # worked. Rewrite the meta tag.
                    newAttr = self.CHARSET_RE.sub\
                              (lambda(match):match.group(1) +
                               "%SOUP-ENCODING%", value)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the new information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort a parse in progress; BeautifulSoup.start_meta
    raises it after re-feeding the document with a newly discovered
    encoding, and BeautifulStoneSoup.__init__ swallows it."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:
     <b>Foo<b>Bar</b></b>
    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.
    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""
    # 'strong' and 'big' appear twice below; harmless, since buildTagMap
    # deduplicates via dict keys.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.
    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""
    # NOTE(review): buildTagMap('noscript') passes 'noscript' as the
    # *default* with no tag arguments, so this evaluates to {} and the
    # intended 'noscript' reset behavior is effectively dropped.  This
    # matches upstream Beautiful Soup of this vintage, so it is left
    # as-is -- confirm intent before changing.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:
    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>
    You can then access fooTag['bar'] instead of fooTag.barTag.string.
    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.
    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # Before the normal pop, mirror a string-only child tag onto its
        # parent as an attribute -- but only if the parent doesn't
        # already have an attribute of that name.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisitude,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
    import chardet
#    import chardet.constants
#    chardet.constants._debug = 1
except:
    # Bare except also hides non-import errors; upstream used
    # "except ImportError".
    chardet = None
# NOTE(review): this unconditional assignment disables chardet even when
# the import above succeeded -- presumably a deliberate local change to
# force the fallback encoding guesses, but worth confirming.
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
    import cjkcodecs.aliases
except:
    pass
try:
    import iconv_codec
except:
    pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml'):
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if isinstance(markup, unicode):
return markup
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def subMSChar(orig, smartQuotesTo):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = UnicodeDammit.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
elif smartQuotesTo == 'html':
sub = '&%s;' % sub[0]
else:
sub = unichr(int(sub[1],16))
return sub
subMSChar = staticmethod(subMSChar)
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed in("windows-1252",
"ISO-8859-1",
"ISO-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda(x): self.subMSChar(x.group(1),self.smartQuotesTo),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
xml_encoding_match = re.compile \
('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\
.match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except LookupError:
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', '178'),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Read an HTML document from stdin and pretty-print it.  (Python 2
    # print statement; this module predates Python 3.)
    soup = BeautifulSoup(sys.stdin.read())
    print soup.prettify()
| Python |
"""Text to Scripting News XML
This module converts loosely structured text into XML that conforms to
the Scripting News XML format.
About the Scripting News XML format:
http://my.userland.com/stories/storyReader$11
Specification for text to XML translation:
http://my.userland.com/stories/storyReader$14
"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import re
import cgi
from xml.dom.minidom import Document, Element, Text
_link = re.compile(r'(.*?)<a href=["''](.*?)["''].*?>(.*?)</a>(.*)', re.IGNORECASE + re.DOTALL)
class _TextElement(Element):
    """A minidom Element pre-populated with a single text child."""
    def __init__(self, tag, text):
        Element.__init__(self, tag)
        # NOTE(review): Text(text) relies on the very old minidom API
        # where Text took its character data as a constructor argument;
        # modern minidom creates text nodes via Document.createTextNode.
        # Fine for the Python of this module's era -- confirm before
        # running on a modern interpreter.
        self.appendChild(Text(text))
def _parseItem(itemtext):
    # Strip every '<a href=...>text</a>' from the item, remembering the
    # (url, link text) pairs, then HTML-escape what remains.
    # Returns (escaped text, list of (url, linetext) tuples).
    links = []
    while 1:
        match = _link.search(itemtext)
        if not match: break
        prefix, url, linetext, suffix = match.groups()
        links.append((url, linetext))
        # Splice the bare link text back in place of the whole anchor.
        itemtext = "%s%s%s" % (prefix, linetext, suffix)
    # NOTE(review): cgi.escape was removed in Python 3.8 (and the cgi
    # module itself in 3.13); html.escape is the modern equivalent.
    # Fine here, since this is Python 2 code.
    itemtext = cgi.escape(itemtext)
    return (itemtext, links)
def textToXML(headers, text):
    """convert text to Scripting News XML
    Returns: string, complete XML output as single string
    Arguments:
    - headers: dictionary of additional headers for <header> node;
      some of these are required, see http://my.userland.com/stories/storyReader$11
    - text: text to convert
    """
    root = Element("scriptingNews")
    # <header>: fixed version/docs entries first, then whatever the
    # caller supplied.
    header = Element("header")
    header.appendChild(_TextElement("scriptingNewsVersion", "2.0b1"))
    header.appendChild(_TextElement("docs", "http://my.userland.com/stories/storyReader$11"))
    for name, value in headers.items():
        header.appendChild(_TextElement(name, value))
    root.appendChild(header)
    # One <item> per blank-line-separated paragraph; each embedded
    # anchor becomes a <link> child with <url> and <linetext>.
    for paragraph in text.split("\n\n"):
        body, links = _parseItem(paragraph)
        item = Element("item")
        item.appendChild(_TextElement("text", body))
        for url, linetext in links:
            link = Element("link")
            link.appendChild(_TextElement("url", url))
            link.appendChild(_TextElement("linetext", linetext))
            item.appendChild(link)
        root.appendChild(item)
    document = Document()
    document.appendChild(root)
    return document.toxml()
| Python |
"""Extensions to the Blogger XML-RPC interface
This module implements some useful functions which ought
to be part of Blogger's XML-RPC interface, but aren't.
These functions are Blogger.com-specific and do not use
XML-RPC, so they will not work with weblog servers that
are otherwise compatible with the Blogger XML-RPC interface.
The hope is that Blogger (and other servers) will eventually
implement these functions in a consistent manner via XML-RPC,
and all this nastiness can disappear. But I need them sooner
than that, so here we are.
"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import urllib
import Cookie
import re
# Shared module state, populated lazily by _getHTML / _login /
# _getSettings below.
_opener = None    # urllib.FancyURLopener reused across requests (carries cookies)
_pid = None       # Blogger "PyraID" session id extracted after login
_settings = {}    # per-blogID cache of scraped blog settings
_info = {}        # reserved; not populated anywhere in this chunk
def _getHTML(url, params=None):
    """Fetch *url* (POSTing urlencoded *params* if given) and return the
    response body, accumulating any Set-Cookie headers onto the shared
    opener so later requests stay logged in."""
    global _opener
    if not _opener:
        _opener = urllib.FancyURLopener()
        # Spoof a browser user-agent.
        _opener.addheaders = [('User-agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)')]
    if params:
        data = urllib.urlencode(params)
        usock = _opener.open(url, data)
    else:
        usock = _opener.open(url)
    cookies = Cookie.SmartCookie()
    for c in usock.headers.getallmatchingheaders("set-cookie"):
        cookies.load(c)
    html = usock.read()
    usock.close()
    # Replay every received cookie on all subsequent requests.
    _opener.addheaders.extend([('Cookie', c) for c in cookies.output().replace("\n", "").split("Set-Cookie: ")[1:]])
    return html
def _login(username, password):
    """Log in to blogger.com once per process and cache the PyraID
    session id in the module-global _pid."""
    global _pid
    # An existing opener means a login already happened; do nothing.
    if _opener: return
    _getHTML("http://www.blogger.com/")
    params = {"username":username,
              "password":password,
              "remember":"1"}
    _getHTML("http://www.blogger.com/login-action.pyra", params)
    cookies = [v for k, v in _opener.addheaders if k == "Cookie"]
    userid = re.compile("^PyraID=(.*?);")
    # The PyraID cookie carries the session id; an IndexError here
    # means the login did not produce one (bad credentials).
    _pid = [userid.search(v).group(1) for v in cookies if userid.search(v)][0]
def _getSettings(blogID):
    """Scrape the blog-settings edit page and cache its form values.

    Results are memoized per blogID in the module-global _settings.
    NOTE(review): the regexes are tied to 2001-era Blogger markup; any
    change to the edit form's field names breaks this scraping.
    """
    global _settings
    blogID = str(blogID)
    if not _settings.has_key(blogID):
        _settings[blogID] = {}
        s = _getHTML("http://www.blogger.com/blog_edit.pyra?blogID=%s" % blogID)
        _settings[blogID]["blogTitle"] = re.search(r'"txtTitle".*?value="(.*?)">', s).group(1)
        # Description lives in a <textarea>, not an input value attribute.
        _settings[blogID]["blogDescription"] = re.search(r'"txaBody".*?>(.*?)</textarea', s).group(1)
        _settings[blogID]["blogURL"] = re.search(r'"txtBlogURL".*?value="(.*?)">', s).group(1)
        _settings[blogID]["ftpServer"] = re.search(r'"txtFTPServer".*?value="(.*?)">', s).group(1)
        _settings[blogID]["ftpPath"] = re.search(r'"txtFTPPath".*?value="(.*?)">', s).group(1)
        _settings[blogID]["ftpFileName"] = re.search(r'"txtFTPFileName".*?value="(.*?)">', s).group(1)
        _settings[blogID]["ftpUserName"] = re.search(r'"txtFTPUserName".*?value="(.*?)">', s).group(1)
        _settings[blogID]["ftpPassword"] = re.search(r'"txtFTPPassword".*?value="(.*?)">', s).group(1)
def getBlogSetting(settingName, blogID, username, password):
    """Fetch one configuration value for a Blogger weblog.

    Returns: string

    Arguments:
    - settingName: one of 'blogTitle', 'blogDescription', 'blogURL',
      'ftpServer', 'ftpPath', 'ftpFileName', 'ftpUserName', 'ftpPassword'
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    """
    key = str(blogID)
    _login(username, password)
    _getSettings(key)
    return _settings[key][settingName]
| Python |
#!/usr/bin/env python
"""
post to, repost to, or delete articles from a weblog via Blogger API
requires pyblogger and xmlrpclib
Copyright 2001 Adam Feuer
This is free software, distributed under the Python 2.1.1 license.
See http://www.python.org/2.1.1/license.html
---------------------------
reads name and password from ~/.bloggerrc
file should be formatted like:
username adamfeuer
password foo
"""
__author__ = "Adam Feuer <adamf at pobox dot com>"
__version__ = "0.3"
__date__ = "4 November 2001"
__copyright__ = "Copyright (c) 2001 Adam Feuer"
__license__ = "Python"
import sys, os, string
import Blog
##----------- global variables --------------------
# 1 = emit debug messages on stderr (toggled by the -d flag in main()).
debug = 0
# Per-user config file holding "username ..." / "password ..." lines.
BlogInfoFileName = "~/.bloggerrc"
# Maps each accepted command (and its one-letter alias) to the minimum
# number of argv entries it requires (program name + command [+ args]).
ValidCommands = { 'delete' : 2,
                  'd' : 2,
                  'post' : 3,
                  'p' : 3,
                  'repost' : 3,
                  'r' : 3,
                  'gettemplate' : 2,
                  'gt' : 2,
                  'savetemplate' : 3,
                  'st' : 3}
# Basename of this script, used in the usage message.
ProgramName = os.path.split(sys.argv[0])[-1]
##----------- utility functions --------------------
def Message(msg):
    """Write msg (plus a newline) to stderr, but only in debug mode."""
    if debug != 1:
        return
    sys.stderr.write(msg)
    sys.stderr.write('\n')
def Error(msg):
    """Report msg on stderr and terminate the program (exit code 0)."""
    sys.stderr.write("Error: %s\n" % msg)
    sys.exit(0)
def ReadFile(filename):
    """Return the contents of *filename* as a string, or None on failure.

    Only I/O errors (missing file, bad permissions, ...) count as
    failure.  The original bare `except:` also swallowed SystemExit and
    KeyboardInterrupt, and leaked the file handle when read() raised;
    the try/finally now guarantees the handle is closed.
    """
    try:
        f = open(filename)
        try:
            return f.read()
        finally:
            f.close()
    except (IOError, OSError):
        return None
def SettingsDict(contents):
    """Convert a string to a dict containing name, value pairs.

    Splits name,value on the first run of whitespace.
    Ignores blank lines and lines whose first non-whitespace is '#'.
    All names are returned lowercase.
    """
    # str methods replace the long-deprecated string-module functions,
    # and `settings` avoids shadowing the dict builtin.
    settings = {}
    for line in contents.split('\n'):
        line = line.strip()
        # ignore blank lines and comments
        if not line:
            continue
        if line[0] == '#':
            continue
        name, value = line.split(None, 1)
        settings[name.lower().strip()] = value.strip()
    return settings
def GetUsernameAndPassword(contents):
    """Parse username and password from a settings string.

    Returns a (username, password) tuple; either element is None when
    the corresponding key is absent from *contents*.
    """
    settings = SettingsDict(contents)
    # dict.get replaces the deprecated has_key()-then-index two-step.
    return settings.get('username'), settings.get('password')
def GetBlogUsernameAndPassword():
    """Read username and password from the ~/.bloggerrc file.

    Returns None when the file cannot be read.  Previously an
    unreadable file crashed inside SettingsDict(None), so main()'s
    `if UserInfo is None` check could never fire; now it can.
    """
    filename = os.path.expanduser(BlogInfoFileName)
    contents = ReadFile(filename)
    if contents is None:
        return None
    return GetUsernameAndPassword(contents)
##------------- blogger functions --------------
def Post(blog, PostContents):
    """Append PostContents to *blog* as a brand-new entry."""
    Message("Post: getting posts...")
    entries = blog.posts
    Message("Post: doing posts...")
    entries.append(PostContents)
def Repost(blog, PostContents, PostID):
    """Replace the body of an existing post with PostContents.

    With PostID None the most recent post (index -1) is overwritten;
    otherwise the first post whose id matches PostID is updated and the
    process exits with code 0.  Error() -- which also exits -- fires
    when no post matches.
    """
    posts = blog.posts
    if PostID is None:
        posts[-1] = PostContents
    else:
        for post in posts:
            if post.id == PostID:
                post.content = PostContents
                # Success: exit here so Error() below is never reached.
                sys.exit(0)
        Error("Did not find article '%s' in '%s'." % (PostID, blog.name))
def DeletePost(blog, PostID):
    """Delete a post from *blog*.

    With PostID None the most recent post (index -1) is removed;
    otherwise the post whose id matches PostID is removed and the
    process exits with code 0.  Error() (which also exits) fires when
    no post matches.

    Bug fix: the original did `del post`, which only unbinds the loop
    variable and never removed anything from blog.posts.  Deleting by
    index invokes the container's __delitem__ so the post is actually
    removed from the server.
    """
    posts = blog.posts
    if PostID is None:
        del posts[-1]
    else:
        for i in range(len(posts)):
            if posts[i].id == PostID:
                del posts[i]
                sys.exit(0)
        Error("did not find article '%s' in '%s'." % (PostID, blog.name))
def GetTemplate(blog):
    """Write the blog's main HTML template to stdout."""
    html = blog.template.main
    print html
def SaveTemplate(blog, FileContents):
    """Install FileContents as the blog's main HTML template."""
    template = blog.template
    template.main = FileContents
def Usage():
    """Print the command-line help text to stdout."""
    print "Usage: %s [-d] <command> [<parameters>] " % ProgramName
    print "    -d: print debugging info to STDERR."
    print
    print "Commands:"
    print "   post <filename>: posts a file to a weblog"
    print "   p <filename>: same as post"
    print "   repost <filename> [<PostID>]: reposts file to weblog as article <PostID>, or as most recent article if <PostID> is omitted."
    print "   r <filename> [<PostID>]: same as repost"
    print "   delete [<PostID>]: deletes article with PostID, or most recent article if PostID is omitted."
    print "   d [<PostID>]: same as delete"
    print "   gettemplate: gets the HTML template and writes it to STDOUT."
    print "   gt: same as gettemplate."
    print "   savetemplate <filename>: saves file as the HTML template for this blog."
    print "   st: same as savetemplate."
    print
    print "Note: only operates on the first weblog."
    print
##----------------------------
def main():
    """Command-line entry point: parse argv, log in, dispatch the command."""
    global debug
    if len(sys.argv) < 2:
        Usage()
        sys.exit(0)
    # Optional -d flag must precede the command; shift it off argv.
    if sys.argv[1] == '-d':
        del sys.argv[1]
        debug = 1
    else:
        debug = 0
    command = sys.argv[1]
    if not ValidCommands.has_key(command):
        Usage()
        Error("'%s' is not a valid command." % command)
        sys.exit(0)  # NOTE(review): unreachable -- Error() already exits.
    # ValidCommands holds the minimum argv length for each command.
    if len(sys.argv) < ValidCommands[command]:
        Usage()
        Error("%s requires at least %d parameter(s)." % (command, ValidCommands[command] - 1))
    FileName = None
    FileContents = None
    PostID = None
    # Commands that take a filename as their first parameter.
    if command in ['post', 'p', 'repost', 'r', 'savetemplate', 'st']:
        FileName = sys.argv[2]
    # Optional trailing PostID for (re)post; first parameter for delete.
    if command in ['post', 'p', 'repost', 'r']:
        if len(sys.argv) > 3:
            PostID = sys.argv[3]
    if command in ['delete', 'd']:
        if len(sys.argv) > 2:
            PostID = sys.argv[2]
    UserInfo = GetBlogUsernameAndPassword()
    if UserInfo is None:
        Error("could not read Username and Password.")
    else:
        Username, Password = UserInfo
    if FileName is not None:
        FileContents = ReadFile(FileName)
        if FileContents is None:
            Error("could not read from '%s'." % FileName)
    Message("Getting blog user info...")
    user = Blog.User(Username, Password)
    blogs = user.blogs
    # fixme: all operations are on the first blog
    blog = blogs[0]
    if command in ['post', 'p']:
        Message("Posting file '%s' to '%s'" % (FileName, blog.name))
        Post(blog, FileContents)
    elif command in ['repost', 'r']:
        if PostID is not None:
            Message("Reposting file '%s' to article '%s' of '%s'" % (FileName, PostID, blog.name))
        else:
            Message("Reposting file '%s' to '%s'" % (FileName, blog.name))
        Repost(blog, FileContents, PostID)
    elif command in ['delete', 'd']:
        if PostID is not None:
            Message("Deleting article '%s' from '%s'." % (PostID,blog.name))
        else:
            Message("Deleting most recent article from '%s'." % blog.name)
        DeletePost(blog, PostID)
    elif command in ['gettemplate','gt']:
        Message("Getting main template from '%s'" % blog.name)
        GetTemplate(blog)
    elif command in ['savetemplate','st']:
        Message("Saving file '%s' as main template of '%s'" % (FileName, blog.name))
        SaveTemplate(blog, FileContents)
##----------------------------
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
"""
post to, repost to, or delete articles from a weblog via Blogger API
requires pyblogger and xmlrpclib
Copyright 2001 Adam Feuer
This is free software, distributed under the Python 2.1.1 license.
See http://www.python.org/2.1.1/license.html
---------------------------
reads name and password from ~/.bloggerrc
file should be formatted like:
username adamfeuer
password foo
"""
__author__ = "Adam Feuer <adamf at pobox dot com>"
__version__ = "0.3"
__date__ = "4 November 2001"
__copyright__ = "Copyright (c) 2001 Adam Feuer"
__license__ = "Python"
import sys, os, string
import Blog
##----------- global variables --------------------
# 1 = emit debug messages on stderr (toggled by the -d flag in main()).
debug = 0
# Per-user config file holding "username ..." / "password ..." lines.
BlogInfoFileName = "~/.bloggerrc"
# Maps each accepted command (and its one-letter alias) to the minimum
# number of argv entries it requires (program name + command [+ args]).
ValidCommands = { 'delete' : 2,
                  'd' : 2,
                  'post' : 3,
                  'p' : 3,
                  'repost' : 3,
                  'r' : 3,
                  'gettemplate' : 2,
                  'gt' : 2,
                  'savetemplate' : 3,
                  'st' : 3}
# Basename of this script, used in the usage message.
ProgramName = os.path.split(sys.argv[0])[-1]
##----------- utility functions --------------------
def Message(msg):
    """Write msg (plus a newline) to stderr, but only in debug mode."""
    if debug != 1:
        return
    sys.stderr.write(msg)
    sys.stderr.write('\n')
def Error(msg):
    """Report msg on stderr and terminate the program (exit code 0)."""
    sys.stderr.write("Error: %s\n" % msg)
    sys.exit(0)
def ReadFile(filename):
    """Return the contents of *filename* as a string, or None on failure.

    Only I/O errors (missing file, bad permissions, ...) count as
    failure.  The original bare `except:` also swallowed SystemExit and
    KeyboardInterrupt, and leaked the file handle when read() raised;
    the try/finally now guarantees the handle is closed.
    """
    try:
        f = open(filename)
        try:
            return f.read()
        finally:
            f.close()
    except (IOError, OSError):
        return None
def SettingsDict(contents):
    """Convert a string to a dict containing name, value pairs.

    Splits name,value on the first run of whitespace.
    Ignores blank lines and lines whose first non-whitespace is '#'.
    All names are returned lowercase.
    """
    # str methods replace the long-deprecated string-module functions,
    # and `settings` avoids shadowing the dict builtin.
    settings = {}
    for line in contents.split('\n'):
        line = line.strip()
        # ignore blank lines and comments
        if not line:
            continue
        if line[0] == '#':
            continue
        name, value = line.split(None, 1)
        settings[name.lower().strip()] = value.strip()
    return settings
def GetUsernameAndPassword(contents):
    """Parse username and password from a settings string.

    Returns a (username, password) tuple; either element is None when
    the corresponding key is absent from *contents*.
    """
    settings = SettingsDict(contents)
    # dict.get replaces the deprecated has_key()-then-index two-step.
    return settings.get('username'), settings.get('password')
def GetBlogUsernameAndPassword():
    """Read username and password from the ~/.bloggerrc file.

    Returns None when the file cannot be read.  Previously an
    unreadable file crashed inside SettingsDict(None), so main()'s
    `if UserInfo is None` check could never fire; now it can.
    """
    filename = os.path.expanduser(BlogInfoFileName)
    contents = ReadFile(filename)
    if contents is None:
        return None
    return GetUsernameAndPassword(contents)
##------------- blogger functions --------------
def Post(blog, PostContents):
    """Append PostContents to *blog* as a brand-new entry."""
    Message("Post: getting posts...")
    entries = blog.posts
    Message("Post: doing posts...")
    entries.append(PostContents)
def Repost(blog, PostContents, PostID):
    """Replace the body of an existing post with PostContents.

    With PostID None the most recent post (index -1) is overwritten;
    otherwise the first post whose id matches PostID is updated and the
    process exits with code 0.  Error() -- which also exits -- fires
    when no post matches.
    """
    posts = blog.posts
    if PostID is None:
        posts[-1] = PostContents
    else:
        for post in posts:
            if post.id == PostID:
                post.content = PostContents
                # Success: exit here so Error() below is never reached.
                sys.exit(0)
        Error("Did not find article '%s' in '%s'." % (PostID, blog.name))
def DeletePost(blog, PostID):
    """Delete a post from *blog*.

    With PostID None the most recent post (index -1) is removed;
    otherwise the post whose id matches PostID is removed and the
    process exits with code 0.  Error() (which also exits) fires when
    no post matches.

    Bug fix: the original did `del post`, which only unbinds the loop
    variable and never removed anything from blog.posts.  Deleting by
    index invokes the container's __delitem__ so the post is actually
    removed from the server.
    """
    posts = blog.posts
    if PostID is None:
        del posts[-1]
    else:
        for i in range(len(posts)):
            if posts[i].id == PostID:
                del posts[i]
                sys.exit(0)
        Error("did not find article '%s' in '%s'." % (PostID, blog.name))
def GetTemplate(blog):
    """Write the blog's main HTML template to stdout."""
    html = blog.template.main
    print html
def SaveTemplate(blog, FileContents):
    """Install FileContents as the blog's main HTML template."""
    template = blog.template
    template.main = FileContents
def Usage():
    """Print the command-line help text to stdout."""
    print "Usage: %s [-d] <command> [<parameters>] " % ProgramName
    print "    -d: print debugging info to STDERR."
    print
    print "Commands:"
    print "   post <filename>: posts a file to a weblog"
    print "   p <filename>: same as post"
    print "   repost <filename> [<PostID>]: reposts file to weblog as article <PostID>, or as most recent article if <PostID> is omitted."
    print "   r <filename> [<PostID>]: same as repost"
    print "   delete [<PostID>]: deletes article with PostID, or most recent article if PostID is omitted."
    print "   d [<PostID>]: same as delete"
    print "   gettemplate: gets the HTML template and writes it to STDOUT."
    print "   gt: same as gettemplate."
    print "   savetemplate <filename>: saves file as the HTML template for this blog."
    print "   st: same as savetemplate."
    print
    print "Note: only operates on the first weblog."
    print
##----------------------------
def main():
    """Command-line entry point: parse argv, log in, dispatch the command."""
    global debug
    if len(sys.argv) < 2:
        Usage()
        sys.exit(0)
    # Optional -d flag must precede the command; shift it off argv.
    if sys.argv[1] == '-d':
        del sys.argv[1]
        debug = 1
    else:
        debug = 0
    command = sys.argv[1]
    if not ValidCommands.has_key(command):
        Usage()
        Error("'%s' is not a valid command." % command)
        sys.exit(0)  # NOTE(review): unreachable -- Error() already exits.
    # ValidCommands holds the minimum argv length for each command.
    if len(sys.argv) < ValidCommands[command]:
        Usage()
        Error("%s requires at least %d parameter(s)." % (command, ValidCommands[command] - 1))
    FileName = None
    FileContents = None
    PostID = None
    # Commands that take a filename as their first parameter.
    if command in ['post', 'p', 'repost', 'r', 'savetemplate', 'st']:
        FileName = sys.argv[2]
    # Optional trailing PostID for (re)post; first parameter for delete.
    if command in ['post', 'p', 'repost', 'r']:
        if len(sys.argv) > 3:
            PostID = sys.argv[3]
    if command in ['delete', 'd']:
        if len(sys.argv) > 2:
            PostID = sys.argv[2]
    UserInfo = GetBlogUsernameAndPassword()
    if UserInfo is None:
        Error("could not read Username and Password.")
    else:
        Username, Password = UserInfo
    if FileName is not None:
        FileContents = ReadFile(FileName)
        if FileContents is None:
            Error("could not read from '%s'." % FileName)
    Message("Getting blog user info...")
    user = Blog.User(Username, Password)
    blogs = user.blogs
    # fixme: all operations are on the first blog
    blog = blogs[0]
    if command in ['post', 'p']:
        Message("Posting file '%s' to '%s'" % (FileName, blog.name))
        Post(blog, FileContents)
    elif command in ['repost', 'r']:
        if PostID is not None:
            Message("Reposting file '%s' to article '%s' of '%s'" % (FileName, PostID, blog.name))
        else:
            Message("Reposting file '%s' to '%s'" % (FileName, blog.name))
        Repost(blog, FileContents, PostID)
    elif command in ['delete', 'd']:
        if PostID is not None:
            Message("Deleting article '%s' from '%s'." % (PostID,blog.name))
        else:
            Message("Deleting most recent article from '%s'." % blog.name)
        DeletePost(blog, PostID)
    elif command in ['gettemplate','gt']:
        Message("Getting main template from '%s'" % blog.name)
        GetTemplate(blog)
    elif command in ['savetemplate','st']:
        Message("Saving file '%s' as main template of '%s'" % (FileName, blog.name))
        SaveTemplate(blog, FileContents)
##----------------------------
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| Python |
"""Factory to output blogs in various formats
Currently supported formats:
- RSS: for syndication
- Scripting News XML: also for syndication
- minimal HTML: for text browsers and mobile devices
- Javascript: for dynamic inclusion in other HTML pages
"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import blogger
import bloggerext
import re
import time
import xml.dom.minidom
import scriptingnews
class _Factory:
    """Base class for blog renderers.

    refresh() pulls recent posts and blog settings from Blogger
    (network I/O via the blogger/bloggerext modules); splittitle()
    heuristically splits a post's plain text into a (title,
    description) pair truncated to the subclass limits maxTitleLength
    and maxPostLength (which subclasses must define).
    """
    # <Blogger>...</Blogger> marks the per-post section of a template.
    postPattern = re.compile(r'(.*?)<Blogger>(.*?)</Blogger>(.*)', re.IGNORECASE | re.DOTALL)
    dateHeaderPattern = re.compile(r'(.*?)<BlogDateHeader>(.*?)</BlogDateHeader>(.*)', re.IGNORECASE | re.DOTALL)
    # Naive tag stripper: keeps only the text between an open/close pair.
    stripTagsPattern = re.compile(r'(<.*?>)(.*?)(</.*?>)')
    stripTagsReplace = r'\2'
    # First "sentence": text up to ./?/!/- followed by whitespace, or a newline.
    titlePattern = re.compile(r'(.*?)([.?!-]\s|\n)')
    descriptionPattern = re.compile(r'(.*?)([.?!-]\s|\n)\n*(.*?)(\n|$)')
    def __init__(self, blogID, username, password, maxposts=15):
        self.blogID = blogID
        self.username = username
        self.password = password
        self.maxposts = maxposts
        self.refresh()
    def refresh(self):
        # Reverse so the oldest post comes first (listPosts apparently
        # returns newest-first -- confirm against blogger.py).
        self.posts = blogger.listPosts(self.blogID, self.username, self.password, self.maxposts)
        self.posts.reverse()
        self.blogTitle = bloggerext.getBlogSetting("blogTitle", self.blogID, self.username, self.password)
        self.blogURL = bloggerext.getBlogSetting("blogURL", self.blogID, self.username, self.password)
        self.blogDescription = bloggerext.getBlogSetting("blogDescription", self.blogID, self.username, self.password)
        info = blogger.getUserInfo(self.username, self.password)
        self.userID = info["userid"]
        self.userRealName = "%s %s" % (info["firstname"], info["lastname"])
    def striptags(self, text):
        # Remove matched HTML tag pairs, keeping the enclosed text.
        return self.stripTagsPattern.sub(self.stripTagsReplace, text)
    def splittitle(self, post):
        # Derive (title, description) from the tag-stripped post body.
        text = self.striptags(post["content"])
        title = self.titlePattern.search(text)
        if title:
            title = title.group(1)
            description = self.descriptionPattern.search(text).group(3)
        else:
            # No sentence break found: whole text becomes the title.
            title = text
            description = ""
        # Truncate with an ASCII ellipsis; limits come from the subclass.
        if len(title) > self.maxTitleLength:
            title = title[:self.maxTitleLength-3] + "..."
        if len(description) > self.maxPostLength:
            description = description[:self.maxPostLength-3] + "..."
        return (title, description)
class SimpleElement(xml.dom.minidom.Element):
    """A DOM element whose sole child is a text node holding *data*."""
    def __init__(self, tag, data):
        xml.dom.minidom.Element.__init__(self, tag)
        text_node = xml.dom.minidom.Text(data)
        self.appendChild(text_node)
class _XMLFactory(_Factory):
    """Shared truncation limits for the XML-producing renderers."""
    maxTitleLength = 100
    maxPostLength = 500
class _RSSFactory(_XMLFactory):
    """Render the blog as an RSS 0.92 feed."""
    def get(self):
        # Build the document bottom-up: items -> channel -> rss -> doc.
        doc = xml.dom.minidom.Document()
        rss = xml.dom.minidom.Element("rss")
        rss.attributes["version"] = "0.92"
        channel = xml.dom.minidom.Element("channel")
        channel.appendChild(SimpleElement("title", self.blogTitle))
        channel.appendChild(SimpleElement("link", self.blogURL))
        channel.appendChild(SimpleElement("description", self.blogDescription))
        for post in self.posts:
            item = xml.dom.minidom.Element("item")
            title, description = self.splittitle(post)
            item.appendChild(SimpleElement("title", title))
            item.appendChild(SimpleElement("description", description))
            # Item links point at the post's anchor on the blog page.
            item.appendChild(SimpleElement("link", "%s#%s" % (self.blogURL, post["postid"])))
            channel.appendChild(item)
        rss.appendChild(channel)
        doc.appendChild(rss)
        return doc.toxml()
class _ScriptingNewsFactory(_XMLFactory):
    """Render the blog as Scripting News XML via the scriptingnews module."""
    headerDateTimeFormat = "%a, %d %b %Y %H:%M:%S GMT"
    def get(self):
        headers = {}
        if self.posts:
            # NOTE(review): assumes dateCreated behaves like a struct_time
            # 9-tuple (indexable, mktime-able) -- confirm against blogger.py.
            lastBuildDate = self.posts[-1]["dateCreated"]
        else:
            lastBuildDate = time.localtime()
        # Shift local time toward UTC: altzone when the DST flag is set,
        # plain timezone otherwise.
        gap = lastBuildDate[-1] and time.altzone or time.timezone
        lastBuildDate = time.localtime(time.mktime(lastBuildDate) + gap)
        # Index 0 of a struct_time is the year.
        headers["copyright"] = "Copyright %s %s" % (lastBuildDate[0], self.userRealName)
        headers["pubDate"] = time.strftime(self.headerDateTimeFormat, time.gmtime(time.time()))
        headers["lastBuildDate"] = time.strftime(self.headerDateTimeFormat, lastBuildDate)
        headers["channelDescription"] = self.blogDescription
        headers["channelLink"] = self.blogURL
        headers["channelTitle"] = self.blogTitle
        allposts = [p["content"].strip() for p in self.posts]
        return scriptingnews.textToXML(headers, "\n\n".join(allposts))
class _HTMLFactory(_Factory):
    """Render the blog by substituting <$...$> tags in an HTML template.

    Subclasses must define `template`; the <Blogger>...</Blogger>
    section of it is repeated once per post.
    """
    maxTitleLength = 80
    maxPostLength = 255
    itemDateFormat = "%m/%d, %I:%M%p"
    def get(self):
        if not self.template:
            raise ValueError, "no template defined"
        pagetext = self.template
        # Page-level placeholders first.
        pagetext = pagetext.replace('<$BlogTitle$>', self.blogTitle)
        pagetext = pagetext.replace('<$BlogDescription$>', self.blogDescription)
        # Extract the per-post section of the template.
        postTemplate = self.postPattern.search(pagetext).group(2)
        posttexts = []
        for post in self.posts:
            posttext = postTemplate
            title, description = self.splittitle(post)
            posttext = posttext.replace("<$BlogItemTitle$>", title)
            posttext = posttext.replace("<$BlogItemBody$>", description)
            posttext = posttext.replace("<$BlogItemAuthor$>", self.userRealName)
            posttext = posttext.replace("<$BlogItemDateTime$>",
                                        time.strftime(self.itemDateFormat, post["dateCreated"]))
            posttext = posttext.replace("<$BlogItemArchiveFileName$>", self.blogURL)
            posttext = posttext.replace("<$BlogItemNumber$>", post["postid"])
            posttexts.append(posttext)
        # Splice the rendered posts back between the template's pre/post parts.
        return self.postPattern.sub(r'\1%s\3' % ''.join(posttexts), pagetext)
class _MinimalFactory(_HTMLFactory):
    """Bare-bones HTML output for text browsers and mobile devices."""
    template = """<html>
<head>
<title><$BlogTitle$></title>
</head>
<body>
<p><b><u><$BlogDescription$></u></b></p>
<p>
<Blogger>
<p>
<b><a href="<$BlogItemArchiveFileName$>#<$BlogItemNumber$>"><$BlogItemTitle$></a></b>
<br>
<$BlogItemBody$>
<br>
<i><$BlogItemAuthor$>, <$BlogItemDateTime$></i>
</p>
</Blogger>
</body>
</html>"""
class _JavascriptFactory(_HTMLFactory):
    """Javascript output: document.writeln() calls for dynamic inclusion.

    Bug fix: the blogdescription span used double quotes around its
    class attribute inside a double-quoted writeln() string, which
    terminated the JS string literal early and made the emitted script
    a syntax error.  It now uses single quotes, consistent with every
    other span in this template.
    """
    template = """document.writeln("<p><span class='blogdescription'><$BlogDescription$></span></p>");
<Blogger>document.writeln("<p><span class='blogitemtitle'><$BlogItemTitle$></span>");
document.writeln("<br />");
document.writeln("<span class='blogitembody'><$BlogItemBody$></span>");
document.writeln("<br />");
document.writeln("<span class='blogitemauthor'><$BlogItemAuthor$></span>,");
document.writeln("<span class='blogitemdatetime'><$BlogItemDateTime$></span>");
document.writeln("</p>");
</Blogger>"""
def getBlogAsRSS(blogID, username, password, maxposts=15):
    """Render the weblog as an RSS 0.92 feed.

    Returns: string

    Arguments:
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    - maxposts: maximum number of posts to include in output
    """
    return getBlogAs("RSS", blogID, username, password, maxposts)
def getBlogAsScriptingNews(blogID, username, password, maxposts=20):
    """Render the weblog in Scripting News XML format.

    Returns: string

    Arguments:
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    - maxposts: maximum number of posts to include in output
    """
    return getBlogAs("ScriptingNews", blogID, username, password, maxposts)
def getBlogAsMinimal(blogID, username, password, maxposts=20):
    """Render the weblog as minimal HTML for text browsers.

    Returns: string

    Arguments:
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    - maxposts: maximum number of posts to include in output
    """
    return getBlogAs("Minimal", blogID, username, password, maxposts)
def getBlogAsJavascript(blogID, username, password, maxposts=20):
    """Render the weblog as Javascript document.writeln() calls.

    Returns: string

    Arguments:
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    - maxposts: maximum number of posts to include in output

    Usage:
    - <script language="JavaScript">OUTPUT_OF_THIS_FUNCTION</script>
    - <script language="JavaScript" src="FILE_CONTAINING_OUTPUT_OF_THIS_FUNCTION"></script>
    - <script language="JavaScript" src="URL_CONTAINING_OUTPUT_OF_THIS_FUNCTION"></script>
    - <script language="JavaScript" src="URL_THAT_DYNAMICALLY_CALLS_THIS_FUNCTION"></script>
    Presumably you would only be able to do that last one if you control
    your own web server.
    """
    return getBlogAs("Javascript", blogID, username, password, maxposts)
def getBlogAs(format, blogID, username, password, maxposts=20):
    """Render the weblog in the requested output format.

    Returns: string

    Arguments:
    - format: one of 'RSS', 'ScriptingNews', 'Minimal', 'Javascript'
    - blogID: your weblog ID
    - username: your weblog username
    - password: your weblog password
    - maxposts: maximum number of posts to include in output
    """
    # Resolve e.g. "RSS" to the module-level _RSSFactory class.
    factory_class = globals()["_%sFactory" % format]
    factory = factory_class(blogID, username, password, maxposts)
    return factory.get()
| Python |
"""Unit tests for blogger.py
These require PyUnit (unittest.py), which is part of the standard library
starting with Python 2.1.
"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import unittest
import blogger
import xmlrpclib
import new
import time
import copy
import random
from xml.dom import minidom
class constants:
    """Canned fixture values shared by FakeTransport and the test cases."""
    blogID = "1000"
    title = "Fake blog"
    url = "http://localhost/default.ida"
    email = "me@mydomain.org"
    userID = "999"
    nickname = "nicky"
    firstname = "first"
    lastname = "last"
    username = "username"
    password = "password"
    invalidUsername = "wrong username"
    invalidPassword = "wrong password"
    template = {"main":"main template",
                "archiveIndex":"archive index template"}
    # %s placeholder is filled with a fresh random number per test.
    autoPostText = "This post intentionally contains the following random number: %s"
# Shared fixture instance used throughout the tests.
c = constants()
# utility functions to insert into minidom.Node class
# NOTE(review): `new.instancemethod` is Python-2-only; under Python 3
# plain function assignment on the class would replace all of this.
def _all(self, tagname):
    # Shorthand for getElementsByTagName.
    return self.getElementsByTagName(tagname)
minidom.Node.all = new.instancemethod(_all, None, minidom.Node)
def _first(self, path):
    # Walk an "a/b/c" path, taking the first matching child at each step.
    node = self
    for name in path.split("/"):
        node = node.all(name)[0]
    return node
minidom.Node.first = new.instancemethod(_first, None, minidom.Node)
def _text(self):
    # Text of the node's first child, or "" when there is none.
    try:
        return str(self.firstChild.data)
    except:
        return ""
minidom.Node.text = new.instancemethod(_text, None, minidom.Node)
def text(element):
    # Free-function form of Node.text(), handy with map().
    return element.text()
# fake XML-RPC server to facilitate local testing;
# this acts just like the Blogger.com server, but with a hard-coded blog
class FakeTransport:
    """In-process stand-in for the Blogger.com XML-RPC endpoint.

    Installed as xmlrpclib's transport by BaseTest.setUp(); request()
    parses the outgoing XML-RPC call and routes it to the matching
    do_<methodName> handler, which operates on a hard-coded blog of
    three posts.  Error strings mimic the real server's faults.
    """
    authFailed = "Error: User authentication failed: %s"
    postNotFound = "Post %s not found"
    noPerms = "ERROR: User does not have permission to post to this blog."
    def __init__(self):
        # Canned posts, newest first (postid '3' is the most recent).
        self.posts = [{"dateCreated":xmlrpclib.DateTime(time.time()),
                       "userid":c.userID,
                       "postid":'3',
                       "content":"Third post!"},
                      {"dateCreated":xmlrpclib.DateTime(time.time()),
                       "userid":c.userID,
                       "postid":'2',
                       "content":"Second post!"},
                      {"dateCreated":xmlrpclib.DateTime(time.time()),
                       "userid":c.userID,
                       "postid":'1',
                       "content":"First post!"}]
        # Deep copies so tests can mutate without touching the fixtures.
        self.template = copy.deepcopy(c.template)
        self.nextPostID = len(self.posts) + 1
    def request(self, host, handler, request_body, verbose=0):
        # Entry point xmlrpclib calls; dispatch on the methodName element.
        xmldoc = minidom.parseString(request_body)
        methodName = xmldoc.first("methodName").text().replace("blogger.", "")
        return getattr(self, "do_%s" % methodName)(xmldoc)
    def _err(self, message="error"):
        # Mimic a server-side fault.
        raise xmlrpclib.Fault(0, message)
    def do_getUserInfo(self, doc):
        appkey, username, password = map(text, doc.all("string"))
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        return ({"nickname":c.nickname,
                 "userid":c.userID,
                 "url":c.url,
                 "email":c.email,
                 "lastname":c.lastname,
                 "firstname":c.firstname},)
    def do_getUsersBlogs(self, doc):
        appkey, username, password = map(text, doc.all("string"))
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        return ([{"blogid":c.blogID,
                  "blogName":c.title,
                  "blogURL":c.url}],)
    def do_getPost(self, doc):
        appkey, postID, username, password = map(text, doc.all("string"))
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        findPost = lambda d, p=postID: d["postid"]==p
        found = filter(findPost, self.posts)
        if found:
            return copy.deepcopy(found)
        else:
            self._err(self.postNotFound % postID)
    def do_getRecentPosts(self, doc):
        appkey, blogID, username, password = map(text, doc.all("string"))
        maxposts = int(doc.first("int").text())
        if blogID <> c.blogID:
            self._err(self.noPerms)
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        return copy.deepcopy(self.posts)[:maxposts]
    def do_newPost(self, doc):
        appkey, blogID, username, password, content = map(text, doc.all("string"))
        if blogID <> c.blogID:
            self._err(self.noPerms)
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        postID = str(self.nextPostID)
        self.nextPostID += 1
        # New posts go to the front: the list stays newest-first.
        self.posts.insert(0, {"dateCreated":xmlrpclib.DateTime(time.time()),
                              "userid":c.userID,
                              "postid":postID,
                              "content":content})
        return postID
    def do_editPost(self, doc):
        appkey, postID, username, password, content = map(text, doc.all("string"))
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        findPost = lambda d, p=postID: d["postid"]==p
        found = filter(findPost, self.posts)
        if found:
            found[0]["content"] = content
            return (xmlrpclib.True,)
        else:
            self._err(self.postNotFound % postID)
    def do_deletePost(self, doc):
        appkey, postID, username, password = map(text, doc.all("string"))
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        for i in range(len(self.posts)):
            if self.posts[i]["postid"] == postID:
                del self.posts[i]
                return (xmlrpclib.True,)
        self._err("Post %s not found" % postID)
    def do_getTemplate(self, doc):
        appkey, blogID, username, password, templatetype = map(text, doc.all("string"))
        if blogID <> c.blogID:
            self._err(self.noPerms)
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        return self.template[templatetype]
    def do_setTemplate(self, doc):
        appkey, blogID, username, password, html, templatetype = map(text, doc.all("string"))
        if blogID <> c.blogID:
            self._err(self.noPerms)
        if (username <> c.username) or (password <> c.password):
            self._err(self.authFailed % username)
        self.template[templatetype] = html
        return (xmlrpclib.True,)
##----------------------- unit tests -----------------------##
def getRandomNumber():
    """Return a random integer in [0, 100000] as a decimal string."""
    return "%d" % random.randint(0, 100000)
class BaseTest(unittest.TestCase):
    """Installs a fresh FakeTransport so blogger.py talks to the fake server."""
    def setUp(self):
        blogger.constants.transport = FakeTransport()
        # Direct handles on the fake server's state for post-condition checks.
        self.checkPosts = blogger.constants.transport.posts
        self.checkTemplate = blogger.constants.transport.template
class GetUserInfoTest(BaseTest):
    """blogger.getUserInfo against the fake server."""
    def testNickname(self):
        """getUserInfo returns known nickname"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["nickname"], c.nickname)
    def testUserID(self):
        """getUserInfo returns known user ID"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["userid"], c.userID)
    def testURL(self):
        """getUserInfo returns known URL"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["url"], c.url)
    def testEmail(self):
        """getUserInfo returns known email"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["email"], c.email)
    def testLastName(self):
        """getUserInfo returns known last name"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["lastname"], c.lastname)
    def testFirstName(self):
        """getUserInfo returns known first name"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(info["firstname"], c.firstname)
    def testNoOtherReturnValues(self):
        """getUserInfo returns only known keys"""
        info = blogger.getUserInfo(c.username, c.password)
        self.assertEqual(len(info.keys()), 6)
    def testFailsWithInvalidUsername(self):
        """getUserInfo fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.getUserInfo, c.invalidUsername, c.password)
    def testFailsWithInvalidPassword(self):
        """getUserInfo fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.getUserInfo, c.username, c.invalidPassword)
class ListBlogsTest(BaseTest):
    """blogger.listBlogs against the fake server (which owns one blog)."""
    def testCount(self):
        """listBlogs says I have 1 blog"""
        blogs = blogger.listBlogs(c.username, c.password)
        self.assertEqual(len(blogs), 1)
    def testBlogID(self):
        """listBlogs returns known blog ID"""
        blogs = blogger.listBlogs(c.username, c.password)
        self.assertEqual(blogs[0]["blogid"], c.blogID)
    def testBlogName(self):
        """listBlogs returns known blog name"""
        blogs = blogger.listBlogs(c.username, c.password)
        self.assertEqual(blogs[0]["blogName"], c.title)
    def testBlogURL(self):
        """listBlogs returns known blog URL"""
        blogs = blogger.listBlogs(c.username, c.password)
        self.assertEqual(blogs[0]["blogURL"], c.url)
    def testFailsWithInvalidUsername(self):
        """listBlogs fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.listBlogs, c.invalidUsername, c.password)
    def testFailsWithInvalidPassword(self):
        """listBlogs fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.listBlogs, c.username, c.invalidPassword)
class ListPostsTest(BaseTest):
    """blogger.listPosts against the fake server's three canned posts."""
    def testCount(self):
        """listPosts says I have 3 posts"""
        posts = blogger.listPosts(c.blogID, c.username, c.password)
        self.assertEqual(len(posts), 3)
    def testPostID(self):
        """listPosts returns known post ID"""
        posts = blogger.listPosts(c.blogID, c.username, c.password)
        self.assertEqual(posts[0]["postid"], "1")
    def testUserID(self):
        """listPosts returns known user ID"""
        posts = blogger.listPosts(c.blogID, c.username, c.password)
        self.assertEqual(posts[1]["userid"], c.userID)
    def testContent(self):
        """listPosts returns known content"""
        posts = blogger.listPosts(c.blogID, c.username, c.password)
        self.assertEqual(posts[2]["content"], "Third post!")
    def testPartialListing(self):
        """listPosts returns partial listing"""
        posts = blogger.listPosts(c.blogID, c.username, c.password, 2)
        self.assertEqual(len(posts), 2)
    def testFailsWithInvalidBlogID(self):
        """listPosts fails with invalid blog ID"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.listPosts, 0, c.username, c.password)
    def testFailsWithInvalidUsername(self):
        """listPosts fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.listPosts, c.blogID, c.invalidUsername, c.password)
    def testFailsWithInvalidPassword(self):
        """listPosts fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.listPosts, c.blogID, c.username, c.invalidPassword)
class GetPostTest(BaseTest):
    """blogger.getPost against the fake server."""
    def testUserID(self):
        """getPost returns known user ID"""
        post = blogger.getPost(1, c.username, c.password)
        self.assertEqual(post["userid"], c.userID)
    def testContent(self):
        """getPost returns known content"""
        post = blogger.getPost(3, c.username, c.password)
        self.assertEqual(post["content"], "Third post!")
    def testFailsWithInvalidPostID(self):
        """getPost fails with invalid post ID"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.getPost, 0, c.username, c.password)
    def testFailsWithInvalidUsername(self):
        """getPost fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.getPost, 1, c.invalidUsername, c.password)
    def testFailsWithInvalidPassword(self):
        """getPost fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.getPost, 1, c.username, c.invalidPassword)
class NewPostTest(BaseTest):
    """Exercises blogger.newPost."""

    def _assertFault(self, *args):
        # Helper: newPost must raise an XML-RPC Fault for these arguments.
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.newPost, *args)

    def testNewPost(self):
        """newPost happy path"""
        marker = getRandomNumber()
        blogger.newPost(c.blogID, c.username, c.password, c.autoPostText % marker, 1)
        # The new post is expected at index 0 of checkPosts and must
        # contain the unique marker text.
        self.assert_(self.checkPosts[0]["content"].find(marker) >= 0)

    def testFailsWithInvalidBlogID(self):
        """newPost fails with invalid blog ID"""
        self._assertFault(0, c.username, c.password, "abc", 1)

    def testFailsWithInvalidUsername(self):
        """newPost fails with invalid username"""
        self._assertFault(c.blogID, c.invalidUsername, c.password, "abc", 1)

    def testFailsWithInvalidPassword(self):
        """newPost fails with invalid password"""
        self._assertFault(c.blogID, c.username, c.invalidPassword, "abc", 1)
class EditPostTest(BaseTest):
    """Exercises blogger.editPost."""

    def _assertFault(self, *args):
        # Helper: editPost must raise an XML-RPC Fault for these arguments.
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.editPost, *args)

    def testEditPost(self):
        """edit post happy path"""
        marker = getRandomNumber()
        blogger.editPost(2, c.username, c.password, c.autoPostText % marker, 1)
        self.assert_(self.checkPosts[-2]["content"].find(marker) >= 0)

    def testFailsWithInvalidPostID(self):
        """editPost fails with invalid post ID"""
        self._assertFault(0, c.username, c.password, "abc", 1)

    def testFailsWithInvalidUsername(self):
        """editPost fails with invalid username"""
        self._assertFault(c.blogID, c.invalidUsername, c.password, "abc", 1)

    def testFailsWithInvalidPassword(self):
        """editPost fails with invalid password"""
        self._assertFault(c.blogID, c.username, c.invalidPassword, "abc", 1)
class DeletePostTest(BaseTest):
    """Exercises blogger.deletePost."""

    def _assertFault(self, *args):
        # Helper: deletePost must raise an XML-RPC Fault for these arguments.
        self.assertRaises(blogger.xmlrpclib.Fault, blogger.deletePost, *args)

    def testDeletePost(self):
        """deletePost happy path"""
        blogger.deletePost(3, c.username, c.password, 1)
        self.assertEqual(len(self.checkPosts), 2)
        self.assertEqual(self.checkPosts[0]["postid"], "2")
        self.assertEqual(self.checkPosts[1]["postid"], "1")

    def testInvalidPostID(self):
        """deletePost fails with invalid post ID"""
        self._assertFault(0, c.username, c.password, 1)

    def testInvalidUsername(self):
        """deletePost fails with invalid username"""
        self._assertFault(c.blogID, c.invalidUsername, c.password, 1)

    def testInvalidPassword(self):
        """deletePost fails with invalid password"""
        self._assertFault(c.blogID, c.username, c.invalidPassword, 1)
class GetTemplateTest(BaseTest):
    """Exercises blogger.getTemplate."""

    def _fetch(self, templateType):
        # Helper: fetch one of the blog's templates with valid credentials.
        return blogger.getTemplate(c.blogID, c.username, c.password, templateType)

    def testGetMainTemplate(self):
        """getTemplate happy path (main template)"""
        self.assertEqual(self._fetch(blogger.TemplateType.main), c.template["main"])

    def testGetArchiveIndexTemplate(self):
        """getTemplate happy path (archive index template)"""
        self.assertEqual(self._fetch(blogger.TemplateType.archiveIndex),
                         c.template["archiveIndex"])

    def testFailsWithInvalidBlogID(self):
        """getTemplate fails with invalid blog ID"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.getTemplate, 0, c.username, c.password)

    def testFailsWithInvalidTemplateType(self):
        """getTemplate fails with invalid template type"""
        # An unknown template type raises ValueError rather than a Fault.
        self.assertRaises(ValueError,
                          blogger.getTemplate, c.blogID, c.username, c.password, "abc")

    def testFailsWithInvalidUsername(self):
        """getTemplate fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.getTemplate, c.blogID, c.invalidUsername, c.password)

    def testFailsWithInvalidPassword(self):
        """getTemplate fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.getTemplate, c.blogID, c.username, c.invalidPassword)
class SetTemplateTest(BaseTest):
    """Exercises blogger.setTemplate."""

    def _roundTrip(self, templateType):
        # Helper: write a unique value as the template, return it for comparison.
        marker = getRandomNumber()
        blogger.setTemplate(c.blogID, c.username, c.password, marker, templateType)
        return marker

    def testSetMainTemplate(self):
        """setTemplate happy path (main template)"""
        marker = self._roundTrip(blogger.TemplateType.main)
        self.assertEqual(self.checkTemplate["main"], marker)

    def testSetArchiveIndexTemplate(self):
        """setTemplate happy path (archive index template)"""
        marker = self._roundTrip(blogger.TemplateType.archiveIndex)
        self.assertEqual(self.checkTemplate["archiveIndex"], marker)

    def testFailsWithInvalidBlogID(self):
        """setTemplate fails with invalid blog ID"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.setTemplate, 0, c.username, c.password, "")

    def testFailsWithInvalidTemplateType(self):
        """setTemplate fails with invalid template type"""
        # An unknown template type raises ValueError rather than a Fault.
        self.assertRaises(ValueError,
                          blogger.setTemplate, c.blogID, c.username, c.password, "", "abc")

    def testFailsWithInvalidUsername(self):
        """setTemplate fails with invalid username"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.setTemplate, c.blogID, c.invalidUsername, c.password, "abc")

    def testFailsWithInvalidPassword(self):
        """setTemplate fails with invalid password"""
        self.assertRaises(blogger.xmlrpclib.Fault,
                          blogger.setTemplate, c.blogID, c.username, c.invalidPassword, "abc")
# Allow the whole suite to be run directly from the command line.
if __name__ == "__main__":
    unittest.main()
| Python |
"""
postObject model for Blogger
This is a high-level object-oriented interface for blogger.py,
which defines all the available functions exposed by Blogger.com's
XML-RPC API.
This code was originally written by Mark Pilgrim
(f8dy@diveintomark.org). I have added objects to allow interface with the added
methods available in Movable Type's XML-RPC API.
Example:
>>> user = Blog.User("YOUR_BLOGGER_USERNAME", "YOUR_BLOGGER_PASSWORD")
>>> blogs = user.blogs # list of all blogs
>>> for blog in blogs:
... print "Blog ID:", blog.id # internal Blogger.com blog ID
... print "Blog name:", blog.name # title of blog
... print "Blog URL:", blog.url # base URL of blog
>>> blog = blogs[0] # get reference to user's first blog
>>> posts = blog.posts # list of most recent posts (up to 20)
>>> for post in blog.posts:
... print "Post ID:", post.postid # internal Blogger.com post ID
... print "Post date:", post.dateCreated # date created, in tuple format
... print "Posted by:", post.userid # internal Blogger.com user ID
... print "Post text:", post.content # text of post
>>> posts.append("Ping.") # post new entry to blog
>>> len(posts) # count posts
>>> posts[-1].content = "Pong." # edit text of most recent post
>>> del posts[-1] # delete most recent post
>>> html = blog.template.main # get HTML of main blog entry template
>>> blog.template.main = html # set HTML template for main blog entries
>>> html = blog.template.archiveIndex # get HTML of archive index template
>>> blog.template.archiveIndex = html # set HTML template for archive index
Movable Type Example:
>>> user = Blog.User("YOUR_MT_USERNAME", "YOUR_MT_PASSWORD")
>>> blogs = user.blogs
>>> for blog in blogs:
... print "Blog ID:", blog.id # internal Blogger.com blog ID
... print "Blog name:", blog.name # title of blog
... print "Blog URL:", blog.url # base URL of blog
>>> blog = blogs[0] # get reference to user's first blog
>>> posts = blog.mtposts # list of most recent posts (up to 20)
>>> for post in blog.posts:
... print "Post ID:", post.postid # internal Blogger.com post ID
... print "Post date:", post.dateCreated # date created, in tuple format
... print "Posted by:", post.userid # internal Blogger.com user ID
... print "Post text:", post.content # text of post
>>> content = {} # fill dictionary with info for new post
>>> content ["title"] = "New Post Title"
>>> content ["description"] = "Ping."
>>> mtposts.append (content)
>>> len(posts) # count posts
>>> posts[-1].content = "Pong." # edit text of most recent post
>>> del posts[-1] # delete most recent post
Compatibility note:
There are several weblog services that offer an XML-RPC interface
with varying levels of compatibility with Blogger.com's API. These
generally will not work with this high-level API, due to missing
support for key functions. For instance, Manila does not implement
the getUsersBlogs API, so blogger.listBlogs() will not work, so trying
to access the Blog.User.blogs attribute will not work. The other
lower-level functions in blogger.py will work with Manila; I suggest
you use those directly if you wish to talk to a Manila server.
"""
__author__ = "Brent Loertscher (blurch@cbtlsl.com)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001-2 Mark Pilgrim, 2003 Brent Loertscher"
__license__ = "Python"
import blogger
class User:
    """Blogger user

    .username (read-only) - Blogger login
    .password (read-only) - Blogger password
    .nickname (read-only) - user's nickname
    .userid (read-only) - internal Blogger user ID
    .url (read-only) - user's URL
    .email (read-only) - user's email
    .lastname (read-only) - user's last name
    .firstname (read-only) - user's first name
    .blogs - list of Blog objects to access this user's blogs
    """

    # Attribute names that are lazily fetched via blogger.getUserInfo().
    _INFO_KEYS = ("nickname", "userid", "url", "email", "lastname", "firstname")

    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.__blogs = None  # lazily built list of Blog objects
        self.__info = {}     # cached blogger.getUserInfo() result

    def __getattr__(self, key):
        # Called only for attributes not found by normal lookup: fetch
        # user info / blog list from the server on first access.
        if key in self._INFO_KEYS:
            if not self.__info:
                self.__info = blogger.getUserInfo(self.username, self.password)
            return self.__info[key]
        if key == "blogs":
            if not self.__blogs:
                self.__blogs = [Blog(self, b)
                                for b in blogger.listBlogs(self.username, self.password)]
            return self.__blogs
        # Call form (not the Python-2-only "raise X, v" statement) so the
        # raise is valid on both Python 2 and 3.
        raise AttributeError(key)
class Blog:
    """weblog

    .id (read-only) - internal ID of this blog
    .name (read-only) - title of this blog
    .url (read-only) - base web address of this blog
    .template - Template object to access this blog's HTML templates
    .posts - Posts object to access this blog's recent posts
    .posttitles - PostTitles object to access title information for this blog's
    recent posts
    .mtposts - Posts object designed for use with metaWeblog blogs.
    .categories - Categories object for use with Movable Type blogs.
    .filters - Filters object for use with Movable Type blogs.

    Notes:
    the .posttitles, mtposts, categories, and filters objects are designed to
    work exclusively with Movable Type.
    """

    def __init__(self, user, params):
        self.user = user
        self.id = params["blogid"]
        self.name = params["blogName"]
        self.url = params["url"]
        self.template = Template(self)
        # Collection caches, lazily populated in __getattr__ on first access.
        self.__posts = None
        self.__posttitles = None
        self.__categories = None
        self.__filters = None
        self.__mtposts = None

    def __getattr__(self, key):
        # Each collection attribute triggers one server listing on first
        # access and is cached for subsequent reads.
        if key == "posts":
            if not self.__posts:
                allPosts = blogger.listPosts(self.id, self.user.username, self.user.password)
                self.__posts = Posts(self, [Post(self, p) for p in allPosts])
            return self.__posts
        if key == "mtposts":
            if not self.__mtposts:
                allMTPosts = blogger.listMetaWeblogPosts(self.id, self.user.username, self.user.password)
                self.__mtposts = MTPosts(self, [MTPost(self, p) for p in allMTPosts])
                self.__mtposts.count = len(allMTPosts)
            return self.__mtposts
        if key == "posttitles":
            if not self.__posttitles:
                allPostTitles = blogger.listPostTitles(self.id, self.user.username, self.user.password)
                self.__posttitles = PostTitles(self, [PostTitle(self, p) for p in allPostTitles])
                self.__posttitles.count = len(allPostTitles)
            return self.__posttitles
        if key == "categories":
            if not self.__categories:
                allCategories = blogger.listCategories(self.id, self.user.username, self.user.password)
                self.__categories = Categories(self, [Category(self, p) for p in allCategories])
                self.__categories.count = len(allCategories)
            return self.__categories
        if key == "filters":
            if not self.__filters:
                allFilters = blogger.listTextFilters()
                self.__filters = Filters(self, [Filter(self, p) for p in allFilters])
                self.__filters.count = len(allFilters)
            return self.__filters
        # Call form keeps the raise valid on both Python 2 and 3.
        raise AttributeError(key)
class Template:
    """blog template

    .main (read/write) - HTML template for blog entries
    .archiveIndex (read/write) - HTML template for blog archive index
    """

    def __init__(self, blog):
        self.blog = blog
        self.__template = {}  # cache of fetched templates, keyed by type

    def __getattr__(self, key):
        # Lazily fetch a template from the server on first read.
        if key in blogger.TemplateType.acceptableTypes:
            # "key not in dict" replaces the Python-2-only has_key().
            if key not in self.__template:
                self.__template[key] = blogger.getTemplate(self.blog.id,
                                                           self.blog.user.username,
                                                           self.blog.user.password,
                                                           key)
            return self.__template[key]
        # Call form keeps the raise valid on both Python 2 and 3.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        # Writing a template attribute pushes it to the server immediately;
        # anything else is a plain attribute assignment.
        if key in blogger.TemplateType.acceptableTypes:
            self.__template[key] = value
            blogger.setTemplate(self.blog.id,
                                self.blog.user.username,
                                self.blog.user.password,
                                value,
                                key)
        else:
            self.__dict__[key] = value
class Posts:
    """list of posts in a blog

    You can use *some* standard list operations to manipulate this:
    posts.append(text) - adds a new post
    posts[n] - returns Post object containing information about a single post
    posts[n] = text - edits the text of a post
    del posts[n] - deletes a post
    """

    def __init__(self, blog, data):
        self.blog = blog
        self.data = data

    def append(self, text):
        import time
        owner = self.blog.user
        newID = blogger.newPost(self.blog.id, owner.username, owner.password, text, 1)
        # Mirror the new post locally so the cached list stays current.
        self.data.append(Post(self.blog, {
            "dateCreated": time.localtime(),
            "userid": owner.username,
            "postid": newID,
            "content": text,
        }))

    def _appendMultiple(self, postList):
        for entry in postList:
            self.append(entry)

    def __getitem__(self, index):
        return self.data[index]

    def __setitem__(self, index, text):
        # Delegates to Post.content, which pushes the edit to the server.
        self.data[index].content = text

    def __delitem__(self, index):
        doomed = self.data[index]
        blogger.deletePost(doomed.id, self.blog.user.username, self.blog.user.password, 1)
        del self.data[index]

    def __contains__(self, item):
        for post in self.data:
            if post.content == item:
                return True
        return False

    def __add__(self, other):
        self._appendMultiple(other)

    def extend(self, other):
        self._appendMultiple(other)

    def __repr__(self):
        return repr(self.data)
class MTPosts:
    """list of metaWeblog posts in a blog

    You can use *some* standard list operations to manipulate this:
    posts.append(content) - adds a new post
    posts[n] - returns MTPost object containing information about a single post
    posts[n] = text - edits the text of a post
    del posts[n] - deletes a post
    """

    def __init__(self, blog, data):
        self.blog = blog
        self.data = data
        # Bug fix: count was never initialised here, so len() raised
        # AttributeError unless a caller assigned .count externally
        # (sibling classes such as PostTitles all initialise it).
        self.count = len(data)

    def append(self, content, publish=1):
        import time
        postID = blogger.newMetaWeblogPost(self.blog.id, self.blog.user.username,
                                           self.blog.user.password, content, publish)
        # Mirror the new post locally so the cached list stays current.
        content["dateCreated"] = time.localtime()
        content["userid"] = self.blog.user.username
        content["postid"] = postID
        self.data.append(MTPost(self.blog, content))
        self.count += 1  # keep len() in sync with the data list

    def _appendMultiple(self, postList):
        for p in postList:
            self.append(p)

    def __getitem__(self, index):
        return self.data[index]

    def __setitem__(self, index, text):
        # Delegates to MTPost.content, which pushes the edit to the server.
        self.data[index].content = text

    def __delitem__(self, index, publish=1):
        postID = self.data[index].id
        blogger.deletePost(postID, self.blog.user.username, self.blog.user.password, publish)
        del self.data[index]
        self.count -= 1  # keep len() in sync with the data list

    def __contains__(self, item):
        return item in [mtpost.content for mtpost in self.data]

    def __add__(self, other):
        self._appendMultiple(other)

    def extend(self, other):
        self._appendMultiple(other)

    def __repr__(self):
        return repr(self.data)

    def __len__(self):
        return self.count
class PostTitles:
    """list of post titles in a blog

    You can use *some* standard list operations to manipulate this:
    posts[n] - returns Post object containing information about a single post
    """

    def __init__(self, blog, data):
        self.blog = blog
        self.data = data
        self.count = 0  # assigned by the owner after construction

    def __getitem__(self, index):
        return self.data[index]

    def __contains__(self, item):
        for entry in self.data:
            if entry.title == item:
                return True
        return False

    def __repr__(self):
        return repr(self.data)

    def __len__(self):
        # Reports the externally assigned count, not len(self.data).
        return self.count
class Categories:
    """list of categories in a blog

    You can use *some* standard list operations to manipulate this:
    categories[n] - returns Category object containing information about a
    single category
    """

    def __init__(self, blog, data):
        self.blog = blog
        self.data = data
        self.count = 0  # assigned by the owner after construction

    def __getitem__(self, index):
        return self.data[index]

    def __contains__(self, item):
        # Bug fix: this compared .content, but Category objects only expose
        # .category (reading .content raised AttributeError), so membership
        # tests always blew up on a non-empty list.
        return item in [category.category for category in self.data]

    def __repr__(self):
        return repr(self.data)

    def __len__(self):
        return self.count
class MTPostCategories:
    """list of categories attached to a post

    You can use *some* standard list operations to manipulate this:
    categories[n] - returns MTPostCategory object for a single category
    Call apply() to push the current list to the server.
    """

    def __init__(self, post, data):
        self.post = post
        self.data = data
        self.count = 0  # assigned by the owner after construction

    def append(self, content):
        self.data.append(MTPostCategory(self.post, content))
        self.count += 1  # bug fix: keep len() in sync with the data list

    def _appendMultiple(self, postList):
        for p in postList:
            self.append(p)

    def __getitem__(self, index):
        return self.data[index]

    def __delitem__(self, index):
        del self.data[index]
        self.count -= 1  # bug fix: keep len() in sync with the data list

    def __contains__(self, item):
        # Bug fix: this compared .content, which MTPostCategory does not
        # define (it exposes .categoryname), so membership always raised.
        return item in [cat.categoryname for cat in self.data]

    def __add__(self, other):
        self._appendMultiple(other)

    def extend(self, other):
        self._appendMultiple(other)

    def __repr__(self):
        return repr(self.data)

    def __len__(self):
        return self.count

    def apply(self):
        # Push the current category assignments to the server, then republish.
        content = [{"categoryId": p.id} for p in self.data]
        blogger.setPostCategories(self.post.id, self.post.blog.user.username,
                                  self.post.blog.user.password, content)
        blogger.publishPost(self.post.id, self.post.blog.user.username,
                            self.post.blog.user.password)
class Filters:
    """list of text filters in a blog

    You can use *some* standard list operations to manipulate this:
    filters[n] - returns Filter object containing information about a
    single text filter
    """

    def __init__(self, blog, data):
        self.blog = blog
        self.data = data
        self.count = 0  # assigned by the owner after construction

    def __getitem__(self, index):
        return self.data[index]

    def __contains__(self, item):
        # Bug fix: this compared .content, but Filter objects only expose
        # .filter (reading .content raised AttributeError), so membership
        # tests always blew up on a non-empty list.
        return item in [f.filter for f in self.data]

    def __repr__(self):
        return repr(self.data)

    def __len__(self):
        return self.count
class Post:
    """information about a blog post

    .id (read-only) - internal post ID
    .dateCreated (read-only) - tuple representing date/time this post was first created
    .userid (read-only) - internal user ID of user who posted this post
    .content (read/write) - text of post
    """

    def __init__(self, blog, params):
        self.blog = blog
        self.id = params["postid"]
        self.dateCreated = params["dateCreated"]
        self.userid = params["userid"]
        self.__content = params["content"]

    def __getattr__(self, key):
        if key == "content":
            return self.__content
        # Call form (not the Python-2-only "raise X, v" statement) so the
        # raise is valid on both Python 2 and 3.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        # Assigning .content pushes the edit to the server immediately;
        # everything else is a plain attribute write.
        if key == "content":
            self.__content = value
            blogger.editPost(self.id, self.blog.user.username, self.blog.user.password, value, 1)
        else:
            self.__dict__[key] = value
# Show the module documentation when run as a script.
if __name__ == "__main__":
    try:
        import pydoc
        pydoc.help("Blog")
    except ImportError:
        # Parenthesised form: the bare "print __doc__" statement is a
        # syntax error on Python 3; for a single value this prints
        # identically on Python 2.
        print(__doc__)
class MTPost:
    """information about a MT blog post

    .id (read-only) - internal post ID
    .dateCreated (read-only) - tuple representing date/time this post was first created
    .userid (read-only) - internal user ID of user who posted this post
    .description - post description,
    .title - post title,
    .link - post link,
    .permaLink - post permalink,
    .mt_excerpt - post excerpt,
    .mt_text_more - post more,
    .mt_allow_comments - if open for comments,
    .mt_allow_pings - if open for pings,
    .mt_convert_breaks - text filter id,
    .mt_keywords - post keywords
    """

    # Fields the server may omit from a metaWeblog response; they are only
    # stored (and hence only readable) when present in params.
    _OPTIONAL_KEYS = ("link", "permaLink", "mt_excerpt", "mt_text_more",
                      "mt_allow_comments", "mt_allow_pings",
                      "mt_convert_breaks", "mt_keywords")

    # Fields whose assignment is pushed to the server via editMetaWeblogPost.
    _WRITABLE_KEYS = ("description", "title", "mt_excerpt", "mt_text_more")

    def __init__(self, blog, params):
        self.blog = blog
        self.id = params["postid"]
        self.dateCreated = params["dateCreated"]
        self.userid = params["userid"]
        self.__description = params["description"]
        self.__title = params["title"]
        # "key in params" replaces the Python-2-only has_key(). Store the
        # private (name-mangled) copies directly so __setattr__ does not
        # trigger a server round-trip during construction.
        for key in self._OPTIONAL_KEYS:
            if key in params:
                self.__dict__["_MTPost__" + key] = params[key]
        self.__postcategories = None  # lazily fetched in __getattr__

    def __getattr__(self, key):
        if key == "postcategories":
            if not self.__postcategories:
                allMTPostCategories = blogger.listPostCategories(self.id, self.blog.user.username, self.blog.user.password)
                self.__postcategories = MTPostCategories(self, [MTPostCategory(self, p) for p in allMTPostCategories])
                self.__postcategories.count = len(allMTPostCategories)
            return self.__postcategories
        if key in ("description", "title") + self._OPTIONAL_KEYS:
            try:
                return self.__dict__["_MTPost__" + key]
            except KeyError:
                # Optional field the server never sent.
                raise AttributeError(key)
        # Call form keeps the raise valid on both Python 2 and 3.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        # Bug fix: the original used a chain of plain "if"s whose final
        # "else" was bound only to the last test, so every tracked-field
        # write also left a stale shadow copy of the key in __dict__.
        if key in self._WRITABLE_KEYS:
            self.__dict__["_MTPost__" + key] = value
            # Push just the changed field to the server.
            blogger.editMetaWeblogPost(self.id, self.blog.user.username,
                                       self.blog.user.password, {key: value}, 1)
        else:
            self.__dict__[key] = value
# Show the module documentation when run as a script.
if __name__ == "__main__":
    try:
        import pydoc
        pydoc.help("Blog")
    except ImportError:
        # Parenthesised form: the bare "print __doc__" statement is a
        # syntax error on Python 3; for a single value this prints
        # identically on Python 2.
        print(__doc__)
class PostTitle:
    """information about a blog post title

    .id (read-only) - internal post ID
    .dateCreated (read-only) - tuple representing date/time this post was first created
    .userid (read-only) - internal user ID of user who posted this post
    .title (read-only) - title of post
    """

    def __init__(self, blog, params):
        self.blog = blog
        self.id = params["postid"]
        self.dateCreated = params["dateCreated"]
        self.userid = params["userid"]
        self.__title = params["title"]

    def __getattr__(self, key):
        if key == "title":
            return self.__title
        # Call form (not the Python-2-only "raise X, v" statement) so the
        # raise is valid on both Python 2 and 3.
        raise AttributeError(key)
class Category:
    """information about a blog category

    .id (read-only) - internal category ID
    .category (read-only) - category name
    """

    def __init__(self, blog, params):
        self.blog = blog
        self.id = params["categoryId"]
        self.__category = params["categoryName"]

    def __getattr__(self, key):
        if key == "category":
            return self.__category
        # Call form (not the Python-2-only "raise X, v" statement) so the
        # raise is valid on both Python 2 and 3.
        raise AttributeError(key)
class MTPostCategory:
    """information about a post category

    .id (read-only) - internal category ID
    .categoryname - category name (only present when the server sent it)
    .primary - whether category is primary category or not (only present
    when the server sent it)
    """

    def __init__(self, post, params):
        self.post = post
        self.id = params["categoryId"]
        # "key in params" replaces the Python-2-only has_key().
        if "categoryName" in params:
            self.__categoryname = params["categoryName"]
        if "isPrimary" in params:
            self.__primary = params["isPrimary"]

    def __getattr__(self, key):
        if key == "categoryname":
            return self.__categoryname
        if key == "primary":
            return self.__primary
        # Call form keeps the raise valid on both Python 2 and 3.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        self.__dict__[key] = value
class Filter:
    """information about a blog text filter

    .id (read-only) - internal filter ID
    .filter (read-only) - human-readable filter label
    """

    def __init__(self, blog, params):
        self.blog = blog
        self.id = params["key"]
        self.__filter = params["label"]

    def __getattr__(self, key):
        if key == "filter":
            return self.__filter
        # Call form (not the Python-2-only "raise X, v" statement) so the
        # raise is valid on both Python 2 and 3.
        raise AttributeError(key)
# Show the module documentation when run as a script.
if __name__ == "__main__":
    try:
        import pydoc
        pydoc.help("Blog")
    except ImportError:
        # Parenthesised form: the bare "print __doc__" statement is a
        # syntax error on Python 3; for a single value this prints
        # identically on Python 2.
        print(__doc__)
| Python |
"""Blogger interface for Python
http://sourceforge.net/projects/pyblogger/
This module allows you to post to a weblog and manipulate its
settings. It was originally designed to work with Blogger
(http://www.blogger.com/), but other weblog systems have since
implemented this API, and this module can talk to any of them.
Whichever system you use, you'll need an account.
- Blogger: http://www.blogger.com/
- Manila: http://www.manilasites.com/
- LiveJournal: http://www.livejournal.com/
Note that LiveJournal does not support this API directly; you'll
need to use a Blogger-to-LiveJournal gateway, described here:
http://www.tswoam.co.uk/index.php?n_go=14
Many functions take the following common arguments:
- blogID:
- If connecting to Blogger, this is your blog's ID number on
blogger.com; to get this, log in on blogger.com, click on your blog
to edit it, and look in the query string of the URL.
- For Manila, this is the base URL of your weblog.
- For LiveJournal, this is the journal name. Can be left blank
and the user's default journal will be used.
- username: your weblog system username.
- password: your weblog system password.
This code was originally written by Mark Pilgrim (f8dy@diveintomark.org). I
have added the interface for the added methods available in Movable Type's
XML-RPC API.
Example:
>>> import blogger
>>> username = "YOUR_BLOGGER_USERNAME"
>>> password = "YOUR_BLOGGER_PASSWORD"
>>> blogs = blogger.listBlogs(username, password)
>>> myFirstBlog = blogs[0]
>>> url = myFirstBlog["url"]
>>> blogID = myFirstBlog["blogid"]
>>> postID = blogger.newPost(blogID, username, password, "First post!", 1)
>>> print "New post is available at %s#%s" % (url, postID)
"""
__author__ = "Brent Loertscher (blurch@cbtlsl.com)"
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2006/03/04 10:07:32 $"
__copyright__ = "Copyright (c) 2001-2 Mark Pilgrim, 2003 Brent Loertscher"
__license__ = "Python"
# Requires Pythonware's XML-RPC library
# This comes standard in Python 2.2
# Users of earlier versions must download and install from
# http://www.pythonware.com/products/xmlrpc/
import xmlrpclib
class TemplateType:
    """Symbolic names for the two template kinds the API exposes."""

    main = "main"
    archiveIndex = "archiveIndex"
    # Every value getTemplate/setTemplate will accept.
    acceptableTypes = (main, archiveIndex)
class constants:
    """Module-wide connection settings used by every API function."""

    # XML-RPC server. We default to Blogger's server, but you
    # can set this to any Blogger-compatible server
    # - Manila: set to your base URL + "/RPC2"
    # - LiveJournal: set to your Blogger-LiveJournal gateway
    # - Movable Type: set to the location of your mt-xmlrpc.cgi script
    # Alternatively, you can pass the server to any of the
    # functions as the last parameter to override this setting.
    xmlrpcServer = "http://www.blogger.com/api"
    # The application key is required by Blogger;
    # other weblog systems ignore it
    applicationKey = "1973FAF4B76FC60D35E266310C6F0605456798"
    # Transport is only used for testing; should be None for production
    transport = None
def getUserInfo(username, password, serverURL=None):
    """Get information about a user

    Returns: dictionary
    {"nickname": "user's nickname",
     "userid": "user ID",
     "url": "user's URL",
     "email": "user's email",
     "lastname": "user's last name",
     "firstname": "user's first name"}

    Arguments:
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> info = blogger.getUserInfo("my_blogger_username", "my_secret_password")
    >>> for k, v in info.items():
    ...     print k, v
    """
    endpoint = serverURL or constants.xmlrpcServer
    proxy = xmlrpclib.Server(endpoint, constants.transport)
    return proxy.blogger.getUserInfo(constants.applicationKey,
                                     username,
                                     password)
def listBlogs(username, password, serverURL=None):
    """Get a list of your blogs

    Returns: list of dictionaries
    [{"blogid": ID_of_this_blog,
      "blogName": "name_of_this_blog",
      "url": "URL_of_this_blog"}, ...]

    Arguments:
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogList = blogger.listBlogs("my_blogger_username", "my_secret_password")
    >>> for blog in blogList:
    ...     print "ID:", blog["blogid"]
    ...     print "Name:", blog["blogName"]
    ...     print "URL:", blog["url"]
    ...     print

    Manila notes:
    - Manila does not support this method, because it does not keep a centralized
      database of a user's blogs.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    # The underlying XML-RPC method is named getUsersBlogs; listBlogs is
    # this module's friendlier name for it.
    response = server.blogger.getUsersBlogs(constants.applicationKey,
                                            username,
                                            password)
    return response

# Alias matching the underlying XML-RPC method name.
getUsersBlogs = listBlogs
def listPosts(blogID, username, password, maxPosts=20, serverURL=None):
    """List recent posts in your blog

    Returns: list of dictionaries
    [{"dateCreated": date/time of this post in tuple format (see http://python.org/doc/lib/module-time.html)
      "userid": user who posted this entry,
      "postid": ID of this post,
      "content": text of this post
    }, ...]

    Posts are listed in chronological order, oldest to newest, so
    listPosts(...)[-1] is the newest post

    Arguments:
    - blogID: your weblog's ID number (see module docs for details)
    - username: your weblog username
    - password: your weblog password
    - maxPosts: maximum number of posts to return
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.listPosts(my_blog_ID, "my_blogger_username", "my_blogger_password", 1)
    # returns the most recent post

    Notes:
    - The Blogger server will only return the 20 most recent posts.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.blogger.getRecentPosts(constants.applicationKey,
                                             str(blogID),
                                             str(username),
                                             str(password),
                                             maxPosts)
    # The server returns newest-first; flip to oldest-first as documented.
    response.reverse()
    for i in range(len(response)):
        # dateCreated arrives as an xmlrpclib DateTime; the slice offsets
        # below assume its .value string looks like "YYYYMMDDTHH:MM:SS"
        # (date, a separator at index 8, colon-separated time). Convert it
        # to a 9-item time tuple with the last three fields zeroed.
        v = response[i]["dateCreated"].value
        response[i]["dateCreated"] = (int(v[:4]), int(v[4:6]), int(v[6:8]), int(v[9:11]), int(v[12:14]), int(v[15:17]), 0, 0, 0)
    return response

# Alias matching the underlying XML-RPC method name.
getRecentPosts = listPosts
def listMetaWeblogPosts(blogID, username, password, maxPosts=20, serverURL=None):
    """List recent posts in your blog

    Returns: list of dictionaries
    [{"dateCreated": date/time of this post in tuple format (see http://python.org/doc/lib/module-time.html)
      "userid": user who posted this entry,
      "postid": ID of this post,
      "description": post description,
      "title": post title,
      "link": post link,
      "permaLink": post permalink,
      "mt_excerpt": post excerpt,
      "mt_text_more": post more,
      "mt_allow_comments": if open for comments,
      "mt_allow_pings": if open for pings,
      "mt_convert_breaks": text filter id,
      "mt_keywords": post keywords}
    }, ...]

    Posts are listed in chronological order, oldest to newest, so
    listPosts(...)[-1] is the newest post

    Arguments:
    - blogID: your weblog's ID number (see module docs for details)
    - username: your weblog username
    - password: your weblog password
    - maxPosts: maximum number of posts to return
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.listMetaWeblogPosts(my_blog_ID, "my_blogger_username", "my_blogger_password", 1)
    # returns the most recent post

    Notes:
    - Blogger does not recognize this method. Only servers such as Movable Type
      that support metaWeblog methods will accept this.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    # Note: metaWeblog.getRecentPosts takes no application key.
    response = server.metaWeblog.getRecentPosts(str(blogID),
                                                str(username),
                                                str(password),
                                                maxPosts)
    # The server returns newest-first; flip to oldest-first as documented.
    response.reverse()
    for i in range(len(response)):
        # dateCreated arrives as an xmlrpclib DateTime; the slice offsets
        # below assume its .value string looks like "YYYYMMDDTHH:MM:SS".
        # Convert it to a 9-item time tuple with the last three fields zeroed.
        v = response[i]["dateCreated"].value
        response[i]["dateCreated"] = (int(v[:4]), int(v[4:6]), int(v[6:8]), int(v[9:11]), int(v[12:14]), int(v[15:17]), 0, 0, 0)
    return response
def listPostTitles(blogID, username, password, maxPosts=20, serverURL=None):
    """List titles of recent posts (a bandwidth-friendly alternative).

    Returns a list of dictionaries with "dateCreated" (converted to a
    9-item time tuple), "userid", "postid" and "title".  Posts are
    ordered oldest to newest, so the last element is the newest post.

    Arguments:
    - blogID: your weblog's ID number (see module docs for details)
    - username: your weblog username
    - password: your weblog password
    - maxPosts: maximum number of posts to return (default 20)
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Notes:
    - Blogger does not recognize this method; only servers supporting
      the Movable Type API (mt.getRecentPostTitles) will accept it.
    """
    endpoint = serverURL or constants.xmlrpcServer
    server = xmlrpclib.Server(endpoint, constants.transport)
    titles = server.mt.getRecentPostTitles(str(blogID), str(username),
                                           str(password), maxPosts)
    titles.reverse()
    for entry in titles:
        stamp = entry["dateCreated"].value
        # Slices map "YYYYMMDD?HH:MM:SS" -> (y, m, d, H, M, S, 0, 0, 0).
        entry["dateCreated"] = (int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]),
                                int(stamp[9:11]), int(stamp[12:14]), int(stamp[15:17]),
                                0, 0, 0)
    return titles
def cmp_categories(x, y):
    """Ordering helper: compare two category dicts by their "categoryName".

    Returns a negative, zero or positive number as x sorts before, equal
    to, or after y -- the contract expected by cmp-style list.sort()
    callbacks.  Implemented without the cmp() builtin, which was removed
    in Python 3; (a > b) - (a < b) is the canonical replacement and
    yields the same -1/0/1 values for strings.
    """
    a, b = x["categoryName"], y["categoryName"]
    return (a > b) - (a < b)
def listCategories(blogID, username, password, serverURL=None):
    """List all categories defined in the weblog.

    Returns: list of dictionaries, sorted by category name:
        [{"categoryId": string containing the category id,
          "categoryName": name of category}, ...]

    Arguments:
    - blogID: your weblog's ID number (see module docs for details)
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.listCategories(my_blog_ID, "my_blogger_username", "my_blogger_password")
    # returns categories

    Notes:
    - This method will only work with Movable Type.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.mt.getCategoryList(str(blogID),
                                         str(username),
                                         str(password))
    # key= sort replaces the Python-2-only cmp-callback form
    # (response.sort(cmp_categories)); the resulting order is identical.
    response.sort(key=lambda category: category["categoryName"])
    return response
def listPostCategories(postID, username, password, serverURL=None):
    """List all categories selected for a given post.

    Returns: list of dictionaries, sorted by category name:
        [{"categoryId": string containing the category id,
          "categoryName": name of category,
          "isPrimary": is category primary?}, ...]

    Arguments:
    - postID: the post ID for the post
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.listPostCategories(my_post_ID, "my_blogger_username", "my_blogger_password")
    # returns categories

    Notes:
    - This method will only work with Movable Type.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.mt.getPostCategories(str(postID),
                                           str(username),
                                           str(password))
    # key= sort replaces the Python-2-only cmp-callback form
    # (response.sort(cmp_categories)); the resulting order is identical.
    response.sort(key=lambda category: category["categoryName"])
    return response
def setPostCategories(postID, username, password, categories, serverURL=None):
    """Assign *categories* to the post identified by *postID*.

    Returns: boolean TRUE or FALSE.

    Arguments:
    - postID: the post ID for the post
    - username: your weblog username
    - password: your weblog password
    - categories: list of dictionaries of the form
        [{"categoryID": string containing the category id,
          "isPrimary": is the category primary?}, ...]
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Notes:
    - isPrimary is optional.  If omitted, the first categoryID is the
      primary category.
    - This method will only work with Movable Type.
    """
    endpoint = serverURL or constants.xmlrpcServer
    server = xmlrpclib.Server(endpoint, constants.transport)
    return server.mt.setPostCategories(str(postID), str(username),
                                       str(password), categories)
def cmp_filters(x, y):
    """Ordering helper: compare two text-filter dicts by their "label".

    Returns a negative, zero or positive number as x sorts before, equal
    to, or after y.  Implemented without the cmp() builtin, which was
    removed in Python 3; (a > b) - (a < b) yields the same -1/0/1 values
    for strings.
    """
    a, b = x["label"], y["label"]
    return (a > b) - (a < b)
def listTextFilters(serverURL=None):
    """List all text filters the server supports.

    Returns: list of dictionaries, sorted by their "label" field.
    NOTE(review): the original docstring advertised filterKey/filterLabel
    keys, but the sort below relies on a "label" key -- confirm the exact
    schema against your server's mt.supportedTextFilters response.

    Arguments:
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.listTextFilters()
    # returns filters

    Notes:
    - This method will only work with Movable Type.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.mt.supportedTextFilters()
    # key= sort replaces the Python-2-only cmp-callback form
    # (response.sort(cmp_filters)); the resulting order is identical.
    response.sort(key=lambda textfilter: textfilter["label"])
    return response
def getPost(postID, username, password, serverURL=None):
    """Fetch a single post by its ID.

    Returns a dictionary with "dateCreated" (converted to a 9-item time
    tuple; see the ``time`` module docs), "userid", "postid" and
    "content".

    Arguments:
    - postID: the ID of the post to get
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.getPost(postID, "my_blogger_username", "my_blogger_password")
    """
    endpoint = serverURL or constants.xmlrpcServer
    server = xmlrpclib.Server(endpoint, constants.transport)
    post = server.blogger.getPost(constants.applicationKey, str(postID),
                                  str(username), str(password))
    stamp = post["dateCreated"].value
    # Slices map "YYYYMMDD?HH:MM:SS" -> (y, m, d, H, M, S, 0, 0, 0).
    post["dateCreated"] = (int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]),
                           int(stamp[9:11]), int(stamp[12:14]), int(stamp[15:17]),
                           0, 0, 0)
    return post
def getMetaWeblogPost(postID, username, password, serverURL=None):
    """Fetch a single post by ID via the metaWeblog API.

    Returns a dictionary with "dateCreated" (converted to a 9-item time
    tuple), "userid", "postid", "description", "title", "link",
    "permaLink", "mt_excerpt", "mt_text_more", "mt_allow_comments",
    "mt_allow_pings", "mt_convert_breaks" and "mt_keywords".

    Arguments:
    - postID: the ID of the post to get
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogger.getMetaWeblogPost(postID, "my_blogger_username", "my_blogger_password")

    Notes:
    - Blogger does not recognize this method.  Only servers such as
      Movable Type that support metaWeblog methods will accept it.
    """
    endpoint = serverURL or constants.xmlrpcServer
    server = xmlrpclib.Server(endpoint, constants.transport)
    post = server.metaWeblog.getPost(str(postID), str(username), str(password))
    stamp = post["dateCreated"].value
    # Slices map "YYYYMMDD?HH:MM:SS" -> (y, m, d, H, M, S, 0, 0, 0).
    post["dateCreated"] = (int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]),
                           int(stamp[9:11]), int(stamp[12:14]), int(stamp[15:17]),
                           0, 0, 0)
    return post
def newPost(blogID, username, password, text, publish=0, serverURL=None):
"""Post a new message to your blog
Returns: string
post ID: append this to your base blog URL to link to your new post
Arguments:
- blogID: your blog's ID number (see module docs for details)
- username: your weblog username
- password: your weblog password
- text: the actual text you'd like to post
- publish (optional): 0 = post but do not publish (default)
1 = post and publish
- serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
Example:
>>> postID = blogger.newPost(my_blog_ID, "my_blogger_username", "my_blogger_password, "First post!", 1)
>>> print postID
Blogger notes:
- Posts are limited to 65536 characters by the Blogger server.
- If you want to publish, you must set up your blog to remember your
FTP username and password. You must do this through the web interface
at blogger.com; there is currently no way to do it through this API.
Manila notes:
- Manila does not have the concept of "post but don't publish"; all
posts are published immediately. So the "publish" flag is used as
an approval flag for multi-member weblogs. See
http://frontier.userland.com/emulatingBloggerInManila
for details.
"""
server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
postID = server.blogger.newPost(constants.applicationKey,
str(blogID),
str(username),
str(password),
str(text),
publish and xmlrpclib.True or xmlrpclib.False)
return postID
def newMetaWeblogPost(blogID, username, password, contents, publish=0, serverURL=None):
"""Post a new message to your blog
Returns: string
post ID: append this to your base blog URL to link to your new post
Arguments:
- blogID: your blog's ID number (see module docs for details)
- username: your weblog username
- password: your weblog password
- content: dictionary containing content to post
- publish (optional): 0 = post but do not publish (default)
1 = post and publish
- serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
Example:
>>> postID = blogger.newMetaWeblogPost(my_blog_ID, "my_blogger_username", "my_blogger_password, post_content, 1)
>>> print postID
Notes:
- Blogger does not recognize this method. Only servers such as Movable Type
that support metaWeblog methods will accept this.
"""
server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
postID = server.metaWeblog.newPost( str(blogID),
str(username),
str(password),
contents,
publish and xmlrpclib.True or xmlrpclib.False)
return postID
def editPost(postID, username, password, text, publish=0, serverURL=None):
    """Edit an existing message in your blog
    Returns: 1 (True) on success
    Arguments:
    - postID: ID of post to edit
    - username: your weblog username
    - password: your weblog password
    - text: the actual text you'd like to post (replaces the old text completely)
    - publish (optional): 0 = post but do not publish (default)
                          1 = post and publish
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
    Example:
    >>> postID = blogger.newPost(my_blog_ID, "my_blogger_username", "my_blogger_password", "First post!", 1)
    >>> blogger.editPost(postID, "my_blogger_username", "my_blogger_password", "This text overwrites the old text completely.", 1)
    Blogger notes:
    - Posts are limited to 65536 characters by the Blogger server.
    - If you want to publish, you must set up your blog to remember your
      FTP username and password. You must do this through the web interface
      at blogger.com; there is currently no way to do it through this API.
    Manila notes:
    - Manila does not have the concept of "post but don't publish"; all
      posts are published immediately. So the "publish" flag is used as
      an approval flag for multi-member weblogs. See
      http://frontier.userland.com/emulatingBloggerInManila
      for details.
    LiveJournal notes:
    - Post IDs (item IDs) are not guaranteed to be unique across all of a
      user's journals, so the default journal is always used. There is
      currently no way of editing entries on a secondary journal. See
      http://www.tswoam.co.uk/index.php?n_go=14
      for details.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    # xmlrpclib.True/False are the legacy XML-RPC boolean singletons used by
    # old xmlrpclib releases; "publish and A or B" is the pre-ternary idiom.
    response = server.blogger.editPost(constants.applicationKey,
                                       str(postID),
                                       str(username),
                                       str(password),
                                       str(text),
                                       publish and xmlrpclib.True or xmlrpclib.False)
    return response == xmlrpclib.True
def editMetaWeblogPost(postID, username, password, contents, publish=0, serverURL=None):
"""Edit an existing message in your blog
Returns: 1
Arguments:
- postID: ID of post to edit
- username: your weblog username
- password: your weblog password
- contents: a dictionary containing the contents of the post
- publish (optional): 0 = post but do not publish (default)
1 = post and publish
- serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
Example:
>>> postID = blogger.newMetaWeblogPost(my_blog_ID, "my_blogger_username", "my_blogger_password", contents, 1)
>>> blogger.editPost(postID, "my_blogger_username", "my_blogger_password, contents, 1)
Notes:
- Blogger does not recognize this method. Only servers such as Movable Type
that support metaWeblog methods will accept this.
"""
server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
response = server.metaWeblog.editPost(str(postID),
str(username),
str(password),
contents,
publish and xmlrpclib.True or xmlrpclib.False)
return response == xmlrpclib.True
def publishPost(postID, username, password, serverURL=None):
"""Sends signal to regenerate static files assoctiated with a given
post. This is done without sending HTTP pings.
Returns: 1
Arguments:
- postID: ID of post to edit
- username: your weblog username
- password: your weblog password
- serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
Example:
>>> postID = blogger.newMetaWeblogPost(my_blog_ID, "my_blogger_username", "my_blogger_password", contents, 1)
>>> blogger.editPost(postID, "my_blogger_username", "my_blogger_password, contents, 1)
Notes:
- This method will only work with Movable Type.
"""
server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
response = server.mt.publishPost(str(postID),
str(username),
str(password))
return response == xmlrpclib.True
def deletePost(postID, username, password, publish=0, serverURL=None):
    """Delete an existing message in your blog
    Returns: 1 (True) on success
    Arguments:
    - postID: ID of the post to delete
    - username: your weblog username
    - password: your weblog password
    - publish (optional): 0 = delete but do not publish (default)
                          1 = delete and publish
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)
    Example:
    >>> postID = blogger.newPost(my_blog_ID, "my_blogger_username", "my_blogger_password", "First post!", 1)
    >>> blogger.deletePost(postID, "my_blogger_username", "my_blogger_password", 1)
    Blogger notes:
    - Posts are limited to 7200 characters by the Blogger server.
    - If you want to publish, you must set up your blog to remember your
      FTP username and password. You must do this through the web interface
      at blogger.com; there is currently no way to do it through this API.
    LiveJournal notes:
    - Post IDs (item IDs) are not guaranteed to be unique across all of a
      user's journals, so the default journal is always used. There is
      currently no way of deleting entries on a secondary journal. See
      http://www.tswoam.co.uk/index.php?n_go=14
      for details.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    # xmlrpclib.True/False are the legacy XML-RPC boolean singletons;
    # "publish and A or B" is the pre-ternary conditional idiom.
    response = server.blogger.deletePost(constants.applicationKey,
                                         str(postID),
                                         str(username),
                                         str(password),
                                         publish and xmlrpclib.True or xmlrpclib.False)
    return response == xmlrpclib.True
def getTemplate(blogID, username, password, templateType="main", serverURL=None):
    """Get HTML template for your blog.

    Returns: string -- the requested HTML template.

    Arguments:
    - blogID: your blog's ID number
    - username: your blogger.com username
    - password: your blogger.com password
    - templateType: 'main' = get main page template (default)
                    'archiveIndex' = get archive index template
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Raises:
    - ValueError: if templateType is not one of TemplateType.acceptableTypes.
    """
    # Validate before doing any network work.
    if templateType not in TemplateType.acceptableTypes:
        # Call-style raise works on Python 2 and 3; the original
        # comma-style raise statement is a SyntaxError on Python 3.
        raise ValueError("invalid template type: %s" % templateType)
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    htmlTemplate = server.blogger.getTemplate(constants.applicationKey,
                                              str(blogID),
                                              str(username),
                                              str(password),
                                              templateType)
    return htmlTemplate
def setTemplate(blogID, username, password, text, templateType="main", serverURL=None):
    """Set HTML template for your blog.

    Returns: None.

    Arguments:
    - blogID: your blog's ID number
    - username: your blogger.com username
    - password: your blogger.com password
    - text: complete HTML text of template
    - templateType: 'main' = set main page template (default)
                    'archiveIndex' = set archive index template
    - serverURL: URL of remote server (optional, defaults to constants.xmlrpcServer)

    Raises:
    - ValueError: if templateType is not one of TemplateType.acceptableTypes.

    Notes:
    - The given username must be marked as an administrator on the blog in
      order to set the template.  This is the default if you created the
      blog, but not the default if somebody else added you to a team blog.
      Administrators can add other users to their blog and give them
      administrative access, but they need to do it through the web
      interface at blogger.com.
    """
    # Validate before doing any network work.
    if templateType not in TemplateType.acceptableTypes:
        # Call-style raise works on Python 2 and 3; the original
        # comma-style raise statement is a SyntaxError on Python 3.
        raise ValueError("invalid template type: %s" % templateType)
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    server.blogger.setTemplate(constants.applicationKey,
                               str(blogID),
                               str(username),
                               str(password),
                               text,
                               templateType)
if __name__ == "__main__":
    # When run as a script, show the module documentation.
    try:
        import pydoc
        pydoc.help("blogger")
    except ImportError:
        # Fallback when pydoc is unavailable.  Parenthesized print is
        # valid on both Python 2 (single-argument) and Python 3; the
        # original bare "print __doc__" statement is Python-2-only.
        print(__doc__)
| Python |
# PyBloglines $Id: pybloglines.py,v 1.2 2006/03/04 10:10:47 tzellman Exp $
# Module for accessing the Bloglines Web Services
# See <http://www.josephson.org/projects/pybloglines/>
# Suggestion to add BloglinesUnread to OPML parsing by Erik Bryn
import urllib
import urllib2
import string
import re
import base64
import xml.parsers.expat
import westom.feednut.libs.feedparser as feedparser# http://sourceforge.net/projects/feedparser/
class BloglinesException(Exception):
    """Raised for Bloglines web-service failures: missing user/password,
    unknown user, or an unparseable response."""
    pass
class Subscription:
    """Plain attribute bag describing one OPML subscription entry;
    OpmlParser fills in title, htmlUrl, type, xmlUrl, bloglinesSubId,
    bloglinesIgnore and bloglinesUnread."""
    pass
class BloglinesWebServices:
    """Client for the Bloglines web-services API.

    update() only needs a user name; listsubs() and getitems()
    authenticate via HTTP Basic auth and therefore also require the
    password given to the constructor.
    """
    # Host and HTTP Basic-auth realm of the Bloglines RPC endpoints.
    BWS_HOSTNAME = "rpc.bloglines.com"
    BWS_REALM = "Bloglines RPC"
    def __init__(self, user, password = None):
        """Initialises for the specified user. If just the
        update() method is being used then password is optional.
        Other methods will require authentication."""
        if user == None:
            raise BloglinesException("user must be specified")
        else:
            self.user = user
        self.password = password
    def getContent(self, url, requiresAuth = False):
        # Fetch *url* and return the response body, stripped.  When
        # requiresAuth is set, attach an HTTP Basic Authorization header
        # built from the stored credentials.
        request = urllib2.Request(url)
        if requiresAuth:
            # base64.encodestring appends a trailing newline; [:-1] strips it.
            b64 = base64.encodestring("%s:%s" % (self.user, self.password))[:-1]
            request.add_header("Authorization", "Basic %s" % b64)
        f = urllib2.urlopen(request)
        content = string.join(f.readlines()).strip()
        f.close()
        return content
    def checkPasswordSpecified(self):
        # Guard used by the endpoints that need authentication.
        if self.password == None:
            raise BloglinesException("password must be specified to call this method")
    def update(self):
        """Returns the unread count for the Bloglines account."""
        # http://www.bloglines.com/services/api/notifier
        params = urllib.urlencode([("user", self.user), ("ver", "1")])
        url ="http://%s/update?%s" % (self.BWS_HOSTNAME, params)
        content = self.getContent(url)
        # Response looks like "|<count>|<extra>|"; group 1 is a possibly
        # negative integer, and -1 signals an unknown user.
        m = re.match("\|(\-{0,1}[0-9]+)\|([^|]*)\|", content)
        if m:
            unreadCount = int(m.group(1))
            if unreadCount != -1:
                return unreadCount
            else:
                raise BloglinesException("user does not exist")
        else:
            raise BloglinesException("response did not match expected pattern")
    def listsubs(self):
        """Returns a list of subscriptions for the Bloglines account.
        This is returned as a list of Subscription objects where each
        entry has title, htmlUrl, type, xmlUrl, bloglinesSubId and
        bloglinesIgnore."""
        # http://www.bloglines.com/services/api/listsubs
        self.checkPasswordSpecified()
        url = "http://%s/listsubs" % self.BWS_HOSTNAME
        content = self.getContent(url, True)
        opmlParser = OpmlParser()
        feedlist = opmlParser.parse(content)
        return feedlist
    def getitems(self, bloglinesSubId, markAsRead = False, date = None):
        """For the specified subscription, returns either a list of unread items
        or all items since the date specified, optionally marking the selected
        items as read.
        This is returned as the result of parsing the response using Mark Pilgrim's
        feedparser. See http://www.feedparser.org for details."""
        # http://www.bloglines.com/services/api/getitems
        self.checkPasswordSpecified()
        paramList = [("s", str(bloglinesSubId))]
        # n=1 asks the server to mark the returned items as read.
        if markAsRead:
            paramList.append(("n", "1"))
        else:
            paramList.append(("n", "0"))
        if date != None:
            paramList.append(("d", str(date)))
        params = urllib.urlencode(paramList)
        url = "http://%s/getitems?%s" % (self.BWS_HOSTNAME, params)
        # feedparser fetches the URL itself, so hand it a Basic-auth handler.
        auth = urllib2.HTTPBasicAuthHandler()
        auth.add_password(self.BWS_REALM, self.BWS_HOSTNAME, self.user, self.password)
        parsedData = feedparser.parse(url, handlers = [auth])
        return parsedData
class OpmlParser:
    """Expat-based parser for the OPML subscription list returned by the
    Bloglines listsubs call.  parse() returns a list of Subscription
    objects."""

    def __init__(self):
        self.parser = xml.parsers.expat.ParserCreate()
        self.parser.StartElementHandler = self.start_element
        self.parser.EndElementHandler = self.end_element

    def parse(self, opml):
        """Parse *opml* (an OPML document as a string) and return the
        list of Subscription objects found in it."""
        self.feedlist = []
        self.parser.Parse(opml)
        return self.feedlist

    def start_element(self, name, attrs):
        # Only <outline> elements carrying both a title and a feed URL
        # describe subscriptions; other outlines are skipped.
        if name == "outline":
            # 'in' replaces dict.has_key(), which was removed in Python 3.
            if 'title' in attrs and 'xmlUrl' in attrs:
                sub = Subscription()
                sub.title = attrs["title"]
                sub.xmlUrl = attrs["xmlUrl"]
                # The remaining attributes are assumed optional (e.g.
                # BloglinesUnread was a later addition -- see module
                # header), so fall back to benign defaults instead of
                # raising KeyError on older responses.
                sub.htmlUrl = attrs.get("htmlUrl", "")
                sub.type = attrs.get("type", "")
                sub.bloglinesSubId = int(attrs.get("BloglinesSubId", 0))
                sub.bloglinesIgnore = int(attrs.get("BloglinesIgnore", 0))
                sub.bloglinesUnread = int(attrs.get("BloglinesUnread", 0))
                self.feedlist.append(sub)

    def end_element(self, name):
        # No per-element cleanup needed.
        pass
if __name__ == '__main__':
    # Smoke-test/demo: list every subscription and dump its parsed items.
    # NOTE(review): credentials are hard-coded here -- replace before
    # running, and never commit real secrets to source control.
    service = BloglinesWebServices('tom@zematek.com', 'bloglinespass')
    for item in service.listsubs():
        print(item.title)
        items = service.getitems(item.bloglinesSubId)
        # .items() instead of Python-2-only .iteritems(); print() calls
        # instead of Python-2-only print statements.
        for key, val in items.items():
            print(key, val)
import string
import types
import decimal
## json.py implements a JSON (http://json.org) reader and writer.
## Copyright (C) 2005 Patrick D. Logan
## Contact mailto:patrickdlogan@stardecisions.com
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class _StringGenerator(object):
def __init__(self, string):
self.string = string
self.index = -1
def peek(self):
i = self.index + 1
if i < len(self.string):
return self.string[i]
else:
return None
def next(self):
self.index += 1
if self.index < len(self.string):
return self.string[self.index]
else:
raise StopIteration
def all(self):
return self.string
class WriteException(Exception):
    """Raised by JsonWriter when a value cannot be represented in JSON."""
    pass
class ReadException(Exception):
    """Raised by JsonReader when the input is not valid JSON."""
    pass
class JsonReader(object):
    """Recursive-descent parser turning JSON text into Python values.

    Supports objects, arrays, strings (with backslash and \\uXXXX
    escapes), numbers (no exponent notation -- only digits and '.' are
    consumed), true/false/null, and // or /* */ comments as a lenient
    extension.  All parse errors raise ReadException.

    Changes from the original: raise statements use call syntax (the
    comma form is a SyntaxError on Python 3); object keys may be unicode
    strings (the old exact-type check rejected keys containing \\u
    escapes, which _readString returns as unicode on Python 2); and the
    \\u escape decoder falls back to chr() where unichr() is undefined.
    """

    # Value of each upper-case hex letter; input is .upper()'d first.
    hex_digits = {'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}
    # Single-character backslash escapes.
    escapes = {'t': '\t', 'n': '\n', 'f': '\f', 'r': '\r', 'b': '\b'}

    def read(self, s):
        """Parse the JSON document *s* and return the Python value."""
        self._generator = _StringGenerator(s)
        result = self._read()
        return result

    def _read(self):
        # Dispatch on the first significant character.
        self._eatWhitespace()
        peek = self._peek()
        if peek is None:
            raise ReadException("Nothing to read: '%s'" % self._generator.all())
        if peek == '{':
            return self._readObject()
        elif peek == '[':
            return self._readArray()
        elif peek == '"':
            return self._readString()
        elif peek == '-' or peek.isdigit():
            return self._readNumber()
        elif peek == 't':
            return self._readTrue()
        elif peek == 'f':
            return self._readFalse()
        elif peek == 'n':
            return self._readNull()
        elif peek == '/':
            self._readComment()
            return self._read()
        else:
            raise ReadException("Input is not valid JSON: '%s'" % self._generator.all())

    def _readTrue(self):
        self._assertNext('t', "true")
        self._assertNext('r', "true")
        self._assertNext('u', "true")
        self._assertNext('e', "true")
        return True

    def _readFalse(self):
        self._assertNext('f', "false")
        self._assertNext('a', "false")
        self._assertNext('l', "false")
        self._assertNext('s', "false")
        self._assertNext('e', "false")
        return False

    def _readNull(self):
        self._assertNext('n', "null")
        self._assertNext('u', "null")
        self._assertNext('l', "null")
        self._assertNext('l', "null")
        return None

    def _assertNext(self, ch, target):
        # Consume one character and require it to be *ch*.
        if self._next() != ch:
            raise ReadException("Trying to read %s: '%s'" % (target, self._generator.all()))

    def _readNumber(self):
        # Consume an optional leading '-' (or first digit), then digits
        # and at most conceptually one '.'; int/float decide validity.
        isfloat = False
        result = self._next()
        peek = self._peek()
        while peek is not None and (peek.isdigit() or peek == "."):
            isfloat = isfloat or peek == "."
            result = result + self._next()
            peek = self._peek()
        try:
            if isfloat:
                return float(result)
            else:
                return int(result)
        except ValueError:
            raise ReadException("Not a valid JSON number: '%s'" % result)

    def _readString(self):
        result = ""
        assert self._next() == '"'
        try:
            while self._peek() != '"':
                ch = self._next()
                if ch == "\\":
                    ch = self._next()
                    if ch in 'brnft':
                        ch = self.escapes[ch]
                    elif ch == "u":
                        # Four-hex-digit escape, e.g. \u0041.
                        ch4096 = self._next()
                        ch256 = self._next()
                        ch16 = self._next()
                        ch1 = self._next()
                        n = 4096 * self._hexDigitToInt(ch4096)
                        n += 256 * self._hexDigitToInt(ch256)
                        n += 16 * self._hexDigitToInt(ch16)
                        n += self._hexDigitToInt(ch1)
                        try:
                            ch = unichr(n)   # Python 2
                        except NameError:
                            ch = chr(n)      # Python 3: chr() is unicode-aware
                    elif ch not in '"/\\':
                        raise ReadException("Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all()))
                result = result + ch
        except StopIteration:
            raise ReadException("Not a valid JSON string: '%s'" % self._generator.all())
        assert self._next() == '"'
        return result

    def _hexDigitToInt(self, ch):
        # Letters via the lookup table, digits via int(); anything else
        # is reported as an invalid hex digit.
        try:
            result = self.hex_digits[ch.upper()]
        except KeyError:
            try:
                result = int(ch)
            except ValueError:
                raise ReadException("The character %s is not a hex digit." % ch)
        return result

    def _readComment(self):
        assert self._next() == "/"
        second = self._next()
        if second == "/":
            self._readDoubleSolidusComment()
        elif second == '*':
            self._readCStyleComment()
        else:
            raise ReadException("Not a valid JSON comment: %s" % self._generator.all())

    def _readCStyleComment(self):
        # Consume through the terminating "*/"; nested "/*" is an error.
        try:
            done = False
            while not done:
                ch = self._next()
                done = (ch == "*" and self._peek() == "/")
                if not done and ch == "/" and self._peek() == "*":
                    raise ReadException("Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all())
            self._next()
        except StopIteration:
            raise ReadException("Not a valid JSON comment: %s, expected */" % self._generator.all())

    def _readDoubleSolidusComment(self):
        # Consume to end of line (or end of input).
        try:
            ch = self._next()
            while ch != "\r" and ch != "\n":
                ch = self._next()
        except StopIteration:
            pass

    def _readArray(self):
        result = []
        assert self._next() == '['
        done = self._peek() == ']'
        while not done:
            item = self._read()
            result.append(item)
            self._eatWhitespace()
            done = self._peek() == ']'
            if not done:
                ch = self._next()
                if ch != ",":
                    raise ReadException("Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch))
        assert ']' == self._next()
        return result

    def _readObject(self):
        result = {}
        assert self._next() == '{'
        done = self._peek() == '}'
        while not done:
            key = self._read()
            # Accept both byte and unicode strings as keys.  The original
            # exact-type check (types.StringType) wrongly rejected keys
            # containing \u escapes, which _readString returns as unicode
            # on Python 2.  ''.__class__/u''.__class__ is (str, unicode)
            # on Python 2 and (str, str) on Python 3.
            if not isinstance(key, (''.__class__, u''.__class__)):
                raise ReadException("Not a valid JSON object key (should be a string): %s" % key)
            self._eatWhitespace()
            ch = self._next()
            if ch != ":":
                raise ReadException("Not a valid JSON object: '%s' due to: '%s'" % (self._generator.all(), ch))
            self._eatWhitespace()
            val = self._read()
            result[key] = val
            self._eatWhitespace()
            done = self._peek() == '}'
            if not done:
                ch = self._next()
                if ch != ",":
                    raise ReadException("Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch))
        assert self._next() == "}"
        return result

    def _eatWhitespace(self):
        # Skips whitespace and any interleaved comments.
        p = self._peek()
        while p is not None and p in string.whitespace or p == '/':
            if p == '/':
                self._readComment()
            else:
                self._next()
            p = self._peek()

    def _peek(self):
        return self._generator.peek()

    def _next(self):
        return self._generator.next()
class JsonWriter(object):
    """Serializes Python values (dict, list/tuple, str/unicode, int/long,
    float, Decimal, bool, None) into a JSON string.

    NOTE(review): dispatch uses exact type() checks against the
    Python-2-only ``types`` aliases (DictType, StringType, ...), so
    subclasses of the supported types are rejected with WriteException
    and this class does not run on Python 3 as written.
    """
    def _append(self, s):
        # Accumulate output fragments; joined once in write().
        self._results.append(s)
    def write(self, obj, escaped_forward_slash=False):
        """Serialize *obj* and return the JSON text.

        escaped_forward_slash: when true, '/' is emitted as '\\/'
        (useful when embedding JSON inside HTML <script> blocks).
        """
        self._escaped_forward_slash = escaped_forward_slash
        self._results = []
        self._write(obj)
        return "".join(self._results)
    def _write(self, obj):
        # Exact-type dispatch; order matters.  True/False are tested after
        # the numeric branches, which is safe here because type(True) is
        # bool and matches none of the exact-type checks above.
        ty = type(obj)
        if ty is types.DictType:
            n = len(obj)
            self._append("{")
            for k, v in obj.items():
                self._write(k)
                self._append(":")
                self._write(v)
                n = n - 1
                if n > 0:
                    self._append(",")
            self._append("}")
        elif ty is types.ListType or ty is types.TupleType:
            n = len(obj)
            self._append("[")
            for item in obj:
                self._write(item)
                n = n - 1
                if n > 0:
                    self._append(",")
            self._append("]")
        elif ty is types.StringType or ty is types.UnicodeType:
            self._append('"')
            # Backslash must be escaped first so the escapes added below
            # are not themselves re-escaped.
            obj = obj.replace('\\', r'\\')
            if self._escaped_forward_slash:
                obj = obj.replace('/', r'\/')
            obj = obj.replace('"', r'\"')
            obj = obj.replace('\b', r'\b')
            obj = obj.replace('\f', r'\f')
            obj = obj.replace('\n', r'\n')
            obj = obj.replace('\r', r'\r')
            obj = obj.replace('\t', r'\t')
            self._append(obj)
            self._append('"')
        elif ty is types.IntType or ty is types.LongType:
            self._append(str(obj))
        elif ty is types.FloatType:
            self._append("%f" % obj)
        elif ty is decimal.Decimal:
            self._append("%f" % obj)
        elif obj is True:
            self._append("true")
        elif obj is False:
            self._append("false")
        elif obj is None:
            self._append("null")
        else:
            raise WriteException, "Cannot write in JSON: %s" % repr(obj)
def write(obj, escaped_forward_slash=False):
    """Module-level convenience: serialize *obj* to a JSON string."""
    writer = JsonWriter()
    return writer.write(obj, escaped_forward_slash)
def read(s):
    """Module-level convenience: parse the JSON string *s*."""
    reader = JsonReader()
    return reader.read(s)
| Python |
'''OpenAnything: a kind and thoughtful library for HTTP web services
This program is part of 'Dive Into Python', a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
'''
__author__ = 'Mark Pilgrim (mark@diveintopython.org)'
__version__ = '$Revision: 1.6 $'[11:-2]
__date__ = '$Date: 2004/04/16 21:16:24 $'
__copyright__ = 'Copyright (c) 2004 Mark Pilgrim'
__license__ = 'Python'
import urllib2, urlparse, gzip
from StringIO import StringIO
USER_AGENT = 'OpenAnything/%s +http://diveintopython.org/http_web_services/' % __version__
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that records the original 301/302 status code on
    the returned response object (as .status), so callers can detect
    that a redirect happened and update any stored URL."""
    def http_error_301(self, req, fp, code, msg, headers):
        # Permanent redirect: follow it, but remember the code.
        result = urllib2.HTTPRedirectHandler.http_error_301(
            self, req, fp, code, msg, headers)
        result.status = code
        return result
    def http_error_302(self, req, fp, code, msg, headers):
        # Temporary redirect: same treatment.
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        result.status = code
        return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    """Error handler that converts HTTP error responses (e.g. 304 Not
    Modified from the conditional-GET headers sent by openAnything) into
    ordinary response objects carrying .status, instead of raising."""
    def http_error_default(self, req, fp, code, msg, headers):
        result = urllib2.HTTPError(
            req.get_full_url(), code, msg, headers, fp)
        result.status = code
        return result
def openAnything(source, etag=None, lastmodified=None, agent=USER_AGENT):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  The returned object is
    guaranteed to have all the basic stdio read methods (read, readline,
    readlines).  Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the lastmodified argument is supplied, it must be a formatted
    date/time string in GMT (as returned in the Last-Modified header of
    a previous request), and will be used as the value of an
    If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    """
    import sys  # fix: the '-' branch returned sys.stdin, but sys was never imported

    # Already a file-like object? Use it directly.
    if hasattr(source, 'read'):
        return source
    # The conventional "-" means standard input.
    if source == '-':
        return sys.stdin
    if urlparse.urlparse(source)[0] == 'http':
        # Open the URL with urllib2, advertising conditional-GET and
        # gzip support, with the custom redirect/error handlers.
        request = urllib2.Request(source)
        request.add_header('User-Agent', agent)
        if lastmodified:
            request.add_header('If-Modified-Since', lastmodified)
        if etag:
            request.add_header('If-None-Match', etag)
        request.add_header('Accept-encoding', 'gzip')
        opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
        return opener.open(request)
    # Try to open with the native open function (if source is a filename).
    try:
        return open(source)
    except (IOError, OSError):
        pass
    # Fall back to treating source as a literal string of data.
    return StringIO(str(source))
def fetch(source, etag=None, lastmodified=None, agent=USER_AGENT):
    '''Fetch data and metadata from a URL, file, stream, or string'''
    stream = openAnything(source, etag, lastmodified, agent)
    info = {}
    info['data'] = stream.read()
    if hasattr(stream, 'headers'):
        # Remember validators so the caller can do conditional GETs later
        info['etag'] = stream.headers.get('ETag')
        info['lastmodified'] = stream.headers.get('Last-Modified')
        # Transparently decompress a gzip-encoded payload
        if stream.headers.get('content-encoding') == 'gzip':
            info['data'] = gzip.GzipFile(fileobj=StringIO(info['data'])).read()
    if hasattr(stream, 'url'):
        # HTTP responses: record final URL and default the status to 200
        info['url'] = stream.url
        info['status'] = 200
    if hasattr(stream, 'status'):
        # Our handlers stamp .status on redirects/errors; prefer it
        info['status'] = stream.status
    stream.close()
    return info
| Python |
## Copyright (c) 1999 - 2003 L. C. Rees. All rights reserved.
## See COPYRIGHT file for license terms.
## http://www.theotherblog.com/Articles/2006/08/04/python-web-crawler-spider/
from __future__ import generators
from traceback import *
# NOTE(review): rebinding the module-level __name__ is unusual and defeats
# the `if __name__ == '__main__'` idiom for this file -- confirm intentional.
__name__ = 'spider'
__version__ = '0.5'
__author__ = 'L.C. Rees (xanimal@users.sf.net)'
# NOTE(review): these are Spider *method* names, not module-level callables,
# so `from spider import *` will not actually export them -- verify.
__all__ = ['ftpurls', 'ftppaths', 'weburls', 'ftpmirror', 'ftpspider',
    'webpaths', 'webreport', 'webmirror', 'webspider', 'urlreport',
    'badurlreport', 'badhtmreport', 'redireport', 'outreport', 'othereport']
'''Multithreaded crawling, reporting, and mirroring for Web and FTP.'''
class Spider:
    '''HTTP and FTP crawling, reporting, and checking'''
    # Class-level imports: the imported modules/names become *class
    # attributes* (self._os, self._ulib, ...) rather than module globals.
    import os as _os
    import urllib as _ulib
    import urlparse as _uparse
    from os import path as _path
    from ftplib import FTP as _ftp
    from time import strftime as _formtime
    from time import localtime as _localtime
    from ftplib import error_perm as _ftperr
    from sgmllib import SGMLParseError as _sperror
    from robotparser import RobotFileParser as _rparser
    # Use threads if available
    try: from threading import Thread as _thread
    except ImportError: pass
    # Shared crawl state: bad-directory/bad-file response signatures,
    # a cached FTP session, and the sgmlop parser class (populated lazily)
    _bdsig, _bfsig, _session, _newparser = None, None, None, None
    # HTML tags with URLs (dicts used as sets, pre-Python-2.4 style)
    _urltags = {'a':1, 'img':1, 'link':1, 'script':1, 'iframe':1, 'object':1,
        'embed':1, 'area':1, 'frame':1, 'applet':1, 'input':1, 'base':1,
        'div':1, 'layer':1, 'ilayer':1, 'bgsound':1}
    # Supported protocols
    _supported = {'HTTP':1, 'http':1, 'HTTPS':1, 'https':1, 'FTP':1, 'ftp':1}
    # HTML attributes with URLs
    _urlattrs = {'href':1, 'src':1, 'data':1}
def __init__(self, base=None, width=None, depth=None):
'''Initializes a Spider instance and its base attributes
Arguments:
base -- URL to crawl (default: None)
width -- maximum resources to crawl (default: None)
depth -- how deep in a hierarchy to crawl (default: None)'''
if base: self.base = base
else: self.base = None
if width: self.width = width
else: self.width = None
if depth: self.depth = depth
else: self.depth = None
def _ftpopen(self, base, name='anonymous', password=None, attempts=3):
'''Returns FTP client session
Arguments:
base -- FTP server URL
name -- login name (default: 'anonymous')
password -- login password (default: None)
attempts -- number of login attempts to try (default: 3)'''
def ftpprompt(tries=0):
'''Prompts for FTP username and password
Arguments:
tries -- number of login attempts'''
tries += tries
try:
self._name = raw_input('Enter login name: ')
self._password = raw_input('Enter password: ')
session = ftp(base, self._name, self._password)
return session
# If login attempt fails, retry login
except ftperr:
if attempts >= tries:
session = ftpprompt(tries)
return session
# Too many login attempts? End program
elif attempts <= tries:
raise IOError, 'Permission denied.'
import sys
sys.exit(0)
# Assignments
self._name, self._password, ftperr = name, password, self._ftperr
su, ftp = self._uparse.urlsplit(base), self._ftp
# Set URL, path, and strip 'ftp://' off
base, path = su[1], '/'.join([su[2], ''])
try: session = ftp(base, name, password)
# Prompt for username, password if initial arguments are incorrect
except ftperr: session = ftpprompt()
# Change to remote path if it exits
if path: session.cwd(path)
return session
def ftpmirror(self, l, t=None, b=None, w=200, d=6, n='anonymous', p=None):
'''Mirrors an FTP site on a local filesystem
Arguments:
l -- local filesystem path (default: None)
b -- FTP server URL (default: None)
t -- number of download threads (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
if b: self.ftpspider(b, w, d, n, p)
return self._mirror((self.paths, self.urls), l, t)
    def ftppaths(self, b=None, w=200, d=6, n='anonymous', p=None):
        '''Returns a sorted list of FTP paths.
        Side effects: caches the FTP session in self._session and stores the
        result in self.paths.
        Arguments:
        b -- FTP server URL (default: None)
        w -- maximum amount of resources to crawl (default: 200)
        d -- depth in hierarchy to crawl (default: 6)
        n -- login username (default: 'anonymous')
        p -- login password (default: None)'''
        def sortftp(rdir):
            '''Returns a list of entries marked as files or directories
            Arguments:
            rdir -- remote directory list'''
            rlist = []
            rappend = rlist.append
            for rl in rdir:
                # Split remote file based on whitespace; last field is the name
                ri = rl.split()[-1]
                # Add tuple of remote item type, permissions & name to rlist
                # NOTE(review): rl[0] ('-' file / 'd' dir) and rl[7] (a
                # permission character) assume Unix-style "LIST -a" output --
                # confirm against the target server.
                if ri not in ('.', '..'): rappend((rl[0], rl[7], ri))
            return rlist
        def visitftp():
            '''Extracts contents of an FTP directory (recursive walk)'''
            wd = pwd()
            if wd[-1] != '/': wd = '/'.join([wd, ''])
            # Add present working directory to visited directories
            dirs[wd], rlist = None, []
            # Get list of current directory's contents
            retr('LIST -a', rlist.append)
            for url in sortftp(rlist):
                # Test if remote item is a file (indicated by '-')
                if url[0] == '-':
                    # Resolve path of file
                    purl = ''.join([wd, url[2]])
                    # Ensure file list doesn't exceed max number of resources
                    if len(files) >= width: return None
                    # Add files to file dictionary
                    elif purl not in files: files[purl] = None
                # Test if it's a directory ('d') and allows scanning ('-')
                elif url[0] == 'd':
                    if url[1] != '-':
                        # Resolve path of directory
                        purl = ''.join([wd, url[2], '/'])
                        # Ensure no recursion beyond depth allowed: too-deep
                        # directories are marked visited but never entered
                        if len(purl.split('/')) >= depth: dirs[purl] = None
                        # Visit directory if it hasn't been visited yet
                        elif purl not in dirs:
                            # Change to new directory
                            cwd(purl)
                            # Run 'visitftp' on new directory
                            visitftp()
        # Use classwide attributes if set
        if b: self.base = b
        else: b = self.base
        # Use classwide width if different from method default
        if self.width and w == 200: width = self.width
        else: width = w
        # Use classwide depth if different from method default
        if self.depth and d == 6: depth = self.depth + 1
        else: depth = d + 1
        # File and directory dicts
        files, dirs = {}, {}
        # Use existing FTP client session if present
        if self._session: ftp = self._session
        # Create new FTP client session if necessary
        else:
            ftp = self._ftpopen(b, n, p)
            self._session = ftp
        # Avoid outside namespace lookups
        cwd, pwd, retr = ftp.cwd, ftp.pwd, ftp.retrlines
        # Walk FTP site
        visitftp()
        # Make path list out of files' keys and return it
        self.paths = files.keys()
        self.paths.sort()
        return self.paths
def ftpspider(self, b=None, w=200, d=6, n='anonymous', p=None):
'''Returns lists of URLs and paths plus a live FTP client session
Arguments:
b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
if b: ftppaths(b, w, d, n, p)
return self.paths, ftpurls(), self._session
def ftpurls(self, b=None, w=200, d=6, n='anonymous', p=None):
'''Returns a list of FTP URLs
Arguments:
b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
if b:
ftppaths(b, w, d, n, p)
# Get rid of trailing '/' in base if present before joining
if b[-1] == '/': base = b[:-1]
else:
base = self.base
# Get rid of trailing '/' in base if present before joining
if base[-1] == '/': base = self.base[:-1]
paths = self.paths
# Add FTP URL
self.urls = [''.join([base, i]) for i in paths]
return self.urls
    def _parserpick(self, old=None):
        '''Builds the URL-extracting and bad-URL-profiling parser classes,
        using the fast sgmlop parser when available or classic sgmllib
        otherwise, and publishes them as self._UrlExtract / self._BadUrl.
        Arguments:
        old -- force the classic sgmllib SGMLParser'''
        # Assignments
        urltags, urlattrs = self._urltags, self._urlattrs
        # Lists for bad file and bad directory signatures; the nested
        # classes below close over bfsig/bdsig, so they see later updates
        self._bfsig, self._bdsig = [], []
        bfsig, bdsig = self._bfsig, self._bdsig
        # Use faster SGMLParser if available
        try:
            from sgmlop import SGMLParser as newparser
            self._newparser = newparser
        # If unavailable, use classic SGML parser
        except ImportError:
            from sgmllib import SGMLParser as oldparser
            old = 1
        # Classes using classic sgmllib SGML Parser
        if old:
            from sgmllib import SGMLParser as oldparser
            # Remove sgmlop parser if present
            self._newparser = None
            # UrlExtract class using classic parser
            class UrlExtract(oldparser):
                '''Extracts URLs from a SGMLish document'''
                def reset(self):
                    '''Resets SGML parser and clears lists'''
                    oldparser.reset(self)
                    self.urls, self.text, self.badurl = [], [], None
                def handle_data(self, data):
                    '''Handles non-markup data'''
                    # Get first 5 lines of non-markup data
                    if len(self.text) <= 5: self.text.append(data)
                    # Compare signature of known bad URL to a new web page
                    if self.text == bfsig: self.badurl = 1
                    elif self.text == bdsig: self.badurl = 1
                def finish_starttag(self, tag, attrs):
                    '''Extracts URL bearing tags'''
                    if tag in urltags:
                        # Get key, value in attributes if they match
                        url = [v for k, v in attrs if k in urlattrs]
                        # NOTE(review): self.url is never read elsewhere and
                        # the sgmlop variant below omits it -- looks like a
                        # leftover; confirm before removing.
                        self.url = url
                        if url: self.urls.extend(url)
            # BadUrl class using classic parser
            class BadUrl(oldparser):
                '''Collects results of intentionally incorrect URLs'''
                def reset(self):
                    '''Resets SGML parser and clears lists'''
                    oldparser.reset(self)
                    self.text = []
                def handle_data(self, data):
                    '''Collects lines to profile bad URLs'''
                    # Adds first 5 lines of non-markup data to text
                    if len(self.text) <= 5: self.text.append(data)
        # If no old flag, use SGMLParser from sgmlop and related classes
        else:
            # UrlExtract class using sgmlop parser (plain handler object;
            # sgmlop parsers cannot be subclassed, they take a handler)
            class UrlExtract:
                '''Extracts URLs from a SGMLish document'''
                def __init__(self):
                    '''Resets SGML parser and clears lists'''
                    self.urls, self.text, self.badurl = [], [], None
                def handle_data(self, data):
                    '''Handles non-markup data'''
                    # Get first 5 lines of non-markup data
                    if len(self.text) <= 5: self.text.append(data)
                    # Compare signature of known bad URL to a new web page
                    if self.text == bfsig: self.badurl = 1
                    elif self.text == bdsig: self.badurl = 1
                def finish_starttag(self, tag, attrs):
                    '''Extracts URL bearing tags'''
                    if tag in urltags:
                        # Get key, value in attributes if they match
                        url = [v for k, v in attrs if k in urlattrs]
                        if url: self.urls.extend(url)
            # BadUrl class using sgmlop parser
            class BadUrl:
                '''Collects results of intentionally incorrect URLs'''
                def __init__(self):
                    '''Resets SGML parser and clears lists'''
                    self.text = []
                def handle_data(self, data):
                    '''Collects lines to profile not found responses'''
                    # Adds first 5 lines of non-markup data to list 'text'
                    if len(self.text) <= 5: self.text.append(data)
        # Make resulting classes available class wide
        self._UrlExtract, self._BadUrl = UrlExtract, BadUrl
    def _webtest(self):
        '''Generates signatures for identifying bad URLs by fetching a
        random, almost-certainly-nonexistent file and directory under the
        base URL and recording the first lines of the server's response.
        Extends self._bfsig (bad file) and self._bdsig (bad directory).'''
        def badurl(url):
            '''Returns first 5 lines of a bad URL
            Arguments:
            url -- Bad URL to open and parse'''
            # Use different classes if faster SGML Parser is available
            if self._newparser:
                # sgmlop parser must have a handler passed to it
                parser, urlget = self._newparser(), BadUrl()
                # Pass handler (sgmlop cannot be subclassed)
                parser.register(urlget)
                parser.feed(urlopen(url).read())
                parser.close()
            # Use classic parser
            else:
                urlget = BadUrl()
                urlget.feed(urlopen(url).read())
                urlget.close()
            # Return signature (collected text lines) of the bad URL
            return urlget.text
        # Make globals local
        base, urljoin = self.base, self._uparse.urljoin
        urlopen, BadUrl = self._ulib.urlopen, self._BadUrl
        # Generate random string of jibber
        from string import letters, digits
        from random import choice, randint
        jibber = ''.join([letters, digits])
        ru = ''.join([choice(jibber) for x in range(randint(1, 30))])
        # Builds signature of a bad URL for a file
        self._bfsig.extend(badurl(urljoin(base, '%s.html' % ru)))
        # Builds signature of a bad URL for a directory
        self._bdsig.extend(badurl(urljoin(base,'%s/' % ru)))
    def handle(self, html, urllist):
        '''Hook for subclasses: post-process the URLs extracted from one
        page. The default implementation returns the list unchanged.
        Arguments:
        html -- HTML source of the page
        urllist -- URLs extracted from the page
        Returns the (possibly modified) URL list.'''
        return urllist  # subclasses may filter/rewrite before returning
def _webparser(self, html):
'''Parses HTML and returns bad URL indicator and extracted URLs
Arguments:
html -- HTML data'''
# Use different classes if faster SGML Parser is available
if self._newparser:
# Make instances of SGML parser and URL extracting handler
parser, urlget = self._newparser(), self._UrlExtract()
# Pass handler to parser
parser.register(urlget)
# Feed data to parser
parser.feed(html)
parser.close()
# Return bad URL indicator and extracted URLs
else:
urlget = self._UrlExtract()
urlget.feed(html)
urlget.close()
# Return badurl marker and list of child URLS
#self._UrlExtract()
self.handle( html, urlget.urls)
return urlget.badurl, urlget.urls
    def _webopen(self, base):
        '''Verifies URL and returns actual URL and extracted child URLs.
        Arguments:
        base -- tuple of (URL to open, referring URL)
        Returns (resolved_url, child_urls) on success, or False when the
        URL is blocked by robots.txt, unreachable, bad, or unparsable.'''
        # Assignments
        # NOTE(review): `good` is bound but never used in this method.
        good, cbase = self._good, base[0]
        try:
            # If webspiders can access URL, open it
            if self._robot.can_fetch('*', cbase):
                # NOTE(review): this inner try/except merely re-raises and
                # is effectively a no-op wrapper.
                try:
                    url = self._ulib.urlopen(cbase)
                except:
                    raise
            # Otherwise, mark as visited and abort
            else:
                self._visited[cbase] = 1
                return False
        # If HTTP error, log bad URL (with its referrer) and abort
        except IOError:
            self._visited[cbase] = 1
            self.badurls.append((base[1], cbase))
            return False
        # Get real URL after any redirects
        newbase = url.geturl()
        # NOTE(review): stores the *response object*, not the URL string --
        # confirm which one downstream consumers expect.
        self.newbase= url
        # Change URL if different from old URL
        if newbase != cbase: cbase, base = newbase, (newbase, base[1])
        # Only resources with mimetype 'text/html' are scanned for URLs
        if url.headers.type == 'text/html':
            # Feed parser
            # fixed?
            try:
                contents = url.read()
                badurl, urls = self._webparser(contents)
                url.close()
                # Return URL and extracted urls if it's good
                if not badurl:
                    return cbase, urls
                # If the URL is bad (after BadUrl), stop processing and log URL
                else:
                    self._visited[cbase] = 1
                    self.badurls.append((base[1], cbase))
                    return False
            # Log URL if SGML parser can't parse it
            except Exception, err:
                url.close()
                print "Error:", cbase, err
                self._visited[cbase], self.badhtm[cbase] = 1, 1
                return False
        # Return URL of non-HTML resources and empty child list
        else:
            url.close()
            return cbase, []
def _genverify(self, urls, base):
'''Verifies a list of full URL relative to a base URL
Arguments:
urls -- list of raw URLs
base -- referring URL'''
# Assignments
cache, visit, urlverify = self._cache, self._visited, self._urlverify
# Strip file off base URL for joining
newbase = base.replace(base.split('/')[-1], '')
for url in urls:
# Get resolved url and raw child URLs
url, rawurls = urlverify(url, base, newbase)
# Handle any child URLs
if rawurls:
newurls = {}
# Eliminate duplicate URLs
for rawurl in rawurls:
# Eliminate known visited URLs
if rawurl not in visit: newurls[rawurl] = 1
# Put new URLs in cache if present
if newurls: cache[url] = newurls
# Yield new URL
#if url: yield url
try:yield url
except:pass
    def _multiverify(self, url, base):
        '''Verifies a full URL relative to a base URL (threaded variant).
        Unlike _genverify, this records results directly: children go into
        self._cache (child -> parent) and the URL into self._good.
        Arguments:
        url -- a raw URL
        base -- referring URL'''
        # Assignments
        cache, visited = self._cache, self._visited
        # Strip file off base URL for joining
        newbase = base.replace(base.split('/')[-1], '')
        # Get resolved url and raw child URLs
        url, rawurls = self._urlverify(url, base, newbase)
        # Handle any child URLs
        if rawurls:
            # Eliminate known visited URLs and duplicates
            for rawurl in rawurls:
                # Put new URLs in cache if present
                if rawurl not in visited: cache[rawurl] = url
        # Put URL in list of good URLs (skips the 0 returned for rejects)
        if url: self._good[url] = 1
    def _urlverify(self, url, base, newbase):
        '''Normalizes and verifies one URL relative to a base URL.
        Arguments:
        url -- raw URL to verify
        base -- referring URL
        newbase -- referring URL with its file component stripped, for joining
        Returns (resolved_url, child_urls) when the URL is good, or (0, 0)
        when it is rejected (visited, unsupported, outside, too deep, ...).'''
        # Assignments
        visited, webopen, other = self._visited, self._webopen, self.other
        # NOTE(review): sb is the *network location* string of the split base
        # URL; membership tests below are substring checks -- confirm that
        # subdomain handling via `in` is intentional.
        sb, depth, urljoin = self._sb[2], self.depth, self._uparse.urljoin
        urlsplit, urldefrag = self._uparse.urlsplit, self._uparse.urldefrag
        outside, redirs, supported = self.outside, self.redirs, self._supported
        if url not in visited:
            # Remove whitespace from URL
            if url.find(' ') != -1:
                visited[url], url = 1, url.replace(' ', '')
                if url in visited: return 0, 0
            # Remove fragments i.e. 'http:foo/bar#frag'
            if url.find('#') != -1:
                visited[url], url = 1, urldefrag(url)[0]
                if url in visited: return 0, 0
            # Process full URLs i.e. 'http://foo/bar
            if url.find(':') != -1:
                urlseg = urlsplit(url)
                # Block non-FTP, HTTP URLs
                if urlseg[0] not in supported:
                    # Log as non-FTP/HTTP URL
                    other[url], visited[url] = 1, 1
                    return 0, 0
                # If URL is not in root domain, block it
                if urlseg[1] not in sb:
                    visited[url], outside[url] = 1, 1
                    return 0, 0
                # Block duplicate root URLs
                elif not urlseg[2] and urlseg[1] == sb:
                    visited[url] = 1
                    return 0, 0
            # Handle relative URLs i.e. ../foo/bar
            elif url.find(':') == -1:
                # Join root domain and relative URL
                visited[url], url = 1, urljoin(newbase, url)
                if url in visited: return 0, 0
            # Test URL by attempting to open it
            rurl = webopen((url, base))
            if rurl and rurl[0] not in visited:
                # Get URL
                turl, rawurls = rurl
                visited[url], visited[turl] = 1, 1
                # If URL resolved to a different URL, process it
                if turl != url:
                    urlseg = urlsplit(turl)
                    # If URL is not in root domain, block it
                    if urlseg[1] not in sb:
                        # Log as a redirected internal URL
                        redirs[(url, turl)] = 1
                        return 0, 0
                    # Block duplicate root URLs
                    elif not urlseg[2] and urlseg[1] == sb: return 0, 0
                # If URL exceeds depth, don't process
                if len(turl.split('/')) >= depth: return 0, 0
                # Otherwise return URL
                else:
                    if rawurls: return turl, rawurls
                    else: return turl, []
            else: return 0,0
        else: return 0, 0
def _onewalk(self):
'''Yields good URLs from under a base URL'''
# Assignments
cache, genverify = self._cache, self._genverify
# End processing if cache is empty
while cache:
# Fetch item from cache
base, urls = cache.popitem()
# If item has child URLs, process them and yield good URLs
if urls:
for url in genverify(urls, base): yield url
    def _multiwalk(self, threads):
        '''Extracts good URLs from under a base URL using worker threads.
        Arguments:
        threads -- number of threads to run'''
        def urlthread(url, base):
            '''Spawns a thread containing a multiverify function
            Arguments:
            url -- URL to verify
            base -- referring URL'''
            # Create instance of Thread
            dthread = Thread(target=multiverify, args=(url, base))
            # Put in pool
            pool.append(dthread)
        # Assignments
        pool, cache, multiverify = [], self._cache, self._multiverify
        Thread, width, good = self._thread, self.width, self._good
        # End processing if cache is empty
        while cache:
            # Process URLs as long as width not exceeded
            if len(good) <= width:
                # Fetch item from cache (child URL -> referring URL)
                url, base = cache.popitem()
                # Make thread
                if url:
                    urlthread(url, base)
                # Run threads once pool size is reached
                if len(pool) == threads or threads >= len(cache):
                    # Start threads
                    for thread in pool: thread.start()
                    # Empty thread pool as threads complete
                    # NOTE(review): removes from `pool` while iterating it,
                    # and isAlive() is the pre-Python-2.6 API -- both work
                    # here only by busy-polling; confirm acceptable.
                    while pool:
                        for thread in pool:
                            if not thread.isAlive(): pool.remove(thread)
            # End if width reached
            elif len(good) >= width: break
def weburls(self, base=None, width=1000, depth=10, thread=None):
'''Returns a list of web paths.
Arguments:
base -- base web URL (default: None)
width -- amount of resources to crawl (default: 200)
depth -- depth in hierarchy to crawl (default: 5)
thread -- number of threads to run (default: None)'''
# Assignments
self._visited, self._good, self._cache, self.badurls = {}, {}, {}, []
self.redirs, self.outside, self.badhtm, self.other = {}, {}, {}, {}
onewalk, good, self._robot = self._onewalk, self._good, self._rparser()
uparse, robot, multiwalk = self._uparse, self._robot, self._multiwalk
cache = self._cache
try:
# Assign width
if self.width and width == 200: width = self.width
else: self.width = width
# sgmlop crashes Python after too many iterations
if width > 5000: self._parserpick(1)
else: self._parserpick()
# Use global base if present
if not base: base = self.base
# Verify URL and get child URLs
newbase, rawurls = self._webopen((base, ''))
if newbase:
# Change base URL if different
if newbase != base: base = newbase
# Ensure there's a trailing '/' in base URL
if base[-1] != '/':
url = list(uparse.urlsplit(base))
url[1] = ''.join([url[1], '/'])
base = uparse.urlunsplit(url)
# Eliminate duplicates and put raw URLs in cache
newurls = {}
for rawurl in rawurls: newurls[rawurl] = 1
if newurls:
# Cache URLs individually if threads are desired
if thread:
for newurl in newurls: cache[newurl] = base
# Cache in group if no threads
else: cache[base] = newurls
# Make base URL, get split, and put in verified URL list
self.base, self._sb = base, base.split('/')
self._visited[base], good[base] = 1, 1
# If URL is bad, abort and raise error
else: raise IOError, "URL is invalid"
# Adjust dept to length of base URL
if self.depth and depth == 6: self.depth += len(self._sb)
else: self.depth = depth + len(self._sb)
# Get robot limits
robot.set_url(''.join([base, 'robots.txt']))
robot.read()
# Get signature of bad URL
self._webtest()
# Get good URLs as long as total width isn't exceeded
try:
# Multiwalk if threaded
if thread: self._multiwalk(thread)
# Otherwise, use single thread
else:
for item in onewalk():
# Don't exceed maximum width
if len(good) <= width: good[item] = 1
elif len(good) >= width: break
# If user interrupts crawl, return what's done
except KeyboardInterrupt: pass
# Get URLs, sort them, and return list
self.urls = good.keys()
self.urls.sort()
return self.urls
except:
return []
    def webpaths(self, b=None, w=200, d=5, t=None):
        '''Returns a list of web paths derived from self.urls.
        Side effect: may rewrite self.base (strips a trailing resource/query)
        and stores the result in self.paths.
        Arguments:
        b -- base web URL (default: None)
        w -- amount of resources to crawl (default: 200)
        d -- depth in hierarchy to crawl (default: 5)
        t -- number of threads (default: None)'''
        def pathize():
            '''Strips base URL from full URLs to produce paths'''
            for url in urls:
                # Remove base URL from path list
                url = url.replace(self.base, '')
                # Add default name 'index.html' to root URLs and directories
                if not url: url = 'index.html'
                elif url[-1] == '/': url = ''.join([url, 'index.html'])
                # Verify removal of base URL and remove it if found
                if url.find(':') != -1: url = urlsplit(url)[2:][0]
                yield url
        # Assignments
        urlsplit = self._uparse.urlsplit
        # Run weburls if base passed as an argument
        if b: self.weburls(b, w, d, t)
        # Strip off trailing resource or query from base URL
        if self.base[-1] != '/': self.base = '/'.join(self._sb[:-1])
        urls = self.urls
        # Return path list after stripping base URL
        self.paths = list(pathize())
        return self.paths
def webmirror(self, root=None, t=None, base=None, width=200, depth=5):
'''Mirrors a website on a local filesystem
Arguments:
root -- local filesystem path (default: None)
t -- number of threads (default: None)
base -- base web URL (default: None)
width -- amount of resources to crawl (default: 200)
depth -- depth in hierarchy to crawl (default: 5)'''
if base: self.webspider(base, width, depth, t)
return self._mirror((self.paths, self.urls), root, t)
def webspider(self, b=None, w=200, d=5, t=None):
'''Returns two lists of child URLs and paths
Arguments:
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
return self.webpaths(), self.urls
def badurlreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of bad URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.badurls:
# Number of bad URLs
amount = str(len(self.badurls))
header = '%s broken URLs under %s on %s:\n'
# Print referring URL pointing to bad URL
body = '\n'.join([' -> '.join([i[0], i[1]]) for i in self.badurls])
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def badhtmreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of unparsed HTML URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.badhtm:
amount = str(len(self.badhtm))
header = '%s unparsable HTML URLs under %s on %s:\n'
body = '\n'.join(self.badhtm)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def redireport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of URLs redirected to an external URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.redirs:
amount = str(len(self.redirs))
header = '%s redirects to external URLs under %s on %s:\n'
# Print referring URL pointing to new URL
body = '\n'.join([' -> '.join([i[0], i[1]]) for i in self.redirs])
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def outreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of outside URLs referenced under the base URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.outside:
amount = str(len(self.outside))
header = '%s links to external URLs under %s on %s:\n'
body = '\n'.join(self.outside)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def othereport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of non-HTTP/FTP URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.other:
amount = str(len(self.other))
header = '%s non-FTP/non-HTTP URLs under %s on %s:\n'
body = '\n'.join(self.other)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def urlreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of all URLs under a URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.urls:
amount = str(len(self.urls))
header = '%s verified URLs under %s on %s:\n'
body = '\n'.join(self.urls)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def webreport(self, f=None, b=None, w=200, d=5, t=None, *vargs):
'''Pretties up a list of logged information under a URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)
vargs -- report sections to include or exclude
To override defaults:
To include a section add 'badhtm', 'redirs', 'outside', or 'other'
To exclude a section add 'badurls' or "urls"'''
if b: self.weburls(b, w, d, t)
# Defaults for report
badurls, badhtm, redirs, urls, outside, other = 1, 0, 0, 1, 0, 0
# Create compilation list
compile = []
# Override default report settings if argument is passed to vargs
for arg in vargs:
if arg == 'badurls': badurls = 0
elif arg == 'badhtm': badhtm = 1
elif arg == 'redirs': redirs = 1
elif arg == 'urls': urls = 0
elif arg == 'outside': outside = 1
elif arg == 'other': other = 1
# Compile report
if badurls:
badurls = self.badurlreport()
if badurls: compile.append(badurls)
if urls:
urls = self.urlreport()
if urls: compile.append(urls)
if outside:
outside = self.outreport()
if outside: compile.append(outside)
if redirs:
redirs = self.redireport()
if redirs: compile.append(redirs)
if badhtm:
badhtm = self.badhtmreport()
if badhtm: compile.append(badhtm)
if other:
other = self.othereport()
if other: compile.append(other)
# Make report
report = '\n\n'.join(compile)
# Write to file if argument present
if file: open(f, 'w').write(report)
# Or return string
else: return report
def _formatreport(self, amount, header, body, file=None):
'''Generic prettifier with date/time stamper
Arguments:
header -- title of report
body -- body of report
file -- output file for report (default: None)'''
# Get current time
localtime, strftime = self._localtime, self._formtime
curtime = strftime('%A, %B %d, %Y at %I:%M %p', localtime())
# Make section header
header = header % (amount, self.base, curtime)
# Add header to body
report = '\n'.join([header, body])
# Write to file if argument present
if file: open(file, 'w').write(report)
# Or return string
else: return report
    def _mirror(self, lists, root=None, threads=None):
        '''Mirrors a site on a local filesystem based on lists passed to it
        Argument:
        lists -- lists of URLs and paths
        root -- local filesystem path (default: None)
        threads -- number of threads (default: None)
        Side effects: changes the process working directory to `root` and
        creates local directories/files mirroring the remote paths.'''
        def download(url, np, op):
            '''Downloads files that need to be mirrored.'''
            # If ftp...
            if url[:3] == 'ftp':
                # Open local file
                local = open(np, 'wb')
                # Download using FTP session
                # NOTE(review): base/name/password are bound below inside a
                # try/except that swallows AttributeError -- assumes FTP URLs
                # only appear when credentials were set. TODO: confirm.
                ftp = ftpopen(base, name, password)
                ftp.retrbinary('RETR %s' % op, local.write)
                ftp.close()
                # Close local file
                local.close()
            # Use normal urlretrieve if no FTP required
            else: ulib.urlretrieve(url, np)
        def dlthread(url, np, op):
            '''Spawns a thread containing the download function'''
            # Create thread
            dthread = Thread(target=download, args=(url, np, op))
            # Add to thread pool (threads are started later, in batches)
            pool.append(dthread)
        # Extract path and URL lists
        paths, urls = lists
        # Avoid outside namespace lookups
        ulib, makedirs, sep = self._ulib, self._os.makedirs, self._os.sep
        normcase, split = self._path.normcase, self._path.split
        exists, isdir = self._path.exists, self._path.isdir
        ftpopen = self._ftpopen
        # Create local names for thread class and thread pool
        if threads: Thread, pool = self._thread, []
        # Localize name and password if exists
        try: base, name, password = self.base, self._name, self._password
        except AttributeError: pass
        # Change to directory if given...
        if root:
            if exists(root):
                if isdir(root): self._os.chdir(root)
            # Create root if it doesn't exist
            else:
                makedirs(root)
                self._os.chdir(root)
        # Otherwise use current directory
        else: root = self._os.getcwd()
        # Iterate over paths and download files
        for oldpath in paths:
            # Sync with the URL for oldpath
            # NOTE(review): index() returns the FIRST occurrence, so duplicate
            # entries in `paths` would all resolve to the same URL; this also
            # makes the loop O(n^2). Assumes paths are unique -- verify caller.
            url = urls[paths.index(oldpath)]
            # Create name of local copy
            newpath = normcase(oldpath).lstrip(sep)
            # Get directory name
            dirname = split(newpath)[0]
            # If the directory exists, download the file directly
            # (silently skipped when `dirname` exists but is not a directory)
            if exists(dirname):
                if isdir(dirname):
                    if threads: dlthread(url, newpath, oldpath)
                    else: download(url, newpath, oldpath)
            # Don't create local directory if path in root of remote URL
            elif not dirname:
                if threads: dlthread(url, newpath, oldpath)
                else: download(url, newpath, oldpath)
            # Make local directory if it doesn't exist, then dowload file
            else:
                makedirs(dirname)
                if threads: dlthread(url, newpath, oldpath)
                else: download(url, newpath, oldpath)
            # Run threads if they've hit the max number of threads allowed
            if threads:
                # Run if max threads or final thread reached
                if len(pool) == threads or paths[-1] == oldpath:
                    # Start all threads
                    for thread in pool: thread.start()
                    # Clear the thread pool as they finish
                    # NOTE(review): busy-wait that removes from `pool` while
                    # iterating it; works only because the loop restarts via
                    # the enclosing while. isAlive() is the pre-2.6 API.
                    while pool:
                        for thread in pool:
                            if not thread.isAlive(): pool.remove(thread)
# Instance of Spider enables exporting Spider's methods as standalone functions
# (module-level convenience API, e.g. `import spider; spider.weburls(...)`;
# note all aliases share this single instance and therefore its state).
_inst = Spider()
ftpurls = _inst.ftpurls
weburls = _inst.weburls
ftppaths = _inst.ftppaths
webpaths = _inst.webpaths
ftpmirror = _inst.ftpmirror
ftpspider = _inst.ftpspider
webmirror = _inst.webmirror
webspider = _inst.webspider
webreport = _inst.webreport
urlreport = _inst.urlreport
outreport = _inst.outreport
redireport = _inst.redireport
othereport = _inst.othereport
badurlreport = _inst.badurlreport
badhtmreport = _inst.badhtmreport
| Python |
#!/usr/bin/env python
#
# libgmail -- Gmail access via Python
#
## To get the version number of the available libgmail version.
## Reminder: add date before next release. This attribute is also
## used in the setup script.
Version = '0.1.4' # (Feb 2006)
# Original author: follower@myrealbox.com
# Maintainers: Waseem (wdaher@mit.edu) and Stas Z (stas@linux.isbeter.nl)
#
# Contacts support added by wdaher@mit.edu and Stas Z
# (with massive initial help from
# Adrian Holovaty's 'gmail.py'
# and the Johnvey Gmail API)
#
# License: GPL 2.0
#
# Thanks:
# * Live HTTP Headers <http://livehttpheaders.mozdev.org/>
# * Gmail <http://gmail.google.com/>
# * Google Blogoscoped <http://blog.outer-court.com/>
# * ClientCookie <http://wwwsearch.sourceforge.net/ClientCookie/>
# (There when I needed it...)
# * The *first* big G. :-)
#
# NOTE:
# You should ensure you are permitted to use this script before using it
# to access Google's Gmail servers.
#
#
# Gmail Implementation Notes
# ==========================
#
# * Folders contain message threads, not individual messages. At present I
# do not know any way to list all messages without processing thread list.
#
LG_DEBUG=0
from lgconstants import *
import os,pprint
import re
import urllib
import urllib2
import mimetypes
import types
from cPickle import load, dump
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# Gmail endpoints: the login form POST target and the main mail interface.
URL_LOGIN = "https://www.google.com/accounts/ServiceLoginBoxAuth"
URL_GMAIL = "https://mail.google.com/mail/"
# TODO: Get these on the fly?
STANDARD_FOLDERS = [U_INBOX_SEARCH, U_STARRED_SEARCH,
                    U_ALL_SEARCH, U_DRAFTS_SEARCH,
                    U_SENT_SEARCH, U_SPAM_SEARCH]
# Constants with names not from the Gmail Javascript:
# TODO: Move to `lgconstants.py`?
U_SAVEDRAFT_VIEW = "sd"
D_DRAFTINFO = "di"
# NOTE: All other DI_* field offsets seem to match the MI_* field offsets
DI_BODY = 19
# Have we already warned that the live Javascript version differs from the
# one this library was written against?
versionWarned = False
# Matches each embedded Javascript `D(...)` data call in a Gmail page;
# DOTALL so payloads spanning multiple lines are captured too.
RE_SPLIT_PAGE_CONTENT = re.compile("D\((.*?)\);", re.DOTALL)
class GmailError(Exception):
    '''
    Raised for gmail-specific failures -- in particular a failure to log
    in and a failure to parse a response returned by the service.
    '''
    pass
def _parsePage(pageContent):
    """
    Parse the supplied HTML page and extract useful information from
    the embedded Javascript.
    Returns a dict mapping each `D(...)` item name to a list of the
    value groups that appeared under that name.
    Raises `GmailError` when the embedded data cannot be executed.
    """
    # Keep only lines belonging to the `D(...)` data calls; continuation
    # lines of bunched calls start with ')', ',' or ']'.
    lines = pageContent.splitlines()
    data = '\n'.join([x for x in lines if x and x[0] in ['D', ')', ',', ']']])
    # Collapse empty positions (",,") so the data is valid Python syntax.
    data = data.replace(',,',',').replace(',,',',')
    result = []
    # SECURITY NOTE(review): this executes code derived from the server's
    # response. `__builtins__` is emptied, which limits but does not
    # eliminate the risk of evaluating hostile page content.
    try:
        exec data in {'__builtins__': None}, {'D': lambda x: result.append(x)}
    except SyntaxError,info:
        print info
        raise GmailError, 'Failed to parse data returned from gmail.'
    items = result
    itemsDict = {}
    namesFoundTwice = []  # (currently unused)
    for item in items:
        name = item[0]
        try:
            parsedValue = item[1:]
        except Exception:
            parsedValue = ['']
        if itemsDict.has_key(name):
            # This handles the case where a name key is used more than
            # once (e.g. mail items, mail body etc) and automatically
            # places the values into list.
            # TODO: Check this actually works properly, it's early... :-)
            if len(parsedValue) and type(parsedValue[0]) is types.ListType:
                for item in parsedValue:
                    itemsDict[name].append(item)
            else:
                itemsDict[name].append(parsedValue)
        else:
            if len(parsedValue) and type(parsedValue[0]) is types.ListType:
                itemsDict[name] = []
                for item in parsedValue:
                    itemsDict[name].append(item)
            else:
                itemsDict[name] = [parsedValue]
    return itemsDict
def _splitBunches(infoItems):# Is this still needed ?? Stas
    """
    Flatten one level of bunching so each item can be iterated over
    separately, even when several were bunched together on the page
    (bunched items arrive as tuples).
    """
    flattened = []
    # TODO: Decide if this is the best approach.
    for bunch in infoItems:
        if type(bunch) == tuple:
            flattened.extend(bunch)
        else:
            flattened.append(bunch)
    return flattened
class CookieJar:
"""
A rough cookie handler, intended to only refer to one domain.
Does no expiry or anything like that.
(The only reason this is here is so I don't have to require
the `ClientCookie` package.)
"""
def __init__(self):
"""
"""
self._cookies = {}
def extractCookies(self, response, nameFilter = None):
"""
"""
# TODO: Do this all more nicely?
for cookie in response.headers.getheaders('Set-Cookie'):
name, value = (cookie.split("=", 1) + [""])[:2]
if LG_DEBUG: print "Extracted cookie `%s`" % (name)
if not nameFilter or name in nameFilter:
self._cookies[name] = value.split(";")[0]
if LG_DEBUG: print "Stored cookie `%s` value `%s`" % (name, self._cookies[name])
def addCookie(self, name, value):
"""
"""
self._cookies[name] = value
def setCookies(self, request):
"""
"""
request.add_header('Cookie',
";".join(["%s=%s" % (k,v)
for k,v in self._cookies.items()]))
def _buildURL(**kwargs):
    """
    Return the Gmail base URL with the given keyword arguments encoded
    as its query string.
    """
    query = urllib.urlencode(kwargs)
    return "%s?%s" % (URL_GMAIL, query)
def _paramsToMime(params, filenames, files):
    """
    Build a multipart/form-data MIME message from form fields plus
    attachments, for submission as an HTTP POST body.
    Arguments:
      params -- dict of form field name -> string value
      filenames -- sequence of file paths to attach (may be None/empty)
      files -- sequence of `email.Message.Message` attachments (may be None)
    Returns the assembled `MIMEMultipart` object.
    """
    mimeMsg = MIMEMultipart("form-data")
    for name, value in params.iteritems():
        mimeItem = MIMEText(value)
        mimeItem.add_header("Content-Disposition", "form-data", name=name)
        # TODO: Handle this better...?
        # Strip per-part headers the form upload must not carry.
        for hdr in ['Content-Type','MIME-Version','Content-Transfer-Encoding']:
            del mimeItem[hdr]
        mimeMsg.attach(mimeItem)
    if filenames or files:
        filenames = filenames or []
        files = files or []
        for idx, item in enumerate(filenames + files):
            # TODO: This is messy, tidy it...
            if isinstance(item, str):
                # We assume it's a file path...
                filename = item
                contentType = mimetypes.guess_type(filename)[0]
                payload = open(filename, "rb").read()
            else:
                # We assume it's an `email.Message.Message` instance...
                # TODO: Make more use of the pre-encoded information?
                filename = item.get_filename()
                contentType = item.get_content_type()
                payload = item.get_payload(decode=True)
            if not contentType:
                contentType = "application/octet-stream"
            mimeItem = MIMEBase(*contentType.split("/"))
            mimeItem.add_header("Content-Disposition", "form-data",
                                name="file%s" % idx, filename=filename)
            # TODO: Encode the payload?
            mimeItem.set_payload(payload)
            # TODO: Handle this better...?
            for hdr in ['MIME-Version','Content-Transfer-Encoding']:
                del mimeItem[hdr]
            mimeMsg.attach(mimeItem)
    # The outer message travels as an HTTP body, not email.
    del mimeMsg['MIME-Version']
    return mimeMsg
class GmailLoginFailure(Exception):
    """
    Raised whenever the login process fails -- could be a wrong
    username/password, or a Gmail service error, for example.
    The failure text is available as `e.message`, or via `print e`
    (which shows the repr of the message).
    """
    def __init__(self,message):
        # Keep the raw message so callers can inspect it directly.
        self.message = message
    def __str__(self):
        return repr(self.message)
class GmailAccount:
"""
"""
def __init__(self, name = "", pw = "", state = None):
"""
"""
# TODO: Change how all this is handled?
if name and pw:
self.name = name
self._pw = pw
self._cookieJar = CookieJar()
elif state:
# TODO: Check for stale state cookies?
self.name, self._cookieJar = state.state
else:
raise ValueError("GmailAccount must be instantiated with " \
"either GmailSessionState object or name " \
"and password.")
self._cachedQuotaInfo = None
self._cachedLabelNames = None
def login(self):
"""
"""
# TODO: Throw exception if we were instantiated with state?
data = urllib.urlencode({'continue': URL_GMAIL,
'Email': self.name,
'Passwd': self._pw,
})
headers = {'Host': 'www.google.com',
'User-Agent': 'User-Agent: Mozilla/5.0 (compatible;)'}
req = urllib2.Request(URL_LOGIN, data=data, headers=headers)
pageData = self._retrievePage(req)
# TODO: Tidy this up?
# This requests the page that provides the required "GV" cookie.
RE_PAGE_REDIRECT = 'CheckCookie\?continue=([^"]+)'
# TODO: Catch more failure exceptions here...?
try:
redirectURL = urllib.unquote(re.search(RE_PAGE_REDIRECT,
pageData).group(1))
except AttributeError:
raise GmailLoginFailure("Login failed. (Wrong username/password?)")
# We aren't concerned with the actual content of this page,
# just the cookie that is returned with it.
pageData = self._retrievePage(redirectURL)
def _retrievePage(self, urlOrRequest):
"""
"""
if not isinstance(urlOrRequest, urllib2.Request):
req = urllib2.Request(urlOrRequest)
else:
req = urlOrRequest
self._cookieJar.setCookies(req)
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError,info:
print info
return None
pageData = resp.read()
# Extract cookies here
self._cookieJar.extractCookies(resp)
# TODO: Enable logging of page data for debugging purposes?
return pageData
def _parsePage(self, urlOrRequest):
"""
Retrieve & then parse the requested page content.
"""
items = _parsePage(self._retrievePage(urlOrRequest))
# Automatically cache some things like quota usage.
# TODO: Cache more?
# TODO: Expire cached values?
# TODO: Do this better.
try:
self._cachedQuotaInfo = items[D_QUOTA]
except KeyError:
pass
#pprint.pprint(items)
try:
self._cachedLabelNames = [category[CT_NAME] for category in items[D_CATEGORIES][0]]
except KeyError:
pass
return items
def _parseSearchResult(self, searchType, start = 0, **kwargs):
"""
"""
params = {U_SEARCH: searchType,
U_START: start,
U_VIEW: U_THREADLIST_VIEW,
}
params.update(kwargs)
return self._parsePage(_buildURL(**params))
def _parseThreadSearch(self, searchType, allPages = False, **kwargs):
"""
Only works for thread-based results at present. # TODO: Change this?
"""
start = 0
tot = 0
threadsInfo = []
# Option to get *all* threads if multiple pages are used.
while (start == 0) or (allPages and
len(threadsInfo) < threadListSummary[TS_TOTAL]):
items = self._parseSearchResult(searchType, start, **kwargs)
#TODO: Handle single & zero result case better? Does this work?
try:
threads = items[D_THREAD]
except KeyError:
break
else:
for th in threads:
if not type(th[0]) is types.ListType:
th = [th]
threadsInfo.append(th)
# TODO: Check if the total or per-page values have changed?
threadListSummary = items[D_THREADLIST_SUMMARY][0]
threadsPerPage = threadListSummary[TS_NUM]
start += threadsPerPage
# TODO: Record whether or not we retrieved all pages..?
return GmailSearchResult(self, (searchType, kwargs), threadsInfo)
def _retrieveJavascript(self, version = ""):
"""
Note: `version` seems to be ignored.
"""
return self._retrievePage(_buildURL(view = U_PAGE_VIEW,
name = "js",
ver = version))
def getMessagesByFolder(self, folderName, allPages = False):
"""
Folders contain conversation/message threads.
`folderName` -- As set in Gmail interface.
Returns a `GmailSearchResult` instance.
*** TODO: Change all "getMessagesByX" to "getThreadsByX"? ***
"""
return self._parseThreadSearch(folderName, allPages = allPages)
def getMessagesByQuery(self, query, allPages = False):
"""
Returns a `GmailSearchResult` instance.
"""
return self._parseThreadSearch(U_QUERY_SEARCH, q = query,
allPages = allPages)
def getQuotaInfo(self, refresh = False):
"""
Return MB used, Total MB and percentage used.
"""
# TODO: Change this to a property.
if not self._cachedQuotaInfo or refresh:
# TODO: Handle this better...
self.getMessagesByFolder(U_INBOX_SEARCH)
return self._cachedQuotaInfo[0][:3]
def getLabelNames(self, refresh = False):
"""
"""
# TODO: Change this to a property?
if not self._cachedLabelNames or refresh:
# TODO: Handle this better...
self.getMessagesByFolder(U_INBOX_SEARCH)
return self._cachedLabelNames
def getMessagesByLabel(self, label, allPages = False):
"""
"""
return self._parseThreadSearch(U_CATEGORY_SEARCH,
cat=label, allPages = allPages)
def getRawMessage(self, msgId):
"""
"""
# U_ORIGINAL_MESSAGE_VIEW seems the only one that returns a page.
# All the other U_* results in a 404 exception. Stas
PageView = U_ORIGINAL_MESSAGE_VIEW
return self._retrievePage(
_buildURL(view=PageView, th=msgId))
def getUnreadMessages(self):
"""
"""
return self._parseThreadSearch(U_QUERY_SEARCH,
q = "is:" + U_AS_SUBSET_UNREAD)
def getUnreadMsgCount(self):
"""
"""
items = self._parseSearchResult(U_QUERY_SEARCH,
q = "is:" + U_AS_SUBSET_UNREAD)
try:
result = items[D_THREADLIST_SUMMARY][0][TS_TOTAL_MSGS]
except KeyError:
result = 0
return result
def _getActionToken(self):
"""
"""
try:
at = self._cookieJar._cookies[ACTION_TOKEN_COOKIE]
except KeyError:
self.getLabelNames(True)
at = self._cookieJar._cookies[ACTION_TOKEN_COOKIE]
return at
def sendMessage(self, msg, asDraft = False, _extraParams = None):
"""
`msg` -- `GmailComposedMessage` instance.
`_extraParams` -- Dictionary containing additional parameters
to put into POST message. (Not officially
for external use, more to make feature
additional a little easier to play with.)
Note: Now returns `GmailMessageStub` instance with populated
`id` (and `_account`) fields on success or None on failure.
"""
# TODO: Handle drafts separately?
params = {U_VIEW: [U_SENDMAIL_VIEW, U_SAVEDRAFT_VIEW][asDraft],
U_REFERENCED_MSG: "",
U_THREAD: "",
U_DRAFT_MSG: "",
U_COMPOSEID: "1",
U_ACTION_TOKEN: self._getActionToken(),
U_COMPOSE_TO: msg.to,
U_COMPOSE_CC: msg.cc,
U_COMPOSE_BCC: msg.bcc,
"subject": msg.subject,
"msgbody": msg.body,
}
if _extraParams:
params.update(_extraParams)
# Amongst other things, I used the following post to work out this:
# <http://groups.google.com/groups?
# selm=mailman.1047080233.20095.python-list%40python.org>
mimeMessage = _paramsToMime(params, msg.filenames, msg.files)
#### TODO: Ughh, tidy all this up & do it better...
## This horrible mess is here for two main reasons:
## 1. The `Content-Type` header (which also contains the boundary
## marker) needs to be extracted from the MIME message so
## we can send it as the request `Content-Type` header instead.
## 2. It seems the form submission needs to use "\r\n" for new
## lines instead of the "\n" returned by `as_string()`.
## I tried changing the value of `NL` used by the `Generator` class
## but it didn't work so I'm doing it this way until I figure
## out how to do it properly. Of course, first try, if the payloads
## contained "\n" sequences they got replaced too, which corrupted
## the attachments. I could probably encode the submission,
## which would probably be nicer, but in the meantime I'm kludging
## this workaround that replaces all non-text payloads with a
## marker, changes all "\n" to "\r\n" and finally replaces the
## markers with the original payloads.
## Yeah, I know, it's horrible, but hey it works doesn't it? If you've
## got a problem with it, fix it yourself & give me the patch!
##
origPayloads = {}
FMT_MARKER = "&&&&&&%s&&&&&&"
for i, m in enumerate(mimeMessage.get_payload()):
if not isinstance(m, MIMEText): #Do we care if we change text ones?
origPayloads[i] = m.get_payload()
m.set_payload(FMT_MARKER % i)
mimeMessage.epilogue = ""
msgStr = mimeMessage.as_string()
contentTypeHeader, data = msgStr.split("\n\n", 1)
contentTypeHeader = contentTypeHeader.split(":", 1)
data = data.replace("\n", "\r\n")
for k,v in origPayloads.iteritems():
data = data.replace(FMT_MARKER % k, v)
####
req = urllib2.Request(_buildURL(), data = data)
req.add_header(*contentTypeHeader)
items = self._parsePage(req)
# TODO: Check composeid?
# Sometimes we get the success message
# but the id is 0 and no message is sent
result = None
resultInfo = items[D_SENDMAIL_RESULT][0]
if resultInfo[SM_SUCCESS]:
result = GmailMessageStub(id = resultInfo[SM_NEWTHREADID],
_account = self)
return result
def trashMessage(self, msg):
"""
"""
# TODO: Decide if we should make this a method of `GmailMessage`.
# TODO: Should we check we have been given a `GmailMessage` instance?
params = {
U_ACTION: U_DELETEMESSAGE_ACTION,
U_ACTION_MESSAGE: msg.id,
U_ACTION_TOKEN: self._getActionToken(),
}
items = self._parsePage(_buildURL(**params))
# TODO: Mark as trashed on success?
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def _doThreadAction(self, actionId, thread):
"""
"""
# TODO: Decide if we should make this a method of `GmailThread`.
# TODO: Should we check we have been given a `GmailThread` instance?
params = {
U_SEARCH: U_ALL_SEARCH, #TODO:Check this search value always works.
U_VIEW: U_UPDATE_VIEW,
U_ACTION: actionId,
U_ACTION_THREAD: thread.id,
U_ACTION_TOKEN: self._getActionToken(),
}
items = self._parsePage(_buildURL(**params))
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def trashThread(self, thread):
"""
"""
# TODO: Decide if we should make this a method of `GmailThread`.
# TODO: Should we check we have been given a `GmailThread` instance?
result = self._doThreadAction(U_MARKTRASH_ACTION, thread)
# TODO: Mark as trashed on success?
return result
def _createUpdateRequest(self, actionId): #extraData):
"""
Helper method to create a Request instance for an update (view)
action.
Returns populated `Request` instance.
"""
params = {
U_VIEW: U_UPDATE_VIEW,
}
data = {
U_ACTION: actionId,
U_ACTION_TOKEN: self._getActionToken(),
}
#data.update(extraData)
req = urllib2.Request(_buildURL(**params),
data = urllib.urlencode(data))
return req
# TODO: Extract additional common code from handling of labels?
def createLabel(self, labelName):
"""
"""
req = self._createUpdateRequest(U_CREATECATEGORY_ACTION + labelName)
# Note: Label name cache is updated by this call as well. (Handy!)
items = self._parsePage(req)
print items
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def deleteLabel(self, labelName):
"""
"""
# TODO: Check labelName exits?
req = self._createUpdateRequest(U_DELETECATEGORY_ACTION + labelName)
# Note: Label name cache is updated by this call as well. (Handy!)
items = self._parsePage(req)
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def renameLabel(self, oldLabelName, newLabelName):
"""
"""
# TODO: Check oldLabelName exits?
req = self._createUpdateRequest("%s%s^%s" % (U_RENAMECATEGORY_ACTION,
oldLabelName, newLabelName))
# Note: Label name cache is updated by this call as well. (Handy!)
items = self._parsePage(req)
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def storeFile(self, filename, label = None):
"""
"""
# TODO: Handle files larger than single attachment size.
# TODO: Allow file data objects to be supplied?
FILE_STORE_VERSION = "FSV_01"
FILE_STORE_SUBJECT_TEMPLATE = "%s %s" % (FILE_STORE_VERSION, "%s")
subject = FILE_STORE_SUBJECT_TEMPLATE % os.path.basename(filename)
msg = GmailComposedMessage(to="", subject=subject, body="",
filenames=[filename])
draftMsg = self.sendMessage(msg, asDraft = True)
if draftMsg and label:
draftMsg.addLabel(label)
return draftMsg
## CONTACTS SUPPORT
def getContacts(self):
"""
Returns a GmailContactList object
that has all the contacts in it as
GmailContacts
"""
contactList = []
# pnl = a is necessary to get *all* contacts
myUrl = _buildURL(view='cl',search='contacts', pnl='a')
myData = self._parsePage(myUrl)
# This comes back with a dictionary
# with entry 'cl'
addresses = myData['cl']
for entry in addresses:
if len(entry) >= 6 and entry[0]=='ce':
newGmailContact = GmailContact(entry[1], entry[2], entry[4], entry[5])
#### new code used to get all the notes
#### not used yet due to lockdown problems
##rawnotes = self._getSpecInfo(entry[1])
##print rawnotes
##newGmailContact = GmailContact(entry[1], entry[2], entry[4],rawnotes)
contactList.append(newGmailContact)
return GmailContactList(contactList)
def addContact(self, myContact, *extra_args):
"""
Attempts to add a GmailContact to the gmail
address book. Returns true if successful,
false otherwise
Please note that after version 0.1.3.3,
addContact takes one argument of type
GmailContact, the contact to add.
The old signature of:
addContact(name, email, notes='') is still
supported, but deprecated.
"""
if len(extra_args) > 0:
# The user has passed in extra arguments
# He/she is probably trying to invoke addContact
# using the old, deprecated signature of:
# addContact(self, name, email, notes='')
# Build a GmailContact object and use that instead
(name, email) = (myContact, extra_args[0])
if len(extra_args) > 1:
notes = extra_args[1]
else:
notes = ''
myContact = GmailContact(-1, name, email, notes)
# TODO: In the ideal world, we'd extract these specific
# constants into a nice constants file
# This mostly comes from the Johnvey Gmail API,
# but also from the gmail.py cited earlier
myURL = _buildURL(view='up')
myDataList = [ ('act','ec'),
('at', self._cookieJar._cookies['GMAIL_AT']), # Cookie data?
('ct_nm', myContact.getName()),
('ct_em', myContact.getEmail()),
('ct_id', -1 )
]
notes = myContact.getNotes()
if notes != '':
myDataList.append( ('ctf_n', notes) )
validinfokeys = [
'i', # IM
'p', # Phone
'd', # Company
'a', # ADR
'e', # Email
'm', # Mobile
'b', # Pager
'f', # Fax
't', # Title
'o', # Other
]
moreInfo = myContact.getMoreInfo()
ctsn_num = -1
if moreInfo != {}:
for ctsf,ctsf_data in moreInfo.items():
ctsn_num += 1
# data section header, WORK, HOME,...
sectionenum ='ctsn_%02d' % ctsn_num
myDataList.append( ( sectionenum, ctsf ))
ctsf_num = -1
if isinstance(ctsf_data[0],str):
ctsf_num += 1
# data section
subsectionenum = 'ctsf_%02d_%02d_%s' % (ctsn_num, ctsf_num, ctsf_data[0]) # ie. ctsf_00_01_p
myDataList.append( (subsectionenum, ctsf_data[1]) )
else:
for info in ctsf_data:
if validinfokeys.count(info[0]) > 0:
ctsf_num += 1
# data section
subsectionenum = 'ctsf_%02d_%02d_%s' % (ctsn_num, ctsf_num, info[0]) # ie. ctsf_00_01_p
myDataList.append( (subsectionenum, info[1]) )
myData = urllib.urlencode(myDataList)
request = urllib2.Request(myURL,
data = myData)
pageData = self._retrievePage(request)
if pageData.find("The contact was successfully added") == -1:
print pageData
if pageData.find("already has the email address") > 0:
raise Exception("Someone with same email already exists in Gmail.")
elif pageData.find("https://www.google.com/accounts/ServiceLogin"):
raise Exception("Login has expired.")
return False
else:
return True
def _removeContactById(self, id):
"""
Attempts to remove the contact that occupies
id "id" from the gmail address book.
Returns True if successful,
False otherwise.
This is a little dangerous since you don't really
know who you're deleting. Really,
this should return the name or something of the
person we just killed.
Don't call this method.
You should be using removeContact instead.
"""
myURL = _buildURL(search='contacts', ct_id = id, c=id, act='dc', at=self._cookieJar._cookies['GMAIL_AT'], view='up')
pageData = self._retrievePage(myURL)
if pageData.find("The contact has been deleted") == -1:
return False
else:
return True
def removeContact(self, gmailContact):
"""
Attempts to remove the GmailContact passed in
Returns True if successful, False otherwise.
"""
# Let's re-fetch the contact list to make
# sure we're really deleting the guy
# we think we're deleting
newContactList = self.getContacts()
newVersionOfPersonToDelete = newContactList.getContactById(gmailContact.getId())
# Ok, now we need to ensure that gmailContact
# is the same as newVersionOfPersonToDelete
# and then we can go ahead and delete him/her
if (gmailContact == newVersionOfPersonToDelete):
return self._removeContactById(gmailContact.getId())
else:
# We have a cache coherency problem -- someone
# else now occupies this ID slot.
# TODO: Perhaps signal this in some nice way
# to the end user?
print "Unable to delete."
print "Has someone else been modifying the contacts list while we have?"
print "Old version of person:",gmailContact
print "New version of person:",newVersionOfPersonToDelete
return False
## Don't remove this. contact stas
## def _getSpecInfo(self,id):
## """
## Return all the notes data.
## This is currently not used due to the fact that it requests pages in
## a dos attack manner.
## """
## myURL =_buildURL(search='contacts',ct_id=id,c=id,\
## at=self._cookieJar._cookies['GMAIL_AT'],view='ct')
## pageData = self._retrievePage(myURL)
## myData = self._parsePage(myURL)
## #print "\nmyData form _getSpecInfo\n",myData
## rawnotes = myData['cov'][7]
## return rawnotes
class GmailContact:
    """
    A single entry from the Gmail contacts list.

    Holds an id, display name, email address, free-form notes and an
    optional `moreInfo` mapping of extended fields.
    """
    def __init__(self, name, email, *extra_args):
        """
        Build a contact from a display name and email address.

        The deprecated legacy signature ``(id, name, email[, notes])``
        is still honoured: when extra positional arguments are present
        the first two parameters are reinterpreted as ``(id, name)``.
        """
        contactId = -1
        contactNotes = ''
        if extra_args:
            # Caller used the old (id, name, email[, notes]) signature.
            contactId, name = name, email
            email = extra_args[0]
            if len(extra_args) > 1:
                contactNotes = extra_args[1]
        self.id = contactId
        self.name = name
        self.email = email
        self.notes = contactNotes
        self.moreInfo = {}
    def __str__(self):
        return "%s %s %s %s" % (self.id, self.name, self.email, self.notes)
    def __eq__(self, other):
        if not isinstance(other, GmailContact):
            return False
        mine = (self.getId(), self.getName(),
                self.getEmail(), self.getNotes())
        theirs = (other.getId(), other.getName(),
                  other.getEmail(), other.getNotes())
        return mine == theirs
    def getId(self):
        """Return the contact's id (-1 for a not-yet-saved contact)."""
        return self.id
    def getName(self):
        """Return the contact's display name."""
        return self.name
    def getEmail(self):
        """Return the contact's email address."""
        return self.email
    def getNotes(self):
        """Return the contact's notes field."""
        return self.notes
    def setNotes(self, notes):
        """
        Set the local notes field.  This does NOT change the note field
        on Gmail's end; only adding or removing contacts modifies that.
        """
        self.notes = notes
    def getMoreInfo(self):
        """Return the extended-information mapping."""
        return self.moreInfo
    def setMoreInfo(self, moreInfo):
        """
        Attach extended information, keyed by section name ('Home',
        'Work', ...).  Each value is a (code, value) pair or a sequence
        of such pairs, where the single-letter codes are:
        'i' IM, 'p' phone, 'd' company, 'a' address, 'e' email,
        'm' mobile, 'b' pager, 'f' fax, 't' title, 'o' other.
        """
        self.moreInfo = moreInfo
    def getVCard(self):
        """Return this contact formatted as a vCard 3.0 string."""
        # "\r\n" line endings per RFC 2425 section 5.8.1.
        parts = ["BEGIN:VCARD",
                 "VERSION:3.0",
                 ## Deal with multiline notes
                 ##"NOTE:%s" % self.getNotes().replace("\n","\\n"),
                 "NOTE:%s" % self.getNotes()]
        # Fake-out N by reversing whatever getName() splits into; not
        # always 'the right thing' but a reasonable compromise.
        nameParts = self.getName().split()
        nameParts.reverse()
        parts.append("N:%s" % ';'.join(nameParts))
        parts.append("FN:%s" % self.getName())
        parts.append("EMAIL;TYPE=INTERNET:%s" % self.getEmail())
        # Trailing blank line so multiple vCards can share one file.
        return "\r\n".join(parts) + "\r\nEND:VCARD\r\n\r\n"
class GmailContactList:
    """
    Wraps a full Gmail contacts list and offers lookups by id, email
    address and display name.
    """
    def __init__(self, contactList):
        self.contactList = contactList
    def __str__(self):
        return '\n'.join([str(item) for item in self.contactList])
    def getCount(self):
        """
        Returns number of contacts
        """
        return len(self.contactList)
    def getAllContacts(self):
        """
        Returns the underlying list of GmailContacts.
        """
        return self.contactList
    def getContactByName(self, name):
        """
        Return the first contact named `name`, or False when none match.
        """
        matches = self.getContactListByName(name)
        if matches:
            return matches[0]
        return False
    def getContactByEmail(self, email):
        """
        Return the first contact whose address is `email`, or False when
        none match.  (Gmail enforces unique addresses, so at most one
        contact should ever match.)
        """
        matches = self.getContactListByEmail(email)
        if matches:
            return matches[0]
        return False
    def getContactById(self, myId):
        """
        Return the first contact whose id is `myId`, or False when none
        match.  REMEMBER: ID IS A STRING.
        """
        matches = self.getContactListById(myId)
        if matches:
            return matches[0]
        return False
    def getContactListByName(self, name):
        """
        Return a (possibly empty) LIST of GmailContacts named `name`.
        """
        return [entry for entry in self.contactList
                if entry.getName() == name]
    def getContactListByEmail(self, email):
        """
        Return a (possibly empty) LIST of GmailContacts whose email is
        `email`; expected to hold at most one item.
        """
        return [entry for entry in self.contactList
                if entry.getEmail() == email]
    def getContactListById(self, myId):
        """
        Return a (possibly empty) LIST of GmailContacts whose id is
        `myId` (a STRING); expected to hold at most one item.
        """
        return [entry for entry in self.contactList
                if entry.getId() == myId]
class GmailSearchResult:
"""
"""
def __init__(self, account, search, threadsInfo):
"""
`threadsInfo` -- As returned from Gmail but unbunched.
"""
#print "\nthreadsInfo\n",threadsInfo
try:
if not type(threadsInfo[0]) is types.ListType:
threadsInfo = [threadsInfo]
except IndexError:
print "No messages found"
self._account = account
self.search = search # TODO: Turn into object + format nicely.
self._threads = []
for thread in threadsInfo:
self._threads.append(GmailThread(self, thread[0]))
def __iter__(self):
"""
"""
return iter(self._threads)
def __len__(self):
"""
"""
return len(self._threads)
def __getitem__(self,key):
"""
"""
return self._threads.__getitem__(key)
class GmailSessionState:
    """
    Snapshot of a logged-in session (account name plus cookie jar) that
    can be pickled to disk and restored later.
    """
    def __init__(self, account = None, filename = ""):
        """
        Capture state from a live `GmailAccount`, or load previously
        saved state from `filename`.
        """
        if account:
            self.state = (account.name, account._cookieJar)
            return
        if filename:
            self.state = load(open(filename, "rb"))
            return
        raise ValueError("GmailSessionState must be instantiated with " \
                         "either GmailAccount object or filename.")
    def save(self, filename):
        """
        Pickle the captured state to `filename`.
        """
        dump(self.state, open(filename, "wb"), -1)
class _LabelHandlerMixin(object):
"""
Note: Because a message id can be used as a thread id this works for
messages as well as threads.
"""
def _makeLabelList(self, labelList):
self._labels = labelList
def addLabel(self, labelName):
"""
"""
# Note: It appears this also automatically creates new labels.
result = self._account._doThreadAction(U_ADDCATEGORY_ACTION+labelName,
self)
if not self._labels:
self._makeLabelList([])
# TODO: Caching this seems a little dangerous; suppress duplicates maybe?
self._labels.append(labelName)
return result
def removeLabel(self, labelName):
"""
"""
# TODO: Check label is already attached?
# Note: An error is not generated if the label is not already attached.
result = \
self._account._doThreadAction(U_REMOVECATEGORY_ACTION+labelName,
self)
removeLabel = True
try:
self._labels.remove(labelName)
except:
removeLabel = False
pass
# If we don't check both, we might end up in some weird inconsistent state
return result and removeLabel
def getLabels(self):
return self._labels
class GmailThread(_LabelHandlerMixin):
    """
    A conversation thread as listed in a search result; its messages are
    fetched lazily on first iteration/indexing.

    Note: As far as I can tell, the "canonical" thread id is always the same
          as the id of the last message in the thread. But it appears that
          the id of any message in the thread can be used to retrieve
          the thread information.
    """
    def __init__(self, parent, threadsInfo):
        """
        `parent` -- the owning `GmailSearchResult`.
        `threadsInfo` -- one parsed thread record from the page.
        """
        # TODO Handle this better?
        self._parent = parent
        self._account = self._parent._account
        self.id = threadsInfo[T_THREADID] # TODO: Change when canonical updated?
        self.subject = threadsInfo[T_SUBJECT_HTML]
        self.snippet = threadsInfo[T_SNIPPET_HTML]
        #self.extraSummary = threadInfo[T_EXTRA_SNIPPET] #TODO: What is this?
        # TODO: Store other info?
        # Extract number of messages in thread/conversation.
        self._authors = threadsInfo[T_AUTHORS_HTML]
        self.info = threadsInfo
        try:
            # TODO: Find out if this information can be found another way...
            #       (Without another page request.)
            # The authors field ends with "(N)" when the thread holds
            # more than one message.
            self._length = int(re.search("\((\d+?)\)\Z",
                                         self._authors).group(1))
        except AttributeError,info:
            # If there's no message count then the thread only has one message.
            self._length = 1
        # TODO: Store information known about the last message (e.g. id)?
        # Filled lazily by `__iter__`/`__getitem__`.
        self._messages = []
        # Populate labels
        self._makeLabelList(threadsInfo[T_CATEGORIES])

    def __getattr__(self, name):
        """
        Dynamically dispatch some interesting thread properties
        straight out of the raw thread record.
        """
        attrs = { 'unread': T_UNREAD,
                  'star': T_STAR,
                  'date': T_DATE_HTML,
                  'authors': T_AUTHORS_HTML,
                  'flags': T_FLAGS,
                  'subject': T_SUBJECT_HTML,
                  'snippet': T_SNIPPET_HTML,
                  'categories': T_CATEGORIES,
                  'attach': T_ATTACH_HTML,
                  'matching_msgid': T_MATCHING_MSGID,
                  'extra_snippet': T_EXTRA_SNIPPET }
        if name in attrs:
            return self.info[ attrs[name] ];
        raise AttributeError("no attribute %s" % name)

    def __len__(self):
        """Number of messages in the thread (parsed from the authors field)."""
        return self._length

    def __iter__(self):
        """Iterate the thread's messages, fetching them on first use."""
        if not self._messages:
            self._messages = self._getMessages(self)
        return iter(self._messages)

    def __getitem__(self, key):
        """Index into the thread's messages; an empty list when out of range."""
        if not self._messages:
            self._messages = self._getMessages(self)
        try:
            result = self._messages.__getitem__(key)
        except IndexError:
            result = []
        return result

    def _getMessages(self, thread):
        """
        Fetch `thread`'s messages (draft and non-draft) via a
        conversation-view query; returns a list of `GmailMessage`.
        """
        # TODO: Do this better.
        # TODO: Specify the query folder using our specific search?
        items = self._account._parseSearchResult(U_QUERY_SEARCH,
                                                 view = U_CONVERSATION_VIEW,
                                                 th = thread.id,
                                                 q = "in:anywhere")
        result = []
        # TODO: Handle this better?
        # Note: This handles both draft & non-draft messages in a thread...
        for key, isDraft in [(D_MSGINFO, False), (D_DRAFTINFO, True)]:
            try:
                msgsInfo = items[key]
            except KeyError:
                # No messages of this type (e.g. draft or non-draft)
                continue
            else:
                # TODO: Handle special case of only 1 message in thread better?
                if type(msgsInfo[0]) != types.ListType:
                    msgsInfo = [msgsInfo]
                for msg in msgsInfo:
                    result += [GmailMessage(thread, msg, isDraft = isDraft)]
        return result
class GmailMessageStub(_LabelHandlerMixin):
    """
    Lightweight stand-in used where not all message information is
    known or required -- only the id and the owning account.

    NOTE: This may go away.
    """
    # TODO: Provide way to convert this to a full `GmailMessage` instance
    #       or allow `GmailMessage` to be created without all info?
    def __init__(self, id = None, _account = None):
        """Record the message id and owning account, if supplied."""
        self.id = id
        self._account = _account
class GmailMessage(object):
    """
    A single message within a `GmailThread`; the raw source is fetched
    lazily via the `source` property.
    """
    def __init__(self, parent, msgData, isDraft = False):
        """
        `parent` -- the owning `GmailThread`.
        `msgData` -- can be from either D_MSGINFO or D_DRAFTINFO.
        """
        # TODO: Automatically detect if it's a draft or not?
        # TODO Handle this better?
        self._parent = parent
        self._account = self._parent._account
        self.author = msgData[MI_AUTHORFIRSTNAME]
        self.id = msgData[MI_MSGID]
        self.number = msgData[MI_NUM]
        self.subject = msgData[MI_SUBJECT]
        self.cc = msgData[MI_CC]
        self.bcc = msgData[MI_BCC]
        self.sender = msgData[MI_AUTHOREMAIL]
        self.attachments = [GmailAttachment(self, attachmentInfo)
                            for attachmentInfo in msgData[MI_ATTACHINFO]]
        # TODO: Populate additional fields & cache...(?)
        # TODO: Handle body differently if it's from a draft?
        self.isDraft = isDraft
        # Raw message text; fetched on first access of `source`.
        self._source = None

    def _getSource(self):
        """
        Retrieve (and cache) the raw message source from the server.
        """
        if not self._source:
            # TODO: Do this more nicely...?
            # TODO: Strip initial white space & fix up last line ending
            #       to make it legal as per RFC?
            self._source = self._account.getRawMessage(self.id)
        return self._source

    # Read-only lazy access to the raw message text.
    source = property(_getSource, doc = "")
class GmailAttachment:
    """
    An attachment belonging to a `GmailMessage`; its bytes are fetched
    lazily via the `content` property.
    """
    def __init__(self, parent, attachmentInfo):
        """
        `parent` -- the owning `GmailMessage`.
        `attachmentInfo` -- parsed attachment record (id, filename,
                            mime type, size).
        """
        # TODO Handle this better?
        self._parent = parent
        self._account = self._parent._account
        self.id = attachmentInfo[A_ID]
        self.filename = attachmentInfo[A_FILENAME]
        self.mimetype = attachmentInfo[A_MIMETYPE]
        self.filesize = attachmentInfo[A_FILESIZE]
        # Attachment data; fetched on first access of `content`.
        self._content = None

    def _getContent(self):
        """
        Retrieve (and cache) the attachment content from the server.
        """
        if not self._content:
            # TODO: Do this a more nicely...?
            self._content = self._account._retrievePage(
                _buildURL(view=U_ATTACHMENT_VIEW, disp="attd",
                          attid=self.id, th=self._parent._parent.id))
        return self._content

    # Read-only lazy access to the attachment bytes.
    content = property(_getContent, doc = "")

    def _getFullId(self):
        """
        Returns the "full path"/"full id" of the attachment. (Used
        to refer to the file when forwarding.)
        The id is of the form: "<thread_id>_<msg_id>_<attachment_id>"
        """
        return "%s_%s_%s" % (self._parent._parent.id,
                             self._parent.id,
                             self.id)

    _fullId = property(_getFullId, doc = "")
class GmailComposedMessage:
    """
    Plain value object describing an outgoing message.

    `filenames` -- list of the file paths of the files to attach.
    `files` -- list of objects implementing sub-set of
               `email.Message.Message` interface (`get_filename`,
               `get_content_type`, `get_payload`). This is to
               allow use of payloads from Message instances.
    TODO: Change this to be simpler class we define ourselves?
    """
    def __init__(self, to, subject, body, cc = None, bcc = None,
                 filenames = None, files = None):
        """Store the addressing, content and attachment details."""
        self.to = to
        self.subject = subject
        self.body = body
        self.cc = cc
        self.bcc = bcc
        self.filenames = filenames
        self.files = files
if __name__ == "__main__":
    # Interactive demo: log in, show quota, then repeatedly list the
    # threads/messages of a chosen folder or label until Ctrl-C.
    import sys
    from getpass import getpass
    try:
        name = sys.argv[1]
    except IndexError:
        name = raw_input("Gmail account name: ")
    pw = getpass("Password: ")
    ga = GmailAccount(name, pw)
    print "\nPlease wait, logging in..."
    try:
        ga.login()
    except GmailLoginFailure,e:
        print "\nLogin failed. (%s)" % e.message
    else:
        print "Login successful.\n"
        # TODO: Use properties instead?
        quotaInfo = ga.getQuotaInfo()
        quotaMbUsed = quotaInfo[QU_SPACEUSED]
        quotaMbTotal = quotaInfo[QU_QUOTA]
        quotaPercent = quotaInfo[QU_PERCENT]
        print "%s of %s used. (%s)\n" % (quotaMbUsed, quotaMbTotal, quotaPercent)
        # Offer the standard folders plus the account's own labels.
        searches = STANDARD_FOLDERS + ga.getLabelNames()
        while 1:
            try:
                print "Select folder or label to list: (Ctrl-C to exit)"
                for optionId, optionName in enumerate(searches):
                    print " %d. %s" % (optionId, optionName)
                name = searches[int(raw_input("Choice: "))]
                if name in STANDARD_FOLDERS:
                    result = ga.getMessagesByFolder(name, True)
                else:
                    result = ga.getMessagesByLabel(name, True)
                if not len(result):
                    print "No threads found in `%s`." % name
                    break
                tot = len(result)
                i = 0
                for thread in result:
                    print "%s messages in thread" % len(thread)
                    print thread.id, len(thread), thread.subject
                    for msg in thread:
                        print "\n ", msg.id, msg.number, msg.author,msg.subject
                        # Just as an example of other usefull things
                        #print " ", msg.cc, msg.bcc,msg.sender
                        i += 1
                    print
                print "number of threads:",tot
                print "number of messages:",i
            except KeyboardInterrupt:
                break
    print "\n\nDone."
| Python |
#!/usr/bin/env python
#
# libgmail -- Gmail access via Python
#
## To get the version number of the available libgmail version.
## Reminder: add date before next release. This attribute is also
## used in the setup script.
Version = '0.1.4' # (Feb 2006)
# Original author: follower@myrealbox.com
# Maintainers: Waseem (wdaher@mit.edu) and Stas Z (stas@linux.isbeter.nl)
#
# Contacts support added by wdaher@mit.edu and Stas Z
# (with massive initial help from
# Adrian Holovaty's 'gmail.py'
# and the Johnvey Gmail API)
#
# License: GPL 2.0
#
# Thanks:
# * Live HTTP Headers <http://livehttpheaders.mozdev.org/>
# * Gmail <http://gmail.google.com/>
# * Google Blogoscoped <http://blog.outer-court.com/>
# * ClientCookie <http://wwwsearch.sourceforge.net/ClientCookie/>
# (There when I needed it...)
# * The *first* big G. :-)
#
# NOTE:
# You should ensure you are permitted to use this script before using it
# to access Google's Gmail servers.
#
#
# Gmail Implementation Notes
# ==========================
#
# * Folders contain message threads, not individual messages. At present I
# do not know any way to list all messages without processing thread list.
#
LG_DEBUG=0  # Set to 1 for verbose cookie tracing.
from lgconstants import *
import os,pprint
import re
import urllib
import urllib2
import mimetypes
import types
from cPickle import load, dump
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart

# Gmail endpoints: the account login form and the mail application root.
URL_LOGIN = "https://www.google.com/accounts/ServiceLoginBoxAuth"
URL_GMAIL = "https://mail.google.com/mail/"

# TODO: Get these on the fly?
STANDARD_FOLDERS = [U_INBOX_SEARCH, U_STARRED_SEARCH,
                    U_ALL_SEARCH, U_DRAFTS_SEARCH,
                    U_SENT_SEARCH, U_SPAM_SEARCH]

# Constants with names not from the Gmail Javascript:
# TODO: Move to `lgconstants.py`?
U_SAVEDRAFT_VIEW = "sd"   # "view" value used when saving a draft.
D_DRAFTINFO = "di"        # Response key holding draft message info.
# NOTE: All other DI_* field offsets seem to match the MI_* field offsets
DI_BODY = 19              # Offset of the body field within draft info.

versionWarned = False # If the Javascript version is different have we
                      # warned about it?

# Matches the D(...) Javascript data calls embedded in Gmail pages.
RE_SPLIT_PAGE_CONTENT = re.compile("D\((.*?)\);", re.DOTALL)
class GmailError(Exception):
    """
    Raised for gmail-specific failures -- in particular a failure to
    log in and a failure to parse responses.
    """
def _parsePage(pageContent):
    """
    Parse the supplied HTML page and extract useful information from
    the embedded Javascript.  Returns a dict mapping each D(...) record
    name to a list of its value lists.
    """
    lines = pageContent.splitlines()
    # Keep only the lines that belong to the embedded D(...) data calls.
    data = '\n'.join([x for x in lines if x and x[0] in ['D', ')', ',', ']']])
    # Collapse empty Javascript arguments (",,") so the data is valid
    # as Python; applied twice to catch overlapping runs of commas.
    data = data.replace(',,',',').replace(',,',',')
    result = []
    # Execute the data calls with `D` collecting each argument; builtins
    # are disabled as a (weak) sandboxing measure.
    try:
        exec data in {'__builtins__': None}, {'D': lambda x: result.append(x)}
    except SyntaxError,info:
        print info
        raise GmailError, 'Failed to parse data returned from gmail.'
    items = result
    itemsDict = {}
    namesFoundTwice = []  # (currently unused)
    for item in items:
        name = item[0]
        try:
            parsedValue = item[1:]
        except Exception:
            parsedValue = ['']
        if itemsDict.has_key(name):
            # This handles the case where a name key is used more than
            # once (e.g. mail items, mail body etc) and automatically
            # places the values into list.
            # TODO: Check this actually works properly, it's early... :-)
            if len(parsedValue) and type(parsedValue[0]) is types.ListType:
                for item in parsedValue:
                    itemsDict[name].append(item)
            else:
                itemsDict[name].append(parsedValue)
        else:
            if len(parsedValue) and type(parsedValue[0]) is types.ListType:
                itemsDict[name] = []
                for item in parsedValue:
                    itemsDict[name].append(item)
            else:
                itemsDict[name] = [parsedValue]
    return itemsDict
def _splitBunches(infoItems):# Is this still needed ?? Stas
"""
Utility to help make it easy to iterate over each item separately,
even if they were bunched on the page.
"""
result= []
# TODO: Decide if this is the best approach.
for group in infoItems:
if type(group) == tuple:
result.extend(group)
else:
result.append(group)
return result
class CookieJar:
    """
    A rough cookie handler, intended to only refer to one domain.
    Does no expiry or anything like that.

    (The only reason this is here is so I don't have to require
    the `ClientCookie` package.)
    """
    def __init__(self):
        """Start with an empty name -> value cookie store."""
        self._cookies = {}

    def extractCookies(self, response, nameFilter = None):
        """
        Pull cookies out of `response`'s Set-Cookie headers and store
        them, keeping only names in `nameFilter` when one is given.
        """
        # TODO: Do this all more nicely?
        for cookie in response.headers.getheaders('Set-Cookie'):
            # Split "name=value; attrs..."; the list padding guards
            # against a header without an "=".
            name, value = (cookie.split("=", 1) + [""])[:2]
            if LG_DEBUG: print "Extracted cookie `%s`" % (name)
            if not nameFilter or name in nameFilter:
                self._cookies[name] = value.split(";")[0]
                if LG_DEBUG: print "Stored cookie `%s` value `%s`" % (name, self._cookies[name])

    def addCookie(self, name, value):
        """Manually store a single cookie."""
        self._cookies[name] = value

    def setCookies(self, request):
        """Attach all stored cookies to `request` as one Cookie header."""
        request.add_header('Cookie',
                           ";".join(["%s=%s" % (k,v)
                                     for k,v in self._cookies.items()]))
def _buildURL(**kwargs):
    """
    Return a Gmail URL with `kwargs` encoded as its query string.
    """
    query = urllib.urlencode(kwargs)
    return "%s?%s" % (URL_GMAIL, query)
def _paramsToMime(params, filenames, files):
    """
    Build a multipart/form-data MIME message from the form `params`
    plus any attachments given as file paths (`filenames`) or
    `email.Message.Message`-like objects (`files`).
    """
    mimeMsg = MIMEMultipart("form-data")
    for name, value in params.iteritems():
        mimeItem = MIMEText(value)
        mimeItem.add_header("Content-Disposition", "form-data", name=name)
        # TODO: Handle this better...?
        # Drop per-part headers the form submission must not carry.
        for hdr in ['Content-Type','MIME-Version','Content-Transfer-Encoding']:
            del mimeItem[hdr]
        mimeMsg.attach(mimeItem)
    if filenames or files:
        filenames = filenames or []
        files = files or []
        for idx, item in enumerate(filenames + files):
            # TODO: This is messy, tidy it...
            if isinstance(item, str):
                # We assume it's a file path...
                filename = item
                contentType = mimetypes.guess_type(filename)[0]
                payload = open(filename, "rb").read()
            else:
                # We assume it's an `email.Message.Message` instance...
                # TODO: Make more use of the pre-encoded information?
                filename = item.get_filename()
                contentType = item.get_content_type()
                payload = item.get_payload(decode=True)
            if not contentType:
                contentType = "application/octet-stream"
            mimeItem = MIMEBase(*contentType.split("/"))
            mimeItem.add_header("Content-Disposition", "form-data",
                                name="file%s" % idx, filename=filename)
            # TODO: Encode the payload?
            mimeItem.set_payload(payload)
            # TODO: Handle this better...?
            for hdr in ['MIME-Version','Content-Transfer-Encoding']:
                del mimeItem[hdr]
            mimeMsg.attach(mimeItem)
    del mimeMsg['MIME-Version']
    return mimeMsg
class GmailLoginFailure(Exception):
    """
    Raised whenever the login process fails -- could be wrong
    username/password, or Gmail service error, for example.

    The failure text is available as `e.message`, or via `str(e)`
    (which shows the repr of the message).
    """
    def __init__(self,message):
        """Store the human-readable failure description."""
        self.message = message

    def __str__(self):
        return repr(self.message)
class GmailAccount:
"""
"""
def __init__(self, name = "", pw = "", state = None):
    """
    Create an account either from credentials (`name` + `pw`) or from
    a previously saved `GmailSessionState` (`state`); raises ValueError
    when neither combination is supplied.
    """
    # TODO: Change how all this is handled?
    if name and pw:
        self.name = name
        self._pw = pw
        self._cookieJar = CookieJar()
    elif state:
        # TODO: Check for stale state cookies?
        self.name, self._cookieJar = state.state
    else:
        raise ValueError("GmailAccount must be instantiated with " \
                         "either GmailSessionState object or name " \
                         "and password.")
    # Caches populated as a side effect of `_parsePage`.
    self._cachedQuotaInfo = None
    self._cachedLabelNames = None
def login(self):
    """
    Log in with the stored credentials and collect the session cookies.
    Raises `GmailLoginFailure` when the expected post-login redirect is
    missing (typically a wrong username/password).
    """
    # TODO: Throw exception if we were instantiated with state?
    data = urllib.urlencode({'continue': URL_GMAIL,
                             'Email': self.name,
                             'Passwd': self._pw,
                             })
    headers = {'Host': 'www.google.com',
               'User-Agent': 'User-Agent: Mozilla/5.0 (compatible;)'}
    req = urllib2.Request(URL_LOGIN, data=data, headers=headers)
    pageData = self._retrievePage(req)
    # TODO: Tidy this up?
    # This requests the page that provides the required "GV" cookie.
    RE_PAGE_REDIRECT = 'CheckCookie\?continue=([^"]+)'
    # TODO: Catch more failure exceptions here...?
    try:
        redirectURL = urllib.unquote(re.search(RE_PAGE_REDIRECT,
                                               pageData).group(1))
    except AttributeError:
        # No redirect in the page -- login was rejected.
        raise GmailLoginFailure("Login failed. (Wrong username/password?)")
    # We aren't concerned with the actual content of this page,
    # just the cookie that is returned with it.
    pageData = self._retrievePage(redirectURL)
def _retrievePage(self, urlOrRequest):
    """
    Fetch `urlOrRequest` (a URL string or `urllib2.Request`) with the
    session cookies attached; returns the page body, or None on an
    HTTP error.
    """
    if not isinstance(urlOrRequest, urllib2.Request):
        req = urllib2.Request(urlOrRequest)
    else:
        req = urlOrRequest
    self._cookieJar.setCookies(req)
    try:
        resp = urllib2.urlopen(req)
    except urllib2.HTTPError,info:
        print info
        return None
    pageData = resp.read()
    # Extract cookies here
    self._cookieJar.extractCookies(resp)
    # TODO: Enable logging of page data for debugging purposes?
    return pageData
def _parsePage(self, urlOrRequest):
    """
    Retrieve & then parse the requested page content.  Quota usage and
    label names found in the response are cached as a side effect.
    """
    items = _parsePage(self._retrievePage(urlOrRequest))
    # Automatically cache some things like quota usage.
    # TODO: Cache more?
    # TODO: Expire cached values?
    # TODO: Do this better.
    try:
        self._cachedQuotaInfo = items[D_QUOTA]
    except KeyError:
        pass
    #pprint.pprint(items)
    try:
        self._cachedLabelNames = [category[CT_NAME] for category in items[D_CATEGORIES][0]]
    except KeyError:
        pass
    return items
def _parseSearchResult(self, searchType, start = 0, **kwargs):
    """
    Fetch and parse one page of thread-list results for `searchType`
    starting at offset `start`; extra query args pass through (and may
    override the defaults, e.g. the view).
    """
    query = {U_SEARCH: searchType, U_START: start, U_VIEW: U_THREADLIST_VIEW}
    query.update(kwargs)
    return self._parsePage(_buildURL(**query))
def _parseThreadSearch(self, searchType, allPages = False, **kwargs):
    """
    Run a thread search, optionally following pagination until every
    thread has been collected; returns a `GmailSearchResult`.

    Only works for thread-based results at present. # TODO: Change this?
    """
    start = 0
    tot = 0  # (unused)
    threadsInfo = []
    # Option to get *all* threads if multiple pages are used.
    # Note: `threadListSummary` is only read from the second iteration
    # on, after the first page's response has defined it.
    while (start == 0) or (allPages and
                           len(threadsInfo) < threadListSummary[TS_TOTAL]):
        items = self._parseSearchResult(searchType, start, **kwargs)
        #TODO: Handle single & zero result case better? Does this work?
        try:
            threads = items[D_THREAD]
        except KeyError:
            break
        else:
            for th in threads:
                # Normalise a lone (unbunched) thread record to a list.
                if not type(th[0]) is types.ListType:
                    th = [th]
                threadsInfo.append(th)
            # TODO: Check if the total or per-page values have changed?
            threadListSummary = items[D_THREADLIST_SUMMARY][0]
            threadsPerPage = threadListSummary[TS_NUM]
            start += threadsPerPage
    # TODO: Record whether or not we retrieved all pages..?
    return GmailSearchResult(self, (searchType, kwargs), threadsInfo)
def _retrieveJavascript(self, version = ""):
    """
    Fetch the Gmail Javascript page source.

    Note: `version` seems to be ignored.
    """
    url = _buildURL(view = U_PAGE_VIEW, name = "js", ver = version)
    return self._retrievePage(url)
def getMessagesByFolder(self, folderName, allPages = False):
    """
    Folders contain conversation/message threads.

    `folderName` -- As set in Gmail interface.

    Returns a `GmailSearchResult` instance.

    *** TODO: Change all "getMessagesByX" to "getThreadsByX"? ***
    """
    return self._parseThreadSearch(folderName, allPages = allPages)
def getMessagesByQuery(self, query, allPages = False):
    """
    Run an arbitrary Gmail search `query`.

    Returns a `GmailSearchResult` instance.
    """
    return self._parseThreadSearch(U_QUERY_SEARCH, q = query,
                                   allPages = allPages)
def getQuotaInfo(self, refresh = False):
    """
    Return MB used, Total MB and percentage used.
    """
    # TODO: Change this to a property.
    if refresh or not self._cachedQuotaInfo:
        # TODO: Handle this better...
        # Any folder fetch repopulates the quota cache as a side effect.
        self.getMessagesByFolder(U_INBOX_SEARCH)
    return self._cachedQuotaInfo[0][:3]
def getLabelNames(self, refresh = False):
    """
    Return the account's label names, refreshing the cache on demand.
    """
    # TODO: Change this to a property?
    if refresh or not self._cachedLabelNames:
        # TODO: Handle this better...
        # Any folder fetch repopulates the label cache as a side effect.
        self.getMessagesByFolder(U_INBOX_SEARCH)
    return self._cachedLabelNames
def getMessagesByLabel(self, label, allPages = False):
    """
    Return a `GmailSearchResult` with the threads carrying `label`.
    """
    return self._parseThreadSearch(U_CATEGORY_SEARCH,
                                   cat=label, allPages = allPages)
def getRawMessage(self, msgId):
    """
    Return the original (raw) source of message `msgId`.
    """
    # U_ORIGINAL_MESSAGE_VIEW seems the only one that returns a page.
    # All the other U_* results in a 404 exception. Stas
    PageView = U_ORIGINAL_MESSAGE_VIEW
    return self._retrievePage(
        _buildURL(view=PageView, th=msgId))
def getUnreadMessages(self):
    """
    Return a `GmailSearchResult` containing all unread threads.
    """
    return self._parseThreadSearch(U_QUERY_SEARCH,
                                   q = "is:" + U_AS_SUBSET_UNREAD)
def getUnreadMsgCount(self):
    """
    Return the number of unread messages (0 when the thread-list
    summary is absent from the response).
    """
    page = self._parseSearchResult(U_QUERY_SEARCH,
                                   q = "is:" + U_AS_SUBSET_UNREAD)
    try:
        return page[D_THREADLIST_SUMMARY][0][TS_TOTAL_MSGS]
    except KeyError:
        return 0
def _getActionToken(self):
    """
    Return the action-token cookie value; if it is not present yet,
    force a label-name refresh (which re-fetches the inbox and thereby
    collects the cookie) and try again.
    """
    try:
        return self._cookieJar._cookies[ACTION_TOKEN_COOKIE]
    except KeyError:
        self.getLabelNames(True)
        return self._cookieJar._cookies[ACTION_TOKEN_COOKIE]
def sendMessage(self, msg, asDraft = False, _extraParams = None):
    """
    `msg` -- `GmailComposedMessage` instance.

    `_extraParams` -- Dictionary containing additional parameters
                      to put into POST message. (Not officially
                      for external use, more to make feature
                      additional a little easier to play with.)

    Note: Now returns `GmailMessageStub` instance with populated
          `id` (and `_account`) fields on success or None on failure.
    """
    # TODO: Handle drafts separately?
    # `asDraft` selects the save-draft view instead of the send view.
    params = {U_VIEW: [U_SENDMAIL_VIEW, U_SAVEDRAFT_VIEW][asDraft],
              U_REFERENCED_MSG: "",
              U_THREAD: "",
              U_DRAFT_MSG: "",
              U_COMPOSEID: "1",
              U_ACTION_TOKEN: self._getActionToken(),
              U_COMPOSE_TO: msg.to,
              U_COMPOSE_CC: msg.cc,
              U_COMPOSE_BCC: msg.bcc,
              "subject": msg.subject,
              "msgbody": msg.body,
              }
    if _extraParams:
        params.update(_extraParams)
    # Amongst other things, I used the following post to work out this:
    #   <http://groups.google.com/groups?
    #    selm=mailman.1047080233.20095.python-list%40python.org>
    mimeMessage = _paramsToMime(params, msg.filenames, msg.files)
    #### TODO: Ughh, tidy all this up & do it better...
    ## This horrible mess is here for two main reasons:
    ##  1. The `Content-Type` header (which also contains the boundary
    ##     marker) needs to be extracted from the MIME message so
    ##     we can send it as the request `Content-Type` header instead.
    ##  2. It seems the form submission needs to use "\r\n" for new
    ##     lines instead of the "\n" returned by `as_string()`.
    ##     I tried changing the value of `NL` used by the `Generator` class
    ##     but it didn't work so I'm doing it this way until I figure
    ##     out how to do it properly. Of course, first try, if the payloads
    ##     contained "\n" sequences they got replaced too, which corrupted
    ##     the attachments. I could probably encode the submission,
    ##     which would probably be nicer, but in the meantime I'm kludging
    ##     this workaround that replaces all non-text payloads with a
    ##     marker, changes all "\n" to "\r\n" and finally replaces the
    ##     markers with the original payloads.
    ## Yeah, I know, it's horrible, but hey it works doesn't it? If you've
    ## got a problem with it, fix it yourself & give me the patch!
    ##
    origPayloads = {}
    FMT_MARKER = "&&&&&&%s&&&&&&"
    for i, m in enumerate(mimeMessage.get_payload()):
        if not isinstance(m, MIMEText): #Do we care if we change text ones?
            origPayloads[i] = m.get_payload()
            m.set_payload(FMT_MARKER % i)
    mimeMessage.epilogue = ""
    msgStr = mimeMessage.as_string()
    # First header block line carries the Content-Type + boundary.
    contentTypeHeader, data = msgStr.split("\n\n", 1)
    contentTypeHeader = contentTypeHeader.split(":", 1)
    data = data.replace("\n", "\r\n")
    # Restore the untouched binary payloads over their markers.
    for k,v in origPayloads.iteritems():
        data = data.replace(FMT_MARKER % k, v)
    ####
    req = urllib2.Request(_buildURL(), data = data)
    req.add_header(*contentTypeHeader)
    items = self._parsePage(req)
    # TODO: Check composeid?
    # Sometimes we get the success message
    # but the id is 0 and no message is sent
    result = None
    resultInfo = items[D_SENDMAIL_RESULT][0]
    if resultInfo[SM_SUCCESS]:
        result = GmailMessageStub(id = resultInfo[SM_NEWTHREADID],
                                  _account = self)
    return result
def trashMessage(self, msg):
    """
    Move the single message `msg` to the trash; True on success.
    """
    # TODO: Decide if we should make this a method of `GmailMessage`.
    # TODO: Should we check we have been given a `GmailMessage` instance?
    query = {
        U_ACTION: U_DELETEMESSAGE_ACTION,
        U_ACTION_MESSAGE: msg.id,
        U_ACTION_TOKEN: self._getActionToken(),
        }
    response = self._parsePage(_buildURL(**query))
    # TODO: Mark as trashed on success?
    return response[D_ACTION_RESULT][0][AR_SUCCESS] == 1
def _doThreadAction(self, actionId, thread):
    """
    Apply server action `actionId` to `thread`; True on success.
    """
    # TODO: Decide if we should make this a method of `GmailThread`.
    # TODO: Should we check we have been given a `GmailThread` instance?
    query = {
        U_SEARCH: U_ALL_SEARCH, #TODO:Check this search value always works.
        U_VIEW: U_UPDATE_VIEW,
        U_ACTION: actionId,
        U_ACTION_THREAD: thread.id,
        U_ACTION_TOKEN: self._getActionToken(),
        }
    response = self._parsePage(_buildURL(**query))
    return response[D_ACTION_RESULT][0][AR_SUCCESS] == 1
def trashThread(self, thread):
    """
    Move the whole `thread` to the trash; True on success.
    """
    # TODO: Decide if we should make this a method of `GmailThread`.
    # TODO: Should we check we have been given a `GmailThread` instance?
    # TODO: Mark as trashed on success?
    return self._doThreadAction(U_MARKTRASH_ACTION, thread)
def _createUpdateRequest(self, actionId): #extraData):
    """
    Helper that builds a POST `Request` for an update-view action
    `actionId`.  (An `extraData` parameter was planned but is
    currently commented out.)

    Returns populated `Request` instance.
    """
    url = _buildURL(**{U_VIEW: U_UPDATE_VIEW})
    body = urllib.urlencode({
        U_ACTION: actionId,
        U_ACTION_TOKEN: self._getActionToken(),
        })
    #data.update(extraData)
    return urllib2.Request(url, data = body)
# TODO: Extract additional common code from handling of labels?
def createLabel(self, labelName):
"""
"""
req = self._createUpdateRequest(U_CREATECATEGORY_ACTION + labelName)
# Note: Label name cache is updated by this call as well. (Handy!)
items = self._parsePage(req)
print items
return (items[D_ACTION_RESULT][0][AR_SUCCESS] == 1)
def deleteLabel(self, labelName):
    """
    Delete the label `labelName`; returns True on success.
    """
    # TODO: Check labelName exits?
    request = self._createUpdateRequest(U_DELETECATEGORY_ACTION + labelName)
    # Note: Label name cache is updated by this call as well. (Handy!)
    response = self._parsePage(request)
    return response[D_ACTION_RESULT][0][AR_SUCCESS] == 1
def renameLabel(self, oldLabelName, newLabelName):
    """
    Rename label `oldLabelName` to `newLabelName`; True on success.
    """
    # TODO: Check oldLabelName exits?
    action = "%s%s^%s" % (U_RENAMECATEGORY_ACTION,
                          oldLabelName, newLabelName)
    request = self._createUpdateRequest(action)
    # Note: Label name cache is updated by this call as well. (Handy!)
    response = self._parsePage(request)
    return response[D_ACTION_RESULT][0][AR_SUCCESS] == 1
def storeFile(self, filename, label = None):
    """
    Store `filename` in the account as an attachment on a draft
    message (optionally labelled with `label`); returns the draft
    message stub, or None on failure.
    """
    # TODO: Handle files larger than single attachment size.
    # TODO: Allow file data objects to be supplied?
    FILE_STORE_VERSION = "FSV_01"
    FILE_STORE_SUBJECT_TEMPLATE = "%s %s" % (FILE_STORE_VERSION, "%s")
    subject = FILE_STORE_SUBJECT_TEMPLATE % os.path.basename(filename)
    draft = self.sendMessage(
        GmailComposedMessage(to="", subject=subject, body="",
                             filenames=[filename]),
        asDraft = True)
    if draft and label:
        draft.addLabel(label)
    return draft
## CONTACTS SUPPORT
def getContacts(self):
    """
    Return a GmailContactList object holding every contact in the
    address book as GmailContacts.
    """
    # pnl = a is necessary to get *all* contacts
    page = self._parsePage(_buildURL(view='cl',search='contacts', pnl='a'))
    # The parsed response keeps the address data under the 'cl' key;
    # contact entries are the 'ce' records.
    contacts = []
    for entry in page['cl']:
        if len(entry) >= 6 and entry[0]=='ce':
            contacts.append(GmailContact(entry[1], entry[2], entry[4], entry[5]))
    # (Fetching the full notes per contact via _getSpecInfo is disabled
    # -- it issues one page request per contact; see the commented-out
    # code near removeContact.)
    return GmailContactList(contacts)
def addContact(self, myContact, *extra_args):
"""
Attempts to add a GmailContact to the gmail
address book. Returns true if successful,
false otherwise
Please note that after version 0.1.3.3,
addContact takes one argument of type
GmailContact, the contact to add.
The old signature of:
addContact(name, email, notes='') is still
supported, but deprecated.
"""
if len(extra_args) > 0:
# The user has passed in extra arguments
# He/she is probably trying to invoke addContact
# using the old, deprecated signature of:
# addContact(self, name, email, notes='')
# Build a GmailContact object and use that instead
(name, email) = (myContact, extra_args[0])
if len(extra_args) > 1:
notes = extra_args[1]
else:
notes = ''
myContact = GmailContact(-1, name, email, notes)
# TODO: In the ideal world, we'd extract these specific
# constants into a nice constants file
# This mostly comes from the Johnvey Gmail API,
# but also from the gmail.py cited earlier
myURL = _buildURL(view='up')
myDataList = [ ('act','ec'),
('at', self._cookieJar._cookies['GMAIL_AT']), # Cookie data?
('ct_nm', myContact.getName()),
('ct_em', myContact.getEmail()),
('ct_id', -1 )
]
notes = myContact.getNotes()
if notes != '':
myDataList.append( ('ctf_n', notes) )
validinfokeys = [
'i', # IM
'p', # Phone
'd', # Company
'a', # ADR
'e', # Email
'm', # Mobile
'b', # Pager
'f', # Fax
't', # Title
'o', # Other
]
moreInfo = myContact.getMoreInfo()
ctsn_num = -1
if moreInfo != {}:
for ctsf,ctsf_data in moreInfo.items():
ctsn_num += 1
# data section header, WORK, HOME,...
sectionenum ='ctsn_%02d' % ctsn_num
myDataList.append( ( sectionenum, ctsf ))
ctsf_num = -1
if isinstance(ctsf_data[0],str):
ctsf_num += 1
# data section
subsectionenum = 'ctsf_%02d_%02d_%s' % (ctsn_num, ctsf_num, ctsf_data[0]) # ie. ctsf_00_01_p
myDataList.append( (subsectionenum, ctsf_data[1]) )
else:
for info in ctsf_data:
if validinfokeys.count(info[0]) > 0:
ctsf_num += 1
# data section
subsectionenum = 'ctsf_%02d_%02d_%s' % (ctsn_num, ctsf_num, info[0]) # ie. ctsf_00_01_p
myDataList.append( (subsectionenum, info[1]) )
myData = urllib.urlencode(myDataList)
request = urllib2.Request(myURL,
data = myData)
pageData = self._retrievePage(request)
if pageData.find("The contact was successfully added") == -1:
print pageData
if pageData.find("already has the email address") > 0:
raise Exception("Someone with same email already exists in Gmail.")
elif pageData.find("https://www.google.com/accounts/ServiceLogin"):
raise Exception("Login has expired.")
return False
else:
return True
def _removeContactById(self, id):
    """
    Attempt to remove the contact that occupies id `id` from the gmail
    address book; returns True if successful, False otherwise.

    This is a little dangerous since you don't really know who you're
    deleting.  Don't call this method -- use removeContact instead,
    which verifies the id still belongs to the contact you intend to
    delete.
    """
    myURL = _buildURL(search='contacts', ct_id = id, c=id, act='dc', at=self._cookieJar._cookies['GMAIL_AT'], view='up')
    pageData = self._retrievePage(myURL)
    return pageData.find("The contact has been deleted") != -1
def removeContact(self, gmailContact):
    """
    Attempts to remove the GmailContact passed in.
    Returns True if successful, False otherwise.
    """
    # Let's re-fetch the contact list to make
    # sure we're really deleting the guy
    # we think we're deleting
    newContactList = self.getContacts()
    newVersionOfPersonToDelete = newContactList.getContactById(gmailContact.getId())
    # Ok, now we need to ensure that gmailContact
    # is the same as newVersionOfPersonToDelete
    # and then we can go ahead and delete him/her
    if (gmailContact == newVersionOfPersonToDelete):
        return self._removeContactById(gmailContact.getId())
    else:
        # We have a cache coherency problem -- someone
        # else now occupies this ID slot.
        # TODO: Perhaps signal this in some nice way
        #       to the end user?
        print "Unable to delete."
        print "Has someone else been modifying the contacts list while we have?"
        print "Old version of person:",gmailContact
        print "New version of person:",newVersionOfPersonToDelete
        return False

## Don't remove this. contact stas
##    def _getSpecInfo(self,id):
##        """
##        Return all the notes data.
##        This is currently not used due to the fact that it requests pages in
##        a dos attack manner.
##        """
##        myURL =_buildURL(search='contacts',ct_id=id,c=id,\
##                at=self._cookieJar._cookies['GMAIL_AT'],view='ct')
##        pageData = self._retrievePage(myURL)
##        myData = self._parsePage(myURL)
##        #print "\nmyData form _getSpecInfo\n",myData
##        rawnotes = myData['cov'][7]
##        return rawnotes
class GmailContact:
    """
    A single entry of a Gmail Contacts list.
    """
    def __init__(self, name, email, *extra_args):
        """
        Create a new GmailContact (commit it to the Gmail addressbook
        with addContact, for example).  Consider calling setNotes() and
        setMoreInfo() to attach extended information.

        Also accepts the legacy call form
            GmailContact(id, name, email[, notes])
        for backward compatibility with the old constructor signature.
        """
        contactId = -1
        contactNotes = ''
        if len(extra_args) > 0:
            # Legacy signature: shift every argument one position right.
            contactId, name = name, email
            email = extra_args[0]
            if len(extra_args) > 1:
                contactNotes = extra_args[1]
        self.id = contactId
        self.name = name
        self.email = email
        self.notes = contactNotes
        self.moreInfo = {}

    def __str__(self):
        return "%s %s %s %s" % (self.id, self.name, self.email, self.notes)

    def __eq__(self, other):
        # Two contacts are equal when id, name, email and notes all match;
        # anything that is not a GmailContact compares unequal.
        if not isinstance(other, GmailContact):
            return False
        return (self.getId() == other.getId() and
                self.getName() == other.getName() and
                self.getEmail() == other.getEmail() and
                self.getNotes() == other.getNotes())

    def getId(self):
        return self.id

    def getName(self):
        return self.name

    def getEmail(self):
        return self.email

    def getNotes(self):
        return self.notes

    def setNotes(self, notes):
        """
        Sets the notes field for this GmailContact.
        Note that this does NOT change the note field on Gmail's end;
        only adding or removing contacts modifies them.
        """
        self.notes = notes

    def getMoreInfo(self):
        return self.moreInfo

    def setMoreInfo(self, moreInfo):
        """
        Attach extended contact information.

        `moreInfo` maps a section name to a tuple of (key, value) pairs,
        where the keys are Gmail's single-letter field codes::

            'i' = IM        'p' = Phone     'd' = Company
            'a' = ADR       'e' = Email     'm' = Mobile
            'b' = Pager     'f' = Fax       't' = Title
            'o' = Other

        Simple example::

            moreInfo = {'Home': ( ('a','852 W Barry'),
                                  ('p', '1-773-244-1980'),
                                  ('i', 'aim:brianray34') ) }

        Complex example::

            moreInfo = {
                'Personal': (('e', 'Home Email'),
                             ('f', 'Home Fax')),
                'Work': (('d', 'Sample Company'),
                         ('t', 'Job Title'),
                         ('o', 'Department: Department1'),
                         ('o', 'Department: Department2'),
                         ('p', 'Work Phone'),
                         ('m', 'Mobile Phone'),
                         ('f', 'Work Fax'),
                         ('b', 'Pager')) }
        """
        self.moreInfo = moreInfo

    def getVCard(self):
        """Returns a vCard 3.0 for this contact, as a string."""
        # CRLF line endings per RFC 2425 section 5.8.1.
        ## Deal with multiline notes
        ##("NOTE:%s" % self.getNotes().replace("\n","\\n"))
        # Fake-out N by splitting up whatever we get out of getName.
        # This might not always do 'the right thing' but it's a
        # *reasonable* compromise.
        nameParts = self.getName().split()
        nameParts.reverse()
        cardLines = [
            "BEGIN:VCARD",
            "VERSION:3.0",
            "NOTE:%s" % self.getNotes(),
            "N:" + ';'.join(nameParts),
            "FN:%s" % self.getName(),
            "EMAIL;TYPE=INTERNET:%s" % self.getEmail(),
            "END:VCARD",
            "",
        ]
        # Trailing blank line in case we want to put more than one in a file.
        return "\r\n".join(cardLines) + "\r\n"
class GmailContactList:
    """
    Holds an entire Gmail contacts list and supports lookup of entries
    by id, email address or name.
    """
    def __init__(self, contactList):
        self.contactList = contactList

    def __str__(self):
        return '\n'.join(str(contact) for contact in self.contactList)

    def getCount(self):
        """Returns the number of contacts."""
        return len(self.contactList)

    def getAllContacts(self):
        """Returns the list of all GmailContacts."""
        return self.contactList

    def _firstOrFalse(self, matches):
        # Shared helper: first element of `matches`, or False when empty.
        if matches:
            return matches[0]
        return False

    def getContactByName(self, name):
        """
        First contact in the address book whose name is `name`, or
        False if no contact could be found.
        """
        return self._firstOrFalse(self.getContactListByName(name))

    def getContactByEmail(self, email):
        """
        First contact in the address book whose email is `email`, or
        False if no contact could be found.  (As of this writing Gmail
        insists upon unique emails, so at most one match is expected.)
        """
        return self._firstOrFalse(self.getContactListByEmail(email))

    def getContactById(self, myId):
        """
        First contact whose id equals `myId` (REMEMBER: IDs ARE STRINGS),
        or False if no contact could be found.
        """
        return self._firstOrFalse(self.getContactListById(myId))

    def getContactListByName(self, name):
        """
        LIST of all GmailContacts named `name`; empty when none match.
        """
        return [entry for entry in self.contactList
                if entry.getName() == name]

    def getContactListByEmail(self, email):
        """
        LIST of all GmailContacts whose email is `email` (at most one in
        practice -- but it doesn't hurt to be prepared); empty when none
        match.
        """
        return [entry for entry in self.contactList
                if entry.getEmail() == email]

    def getContactListById(self, myId):
        """
        LIST of all GmailContacts whose id equals `myId` (ids are
        strings); empty when none match.
        """
        return [entry for entry in self.contactList
                if entry.getId() == myId]
class GmailSearchResult:
    """
    Wraps the raw thread records returned by a Gmail search as a sequence
    of GmailThread objects.
    """
    def __init__(self, account, search, threadsInfo):
        """
        `threadsInfo` -- As returned from Gmail but unbunched.
        """
        #print "\nthreadsInfo\n",threadsInfo
        try:
            # A single thread arrives as a flat list rather than a list of
            # lists; normalise so the loop below always sees a thread list.
            if not type(threadsInfo[0]) is types.ListType:
                threadsInfo = [threadsInfo]
        except IndexError:
            print "No messages found"
        self._account = account
        self.search = search # TODO: Turn into object + format nicely.
        self._threads = []
        for thread in threadsInfo:
            self._threads.append(GmailThread(self, thread[0]))
    def __iter__(self):
        """Iterate over the result's GmailThreads."""
        return iter(self._threads)
    def __len__(self):
        """Number of threads in the result."""
        return len(self._threads)
    def __getitem__(self,key):
        """Index/slice access to the result's GmailThreads."""
        return self._threads.__getitem__(key)
class GmailSessionState:
    """
    Snapshot of a logged-in session (account name plus cookie jar) that
    can be pickled to disk and restored later.
    """
    def __init__(self, account = None, filename = ""):
        """
        Build the state either from a live `account` or by unpickling a
        previously saved `filename`; exactly one must be supplied.
        """
        if account:
            self.state = (account.name, account._cookieJar)
            return
        if filename:
            self.state = load(open(filename, "rb"))
            return
        raise ValueError("GmailSessionState must be instantiated with " \
                         "either GmailAccount object or filename.")
    def save(self, filename):
        """Pickle this session state to `filename` (highest protocol)."""
        dump(self.state, open(filename, "wb"), -1)
class _LabelHandlerMixin(object):
    """
    Mixin providing label (category) management for threads/messages.

    Note: Because a message id can be used as a thread id this works for
    messages as well as threads.
    """
    def _makeLabelList(self, labelList):
        # Install the locally cached list of labels.
        self._labels = labelList

    def addLabel(self, labelName):
        """
        Attach `labelName` to this thread/message and record it in the
        local label cache.  Returns the raw result of the server action.
        """
        # Note: It appears this also automatically creates new labels.
        result = self._account._doThreadAction(U_ADDCATEGORY_ACTION+labelName,
                                               self)
        if not self._labels:
            self._makeLabelList([])
        # TODO: Caching this seems a little dangerous; suppress duplicates maybe?
        self._labels.append(labelName)
        return result

    def removeLabel(self, labelName):
        """
        Detach `labelName` from this thread/message.  Returns True only
        when both the server action succeeded and the label was present
        in the local cache.
        """
        # TODO: Check label is already attached?
        # Note: An error is not generated if the label is not already attached.
        result = \
            self._account._doThreadAction(U_REMOVECATEGORY_ACTION+labelName,
                                          self)
        removeLabel = True
        try:
            self._labels.remove(labelName)
        except (ValueError, AttributeError):
            # ValueError: label not in the cached list.
            # AttributeError: no label cache has been created yet.
            # (Was a bare `except:`, which also silently swallowed
            # unrelated errors such as KeyboardInterrupt.)
            removeLabel = False
        # If we don't check both, we might end up in some weird inconsistent state
        return result and removeLabel

    def getLabels(self):
        """Return the locally cached label list."""
        return self._labels
class GmailThread(_LabelHandlerMixin):
    """
    A single Gmail conversation (thread); its messages are fetched lazily
    on first iteration or indexing.

    Note: As far as I can tell, the "canonical" thread id is always the same
          as the id of the last message in the thread. But it appears that
          the id of any message in the thread can be used to retrieve
          the thread information.
    """
    def __init__(self, parent, threadsInfo):
        """
        `parent` -- the GmailSearchResult this thread belongs to.
        `threadsInfo` -- raw per-thread record as returned by Gmail.
        """
        # TODO Handle this better?
        self._parent = parent
        self._account = self._parent._account
        self.id = threadsInfo[T_THREADID] # TODO: Change when canonical updated?
        self.subject = threadsInfo[T_SUBJECT_HTML]
        self.snippet = threadsInfo[T_SNIPPET_HTML]
        #self.extraSummary = threadInfo[T_EXTRA_SNIPPET] #TODO: What is this?
        # TODO: Store other info?
        # Extract number of messages in thread/conversation.
        self._authors = threadsInfo[T_AUTHORS_HTML]
        self.info = threadsInfo
        try:
            # The authors string ends in "(N)" when the thread holds more
            # than one message; pull N out of it.
            # TODO: Find out if this information can be found another way...
            #       (Without another page request.)
            self._length = int(re.search("\((\d+?)\)\Z",
                                         self._authors).group(1))
        except AttributeError,info:
            # If there's no message count then the thread only has one message.
            self._length = 1
        # TODO: Store information known about the last message (e.g. id)?
        # Filled lazily by __iter__/__getitem__.
        self._messages = []
        # Populate labels
        self._makeLabelList(threadsInfo[T_CATEGORIES])
    def __getattr__(self, name):
        """
        Dynamically dispatch some interesting thread properties.
        """
        # Maps friendly attribute names to indexes into the raw record.
        attrs = { 'unread': T_UNREAD,
                  'star': T_STAR,
                  'date': T_DATE_HTML,
                  'authors': T_AUTHORS_HTML,
                  'flags': T_FLAGS,
                  'subject': T_SUBJECT_HTML,
                  'snippet': T_SNIPPET_HTML,
                  'categories': T_CATEGORIES,
                  'attach': T_ATTACH_HTML,
                  'matching_msgid': T_MATCHING_MSGID,
                  'extra_snippet': T_EXTRA_SNIPPET }
        if name in attrs:
            return self.info[ attrs[name] ];
        raise AttributeError("no attribute %s" % name)
    def __len__(self):
        """Number of messages in the thread (parsed from the authors string)."""
        return self._length
    def __iter__(self):
        """Iterate over the thread's GmailMessages, fetching them on demand."""
        if not self._messages:
            self._messages = self._getMessages(self)
        return iter(self._messages)
    def __getitem__(self, key):
        """Index into the thread's messages; returns [] on an out-of-range index."""
        if not self._messages:
            self._messages = self._getMessages(self)
        try:
            result = self._messages.__getitem__(key)
        except IndexError:
            result = []
        return result
    def _getMessages(self, thread):
        """
        Retrieve the draft and non-draft messages of `thread` via a
        conversation-view search on the thread id.
        """
        # TODO: Do this better.
        # TODO: Specify the query folder using our specific search?
        items = self._account._parseSearchResult(U_QUERY_SEARCH,
                                                 view = U_CONVERSATION_VIEW,
                                                 th = thread.id,
                                                 q = "in:anywhere")
        result = []
        # TODO: Handle this better?
        # Note: This handles both draft & non-draft messages in a thread...
        for key, isDraft in [(D_MSGINFO, False), (D_DRAFTINFO, True)]:
            try:
                msgsInfo = items[key]
            except KeyError:
                # No messages of this type (e.g. draft or non-draft)
                continue
            else:
                # TODO: Handle special case of only 1 message in thread better?
                if type(msgsInfo[0]) != types.ListType:
                    msgsInfo = [msgsInfo]
                for msg in msgsInfo:
                    result += [GmailMessage(thread, msg, isDraft = isDraft)]
        return result
class GmailMessageStub(_LabelHandlerMixin):
    """
    Lightweight placeholder used where not all message information is
    known/required.

    NOTE: This may go away.
    """
    # TODO: Provide way to convert this to a full `GmailMessage` instance
    #       or allow `GmailMessage` to be created without all info?

    def __init__(self, id = None, _account = None):
        """Record just the message id and the owning account."""
        self.id = id
        self._account = _account
class GmailMessage(object):
    """
    A single message within a Gmail thread, built from a raw message
    record; its raw source is downloaded lazily via the `source` property.
    """
    def __init__(self, parent, msgData, isDraft = False):
        """
        Note: `msgData` can be from either D_MSGINFO or D_DRAFTINFO.
        """
        # TODO: Automatically detect if it's a draft or not?
        # TODO Handle this better?
        self._parent = parent
        self._account = parent._account
        # Unpack the interesting fields from the raw record.
        self.id = msgData[MI_MSGID]
        self.number = msgData[MI_NUM]
        self.author = msgData[MI_AUTHORFIRSTNAME]
        self.sender = msgData[MI_AUTHOREMAIL]
        self.subject = msgData[MI_SUBJECT]
        self.cc = msgData[MI_CC]
        self.bcc = msgData[MI_BCC]
        self.attachments = [GmailAttachment(self, info)
                            for info in msgData[MI_ATTACHINFO]]
        # TODO: Populate additional fields & cache...(?)
        # TODO: Handle body differently if it's from a draft?
        self.isDraft = isDraft
        # Cache for the lazily fetched raw source.
        self._source = None

    def _getSource(self):
        """Fetch (once) and return the raw source of this message."""
        if self._source:
            return self._source
        # TODO: Do this more nicely...?
        # TODO: Strip initial white space & fix up last line ending
        #       to make it legal as per RFC?
        self._source = self._account.getRawMessage(self.id)
        return self._source

    source = property(_getSource, doc = "")
class GmailAttachment:
    """
    An attachment of a GmailMessage; its body is downloaded lazily via
    the `content` property.
    """
    def __init__(self, parent, attachmentInfo):
        """
        `parent` -- owning GmailMessage; `attachmentInfo` -- raw record.
        """
        # TODO Handle this better?
        self._parent = parent
        self._account = parent._account
        # Unpack the raw attachment record.
        self.id = attachmentInfo[A_ID]
        self.filename = attachmentInfo[A_FILENAME]
        self.mimetype = attachmentInfo[A_MIMETYPE]
        self.filesize = attachmentInfo[A_FILESIZE]
        # Cache for the lazily fetched body.
        self._content = None

    def _getContent(self):
        """Download (once) and return the attachment body."""
        if self._content:
            return self._content
        # TODO: Do this a more nicely...?
        self._content = self._account._retrievePage(
            _buildURL(view=U_ATTACHMENT_VIEW, disp="attd",
                      attid=self.id, th=self._parent._parent.id))
        return self._content

    content = property(_getContent, doc = "")

    def _getFullId(self):
        """
        Returns the "full path"/"full id" of the attachment (used to
        refer to the file when forwarding), of the form
        "<thread_id>_<msg_id>_<attachment_id>".
        """
        threadId = self._parent._parent.id
        messageId = self._parent.id
        return "%s_%s_%s" % (threadId, messageId, self.id)

    _fullId = property(_getFullId, doc = "")
class GmailComposedMessage:
    """
    A message to be sent: recipient, subject and body, plus optional
    cc/bcc and attachments.
    """
    def __init__(self, to, subject, body, cc = None, bcc = None,
                 filenames = None, files = None):
        """
        `filenames` - list of the file paths of the files to attach.
        `files`     - list of objects implementing sub-set of
                      `email.Message.Message` interface (`get_filename`,
                      `get_content_type`, `get_payload`). This is to
                      allow use of payloads from Message instances.
                      TODO: Change this to be simpler class we define ourselves?
        """
        # Plain data holder -- record everything as attributes.
        (self.to, self.subject, self.body) = (to, subject, body)
        (self.cc, self.bcc) = (cc, bcc)
        (self.filenames, self.files) = (filenames, files)
# Interactive demo: log in, show quota usage, then let the user browse
# folders/labels and dump thread/message summaries until Ctrl-C.
if __name__ == "__main__":
    import sys
    from getpass import getpass
    # Account name from argv[1], or prompt for it.
    try:
        name = sys.argv[1]
    except IndexError:
        name = raw_input("Gmail account name: ")
    pw = getpass("Password: ")
    ga = GmailAccount(name, pw)
    print "\nPlease wait, logging in..."
    try:
        ga.login()
    except GmailLoginFailure,e:
        print "\nLogin failed. (%s)" % e.message
    else:
        print "Login successful.\n"
        # TODO: Use properties instead?
        quotaInfo = ga.getQuotaInfo()
        quotaMbUsed = quotaInfo[QU_SPACEUSED]
        quotaMbTotal = quotaInfo[QU_QUOTA]
        quotaPercent = quotaInfo[QU_PERCENT]
        print "%s of %s used. (%s)\n" % (quotaMbUsed, quotaMbTotal, quotaPercent)
        # Offer both the standard folders and the user's own labels.
        searches = STANDARD_FOLDERS + ga.getLabelNames()
        while 1:
            try:
                print "Select folder or label to list: (Ctrl-C to exit)"
                for optionId, optionName in enumerate(searches):
                    print " %d. %s" % (optionId, optionName)
                name = searches[int(raw_input("Choice: "))]
                if name in STANDARD_FOLDERS:
                    result = ga.getMessagesByFolder(name, True)
                else:
                    result = ga.getMessagesByLabel(name, True)
                if not len(result):
                    print "No threads found in `%s`." % name
                    break
                tot = len(result)
                i = 0
                for thread in result:
                    print "%s messages in thread" % len(thread)
                    print thread.id, len(thread), thread.subject
                    for msg in thread:
                        print "\n ", msg.id, msg.number, msg.author,msg.subject
                        # Just as an example of other usefull things
                        #print " ", msg.cc, msg.bcc,msg.sender
                    i += 1
                print
                print "number of threads:",tot
                print "number of messages:",i
            except KeyboardInterrupt:
                break
    print "\n\nDone."
| Python |
NAME = 'Technorati/Python'
VERSION = '0.05'
# Copyright (C) 2003 Phillip Pearson
URL = 'http://www.myelin.co.nz/technorati_py/'
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Related work:
#
# PyTechnorati by Mark Pilgrim:
# http://diveintomark.org/projects/pytechnorati/
#
# xmltramp/technorati.py by Aaron Swartz
# http://www.aaronsw.com/2002/xmltramp/technorati.py
#
# Technorati API documentation
# http://developers.technorati.com/wiki/CosmosQuery
__history__ = '''
v0.05 (changes by Kevin Marks - this is a merge from the modified 0.03 version distributed by Technorati)
- supports getUserInfo functions
v0.04 (changes by Mike Linksvayer)
- raises TechnoratiError when 'error' found in response
- print status messages to stderr
- API as specified at
http://developers.technorati.com/wiki/CosmosQuery
(no version=, added support for limit=, current=, and
type=)
v0.03
- now supporting the new 'search' command.
v0.02
- now using the latest version of the API (no .xml URLs, format=
and version= arguments)
- you can now get more than just the first page of cosmos results
(use start= or -s / --start)
- now throwing an exception when we get an HTTP error
- '--cosmos' command-line option added (same as --inbound)
- now supporting all license key locations used by PyTechnorati
v0.01
initial release
http://www.myelin.co.nz/post/2003/5/12/#200305124
'''
import urllib, sgmllib, os, sys
from pprint import pprint
def setLicense(license_key):
    "Set the module-level Technorati API license key"
    # Stored globally; findkey() uses this as one of its fallbacks.
    global LICENSE_KEY
    LICENSE_KEY = license_key
def findkey(license_key=None):
    """
    Find out the current user's API key.

    Tries, in order: the explicit argument, the module-level LICENSE_KEY,
    the TECHNORATI_LICENSE_KEY environment variable, and a handful of
    well-known key files in ., $HOME and next to the script.  The first
    hit is stored via setLicense() and returned; raises when none works.
    """
    class GotIt(Exception):
        # Control-flow exception: raised to escape the search as soon as
        # a key is found.
        def __init__(self, key):
            self.key = key
    def tryvar(key):
        if key:
            raise GotIt(key)
    def tryfile(fn):
        if DEBUG: print >>sys.__stderr__,"trying",fn
        if os.path.exists(fn):
            # First line of the file is the key.
            tryvar(open(fn).readline().strip())
    def modulepath():
        # Directory containing the running script.
        return os.path.split(os.path.abspath(sys.argv[0]))[0]
    try:
        tryvar(license_key)
        tryvar(LICENSE_KEY)
        tryvar(os.environ.get('TECHNORATI_LICENSE_KEY', None))
        for path in ('.',
                     os.path.expanduser('~'),
                     modulepath()):
            for leaf in ('.technoratikey',
                         'technoratikey.txt',
                         'apikey.txt'):
                tryfile(os.path.join(path, leaf))
    except GotIt, g:
        setLicense(g.key)
        return LICENSE_KEY
    # Reached only when every lookup above came up empty.
    raise Exception, "Can't find license key"
# Module-level API key cache; populated by setLicense()/findkey().
LICENSE_KEY = None
# Set to 1 to dump request/response details to stderr.
DEBUG = 0
class opener(urllib.FancyURLopener):
    """URL opener that identifies this library and raises on HTTP errors."""
    version = '%s v%s; %s' % (NAME, VERSION, URL)
    def http_error_default(self, url, fp, errcode, errmsg, headers, data=None):
        # Fail loudly instead of silently returning the error page body.
        raise IOError, "HTTP error %s fetching http:%s" % (errcode, url)
# On-disk memoisation of API responses; written back out in __main__.
callcache = {}
try:
    # SECURITY NOTE: eval() of a local cache file executes arbitrary code
    # if the file is tampered with; a safer serialisation (e.g. JSON)
    # would be preferable.
    callcache = eval(open('cache.txt').read())
except:
    # Missing/corrupt cache is fine -- start with an empty one.
    pass
class BadUrlError(Exception):
    """Raised when a query is attempted without a usable URL."""
def call(proc, args, license_key=None):
    """
    Issue API call `proc` with query parameters `args` and return the raw
    XML response.  Responses are memoised in the module-level `callcache`.
    Note: mutates `args` (adds 'key' and 'format' entries).
    """
    #if args['url'] in (None, ''):
    #    raise BadUrlError("No URL supplied")
    args['key'] = findkey(license_key)
    args['format'] = 'xml'
    url = 'http://api.technorati.com/%s?%s' % (proc, urllib.urlencode(args))
    print >>sys.__stderr__,"calling",url
    if not callcache.has_key(url):
        print >>sys.__stderr__,"(fetching)"
        o = opener()
        f = o.open(url)
        callcache[url] = f.read()
    xml = callcache[url]
    if DEBUG:
        print >>sys.__stderr__,xml
    return xml
def parse(parser, xml):
    """Run `xml` through `parser` and return the data it collected."""
    parser.feed(xml)
    parser.close()
    return parser.data
class TechnoratiError(Exception):
    """Raised when the Technorati API response contains an <error> element."""
class genericParser(sgmllib.SGMLParser):
    """
    SGML parser for Technorati API responses.

    Per-<item> fields are gathered into dicts appended to
    self.data[itemsName]; result-level fields (weblog info, counts, ...)
    go straight into self.data.
    """
    def __init__(self, itemsName):
        sgmllib.SGMLParser.__init__(self)
        self.data = {}
        # Flags tracking which element we are currently inside.
        self.inresult = self.inweblog = self.initem = 0
        self.weblog = None
        self.item = None
        self.data[itemsName] = self.items = []
        # Character-data accumulator; a list while collecting, else None.
        self.collector = None
    def collect(self):
        """Start accumulating character data for the current element."""
        assert self.collector is None, "already collecting: parse failure!"
        self.collector = []
    def grab(self):
        """Stop collecting and return the accumulated text."""
        s = "".join(self.collector)
        self.collector = None
        return s
    def grab_int(self):
        """Like grab(), but return an int (empty text counts as 0)."""
        x = self.grab()
        if not x:
            return 0
        return int(x)
    def handle_data(self, s):
        if self.collector is not None:
            self.collector.append(s)
    def start_document(self, attrs):
        pass
    def end_document(self):
        pass
    def start_result(self, attrs):
        self.inresult = 1
    def end_result(self):
        self.inresult = 0
    def start_item(self, attrs):
        self.initem = 1
        self.item = {}
    def end_item(self):
        self.initem = 0
        self.items.append(self.item)
        self.item = None
    def start_nearestpermalink(self, attrs):
        assert self.initem
        self.collect()
    def end_nearestpermalink(self):
        self.item['nearestpermalink'] = self.grab()
    def start_excerpt(self, attrs):
        assert self.initem
        self.collect()
    def end_excerpt(self):
        self.item['excerpt'] = self.grab()
    def start_linkcreated(self, attrs):
        assert self.initem
        self.collect()
    def end_linkcreated(self):
        self.item['linkcreated'] = self.grab()
    def start_weblog(self, attrs):
        assert self.initem or self.inresult, "found <weblog> element outside <result> or <item>"
        self.inweblog = 1
        self.weblog = {}
    def end_weblog(self):
        self.inweblog = 0
        if self.initem:
            self.item['weblog'] = self.weblog
            #self.weblogs.append(self.weblog)
        elif self.inresult:
            self.data['weblog'] = self.weblog
        else:
            # BUGFIX: previously raised the undefined name `AssertionFailure`
            # (via Python-2-only `raise X, msg` syntax), which itself blew up
            # with a NameError; raise AssertionError as evidently intended.
            raise AssertionError("<weblog> element not in item or result...?")
        self.weblog = None
    def start_rankingstart(self, attrs):
        self.collect()
    def end_rankingstart(self):
        self.data['rankingstart'] = int(self.grab())
    def start_url(self, attrs):
        self.collect()
    def end_url(self):
        # A <url> inside <weblog> belongs to the weblog record; otherwise
        # it is a result-level field.
        if self.inweblog:
            self.weblog['url'] = self.grab()
        else:
            self.data['url'] = self.grab()
    def start_name(self, attrs):
        self.collect()
    def end_name(self):
        self.weblog['name'] = self.grab()
    def start_rssurl(self, attrs):
        self.collect()
    def end_rssurl(self):
        self.weblog['rssurl'] = self.grab()
    def start_inboundblogs(self, attrs):
        self.collect()
    def end_inboundblogs(self):
        if self.inweblog:
            x = self.weblog
        elif self.inresult:
            x = self.data
        else:
            # BUGFIX: was the undefined `AssertionFailure` (see end_weblog).
            raise AssertionError("<inboundblogs> element not in <result> or <weblog>")
        x['inboundblogs'] = self.grab_int()
    def start_inboundlinks(self, attrs):
        self.collect()
    def end_inboundlinks(self):
        if self.inweblog:
            x = self.weblog
        elif self.inresult:
            x = self.data
        else:
            # BUGFIX: was the undefined `AssertionFailure` (see end_weblog).
            raise AssertionError("<inboundlinks> element not in <result> or <weblog>")
        x['inboundlinks'] = self.grab_int()
    def start_lastupdate(self, attrs):
        self.collect()
    def end_lastupdate(self):
        self.weblog['lastupdate'] = self.grab()
    def start_error(self, attrs):
        self.collect()
    def end_error(self):
        # The API reports failures as an <error> element inside <result>.
        if self.inresult:
            # Python-3-compatible raise syntax (was `raise X, value`).
            raise TechnoratiError(self.grab())
        else:
            # BUGFIX: was the undefined `AssertionFailure` (see end_weblog).
            raise AssertionError("<error> element not in <result>")
def getCosmos(url, start=None, limit=None, querytype=None, current=None, license_key=None):
    """Fetch a blog's cosmos: the response contains a Weblog record
    ('weblog') for the blog itself and a list ('inbound') of its inbound
    links/neighbours."""
    args = {'url': url}
    # Integer parameters are sent as decimal strings.
    for key, value in (('start', start), ('limit', limit)):
        if value is not None:
            args[key] = '%d' % value
    if current is not None:
        args['current'] = current
    if querytype is not None:
        args['type'] = querytype
    return parse(genericParser('inbound'), call('cosmos', args, license_key))
def getUserInfo(username, license_key=None):
    """Fetch info about a Technorati member; returns the parsed 'user'
    entry, or None when the response lacks one."""
    response = call('getinfo', {'username': username}, license_key)
    return parse(genericParser('user'), response).get('user', None)
def getBlogInfo(url, license_key=None):
    """Fetch info about a blog; returns the parsed 'weblog' record, or
    None when the response lacks one."""
    response = call('bloginfo', {'url': url}, license_key)
    return parse(genericParser('weblogs'), response).get('weblog', None)
def getOutboundBlogs(url, license_key=None):
    """Fetch the blogs linked to by `url`: the response has a 'weblog'
    record for the blog itself and an 'outbound' list of linked-to blogs."""
    response = call('outbound', {'url': url}, license_key)
    return parse(genericParser('outbound'), response)
def search(query, license_key=None):
    """Run a Technorati keyword search and return the parsed response."""
    response = call('search', {'query': query}, license_key)
    return parse(genericParser('search'), response)
def test(url):
    """Smoke-test every API wrapper, defaulting to epeus.blogspot.com."""
    url = url or 'http://epeus.blogspot.com'
    pprint(getUserInfo('kevinmarks'))
    pprint(getCosmos(url))
    pprint(getBlogInfo(url))
    pprint(getOutboundBlogs(url))
    pprint(search('"David Sifry"'))
def main():
    """
    Command-line driver: parse the options, pick the API wrapper to run,
    call it with the joined positional arguments and pretty-print the
    result.
    """
    import sys, getopt
    opts, rest = getopt.getopt(sys.argv[1:], 'dts:u:q:c:l:', ('debug', 'test', 'inbound', 'cosmos', 'start=', 'info', 'outbound', 'url=', 'querytype=', 'current=', 'limit=', 'search', 'user'))
    # Non-option arguments are joined into a single query/URL string.
    arg = " ".join([x for x in rest if x.strip()])
    func = None
    start = None
    limit = None
    for opt,val in opts:
        # Maps long option names to the API wrapper they invoke.
        _map = {'inbound': getCosmos,
                'cosmos': getCosmos,
                'info': getBlogInfo,
                'outbound': getOutboundBlogs,
                'search': search,
                'user': getUserInfo,
                }
        if opt in ('-u', '--url'):
            url = val
        elif opt in ('-s', '--start'):
            start = int(val)
        elif opt in ('-l', '--limit'):
            limit = int(val)
        elif opt in ('-d', '--debug'):
            global DEBUG
            DEBUG = 1
        elif opt in ('-t', '--test'):
            func = test
        elif opt.startswith('--') and _map.has_key(opt[2:]):
            assert func is None, "Only one function (url, inbound, info or outbound) may be supplied"
            func = _map[opt[2:]]
    if func is None:
        print >>sys.__stderr__,"No function supplied; --url, --inbound, --info, --search, --user or --outbound must be specified on the command line"
        return
    if start is not None:
        r = func(arg, start)
    else:
        r = func(arg)
    # test() does its own printing; everything else is pprint'ed here.
    if func is not test:
        pprint(r)
if __name__ == '__main__':
    findkey()
    main()
    # Persist the response cache for the next run.  The backticks are
    # Python 2 shorthand for repr(callcache).
    open('cache.txt', 'wt').write(`callcache`)
| Python |
# simpletest
# Smoke-test script for pydelicious: exercises the RSS fetchers and the
# post/tag/url helpers, collecting every result for the checks below.
print "Testing some rss calls"
import pydelicious as p
r = []
# -- RSS queries: tag / tag+popular / tag+user combinations --
result = p.getrss(tag="python", popular=0)
r.append(result)
result = p.getrss(tag="python ajax", popular=0)
r.append(result)
result = p.getrss(tag="python", popular=1)
r.append(result)
result = p.getrss(tag="python ajax", popular=1)
r.append(result)
result = p.getrss(tag="python", user="delpy")
r.append(result)
result = p.getrss(user="delpy")
r.append(result)
result = p.getrss(tag="python ajax", user="pydelicious")
r.append(result)
result = p.getrss()
r.append(result)
result = p.getrss(url="http://www.heise.de/")
r.append(result)
# -- Convenience wrappers --
result = p.get_userposts("delpy")
r.append(result)
result = p.get_tagposts("python")
r.append(result)
result = p.get_urlposts("http://www.heise.de/")
r.append(result)
result = p.get_popular()
r.append(result)
result = p.get_popular(tag="python")
r.append(result)
# -- Report: a set bozo flag means the fetch/parse raised an exception --
for i in range(len(r)):
    if r[i].bozo == 1:
        print "Catched a exception"
        print i
        print r[i]["debug"]
        print "Exception", r[i].bozo_exception
        print dir(r[i].bozo_exception)
    elif type(r[i]["result"]) != type(p.posts()) and r[i]["result"] != p.posts():
        # Result is neither a posts() instance nor equal to an empty one.
        print
        print "Error with posts"
        print i
        print r[i]["debug"]
        print "result", r[i]["result"]
        print "type", type(r[i]["result"])
        print "Exception", r[i].bozo_exception
print "done"
| Python |
""" Library for the syndic8 RPC API """
import xmlrpclib
import time
PROXY = "http://www.syndic8.com/xmlrpc.php"
def _new_instance():
    """ Returns a fresh XML-RPC proxy bound to the syndic8 API namespace """
    return xmlrpclib.ServerProxy(PROXY).syndic8
def findfeeds(pattern, sortfield='feedid', offset=0, limit=10):
    """ Returns Array of FeedIDs of matching feeds """
    # Clamp nonsense paging values instead of passing them through.
    offset = max(offset, 0)
    if limit < 0:
        limit = 10
    # NOTE(review): arguments are sent as (pattern, sortfield, limit,
    # offset) -- verify this matches the server-side FindFeeds signature.
    return _new_instance().FindFeeds(pattern, sortfield, limit, offset)
def findsites(pattern):
    """ Search for sites matching `pattern` via the FindSites call """
    server = _new_instance()
    return server.FindSites(pattern)
def getfeedfields():
    """ Returns Array of feed field names """
    server = _new_instance()
    return server.GetFeedFields()
def getfeedcount():
    """ Returns the total number of feeds """
    server = _new_instance()
    return server.GetFeedCount()
def getfeedinfo(ids, fields=None):
    """
    Returns Array of structures containing all feed fields (or requested
    fields only) from database, plus faultCode and faultMessage.

    `fields=None` requests all fields; it replaces the old mutable
    default `fields=[]` (a shared-list pitfall) while remaining
    backward-compatible for every existing caller.
    """
    if fields is None:
        fields = []
    return _new_instance().GetFeedInfo(ids, fields)
def getlastfeed():
    """ Returns the highest FeedID assigned so far. """
    server = _new_instance()
    return server.GetLastFeed()
def gettaggedfeeds(tag):
    """ Returns Array of FeedIDs of all feeds with the given tag. """
    server = _new_instance()
    return server.GetTaggedFeeds(tag)
def getchangedfeeds(fields, startdate, enddate, returnfields):
    """ Returns Array of structures, each containing the requested fields
    from feeds with changes in the given date range.  The dates are
    time-struct values formatted as YYYY-MM-DD for the wire. """
    datefmt = '%Y-%m-%d'
    server = _new_instance()
    return server.GetChangedFeeds(fields,
                                  time.strftime(datefmt, startdate),
                                  time.strftime(datefmt, enddate),
                                  returnfields)
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.1 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
# Marker exception classes; raised and caught elsewhere in this file to
# classify character-encoding and content-type conditions.
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Loosen sgmllib's token patterns: allow ':' and '.' in tag names (namespaced
# elements) and recognize numeric character references.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
# Maps internal version identifiers (as stored in self.version during parsing)
# to human-readable feed format names.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # Minimal dict() replacement: build a mapping from (key, value) pairs.
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    # Dictionary subclass that (a) transparently maps legacy feedparser key
    # names to their modern equivalents and (b) allows attribute-style access
    # (d.feed is d['feed']).
    # Legacy-name -> modern-name mapping. A list value means several modern
    # keys can satisfy the legacy name; the first one present wins on read.
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category' and 'categories' are synthesized from the 'tags' list.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # Prefer data stored under the requested key itself, then the mapped key.
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # Writes through a legacy name are redirected to the modern key
        # (first alternative when the mapping is a list).
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # Reimplemented so lookups go through the keymap-aware has_key/__getitem__.
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # Attribute access falls back to item access; names starting with '_'
        # are never resolved as items.
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # 'data' and private names go on the instance; everything else is an item.
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    # Replace the FeedParserDict class with a factory that returns plain
    # dictionaries, for environments that cannot cope with the custom type.
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Translation table for _ebcdic_to_ascii, built lazily on first use.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    # Translate an EBCDIC-encoded byte string to ASCII using a 256-entry
    # translation table (emap[i] is the ASCII code for EBCDIC byte i).
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# Matches 'scheme://' followed by any run of extra slashes, capturing the
# scheme and the remainder so the spurious slashes can be dropped.
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    # Collapse malformed leading slashes after the scheme, then delegate
    # the actual resolution to the standard library.
    fixed = _urifixer.sub(r'\1\3', uri)
    return urlparse.urljoin(base, fixed)
class _FeedParserMixin:
    # Maps every namespace URI feedparser understands to the internal prefix
    # used when dispatching elements to _start_*/_end_* handler methods.
    # An empty-string prefix means the namespace's elements are treated as
    # core feed elements.
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/':                               'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/':         'ag',
                  'http://purl.org/rss/1.0/modules/annotate/':            'annotate',
                  'http://media.tangent.org/rss/1.0/':                    'audio',
                  'http://backend.userland.com/blogChannelModule':        'blogChannel',
                  'http://web.resource.org/cc/':                          'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company':              'co',
                  'http://purl.org/rss/1.0/modules/content/':             'content',
                  'http://my.theinfo.org/changed/1.0/rss/':               'cp',
                  'http://purl.org/dc/elements/1.1/':                     'dc',
                  'http://purl.org/dc/terms/':                            'dcterms',
                  'http://purl.org/rss/1.0/modules/email/':               'email',
                  'http://purl.org/rss/1.0/modules/event/':               'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0':           'feedburner',
                  'http://freshmeat.net/rss/fm/':                         'fm',
                  'http://xmlns.com/foaf/0.1/':                           'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#':             'geo',
                  'http://postneo.com/icbm/':                             'icbm',
                  'http://purl.org/rss/1.0/modules/image/':               'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd':           'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd':              'itunes',
                  'http://purl.org/rss/1.0/modules/link/':                'l',
                  'http://search.yahoo.com/mrss':                         'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/':       'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#':          'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#':                'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/':           'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/':           'reqv',
                  'http://purl.org/rss/1.0/modules/search/':              'search',
                  'http://purl.org/rss/1.0/modules/slash/':               'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/':            'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/':       'ss',
                  'http://hacks.benhammersley.com/rss/streaming/':        'str',
                  'http://purl.org/rss/1.0/modules/subscription/':        'sub',
                  'http://purl.org/rss/1.0/modules/syndication/':         'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/':            'taxo',
                  'http://purl.org/rss/1.0/modules/threading/':           'thr',
                  'http://purl.org/rss/1.0/modules/textinput/':           'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/':                 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/':                'wiki',
                  'http://www.w3.org/1999/xhtml':                         'xhtml',
                  'http://www.w3.org/XML/1998/namespace':                 'xml',
                  'http://schemas.pocketsoap.com/rss/myDescModule/':      'szf'
}
    # Lowercased-URI lookup table; filled in lazily by __init__ from
    # `namespaces` and shared by all instances (class attribute).
    _matchnamespaces = {}
    # Elements whose text content is a URI that may be relative to xml:base.
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    # Elements whose (HTML) content may embed relative URIs needing resolution.
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # Elements whose content is sanitized before being handed back to callers.
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # MIME types treated as HTML for URI resolution and sanitizing purposes.
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        # baseuri: document base for resolving relative URIs
        # baselang: default xml:lang, copied into feed['language'] if given
        # encoding: character encoding assumed for byte strings
        if _debug: sys.stderr.write('initializing FeedParser\n')
        # Lazily populate the shared lowercased namespace lookup table.
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        # elementstack entries are [name, expectingText, pieces]; basestack
        # and langstack track xml:base / xml:lang per open element.
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        # Generic start-tag dispatcher: normalizes attributes, tracks
        # xml:base/xml:lang/namespaces, passes inline XHTML content through
        # as text, and finally dispatches to a _start_<prefix><name> handler
        # if one exists.
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        # Generic end-tag dispatcher, the mirror of unknown_starttag:
        # dispatches to _end_<prefix><name> (or pops the element), closes
        # inline XHTML content, and unwinds the xml:base/xml:lang stacks.
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # Named entity reference; '&copy;' arrives here as ref == 'copy'.
        # XML-special entities stay encoded; everything else is resolved to
        # its UTF-8 encoded character, or kept as-is if unknown.
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                # Map an entity name to its Unicode codepoint.
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        # Inside inline XHTML content, re-escape so the markup stays well-formed.
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass
    def handle_decl(self, text):
        # Declarations (e.g. DOCTYPE) are ignored.
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        # Returns the index just past the declaration, per sgmllib's contract.
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            # Unterminated CDATA runs to the end of the buffer.
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        # Record a namespace declaration: sniff the feed version from
        # well-known namespace URIs, then map the document's prefix to
        # feedparser's canonical prefix if the namespace is recognized.
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # Resolve a possibly-relative URI against the current xml:base.
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # Hook for subclasses; the base implementation returns data untouched.
        return data
    def push(self, element, expectingText):
        # Open an element: [name, whether text content is expected, text pieces].
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        # Close the named element and post-process its accumulated text:
        # base64-decode, resolve relative URIs, decode entities, sanitize
        # embedded HTML, coerce to unicode, then store the result in the
        # current entry or feed context. Returns the processed text.
        if not self.elementstack: return
        # Mismatched end tag: ignore rather than corrupt the stack.
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        # Open a content-bearing element, recording its type/language/base
        # in contentparams for use by pop().
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        # Close a content-bearing element and reset contentparams.
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value):
        # Store value in the current context only if the key isn't already set.
        context = self._getContext()
        context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # Determine the RSS flavor from the version attribute.
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF puts timestamps/links in attributes; replay them through the
        # normal start/end handlers so they land in the usual places.
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # Atom top-level element; flavor comes from the version attribute.
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # inimage suppresses normal feed-level storage while inside <image>.
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # intextinput suppresses normal feed-level storage while inside <textInput>.
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # Covers Atom <author>, RSS <managingEditor>, dc:author/creator,
        # itunes:author (see aliases below).
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # itunes:owner is stored under the 'publisher' key.
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # Each <contributor> appends a fresh dict to the contributors list.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor's text is the contributor's name directly.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # Route the name to whichever construct is currently open.
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        self.push('width', 0)
    def _end_width(self):
        # Non-numeric widths degrade to 0 rather than failing.
        value = self.pop('width')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['width'] = value
    def _start_height(self, attrsD):
        self.push('height', 0)
    def _end_height(self):
        # Non-numeric heights degrade to 0 rather than failing.
        value = self.pop('height')
        try:
            value = int(value)
        except:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['height'] = value
    def _start_url(self, attrsD):
        # URLs are normalized under the 'href' key.
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        # Route the URL to whichever construct is currently open.
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # Route the email to publisher, author, or contributor as appropriate.
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # Store one field of author/publisher detail and re-derive the
        # combined display string.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # Store one field on the most recently opened contributor.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        # Keep the combined string (context['author']) and the structured
        # detail dict (context['author_detail']) consistent, whichever one
        # the feed provided: build "Name (email)" from the detail, or parse
        # an email address out of the combined string into the detail.
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # Atom <subtitle>; also covers <tagline> and itunes:subtitle.
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # Atom <rights>; also covers dc:rights and RSS <copyright>.
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Open a new entry; RDF feeds can carry the id in rdf:about.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # The declared language also becomes the current xml:lang default.
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        # Atom <published>; also dcterms:issued and Atom 0.3 <issued>.
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        # Atom <updated>; also <modified>, dcterms:modified, RSS <pubDate>
        # and dc:date.
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # cc:license carries its value in rdf:resource, so the element is
        # pushed and popped entirely within the start handler.
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
    def _addTag(self, term, scheme, label):
        # Append a tag dict to the context's 'tags' list, skipping exact
        # duplicates and fully-empty tags.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        # Atom <category>; also dc:subject and <keywords> (aliases below).
        # RSS uses 'domain' where Atom uses 'scheme'.
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list; one tag per word.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        # Element text fills in the term of the tag opened by _start_category
        # (if it had none), otherwise it becomes a new tag.
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store the attributes directly.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Record every link in context['links']; an alternate HTML link with
        # an href additionally becomes context['link']. Links without href
        # (Atom 0.3 style) expect their URI as element text instead.
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
    def _start_title(self, attrsD):
        # Title text is only expected inside a feed, entry, or source element.
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        # Route the title into the textinput/image sub-structure when inside one.
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            # A summary was already captured for this context; treat this
            # description as full content instead.
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        # Like description, but abstracts are plain text rather than HTML.
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            # Mirrors _start_description's summary-already-seen branch.
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        # Atom 0.3 <info> element; plain-text content.
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        # Value is stored by popContent; nothing further to do here.
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        if attrsD:
            # Any url/uri attribute becomes generator_detail['href'], resolved
            # against the base URI.
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            # The element text is the generator's human-readable name.
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            # Inject the rdf:resource URI as if it were the element's text.
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            # Treat the rdf:resource URI as the element's text value.
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            # A second summary-like element: treat it as content instead.
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        # Mirrors _start_summary: close whichever key the start handler opened.
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                # Fall back to the enclosure URL as the entry id.
                context['id'] = href
    def _start_source(self, attrsD):
        # Everything until </source> is collected into self.sourcedata.
        self.insource = 1
    def _end_source(self):
        self.insource = 0
        # Deep-copy so clearing sourcedata below does not mutate the saved dict.
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            # Out-of-line content: remember where the real body lives.
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        # Product link content is treated as HTML.
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        # Inline XHTML body content.
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # content:encoded (RSS) is escaped HTML.
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text and HTML content also doubles as the description.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        # itunes:image is attribute-only; store it like a standard feed image.
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        '''SAX-based feed parser, used when a real XML parser is available.

        Translates namespaced SAX events into the unknown_starttag /
        unknown_endtag / handle_data protocol of _FeedParserMixin.  Sets
        self.bozo and self.exc when the document is not well-formed.
        '''
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None

        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)

        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))

            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())

        def characters(self, text):
            self.handle_data(text)

        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)

        def error(self, exc):
            # Recoverable parse error: note it and keep going.
            self.bozo = 1
            self.exc = exc

        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    '''SGML pass-through processor: re-emits parsed HTML as text.

    Subclasses override the handle_* / unknown_* hooks to filter or rewrite
    markup; output() returns the accumulated result.
    '''
    # elements that have no end tag and are serialized as <tag />
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # self.pieces accumulates output fragments; joined in output()
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # Expand XML-style short tags: void elements become '<tag />',
        # everything else becomes an open/close pair.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        # Escape '<!' unless it starts a DOCTYPE, comment, or CDATA marked
        # section, so sgmllib does not choke on stray declarations.
        # (The replacement strings below were corrupted by entity-decoding in
        # this copy; restored to the upstream feedparser literals.)
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # Override sgmllib's name scanner with a more permissive pattern
        # (allows '.' and ':' in declaration names).
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    '''sgmllib-based fallback parser, used when no strict XML parse is possible.'''
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        # The replacement literals in this copy had been destroyed by
        # entity-decoding (each call had become a no-op like replace('<','<'));
        # restored to the upstream feedparser behavior.
        # First, normalize numeric references for the five XML special
        # characters to their named forms.
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # Non-XML content: fully decode the named entities to characters.
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    '''HTML processor that rewrites known relative-URI attributes as absolute.'''

    # (tag, attribute) pairs whose values are URIs subject to resolution
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        resolved = []
        for key, value in self.normalize_attrs(attrs):
            if (tag, key) in self.relative_uris:
                # keep the original value if resolution yields an empty string
                # (preserves the and/or short-circuit of the upstream code)
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    '''Return htmlSource with relative URIs rewritten as absolute against baseURI.'''
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    # Whitelist-based sanitizer: any element/attribute not explicitly listed
    # is dropped.  Security-sensitive: widening these lists widens the XSS
    # surface of parsed feed content.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    # For these, both the tags AND their content must be suppressed.
    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # Depth counter: >0 while inside a script/applet element, so that
        # handle_data can drop its text too.
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        # Drop any attribute that is not whitelisted.
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # Processing instructions are never emitted.
        pass

    def handle_decl(self, text):
        # Declarations (DOCTYPE etc.) are never emitted.
        pass

    def handle_data(self, text):
        # Text is emitted only outside suppressed (script/applet) elements.
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    # Strip unsafe markup from htmlSource, optionally running the result
    # through an installed HTML Tidy implementation.
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # best-effort probing: missing/broken tidy module just means
                # we try the next interface (or skip tidying entirely)
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the <body> contents of the tidied document
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # urllib2 handler that preserves the HTTP status code on the returned
    # file-like object (infourl.status) instead of raising, and upgrades
    # basic auth to digest auth on a 401 when the server demands it.
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            # any 3xx except 304 is treated as a redirect
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            # redirect without a Location header: return the response as-is
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # anything missing or malformed: fall back to the plain error path
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    if hasattr(url_file_stream_or_string, 'read'):
        # already a file-like object; hand it back untouched
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials from the URL and pre-compute the
                    # Basic auth header value
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        # argument unpacking replaces the deprecated apply() builtin
        opener = urllib2.build_opener(*(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        # best-effort: not a readable filename, fall through and treat the
        # input as literal feed data
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
# Date handlers are tried in order; most recently registered first.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a full date regex with an optional time part.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# Clean up the comprehension loop variables (Python 2 list comprehensions
# leak their loop variable into the enclosing scope).
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    # Templates are ordered greediest-first; take the first that matches.
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the local fields by the offset
    # and let mktime() normalize the result.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Korean date words as unicode code points (the trailing comments give the
# original euc-kr byte values).
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    # Rebuild as a W3DTF timestamp; OnBlog dates are KST (UTC+09:00).
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % (
        m.group(1), m.group(2), m.group(3),
        m.group(4), m.group(5), m.group(6))
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    # Convert the AM/PM marker to a 24-hour, zero-padded hour.
    hour = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour += 12
    hour = '%02d' % hour
    # Rebuild as a W3DTF timestamp; Nate dates are KST (UTC+09:00).
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % (
        m.group(1), m.group(2), m.group(3),
        hour, m.group(6), m.group(7))
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # Rebuild as a W3DTF timestamp.  NOTE(review): the +09:00 offset was
    # inherited from the Korean handlers upstream; kept for compatibility.
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % m.group(1, 2, 3, 4, 5, 6)
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps abbreviated Greek month names to English abbreviations; comments give
# the original iso-8859-7 byte values.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

# Maps abbreviated Greek weekday names to English abbreviations.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# RFC-822-like layout with Greek weekday/month names.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # unknown weekday or month name: not a Greek date after all
        # (narrowed from a bare except; only the dict lookups can fail here)
        return
    # Translate into an RFC 822 date and delegate to the RFC 822 parser.
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps Hungarian month names to two-digit month numbers; comments give the
# original iso-8859-2 byte values of the accented characters.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
                              # NOTE(review): 'm\u00e1ujus' looks like a typo
                              # for 'm\u00e1jus' but is present upstream;
                              # changing it would alter which feeds match.
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# ISO-like layout with a Hungarian month name in the middle.
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except KeyError:
        # unknown month name: not a Hungarian date after all
        # (narrowed from a bare except; only the dict lookup can fail here)
        return
    # Translate into a W3DTF timestamp and delegate to the W3DTF parser.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3DTF (ISO-8601 profile) date into a 9-tuple in GMT.

    Accepts YYYY, YYYY-MM, YYYY-MM-DD, YYYY-DDD (Julian day-of-year), each
    optionally followed by T<time> with an optional zone designator.
    Returns None if the whole string does not match.
    '''
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals an unusable year.
        year = int(m.group('year'))
        if year < 100:
            # Two-digit year: assume the current century.
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Julian day-of-year form (YYYY-DDD): start from a rough
            # month/day guess, then nudge until mktime/gmtime round-trips
            # to the requested day of the year.
            julian = int(julian)
            month = julian / 30 + 1  # Python 2 integer division
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]  # tm_yday
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            # Day is only meaningful when a month was given.
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hours, minutes, seconds); all zero when no time was given.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            # NOTE(review): the regex admits fractional seconds ('55.5');
            # int() then raises ValueError, which _parse_date swallows.
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # Positive designators are east of UTC, so subtract to reach GMT.
        if tzd[0] == '+':
            return -offset
        return offset
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    # The whole string must be consumed, not just a prefix.
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    tokens = dateString.split()
    # Drop a leading day-of-week token ('Thu,' / 'Thu.' / bare day name).
    if tokens[0][-1] in (',', '.') or tokens[0].lower() in rfc822._daynames:
        del tokens[0]
    if len(tokens) == 4:
        # Four tokens means the zone may be glued onto the time
        # ('09:15:00+0200'); split it out, or pad with an empty zone.
        last = tokens[3]
        plus = last.find('+')
        if plus > 0:
            tokens[3:] = [last[:plus], last[plus + 1:]]
        else:
            tokens.append('')
        dateString = " ".join(tokens)
    if len(tokens) < 5:
        # Date-only input: assume midnight GMT.
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # Try every registered handler in order; the first one that returns a
    # plausible 9-tuple wins.  Handler exceptions are deliberately swallowed
    # (logged only when _debug is set) so one bad format cannot break parsing.
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # Sanity check: every element must be coercible to int
            # (raises, and thus rejects the handler's result, otherwise).
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/

    Returns the tuple (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        # Stray quotes around the charset value are stripped.
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # Now that the data is ASCII-compatible, look for an encoding
        # attribute in the XML declaration.
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # Endianness-ambiguous declared encodings defer to the sniffed
        # byte order.
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    # Apply the RFC 3023 precedence rules described in the docstring.
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Raises (UnicodeDecodeError/LookupError) if the data cannot be decoded
    as the given encoding; callers treat that as "try another encoding".
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); a BOM also overrides a mismatched
    # caller-supplied encoding, since the BOM is authoritative.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        # UTF-16BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        # UTF-16LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        # UTF-8 BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        # UTF-32BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        # UTF-32LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # Rewrite (or insert) the XML declaration so it advertises utf-8,
    # matching the bytes we return.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
result['xml_data'] = data
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, and windows-1252 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'documented declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
if __name__ == '__main__':
    # Command-line usage: feedparser.py url [url ...]
    # With no arguments, print the module docstring and exit.
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # Turn FeedParserDict results into plain dicts so pprint output is
    # readable (see zopeCompatibilityHack).
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| Python |
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
from xml.sax import handler
from westom.feednut.utils.misc import storage
import xml.parsers.expat
class SubscriptionHandler(handler.ContentHandler):
    """SAX content handler that collects subscriptions from an OPML file.

    Each subscription is a Storage object holding the outline attributes
    (text, title, type, xmlUrl).  OPML wraps the feed <outline> elements
    inside a category <outline> element, e.g. (from bloglines):
    <outline title="Subscriptions">
        <outline title="Slashdot" htmlUrl="http://slashdot.org/" type="rss" xmlUrl="http://rss.slashdot.org/slashdot/eqWf" />
        <outline title="SPACE.com" htmlUrl="http://www.space.com/" type="rss" xmlUrl="http://feeds.feedburner.com/spaceheadlines" />
    </outline>
    """
    def __init__(self):
        self.subscriptions = []
        self.inSubscriptionRootOutline = False
        self.inSubscriptionChildOutline = False
        self.valid_subscription_text = ['subscription', 'subscriptions']

    def getSubscriptions(self):
        """Return the list of Storage objects collected so far."""
        return self.subscriptions

    def startElement(self, name, attrs):
        # Only <outline> elements matter for OPML subscriptions.
        if not (name and name.lower() == 'outline'):
            return
        text = attrs.get('text', None)
        title = attrs.get('title', None)
        is_root = ((text and text.lower() in self.valid_subscription_text)
                   or (title and title.lower() in self.valid_subscription_text))
        if is_root:
            # Category element that wraps the actual subscriptions.
            self.inSubscriptionRootOutline = True
        elif self.inSubscriptionRootOutline:
            # Any outline nested under the root category is a feed entry.
            self.inSubscriptionChildOutline = True
            self.subscriptions.append(storage({
                'text': text,
                'title': title,
                'type': attrs.get('type', None),
                'xmlUrl': attrs.get('xmlUrl', None),
            }))

    def endElement(self, name):
        if (name and name.lower() == 'outline') and self.inSubscriptionRootOutline:
            # Closing a child outline first, then the root category itself.
            if self.inSubscriptionChildOutline:
                self.inSubscriptionChildOutline = False
            else:
                self.inSubscriptionRootOutline = False
def parseOpml(opml):
    """Parse an OPML document and return its subscriptions.

    `opml` may be anything xml.sax can parse (a filename, URL, or file
    object).  Returns a list of Storage objects, one per subscription
    outline found by SubscriptionHandler.
    """
    opml_parser = make_parser()
    # Namespace processing is unnecessary for plain OPML outlines.
    opml_parser.setFeature(feature_namespaces, 0)
    subscription_handler = SubscriptionHandler()
    opml_parser.setContentHandler(subscription_handler)
    opml_parser.parse(opml)
    return subscription_handler.getSubscriptions()
"""
FYI: Yahoo puts a limit of 5000 requests per day...
"""
import urllib, string, re
# Yahoo requires an application id with every request.
APP_ID = 'yahoo_feednut'
# V1 term-extraction endpoint (Yahoo caps usage at 5000 requests/day).
YAHOO_URL = 'http://api.search.yahoo.com/ContentAnalysisService/V1/termExtraction'
# Each extracted term comes back wrapped in a <Result> element.
# Raw string: the original non-raw pattern contained the redundant
# escape '\/' and relied on lenient handling of '\w'/'\s' escapes.
RESULT_RE = re.compile(r'<Result>([\w\s]+)</Result>')
# Filter out some stop words we don't want back.
FILTER_LIST = ['a', 'for', 'the', 'of', 'or', 'and', 'in']
# Dict for O(1) membership tests.
FILTER_DICT = dict((val, val) for val in FILTER_LIST)
def extract_terms(text, query='', appid=APP_ID, limit=0):
    """Call the Yahoo! term extraction service and return a list of tags.

    text  -- the context text to extract terms from
    query -- optional query string to focus the extraction
    appid -- Yahoo application id
    limit -- maximum number of tags to return (0 means no limit)

    Words shorter than 3 characters or present in FILTER_DICT are
    dropped; duplicates are removed (first occurrence wins, so the
    result order is deterministic).
    """
    params = urllib.urlencode([('appid', appid), ('context', text), ('query', query)])
    f = urllib.urlopen(YAHOO_URL, params)
    try:
        # Joining with a single space matches the old string.join default.
        content = ' '.join(f.readlines()).strip()
    finally:
        # Release the socket even if reading fails.
        f.close()
    results = RESULT_RE.findall(content)
    seen = {}
    tags = []
    for result in results:
        for word in result.split():
            if word not in FILTER_DICT and word not in seen and len(word) > 2:
                seen[word] = True
                tags.append(word)
    if limit:
        tags = tags[:limit]
    return tags
if __name__ == '__main__':
    """ example usage """
    # print extract_terms('Alice in Wonderland is a great book!')
    # print extract_terms('Italian sculptors and painters of the renaissance favored the Virgin Mary for inspiration.')
    # Live demo: issues a real request to the Yahoo! API, so it needs network access.
    print extract_terms('www.slashdot.org')
| Python |
""""Scrape 'N' Feed
Simple PyRSS2Gen wrapper for screen-scraping RSS feeds
http://www.crummy.com/software/ScrapeNFeed"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "1.0.0"
__copyright__ = "Copyright (c) 2005 Leonard Richardson"
__license__ = "PSF"
import datetime
import md5
import os
import pickle
import time
import traceback
import urllib2
import urlparse
from StringIO import StringIO
from PyRSS2Gen import RSS2, RSSItem, Guid
class WebPageMetadata:
    """Keeps track of the most recent Last-Modified and Etag headers
    obtained for a particular web page, so later fetches can issue
    conditional GETs."""

    def __init__(self, url, pickleFile=None, etag=None, lastModified=None):
        self.url = url
        # urljoin against ' ' resolves relative to the site root; dropping
        # the trailing character leaves the page's base URL.
        self.baseURL = urlparse.urljoin(url, ' ')[:-1]
        if not pickleFile:
            # Default the pickle filename to a digest of the URL.
            pickleFile = self.digest() + '.pickle'
        self.pickleFile = pickleFile
        self.etag = etag
        self.lastModified = lastModified

    def digest(self):
        """Return the MD5 hex digest of the URL (used to derive filenames)."""
        m = md5.new()
        m.update(self.url)
        return m.hexdigest()

    def pickle(self):
        """Serialize this object to self.pickleFile."""
        s = StringIO()
        pickle.dump(self, s)
        # BUG FIX: pickles are byte streams; the file must be opened in
        # binary mode, or text-mode newline translation can corrupt it.
        f = open(self.pickleFile, 'wb')
        f.write(s.getvalue())
        f.close()

    def fetch(self):
        """Fetch the URL with a conditional GET, recording the new ETag
        and Last-Modified headers.  urllib2 raises HTTPError (e.g. 304)
        for error statuses; callers handle that."""
        request = urllib2.Request(self.url)
        if self.etag:
            request.add_header('If-None-Match', self.etag)
        if self.lastModified:
            request.add_header('If-Modified-Since', self.lastModified)
        response = urllib2.urlopen(request)
        headers = response.info()
        self.etag = headers.get('ETag', None)
        self.lastModified = headers.get('Last-Modified', None)
        return response
class ScrapedFeed(RSS2, WebPageMetadata):
"""This class makes it easy to maintain an RSS feed that's somehow derived
from a web page."""
def __init__(self, title, url, description, rssFile=None, pickleFile=None,
maxItems=20, **kwargs):
RSS2.__init__(self, title, url, description, **kwargs)
WebPageMetadata.__init__(self, url, pickleFile)
self.maxItems = maxItems
if not rssFile:
rssFile = self.digest() + '.xml'
self.rssFile = rssFile
self.currentGuids = {}
def refresh(self):
"""Re-fetches the source of this feed, updates the RSS feed
representation to match, outputs a new RSS feed in XML format,
and pickles the new state of the feed."""
try:
response = self.fetch()
headers = response.info()
body = response.read()
self.lastBuildDate = datetime.datetime.now()
try:
self.HTML2RSS(headers, body)
except Exception, e:
#Put the exception into the RSS feed.
import sys
exception = traceback.format_tb(sys.exc_traceback)
description="<p>Unable to finish scraping this webpage into a feed. Please get the person in charge of maintaining the scraped feed (<i>not</i> the person in charge of the original website) to fix this.</p> <p>Stack trace:</p> <pre>%s%s</pre>" % ('\n'.join(exception), e)
self.pushRSSItem(RSSItem(link=self.url + '#' + str(time.time()),
title='Error scraping this feed',
description=description))
self.writeRSS()
self.pickle()
except urllib2.HTTPError, e:
if e.code == 304:
#The page hasn't been modified. Doing nothing is exactly
#the right thing to do.
pass
else:
raise e
def writeRSS(self):
f = open(self.rssFile, 'w')
self.write_xml(f)
f.close()
def hasSeen(self, guid):
"Returns true iff the given guid is already present in this feed."
if isinstance(guid, Guid):
guid = guid.guid
return self.currentGuids.get(guid, False)
def addRSSItems(self, items):
"""Adds a number of RSS items to the top of an RSS feed. If
the resulting feed is longer than the maximum number of items,
and some of those items were put on the feed in previous runs,
the earliest such items will be shifted off the feed."""
for i in items[::-1]:
self.pushRSSItem(i)
def pushRSSItem(self, item):
"""Adds an RSS Item to the top of an RSS feed. If the
resulting feed is longer than the maximum number of items, and
some of those items were put on the feed in previous runs, the
earliest such item will be shifted off the feed."""
if not getattr(item, 'guid') and item.link:
item.guid = Guid(item.link)
if not getattr(item, 'pubDate') or item.pubDate is None:
item.pubDate = self.lastBuildDate
#Stringify data from external sources (eg. Beautiful Soup) to
#avoid complications with pickling.
for field in ('title', 'link', 'description', 'author', 'category',
'comments',' source'):
s = getattr(item, field, None)
if s:
setattr(item, field, unicode(s))
if self.hasSeen(item.guid):
#print "Checking for newer version of %s", item.guid.guid
#This item is already in this feed. Replace it with the possibly
#new version.
for i in range(0, len(self.items)):
check = self.items[i]
if check.guid.guid == item.guid.guid:
#print "Updating possibly old version of %s" % item.guid.guid
self.items[i] = item
break
else:
#We haven't seen this item before, so the new one can go in.
#print "Inserting ", item.guid.guid
self.items.insert(0, item)
self.currentGuids[item.guid.guid] = self.lastBuildDate
while len(self.items) > self.maxItems \
and self.currentGuids.get(self.items[-1].guid.guid) != self.lastBuildDate:
#There are too many items in the feed, and the oldest one
#was inserted in a previous update, so we can get rid of
#it.
#print "%s pushed off the edge!" % self.items[-1].guid.guid
old = self.items.pop(-1)
del(self.currentGuids[old.guid.guid])
def HTML2RSS(self, headers, body):
"""Override this method to build an RSS feed out of the given
HTTP response. This method should construct a number of
PyRSS2Gen.RSSItem objects and call self.addItem() on each
one. You may pass in your guid to self.hasSeen() if you want
to see whether or not to bother creating a particular
RSSItem that might already be in the feed."""
raise Exception, """Hey buddy! You forgot to override the HTML2RSS method
which actually creates the RSS feed out of a web page!"""
def load(cls, title, url, description, rssFile=None,
pickleFile=None, maxItems=20, refresh=True, **kwargs):
if pickleFile and os.path.exists(pickleFile):
f = open(pickleFile, 'r')
feed = pickle.load(f)
feed.title = title
feed.description = description
feed.rssFile=rssFile
feed.maxItems = maxItems
else:
feed = cls(title, url, description, rssFile, pickleFile, maxItems, **kwargs)
if refresh:
feed.refresh()
return feed
load = classmethod(load)
| Python |
"""Caching dictionary that uses access times to decide which objects to
flush from the cache.
When instantiating, you can set the maximum number of key/value pairs
to maintain in the cache:
c = Cache(size=1000)
When the maximum size of the cache is reached, it will be resized
automatically to 95% of its maximum size the next time a new key is added to
the dictionary. The least recently accessed items will be the ones that are
flushed. A cache size of 0 implies an unlimited size. (Cache size should
be controlled by an age parameter.)
You can also specify a maximum entry age when creating the cache:
c = Cache(size=100, age=24*60) # one hour
By default, entries will remain in the cache until they are bumped for space
or time. If an attempt is made to retrieve an item that was added to the
cache before the age threshold, ExpiredError is raised (which is a subclass
of KeyError), just as if they weren't present in the cache.
Ages can be specified as numbers of seconds or as strings consisting of a
number followed by one of the following units: d,h,m,s
"""
# This code has been placed in the public domain.
import time, string, re
class CacheEntry(object):
    """A cached value plus the timestamps the cache uses for eviction.

    ftime -- time of the most recent fetch (read); initialized at creation
    mtime -- time of the most recent modification (write)
    """
    __slots__ = ("value", "ftime", "mtime")

    def __init__(self, value):
        self.set(value)
        # A brand-new entry counts as fetched at creation time.
        self.ftime = time.time()

    def get(self):
        """Return the stored value, recording the access time."""
        self.ftime = time.time()
        return self.value

    def set(self, value):
        """Store a new value, recording the modification time."""
        self.value = value
        self.mtime = time.time()
class ExpiredError(KeyError):
    """Raised when a fetched entry is older than the cache's age limit."""
class Cache(dict):
    """simple cache that uses least recently accessed time to trim size"""

    def __init__(self, data=None, size=100, age=None):
        """data -- optional initial mapping of key/value pairs
        size -- maximum number of entries (0 means unlimited)
        age  -- maximum entry age: seconds, or a string such as
                "30s", "10m", "2h", "1d"
        """
        self.size = size
        self.requests = self.hits = 0
        self.inserts = self.unused = 0
        if isinstance(age, (str, unicode)):
            age = self._cvtage(age)
        self.age = age
        # BUG FIX: `data` used to be ignored entirely, which made copy()
        # (which passes self as data) return an empty cache.  Insert
        # through update() so values get wrapped in CacheEntry objects.
        if data:
            self.update(data)

    def shrink(self):
        """trim cache to no more than 95% of desired size"""
        trim = max(0, int(len(self)-0.95*self.size))
        if trim:
            # sort keys by access times, least recently fetched first
            # NOTE(review): CacheEntry.ftime is initialized to the creation
            # time, so the `val == 0.0` never-fetched test below appears
            # unreachable -- confirm before relying on the `unused` counter.
            values = zip(self.ftimes(), self.keys())
            values.sort()
            for val, k in values[0:trim]:
                if val == 0.0:
                    self.unused += 1
                del self[k]

    def purge_old_entries(self):
        """Delete every entry neither modified nor fetched within the
        last self.age seconds.  No-op when no age limit is set."""
        if self.age is None:
            return
        t = time.time()
        for k in self.keys():
            v = dict.__getitem__(self, k)
            threshold = t - self.age
            # modified or fetched in last self.age seconds?
            if threshold > v.mtime and threshold > v.ftime:
                if v.ftime == 0.0:
                    self.unused += 1
                del self[k]

    def __setitem__(self, key, val):
        """Insert/replace an entry, periodically purging expired entries
        and shrinking the cache when it is at its maximum size."""
        self.inserts += 1
        if self.age is not None and self.requests % 1000 == 0:
            self.purge_old_entries()
        if (key not in self and
            self.size and
            len(self) >= self.size):
            self.shrink()
        dict.__setitem__(self, key, CacheEntry(val))

    def __getitem__(self, key):
        """like normal __getitem__ but updates time of fetched entry"""
        self.requests += 1
        item = dict.__getitem__(self, key)
        val = item.get()
        if self.age is not None:
            if self.requests % 1000 == 0:
                self.purge_old_entries()
            # check to make sure value has not expired
            if time.time()-self.age > item.mtime:
                if item.ftime == 0.0:
                    self.unused += 1
                del self[key]
                raise ExpiredError(key)
        # if we get here there was no KeyError
        self.hits = self.hits + 1
        return val

    def has_key(self, key):
        """Return 1 if key is present and unexpired, else 0.  Unlike
        __getitem__, does not touch the entry's fetch time."""
        try:
            v = dict.__getitem__(self, key)
        except (KeyError, ExpiredError):
            return 0
        if self.age is not None and time.time()-self.age > v.mtime:
            return 0
        return 1

    def get(self, key, default=None):
        """like normal get but updates time of fetched entry"""
        try:
            return self[key]
        except KeyError:
            # ExpiredError is a KeyError subclass, so expiry also
            # yields the default.
            return default

    def values(self):
        """extract values from CacheEntry objects"""
        return [dict.__getitem__(self, key).get() for key in self]

    def ftimes(self):
        """return values' fetch times"""
        return [dict.__getitem__(self, key).ftime for key in self]

    def mtimes(self):
        """return values' mod times"""
        return [dict.__getitem__(self, key).mtime for key in self]

    def items(self):
        # map(None, ...) zips with None padding (Python 2 idiom).
        return map(None, self.keys(), self.values())

    def copy(self):
        # Relies on __init__ honoring `data`.  BUG FIX: also propagate the
        # age limit, which was previously dropped by the copy.
        return self.__class__(self, self.size, self.age)

    def update(self, dict):
        # `dict` shadows the builtin, but renaming the parameter would
        # change the keyword interface, so it is kept.
        for k in dict.keys():
            self[k] = dict[k]

    def stats(self):
        """Return a dict of the hit/insert/request/unused counters."""
        return {
            'hits': self.hits,
            'inserts': self.inserts,
            'requests': self.requests,
            'unused': self.unused,
        }

    def __repr__(self):
        # NOTE: reading self[k] counts as a fetch and may raise
        # ExpiredError for an entry that expires mid-repr.
        l = []
        for k in self.keys():
            l.append("%s: %s" % (repr(k), repr(self[k])))
        return "{" + string.join(l, ", ") + "}"
    __str__ = __repr__

    # number, optional fraction, optional unit letter (d/h/m/s)
    _apat = re.compile(r"([0-9]+([.][0-9]+)?)\s*([dhms])?\s*$")

    def _cvtage(self, age):
        """Convert an age spec string ("3", "3s", "2m", "1h", "1d") to
        seconds; raise ValueError for anything else."""
        mat = self._apat.match(age)
        if mat is None:
            raise ValueError("invalid age spec: %s" % age)
        n = float(mat.group(1))
        units = mat.group(3) or "s"
        if units == "s":
            pass
        elif units == "m":
            n = n * 60
        elif units == "h":
            n = n * 60*60
        elif units == "d":
            n = n * 24*60*60
        return n
def _test():
    # Self-test: exercises overflow trimming, age-based expiry, string age
    # specs, zero-size behaviour and explicit purging.  Uses real sleeps,
    # so it takes several seconds to run.
    print "testing cache overflow properties"
    c = Cache(size=100)
    for i in range(120):
        c[i] = i
        if i > 5:
            # keep key 5 "recently fetched" so shrink() never evicts it
            x = c[5]
        time.sleep(0.01)
    x = c.keys()
    x.sort()
    assert x == [5]+range(21,120), x
    c.update({1:1})
    x = c.keys()
    x.sort()
    assert x == [1,5]+range(26,120), x
    print "testing cache aging properties"
    c = Cache(age=3)
    for i in range(5):
        c[i] = i
    time.sleep(1)
    try:
        x = c[3]
    except ExpiredError:
        pass
    # after 1s nothing has expired yet
    assert len(c) == 5 and x is not None
    time.sleep(4)
    try:
        x = c[3]
    except ExpiredError:
        x = None
    # after ~5s the fetched entry has expired and been deleted
    assert len(c) == 4 and x is None
    assert not c.has_key(4)
    c[4] = 1
    assert c.has_key(4)
    print "testing cache string age specification"
    for age,n in [("3s",3),("1m",60),("1h",60*60),("1d",24*60*60)]:
        c = Cache(age=age)
        assert (c.age-n) < 0.00001
    try:
        c = Cache(age="3.0sec")
    except ValueError:
        # invalid unit suffix must be rejected
        c = None
    assert c is None
    print "testing zero size cache"
    c = Cache(size=0, age="100s")
    for n in range(100):
        c[n] = n
    assert len(c) == 100, len(c)
    c.size = 50
    c['1'] = 1
    # insert above triggers shrink() down to 95% of the new size
    assert len(c) <= 50, len(c)
    c.size = 0
    for n in range(1000):
        c[n] = n
    # size 0 means unlimited: nothing is trimmed
    assert len(c) == 1001, len(c)
    print "testing age-based purge capability"
    c = Cache(size=0, age="5s")
    for n in range(10):
        c[n] = n
    assert len(c) == 10
    assert c.inserts == len(c), c.inserts
    for i in range(989):
        x = c[n]
    assert len(c) == 10
    assert c.requests == 989, c.requests
    time.sleep(10)
    c[100] = 1
    c.purge_old_entries()
    # everything but the freshly inserted key should be gone
    assert len(c) == 1, len(c)
    print "all cache tests passed"

if __name__ == "__main__":
    _test()
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
    # Project settings module; must be importable for every command below.
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import MySQLdb, sys
def freshen():
"""
Helper function that deletes and creates the database
"""
#let's delete the database
db=MySQLdb.connect(db=settings.DATABASE_NAME,
user=settings.DATABASE_USER,
passwd=settings.DATABASE_PASSWORD)
c=db.cursor()
c.execute("DROP DATABASE " + settings.DATABASE_NAME)
print 'Dropped database: ' + settings.DATABASE_NAME
c.execute("CREATE DATABASE " + settings.DATABASE_NAME)
print 'Created database: ' + settings.DATABASE_NAME
sys.argv.remove('fresh')
#first, let's init it
sys.argv = [sys.argv[0], 'syncdb']
execute_manager(settings)
#now, update some fields that should be blobs
c.execute("USE %s" % settings.DATABASE_NAME)
# c.execute("ALTER TABLE FEEDNUT_FEED MODIFY FEED_DATA BLOB NOT NULL")
c.execute("ALTER TABLE FEEDNUT_FEED CONVERT TO CHARACTER SET utf8")
c.execute("ALTER TABLE FEEDNUT_FEEDENTRY CONVERT TO CHARACTER SET utf8")
db.close()
print 'Synced database'
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'fresh':
freshen()
elif len(sys.argv) > 1 and sys.argv[1] == 'compress':
#compresses the javascripts
import os, subprocess
os.chdir('scripts')
p = subprocess.Popen(
['python', 'compressJS.py'],
stdout=subprocess.PIPE,
)
#otherwise, process normally...
else:
execute_manager(settings)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.