code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
# Library protocol version, sent as the oauth_version parameter.
VERSION = '1.0' # Hi Blaine!
# Default HTTP method used when building requests.
HTTP_METHOD = 'GET'
# Signature method assumed when a request omits oauth_signature_method.
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
    """Raised for any OAuth protocol or parameter error."""
    def __init__(self, message='OAuth error occurred.'):
        # Pass the message to RuntimeError so str(e) and e.args work;
        # the original skipped the base __init__, leaving str(e) empty.
        RuntimeError.__init__(self, message)
        self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
    """Return a WWW-Authenticate header dict advertising OAuth for *realm*."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
# url escape
def escape(s):
    """Percent-encode *s* for OAuth; '~' is the only character left unescaped.

    Unlike urllib.quote's default safe set, this escapes '/' as well,
    as required for OAuth signature base strings.
    """
    # escape '/' too
    return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
    """Return the current time as integer seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
    """Return a pseudorandom nonce made of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
    """Consumer identity: a public key plus the secret shared with the provider."""
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key, self.secret = key, secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
    """A request or access token: the token string plus its secret."""
    key = None     # the token
    secret = None  # the token secret

    def __init__(self, key, secret):
        self.key, self.secret = key, secret

    def to_string(self):
        """Serialize as a URL-encoded query string."""
        pairs = {'oauth_token': self.key, 'oauth_token_secret': self.secret}
        return urllib.urlencode(pairs)

    # Build a token from a serialized form such as:
    # oauth_token_secret=digg&oauth_token=digg
    def from_string(s):
        params = cgi.parse_qs(s, keep_blank_values=False)
        return OAuthToken(params['oauth_token'][0],
                          params['oauth_token_secret'][0])
    from_string = staticmethod(from_string)

    def __str__(self):
        return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
    """An OAuth HTTP request: method, URL and a parameter dict.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None # oauth parameters
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        """Return a parameter value; raise OAuthError if it is absent."""
        try:
            return self.parameters[parameter]
        except KeyError:
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')

    # get any non-oauth parameters
    def get_nonoauth_parameters(self):
        parameters = {}
        for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    # serialize as a header for an HTTPAuth request
    def to_header(self, realm=''):
        auth_header = 'OAuth realm="%s"' % realm
        # add the oauth parameters
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    # serialize as post data for a POST request
    def to_postdata(self):
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v)))
                         for k, v in self.parameters.iteritems()])

    # serialize as a url for a GET request
    def to_url(self):
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    def get_normalized_parameters(self):
        """Return the sorted, escaped parameter string used for signing.

        Works on a copy of self.parameters: the original implementation
        deleted oauth_signature from the live dict, silently mutating the
        request as a side effect of building the base string.
        """
        params = dict(self.parameters)
        # exclude the signature if it exists
        params.pop('oauth_signature', None)
        # sort lexicographically, first after key, then after value
        key_values = sorted(params.items())
        # combine key value pairs in string and escape
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v)))
                         for k, v in key_values])

    # just uppercases the http method
    def get_normalized_http_method(self):
        return self.http_method.upper()

    # parses the url and rebuilds it to be scheme://host/path
    def get_normalized_http_url(self):
        parts = urlparse.urlparse(self.http_url)
        return '%s://%s%s' % (parts[0], parts[1], parts[2])  # scheme, netloc, path

    # set the signature parameter to the result of build_signature
    def sign_request(self, signature_method, consumer, token):
        # set the signature method
        self.set_parameter('oauth_signature_method', signature_method.get_name())
        # set the signature
        self.set_parameter('oauth_signature',
                           self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        # delegate to the signature-method strategy object
        return signature_method.build_signature(self, consumer, token)

    def from_request(http_method, http_url, headers=None, parameters=None,
                     query_string=None):
        """Combine header, query-string and URL parameters into an OAuthRequest.

        Returns None when no parameters are found at all.
        """
        if parameters is None:
            parameters = {}
        # headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # check that the authorization header is OAuth
            # (str.find avoids the ValueError that str.index raised for
            # non-OAuth schemes such as Basic)
            if auth_header.find('OAuth') > -1:
                try:
                    # get the parameters from the header
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except Exception:
                    raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
        # GET or POST query string
        if query_string:
            parameters.update(OAuthRequest._split_url_string(query_string))
        # URL parameters
        param_str = urlparse.urlparse(http_url)[4] # query
        parameters.update(OAuthRequest._split_url_string(param_str))
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)

    def from_consumer_and_token(oauth_consumer, token=None,
                                http_method=HTTP_METHOD, http_url=None,
                                parameters=None):
        """Build a request with the standard OAuth defaults for a consumer."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        # caller-supplied parameters win over the defaults
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)

    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
                                http_url=None, parameters=None):
        """Build a request carrying a token and an optional callback URL."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)

    # util function: turn Authorization: header into parameters, has to do some unescaping
    def _split_header(header):
        params = {}
        for param in header.split(','):
            # ignore realm parameter
            if param.find('OAuth realm') > -1:
                continue
            # remove whitespace
            param = param.strip()
            # split key-value
            param_parts = param.split('=', 1)
            # remove quotes and unescape the value
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)

    # util function: turn url string into parameters, has to do some unescaping
    def _split_url_string(param_str):
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
    """Verifies incoming OAuth requests against an OAuthDataStore.

    Handles the three server-side operations: issuing request tokens,
    exchanging them for access tokens, and verifying signed API calls.
    """
    timestamp_threshold = 300 # in seconds, five minutes
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, oauth_data_store):
        # Fixed: previously assigned the undefined name 'data_store',
        # which raised NameError whenever this setter was called.
        self.data_store = oauth_data_store

    def get_data_store(self):
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register signature_method under its protocol name; return the map."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    # process a request_token request
    # returns the request token on success
    def fetch_request_token(self, oauth_request):
        try:
            # get the request token for authorization
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # no token required for the initial token request
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            self._check_signature(oauth_request, consumer, None)
            # fetch a new token
            token = self.data_store.fetch_request_token(consumer)
        return token

    # process an access_token request
    # returns the access token on success
    def fetch_access_token(self, oauth_request):
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the request token
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token)
        return new_token

    # verify an api call, checks all the parameters
    def verify_request(self, oauth_request):
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the access token
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    # authorize a request token
    def authorize_token(self, token, user):
        return self.data_store.authorize_request_token(token, user)

    # get the callback url
    def get_callback(self, oauth_request):
        return oauth_request.get_parameter('oauth_callback')

    # optional support for the authenticate header
    def build_authenticate_header(self, realm=''):
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, oauth_request):
        """Return the request's oauth_version, defaulting to this library's."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except OAuthError:
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, oauth_request):
        """Resolve the request's signature method to a registered strategy."""
        try:
            signature_method = oauth_request.get_parameter('oauth_signature_method')
        except OAuthError:
            signature_method = SIGNATURE_METHOD
        try:
            # get the signature method object
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        if not consumer_key:
            raise OAuthError('Invalid consumer key.')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    # try to find the token for the provided request token key
    def _get_token(self, oauth_request, token_type='access'):
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError on failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except OAuthError:
            raise OAuthError('Missing signature.')
        # validate the signature (the unreachable statement that followed
        # the raise below has been removed)
        valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base string: %s' % base)

    def _check_timestamp(self, timestamp):
        """Reject timestamps older than timestamp_threshold seconds."""
        # verify that timestamp is recentish
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Reject a nonce the data store has already seen for this pairing."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
    """Base class for clients that execute OAuth requests against a provider."""
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        return self.consumer

    def get_token(self):
        return self.token

    def fetch_request_token(self, oauth_request):
        """Exchange a signed request for a request token. -> OAuthToken"""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """Exchange a request token for an access token. -> OAuthToken"""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """Fetch a protected resource. -> provider response data"""
        raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
    """Abstract storage interface for consumers, tokens and nonces.

    Signatures have been aligned with how OAuthServer actually invokes
    them, so subclasses written against this interface plug in cleanly.
    """
    def lookup_consumer(self, key):
        # -> OAuthConsumer
        raise NotImplementedError

    def lookup_token(self, token_type, token_field):
        # -> OAuthToken
        # Was (oauth_consumer, token_type, token_token), which did not match
        # OAuthServer._get_token's two-argument call.
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        # -> OAuthToken or None
        # Trailing timestamp parameter dropped: OAuthServer._check_nonce
        # passes only consumer, token and nonce.
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer):
        # -> OAuthToken
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token):
        # -> OAuthToken
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        # -> OAuthToken
        raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
    """Strategy interface for computing and checking request signatures."""

    def get_name(self):
        """Protocol name of this method. -> str"""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """Return the (key, raw) material the signature is computed from."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signature string for the request."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Recompute the signature and compare it with the supplied one."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signing per OAuth Core 1.0 section 9.2."""

    def get_name(self):
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        base_parts = [
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        ]
        raw = '&'.join(base_parts)
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Return the base64-encoded HMAC-SHA1 digest of the base string."""
        key, raw = self.build_signature_base_string(oauth_request, consumer, token)
        try:
            import hashlib # Python 2.5+
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # deprecated pre-2.5 fallback
            hashed = hmac.new(key, raw, sha)
        # base64 digest, minus the trailing newline b2a_base64 appends
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signing: the signature is just the escaped secrets.

    NOTE(review): unlike HMAC-SHA1's (key, raw) tuple, this class's
    build_signature_base_string returns a single string, so
    OAuthServer._check_signature's `key, base = ...` unpacking would
    break for PLAINTEXT — confirm before relying on that error path.
    """
    def get_name(self):
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return 'consumer_secret&token_secret' (token part empty if no token)."""
        # concatenate the consumer key and secret
        sig = escape(consumer.secret) + '&'
        if token:
            sig = sig + escape(token.secret)
        return sig

    def build_signature(self, oauth_request, consumer, token):
        # for PLAINTEXT the signature IS the base string
        return self.build_signature_base_string(oauth_request, consumer, token)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Google Data elements.
Extends Atom classes to add Google Data specific elements.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import os
import atom
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in GData entities.
# The *_TEMPLATE strings take a tag name via %s and yield an
# ElementTree-style fully qualified name: '{namespace}tag'.
GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass


class MissingRequiredParameters(Error):
  """Raised when an operation lacks parameters it requires."""
  pass
class MediaSource(object):
  """Holds a media file handle plus the metadata needed to upload it.

  GData entries can refer to media sources; this bundles the open handle
  with its MIME type, byte length and base file name.
  """

  def __init__(self, file_handle=None, content_type=None, content_length=None,
               file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated in
          the MediaSource.
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle is
          given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # When no handle was supplied but a path (plus MIME type) was, open the
    # file ourselves and derive length and base name from it.
    no_handle_given = file_handle is None
    if no_handle_given and content_type is not None and file_path is not None:
      self.setFile(file_path, content_type)

  def setFile(self, file_name, content_type):
    """Opens file_name and records its MIME type, size and base name.

    Args:
      file_name: string The path and file name to the file containing the
          media.
      content_type: string A MIME type representing the type of the media.
    """
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)
class LinkFinder(atom.LinkFinder):
  """Mixin with helpers to pick specific links out of self.link.

  GData entries often carry several atom:link children which differ only
  in their rel attribute or content type. Each method below returns the
  first matching link, or None when nothing matches. Used as a mixin in
  GData entries.
  """

  def _FirstLinkWithRel(self, rel):
    # Internal: first link whose rel equals *rel*, else None.
    for candidate in self.link:
      if candidate.rel == rel:
        return candidate
    return None

  def GetSelfLink(self):
    """First link with rel='self', or None."""
    return self._FirstLinkWithRel('self')

  def GetEditLink(self):
    """First link with rel='edit', or None."""
    return self._FirstLinkWithRel('edit')

  def GetEditMediaLink(self):
    """The Picasa API mistakenly returns media-edit rather than edit-media,
    but this may change soon, so both rels are accepted.
    """
    for candidate in self.link:
      if candidate.rel == 'edit-media':
        return candidate
      if candidate.rel == 'media-edit':
        return candidate
    return None

  def GetHtmlLink(self):
    """First link with rel='alternate' and type='text/html', or None."""
    for candidate in self.link:
      if candidate.rel == 'alternate' and candidate.type == 'text/html':
        return candidate
    return None

  def GetPostLink(self):
    """Link whose rel marks the POST (insert) target URL, or None."""
    return self._FirstLinkWithRel('http://schemas.google.com/g/2005#post')

  def GetAclLink(self):
    return self._FirstLinkWithRel(
        'http://schemas.google.com/acl/2007#accessControlList')

  def GetFeedLink(self):
    return self._FirstLinkWithRel('http://schemas.google.com/g/2005#feed')

  def GetNextLink(self):
    return self._FirstLinkWithRel('next')

  def GetPrevLink(self):
    return self._FirstLinkWithRel('previous')
class TotalResults(atom.AtomBase):
  """The opensearch:totalResults element of a GData feed."""
  _tag = 'totalResults'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None, extension_attributes=None,
               text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text


def TotalResultsFromString(xml_string):
  """Parses xml_string into a TotalResults instance."""
  return atom.CreateClassFromXMLString(TotalResults, xml_string)
class StartIndex(atom.AtomBase):
  """The opensearch:startIndex element in a GData feed."""
  _tag = 'startIndex'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None, extension_attributes=None,
               text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text


def StartIndexFromString(xml_string):
  """Parses xml_string into a StartIndex instance."""
  return atom.CreateClassFromXMLString(StartIndex, xml_string)
class ItemsPerPage(atom.AtomBase):
  """The opensearch:itemsPerPage element in a GData feed."""
  _tag = 'itemsPerPage'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None, extension_attributes=None,
               text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text


def ItemsPerPageFromString(xml_string):
  """Parses xml_string into an ItemsPerPage instance."""
  return atom.CreateClassFromXMLString(ItemsPerPage, xml_string)
class ExtendedProperty(atom.AtomBase):
  """The Google Data extendedProperty element.

  Stores arbitrary application-specific key-value data. The value lives
  either in the 'value' XML attribute or as a single child XML node (the
  "XML blob"). Used in the Google Calendar and Google Contacts data APIs.
  """
  _tag = 'extendedProperty'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'

  def __init__(self, name=None, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.name = name
    self.value = value
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  def GetXmlBlobExtensionElement(self):
    """Returns the blob as an atom.ExtensionElement, or None if unset."""
    if self.extension_elements:
      return self.extension_elements[0]
    return None

  def GetXmlBlobString(self):
    """Returns the blob's XML as a string, or None if unset."""
    blob = self.GetXmlBlobExtensionElement()
    if blob:
      return blob.ToString()
    return None

  def SetXmlBlob(self, blob):
    """Replaces any existing child content with blob as the XML blob.

    Since the extendedProperty is only allowed one child element, setting
    the XML blob erases any preexisting extension elements in this object.

    Args:
      blob: str, ElementTree Element or atom.ExtensionElement representing
          the XML blob stored in the extendedProperty.
    """
    if isinstance(blob, atom.ExtensionElement):
      element = blob
    elif ElementTree.iselement(blob):
      element = atom._ExtensionElementFromElementTree(blob)
    else:
      element = atom.ExtensionElementFromString(blob)
    self.extension_elements = [element]


def ExtendedPropertyFromString(xml_string):
  """Parses xml_string into an ExtendedProperty instance."""
  return atom.CreateClassFromXMLString(ExtendedProperty, xml_string)
class GDataEntry(atom.Entry, LinkFinder):
  """Extends atom.Entry with GData link helpers and media awareness."""
  _tag = atom.Entry._tag
  _namespace = atom.Entry._namespace
  _children = atom.Entry._children.copy()
  _attributes = atom.Entry._attributes.copy()

  def __GetId(self):
    return self.__id

  def __SetId(self, id):
    # Strip stray whitespace from the id's text node when present.
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()

  id = property(__GetId, __SetId)

  def IsMedia(self):
    """True when this entry is a GData Media entry (has an edit-media link)."""
    return bool(self.GetEditMediaLink())

  def GetMediaURL(self):
    """Returns the URL to the media content for media entries, else None."""
    if self.IsMedia():
      return self.content.src
    return None


def GDataEntryFromString(xml_string):
  """Creates a new GDataEntry instance given a string of XML."""
  return atom.CreateClassFromXMLString(GDataEntry, xml_string)
class GDataFeed(atom.Feed, LinkFinder):
  """A Feed from a GData service."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = atom.Feed._children.copy()
  _attributes = atom.Feed._attributes.copy()
  _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results',
                                                          TotalResults)
  _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index',
                                                        StartIndex)
  _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page',
                                                          ItemsPerPage)
  # Add a conversion rule for atom:entry to make it into a GData Entry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry])

  def __GetId(self):
    return self.__id

  def __SetId(self, id):
    # Strip the unwanted whitespace from the id's text node when present.
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()

  id = property(__GetId, __SetId)

  def __GetGenerator(self):
    return self.__generator

  def __SetGenerator(self, generator):
    self.__generator = generator
    # Guard generator.text like __SetId does: a generator element whose
    # text node is None previously raised AttributeError on .strip().
    if generator is not None and generator.text is not None:
      self.__generator.text = generator.text.strip()

  generator = property(__GetGenerator, __SetGenerator)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      total_results=None, start_index=None, items_per_page=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor for Source

    Args:
      author: list (optional) A list of Author instances which belong to this
          class.
      category: list (optional) A list of Category instances
      contributor: list (optional) A list on Contributor instances
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The feed's Id element
      link: list (optional) A list of Link instances
      logo: Logo (optional)
      rights: Rights (optional) The feed's Rights element
      subtitle: Subtitle (optional) The feed's subtitle element
      title: Title (optional) the feed's title element
      updated: Updated (optional) the feed's updated element
      entry: list (optional) A list of the Entry instances contained in the
          feed.
      total_results: TotalResults (optional) opensearch:totalResults element
      start_index: StartIndex (optional) opensearch:startIndex element
      items_per_page: ItemsPerPage (optional) opensearch:itemsPerPage element
      text: String (optional) The text contents of the element. This is the
          contents of the feed's XML text node.
          (Example: <foo>This is the text</foo>)
      extension_elements: list (optional) A list of ExtensionElement instances
          which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which are
          the values for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.entry = entry or []
    self.total_results = total_results
    self.start_index = start_index
    self.items_per_page = items_per_page
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def GDataFeedFromString(xml_string):
  """Parses xml_string into a GDataFeed instance."""
  return atom.CreateClassFromXMLString(GDataFeed, xml_string)
class BatchId(atom.AtomBase):
  """The batch:id element: identifies one operation within a batch feed."""
  _tag = 'id'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()


def BatchIdFromString(xml_string):
  """Parses xml_string into a BatchId instance."""
  return atom.CreateClassFromXMLString(BatchId, xml_string)
class BatchOperation(atom.AtomBase):
  """The batch:operation element: which CRUD action an entry requests."""
  _tag = 'operation'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['type'] = 'type'

  def __init__(self, op_type=None, extension_elements=None,
               extension_attributes=None, text=None):
    # op_type should be one of BATCH_INSERT/UPDATE/DELETE/QUERY.
    self.type = op_type
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)


def BatchOperationFromString(xml_string):
  """Parses xml_string into a BatchOperation instance."""
  return atom.CreateClassFromXMLString(BatchOperation, xml_string)
class BatchStatus(atom.AtomBase):
  """The batch:status element present in a batch response entry.

  Carries the per-operation HTTP result (code, reason, content-type) that
  would normally arrive as the HTTP response itself; in a batch feed each
  response entry reports its own status this way.
  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _tag = 'status'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['code'] = 'code'
  _attributes['reason'] = 'reason'
  _attributes['content-type'] = 'content_type'

  def __init__(self, code=None, reason=None, content_type=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.code = code
    self.reason = reason
    self.content_type = content_type
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)


def BatchStatusFromString(xml_string):
  """Parses xml_string into a BatchStatus instance."""
  return atom.CreateClassFromXMLString(BatchStatus, xml_string)
class BatchEntry(GDataEntry):
"""An atom:entry for use in batch requests.
The BatchEntry contains additional members to specify the operation to be
performed on this entry and a batch ID so that the server can reference
individual operations in the response feed. For more information, see:
http://code.google.com/apis/gdata/batch.html
"""
_tag = GDataEntry._tag
_namespace = GDataEntry._namespace
_children = GDataEntry._children.copy()
_children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
_children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
_children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
_attributes = GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, control=None, title=None, updated=None,
batch_operation=None, batch_id=None, batch_status=None,
extension_elements=None, extension_attributes=None, text=None):
self.batch_operation = batch_operation
self.batch_id = batch_id
self.batch_status = batch_status
GDataEntry.__init__(self, author=author, category=category,
content=content, contributor=contributor, atom_id=atom_id, link=link,
published=published, rights=rights, source=source, summary=summary,
control=control, title=title, updated=updated,
extension_elements=extension_elements,
extension_attributes=extension_attributes, text=text)
def BatchEntryFromString(xml_string):
  """Deserialize a BatchEntry from its XML string form."""
  entry = atom.CreateClassFromXMLString(BatchEntry, xml_string)
  return entry
class BatchInterrupted(atom.AtomBase):
  """The batch:interrupted element sent if batch request was interrupted.

  Only appears in a feed if some of the batch entries could not be processed.
  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _tag = 'interrupted'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['reason'] = 'reason'
  _attributes['success'] = 'success'
  _attributes['failures'] = 'failures'
  _attributes['parsed'] = 'parsed'
  def __init__(self, reason=None, success=None, failures=None, parsed=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor.

    Args:
      reason: str or None Why processing was interrupted.
      success: str or None Count of entries processed successfully.
      failures: str or None Count of entries that failed.
      parsed: str or None Count of entries that were parsed.
    """
    self.reason = reason
    self.success = success
    self.failures = failures
    self.parsed = parsed
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
def BatchInterruptedFromString(xml_string):
  """Deserialize a BatchInterrupted from its XML string form."""
  interrupted = atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
  return interrupted
class BatchFeed(GDataFeed):
  """A feed containing a list of batch request entries."""
  _tag = GDataFeed._tag
  _namespace = GDataFeed._namespace
  _children = GDataFeed._children.copy()
  _attributes = GDataFeed._attributes.copy()
  # Entries in a batch feed are BatchEntry objects (not plain GDataEntry),
  # and the feed may carry a batch:interrupted element from the server.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
  _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted)
  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      total_results=None, start_index=None, items_per_page=None,
      interrupted=None,
      extension_elements=None, extension_attributes=None, text=None):
    # interrupted: BatchInterrupted or None, set by the server when some
    # of the batch entries could not be processed.
    self.interrupted = interrupted
    GDataFeed.__init__(self, author=author, category=category,
                       contributor=contributor, generator=generator,
                       icon=icon, atom_id=atom_id, link=link,
                       logo=logo, rights=rights, subtitle=subtitle,
                       title=title, updated=updated, entry=entry,
                       total_results=total_results, start_index=start_index,
                       items_per_page=items_per_page,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes,
                       text=text)
  def AddBatchEntry(self, entry=None, id_url_string=None,
      batch_id_string=None, operation_string=None):
    """Logic for populating members of a BatchEntry and adding to the feed.

    If the entry is not a BatchEntry, it is converted to a BatchEntry so
    that the batch specific members will be present.

    The id_url_string can be used in place of an entry if the batch operation
    applies to a URL. For example query and delete operations require just
    the URL of an entry, no body is sent in the HTTP request. If an
    id_url_string is sent instead of an entry, a BatchEntry is created and
    added to the feed.

    This method also assigns the desired batch id to the entry so that it
    can be referenced in the server's response. If the batch_id_string is
    None, this method will assign a batch_id to be the index at which this
    entry will be in the feed's entry list.

    Args:
      entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
          entry which will be sent to the server as part of the batch request.
          The item must have a valid atom id so that the server knows which
          entry this request references.
      id_url_string: str (optional) The URL of the entry to be acted on. You
          can find this URL in the text member of the atom id for an entry.
          If an entry is not sent, this id will be used to construct a new
          BatchEntry which will be added to the request feed.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. Note that batch_ids should either always be specified or
          never, mixing could potentially result in duplicate batch ids.
      operation_string: str (optional) The desired batch operation which will
          set the batch_operation.type member of the entry. Options are
          'insert', 'update', 'delete', and 'query'

    Raises:
      MissingRequiredParameters: Raised if neither an id_ url_string nor an
          entry are provided in the request.

    Returns:
      The added entry.
    """
    if entry is None and id_url_string is None:
      raise MissingRequiredParameters('supply either an entry or URL string')
    # When only a URL was given, wrap it in a minimal BatchEntry.
    if entry is None and id_url_string is not None:
      entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
    # TODO: handle cases in which the entry lacks batch_... members.
    #if not isinstance(entry, BatchEntry):
      # Convert the entry to a batch entry.
    if batch_id_string is not None:
      entry.batch_id = BatchId(text=batch_id_string)
    elif entry.batch_id is None or entry.batch_id.text is None:
      # Default batch id: this entry's position in the feed.
      entry.batch_id = BatchId(text=str(len(self.entry)))
    if operation_string is not None:
      entry.batch_operation = BatchOperation(op_type=operation_string)
    self.entry.append(entry)
    return entry
  def AddInsert(self, entry, batch_id_string=None):
    """Add an insert request to the operations in this batch request feed.

    If the entry doesn't yet have an operation or a batch id, these will
    be set to the insert operation and a batch_id specified as a parameter.

    Args:
      entry: BatchEntry The entry which will be sent in the batch feed as an
          insert request.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. Note that batch_ids should either always be specified or
          never, mixing could potentially result in duplicate batch ids.
    """
    entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
                               operation_string=BATCH_INSERT)
  def AddUpdate(self, entry, batch_id_string=None):
    """Add an update request to the list of batch operations in this feed.

    Sets the operation type of the entry to insert if it is not already set
    and assigns the desired batch id to the entry so that it can be
    referenced in the server's response.

    Args:
      entry: BatchEntry The entry which will be sent to the server as an
          update (HTTP PUT) request. The item must have a valid atom id
          so that the server knows which entry to replace.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is None,
          the current length of the feed's entry array will be used as a
          count. See also comments for AddInsert.
    """
    entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
                               operation_string=BATCH_UPDATE)
  def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a delete request to the batch request feed.

    This method takes either the url_string which is the atom id of the item
    to be deleted, or the entry itself. The atom id of the entry must be
    present so that the server knows which entry should be deleted.

    Args:
      url_string: str (optional) The URL of the entry to be deleted. You can
          find this URL in the text member of the atom id for an entry.
      entry: BatchEntry (optional) The entry to be deleted.
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters: Raised if neither a url_string nor an entry
          are provided in the request.
    """
    entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
                               batch_id_string=batch_id_string,
                               operation_string=BATCH_DELETE)
  def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a query request to the batch request feed.

    This method takes either the url_string which is the query URL
    whose results will be added to the result feed. The query URL will
    be encapsulated in a BatchEntry, and you may pass in the BatchEntry
    with a query URL instead of sending a url_string.

    Args:
      url_string: str (optional)
      entry: BatchEntry (optional)
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters
    """
    entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
                               batch_id_string=batch_id_string,
                               operation_string=BATCH_QUERY)
  def GetBatchLink(self):
    """Return the link with rel '.../g/2005#batch', or None if absent."""
    for link in self.link:
      if link.rel == 'http://schemas.google.com/g/2005#batch':
        return link
    return None
def BatchFeedFromString(xml_string):
  """Deserialize a BatchFeed from its XML string form."""
  feed = atom.CreateClassFromXMLString(BatchFeed, xml_string)
  return feed
class EntryLink(atom.AtomBase):
  """The gd:entryLink element: an atom entry embedded by reference."""
  _tag = 'entryLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # The entry used to be an atom.Entry, now it is a GDataEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
  _attributes['rel'] = 'rel'
  _attributes['readOnly'] = 'read_only'
  _attributes['href'] = 'href'
  def __init__(self, href=None, read_only=None, rel=None,
      entry=None, extension_elements=None,
      extension_attributes=None, text=None):
    # Note: unlike sibling classes, this initializes AtomBase's members
    # directly instead of calling atom.AtomBase.__init__.
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.entry = entry
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def EntryLinkFromString(xml_string):
  """Deserialize an EntryLink from its XML string form."""
  entry_link = atom.CreateClassFromXMLString(EntryLink, xml_string)
  return entry_link
class FeedLink(atom.AtomBase):
  """The gd:feedLink element: a feed embedded by reference."""
  _tag = 'feedLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
  _attributes['rel'] = 'rel'
  _attributes['readOnly'] = 'read_only'
  _attributes['countHint'] = 'count_hint'
  _attributes['href'] = 'href'
  def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
      feed=None, extension_elements=None, extension_attributes=None,
      text=None):
    # Note: initializes AtomBase's members directly instead of calling
    # atom.AtomBase.__init__ (same pattern as EntryLink).
    self.count_hint = count_hint
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.feed = feed
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def FeedLinkFromString(xml_string):
  """Deserialize a FeedLink from its XML string form.

  Bug fix: this previously instantiated EntryLink (copy-paste error from
  EntryLinkFromString), so parsing returned the wrong class.
  """
  return atom.CreateClassFromXMLString(FeedLink, xml_string)
| Python |
"""
TLS Lite is a free python library that implements SSL v3, TLS v1, and
TLS v1.1. TLS Lite supports non-traditional authentication methods
such as SRP, shared keys, and cryptoIDs, in addition to X.509
certificates. TLS Lite is pure python, however it can access OpenSSL,
cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite
integrates with httplib, xmlrpclib, poplib, imaplib, smtplib,
SocketServer, asyncore, and Twisted.
To use, do::
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket,
or use one of the integration classes in L{tlslite.integration}.
@version: 0.3.8
"""
__version__ = "0.3.8"
__all__ = ["api",
"BaseDB",
"Checker",
"constants",
"errors",
"FileObject",
"HandshakeSettings",
"mathtls",
"messages",
"Session",
"SessionCache",
"SharedKeyDB",
"TLSConnection",
"TLSRecordLayer",
"VerifierDB",
"X509",
"X509CertChain",
"integration",
"utils"]
| Python |
"""Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
    """Generate an RSA key with the specified bit length.

    @type bits: int
    @param bits: Desired bit length of the new key's modulus.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: A new RSA private key.
    """
    # Walk the preference list; each backend is used only if available.
    for backend in implementations:
        if backend == "python":
            return Python_RSAKey.generate(bits)
        if backend == "openssl" and cryptomath.m2cryptoLoaded:
            return OpenSSL_RSAKey.generate(bits)
    raise ValueError("No acceptable implementations")
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
    """Parse an XML-format key.

    The XML format used here is specific to tlslite and cryptoIDlib. The
    format can store the public component of a key, or the public and
    private components. For example::

        <publicKey xmlns="http://trevp.net/rsa">
            <n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
            <e>Aw==</e>
        </publicKey>

        <privateKey xmlns="http://trevp.net/rsa">
            <n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
            <e>Aw==</e>
            <d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
            <p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
            <q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
            <dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
            <dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
            <qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
        </privateKey>

    @type s: str
    @param s: A string containing an XML public or private key.

    @type private: bool
    @param private: If True, a L{SyntaxError} will be raised if the private
    key component is not present.

    @type public: bool
    @param public: If True, the private key component (if present) will be
    discarded, so this function will always return a public key.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: An RSA key.

    @raise SyntaxError: If the key is not properly formatted.
    """
    for implementation in implementations:
        if implementation == "python":
            key = Python_RSAKey.parseXML(s)
            break
    else:
        # for/else: the loop never hit `break`, i.e. "python" was not in
        # the implementations list, so no parser ran.
        raise ValueError("No acceptable implementations")
    return _parseKeyHelper(key, private, public)
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
                implementations=["openssl", "python"]):
    """Parse a PEM-format key.

    The PEM format is used by OpenSSL and other tools. The
    format is typically used to store both the public and private
    components of a key. For example::

       -----BEGIN RSA PRIVATE KEY-----
        MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
        dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
        dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
        AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
        esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
        gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
        aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
        VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
        CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
        i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
        wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
        6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
        h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
        -----END RSA PRIVATE KEY-----

    To generate a key like this with OpenSSL, run::

        openssl genrsa 2048 > key.pem

    This format also supports password-encrypted private keys. TLS
    Lite can only handle password-encrypted private keys when OpenSSL
    and M2Crypto are installed. In this case, passwordCallback will be
    invoked to query the user for the password.

    @type s: str
    @param s: A string containing a PEM-encoded public or private key.

    @type private: bool
    @param private: If True, a L{SyntaxError} will be raised if the
    private key component is not present.

    @type public: bool
    @param public: If True, the private key component (if present) will
    be discarded, so this function will always return a public key.

    @type passwordCallback: callable
    @param passwordCallback: This function will be called, with no
    arguments, if the PEM-encoded private key is password-encrypted.
    The callback should return the password string. If the password is
    incorrect, SyntaxError will be raised. If no callback is passed
    and the key is password-encrypted, a prompt will be displayed at
    the console.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: An RSA key.

    @raise SyntaxError: If the key is not properly formatted.
    """
    # Prefer OpenSSL (needed for password-encrypted keys), fall back to
    # the pure-python parser.
    for implementation in implementations:
        if implementation == "openssl" and cryptomath.m2cryptoLoaded:
            key = OpenSSL_RSAKey.parse(s, passwordCallback)
            break
        elif implementation == "python":
            key = Python_RSAKey.parsePEM(s)
            break
    else:
        # for/else: no listed implementation was usable.
        raise ValueError("No acceptable implementations")
    return _parseKeyHelper(key, private, public)
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
    """Parse an XML or PEM-formatted public key.

    @type s: str
    @param s: A string containing an XML or PEM-encoded public or private key.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: An RSA public key.

    @raise SyntaxError: If the key is not properly formatted.
    """
    # Try PEM first; on any parse failure fall back to the XML parser.
    # (Was a bare `except:`, which would also swallow KeyboardInterrupt
    # and SystemExit; narrowed to Exception.)
    try:
        return parsePEMKey(s, public=True)
    except Exception:
        return parseXMLKey(s, public=True)
def parsePrivateKey(s):
    """Parse an XML or PEM-formatted private key.

    @type s: str
    @param s: A string containing an XML or PEM-encoded private key.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: An RSA private key.

    @raise SyntaxError: If the key is not properly formatted.
    """
    # Try PEM first; on any parse failure fall back to the XML parser.
    # (Was a bare `except:`, which would also swallow KeyboardInterrupt
    # and SystemExit; narrowed to Exception.)
    try:
        return parsePEMKey(s, private=True)
    except Exception:
        return parseXMLKey(s, private=True)
def _createPublicKey(key):
    """
    Create a new public key.  Discard any private component,
    and return the most efficient key possible.
    """
    if isinstance(key, RSAKey):
        return _createPublicRSAKey(key.n, key.e)
    raise AssertionError()
def _createPrivateKey(key):
    """
    Create a new private key.  Return the most efficient key possible.
    """
    # Both conditions raise the same AssertionError as before.
    if not isinstance(key, RSAKey) or not key.hasPrivateKey():
        raise AssertionError()
    return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q,
                                key.dP, key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
                                                 "python"]):
    """Instantiate a public RSA key with the first usable backend."""
    for backend in implementations:
        if backend == "python":
            return Python_RSAKey(n, e)
        if backend == "openssl" and cryptomath.m2cryptoLoaded:
            return OpenSSL_RSAKey(n, e)
        if backend == "pycrypto" and cryptomath.pycryptoLoaded:
            return PyCrypto_RSAKey(n, e)
    raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
                         implementations = ["pycrypto", "python"]):
    """Instantiate a private RSA key with the first usable backend."""
    for backend in implementations:
        if backend == "python":
            return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
        if backend == "pycrypto" and cryptomath.pycryptoLoaded:
            return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
    raise ValueError("No acceptable implementations")
| Python |
"""Abstract class for RSA."""
from cryptomath import *
class RSAKey:
    """This is an abstract base class for RSA keys.

    Particular implementations of RSA keys, such as
    L{OpenSSL_RSAKey.OpenSSL_RSAKey},
    L{Python_RSAKey.Python_RSAKey}, and
    L{PyCrypto_RSAKey.PyCrypto_RSAKey},
    inherit from this.

    To create or parse an RSA key, don't use one of these classes
    directly.  Instead, use the factory functions in
    L{tlslite.utils.keyfactory}.
    """

    def __init__(self, n=0, e=0):
        """Create a new RSA key.

        If n and e are passed in, the new key will be initialized.

        @type n: int
        @param n: RSA modulus.

        @type e: int
        @param e: RSA public exponent.
        """
        raise NotImplementedError()

    def __len__(self):
        """Return the length of this key in bits.

        @rtype: int
        """
        return numBits(self.n)

    def hasPrivateKey(self):
        """Return whether or not this key has a private component.

        @rtype: bool
        """
        raise NotImplementedError()

    def hash(self):
        """Return the cryptoID <keyHash> value corresponding to this
        key.

        @rtype: str
        """
        raise NotImplementedError()

    def getSigningAlgorithm(self):
        """Return the cryptoID sigAlgo value corresponding to this key.

        @rtype: str
        """
        return "pkcs1-sha1"

    def hashAndSign(self, bytes):
        """Hash and sign the passed-in bytes.

        This requires the key to have a private component.  It performs
        a PKCS1-SHA1 signature on the passed-in data.

        @type bytes: str or L{array.array} of unsigned bytes
        @param bytes: The value which will be hashed and signed.

        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1-SHA1 signature on the passed-in data.
        """
        if not isinstance(bytes, type("")):
            bytes = bytesToString(bytes)
        hashBytes = stringToBytes(sha.sha(bytes).digest())
        # Prepend the ASN.1 DigestInfo header identifying SHA-1 before
        # the PKCS1 signature operation.
        prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
        sigBytes = self.sign(prefixedHashBytes)
        return sigBytes

    def hashAndVerify(self, sigBytes, bytes):
        """Hash and verify the passed-in bytes with the signature.

        This verifies a PKCS1-SHA1 signature on the passed-in data.

        @type sigBytes: L{array.array} of unsigned bytes
        @param sigBytes: A PKCS1-SHA1 signature.

        @type bytes: str or L{array.array} of unsigned bytes
        @param bytes: The value which will be hashed and verified.

        @rtype: bool
        @return: Whether the signature matches the passed-in data.
        """
        if not isinstance(bytes, type("")):
            bytes = bytesToString(bytes)
        hashBytes = stringToBytes(sha.sha(bytes).digest())
        prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
        return self.verify(sigBytes, prefixedHashBytes)

    def sign(self, bytes):
        """Sign the passed-in bytes.

        This requires the key to have a private component.  It performs
        a PKCS1 signature on the passed-in data.

        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be signed.

        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1 signature on the passed-in data.
        """
        if not self.hasPrivateKey():
            raise AssertionError()
        # Block type 1 = signature padding (0xFF filler).
        paddedBytes = self._addPKCS1Padding(bytes, 1)
        m = bytesToNumber(paddedBytes)
        if m >= self.n:
            raise ValueError()
        c = self._rawPrivateKeyOp(m)
        sigBytes = numberToBytes(c)
        return sigBytes

    def verify(self, sigBytes, bytes):
        """Verify the passed-in bytes with the signature.

        This verifies a PKCS1 signature on the passed-in data.

        @type sigBytes: L{array.array} of unsigned bytes
        @param sigBytes: A PKCS1 signature.

        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be verified.

        @rtype: bool
        @return: Whether the signature matches the passed-in data.
        """
        # Re-pad the expected plaintext and compare against the decrypted
        # signature block.
        paddedBytes = self._addPKCS1Padding(bytes, 1)
        c = bytesToNumber(sigBytes)
        if c >= self.n:
            return False
        m = self._rawPublicKeyOp(c)
        checkBytes = numberToBytes(m)
        return checkBytes == paddedBytes

    def encrypt(self, bytes):
        """Encrypt the passed-in bytes.

        This performs PKCS1 encryption of the passed-in data.

        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be encrypted.

        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1 encryption of the passed-in data.
        """
        # Block type 2 = encryption padding (random nonzero filler).
        paddedBytes = self._addPKCS1Padding(bytes, 2)
        m = bytesToNumber(paddedBytes)
        if m >= self.n:
            raise ValueError()
        c = self._rawPublicKeyOp(m)
        encBytes = numberToBytes(c)
        return encBytes

    def decrypt(self, encBytes):
        """Decrypt the passed-in bytes.

        This requires the key to have a private component.  It performs
        PKCS1 decryption of the passed-in data.

        @type encBytes: L{array.array} of unsigned bytes
        @param encBytes: The value which will be decrypted.

        @rtype: L{array.array} of unsigned bytes or None.
        @return: A PKCS1 decryption of the passed-in data or None if
        the data is not properly formatted.
        """
        if not self.hasPrivateKey():
            raise AssertionError()
        c = bytesToNumber(encBytes)
        if c >= self.n:
            return None
        m = self._rawPrivateKeyOp(c)
        decBytes = numberToBytes(m)
        # The padded block's leading 0x00 byte is lost in the number
        # conversion, hence the length is numBytes(n)-1 and decBytes[0]
        # is the block-type byte.
        if (len(decBytes) != numBytes(self.n)-1): #Check first byte
            return None
        if decBytes[0] != 2: #Check second byte
            return None
        for x in range(len(decBytes)-1): #Scan through for zero separator
            if decBytes[x]== 0:
                break
        else:
            return None
        return decBytes[x+1:] #Return everything after the separator

    def _rawPrivateKeyOp(self, m):
        raise NotImplementedError()

    def _rawPublicKeyOp(self, c):
        raise NotImplementedError()

    def acceptsPassword(self):
        """Return True if the write() method accepts a password for use
        in encrypting the private key.

        @rtype: bool
        """
        raise NotImplementedError()

    def write(self, password=None):
        """Return a string containing the key.

        @rtype: str
        @return: A string describing the key, in whichever format (PEM
        or XML) is native to the implementation.
        """
        raise NotImplementedError()

    def writeXMLPublicKey(self, indent=''):
        """Return a string containing the key.

        @rtype: str
        @return: A string describing the public key, in XML format.
        """
        return Python_RSAKey(self.n, self.e).write(indent)

    def generate(bits):
        """Generate a new key with the specified bit length.

        @rtype: L{tlslite.utils.RSAKey.RSAKey}
        """
        raise NotImplementedError()
    generate = staticmethod(generate)


    # **************************************************************************
    # Helper Functions for RSA Keys
    # **************************************************************************

    def _addPKCS1SHA1Prefix(self, bytes):
        # ASN.1 DigestInfo header for SHA-1, prepended to the hash before
        # PKCS1 signing (see RFC 3447, section 9.2).
        prefixBytes = createByteArraySequence(\
            [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20])
        prefixedBytes = prefixBytes + bytes
        return prefixedBytes

    def _addPKCS1Padding(self, bytes, blockType):
        padLength = (numBytes(self.n) - (len(bytes)+3))
        if blockType == 1: #Signature padding
            pad = [0xFF] * padLength
        elif blockType == 2: #Encryption padding
            # Random nonzero filler bytes; oversample and filter zeros.
            pad = createByteArraySequence([])
            while len(pad) < padLength:
                padBytes = getRandomBytes(padLength * 2)
                pad = [b for b in padBytes if b != 0]
                pad = pad[:padLength]
        else:
            raise AssertionError()

        #NOTE: To be proper, we should add [0,blockType].  However,
        #the zero is lost when the returned padding is converted
        #to a number, so we don't even bother with it.  Also,
        #adding it would cause a misalignment in verify()

        padding = createByteArraySequence([blockType] + pad + [0])
        paddedBytes = padding + bytes
        return paddedBytes
| Python |
"""Classes for reading/writing binary data (such as TLS records)."""
from compat import *
class Writer:
    """Builds an array of bytes in big-endian order (TLS wire output).

    A Writer created with length=0 is a "trial run": nothing is stored,
    but self.index still advances, so the caller can measure the required
    buffer length before allocating.
    """
    def __init__(self, length=0):
        #If length is zero, then this is just a "trial run" to determine length
        self.index = 0
        self.bytes = createByteArrayZeros(length)

    def add(self, x, length):
        # Write integer x as `length` big-endian bytes at the cursor.
        if self.bytes:
            newIndex = self.index+length-1
            while newIndex >= self.index:
                self.bytes[newIndex] = x & 0xFF
                x >>= 8
                newIndex -= 1
        # The cursor advances even in a trial run (empty self.bytes).
        self.index += length

    def addFixSeq(self, seq, length):
        # Write each element of seq as a fixed-size big-endian integer.
        if self.bytes:
            for e in seq:
                self.add(e, length)
        else:
            self.index += len(seq)*length

    def addVarSeq(self, seq, length, lengthLength):
        # Write a byte-count length prefix, then each element of seq.
        if self.bytes:
            self.add(len(seq)*length, lengthLength)
            for e in seq:
                self.add(e, length)
        else:
            self.index += lengthLength + (len(seq)*length)
class Parser:
    """Sequential big-endian reader over an array of byte values.

    Used to decode TLS records; raises SyntaxError on truncated or
    malformed input.
    """
    def __init__(self, bytes):
        # Underlying byte sequence and read cursor.
        self.bytes = bytes
        self.index = 0

    def get(self, length):
        """Read a big-endian unsigned integer occupying `length` bytes."""
        if self.index + length > len(self.bytes):
            raise SyntaxError()
        value = 0
        for offset in range(length):
            value = (value << 8) | self.bytes[self.index + offset]
        self.index += length
        return value

    def getFixBytes(self, lengthBytes):
        """Read a fixed-size run of raw bytes."""
        chunk = self.bytes[self.index : self.index + lengthBytes]
        self.index += lengthBytes
        return chunk

    def getVarBytes(self, lengthLength):
        """Read a length prefix, then that many raw bytes."""
        return self.getFixBytes(self.get(lengthLength))

    def getFixList(self, length, lengthList):
        """Read `lengthList` integers of `length` bytes each."""
        return [self.get(length) for _ in range(lengthList)]

    def getVarList(self, length, lengthLength):
        """Read a byte-count prefix, then a list of `length`-byte integers."""
        count, remainder = divmod(self.get(lengthLength), length)
        if remainder != 0:
            raise SyntaxError()
        return [self.get(length) for _ in range(count)]

    def startLengthCheck(self, lengthLength):
        """Read a length prefix and mark the cursor for stopLengthCheck()."""
        self.lengthCheck = self.get(lengthLength)
        self.indexCheck = self.index

    def setLengthCheck(self, length):
        """Set an expected length directly (no prefix is consumed)."""
        self.lengthCheck = length
        self.indexCheck = self.index

    def stopLengthCheck(self):
        """Verify exactly lengthCheck bytes were consumed since the mark."""
        if (self.index - self.indexCheck) != self.lengthCheck:
            raise SyntaxError()

    def atLengthCheck(self):
        """Return True once the checked region is fully consumed.

        Raises SyntaxError if the cursor overran the region.
        """
        consumed = self.index - self.indexCheck
        if consumed < self.lengthCheck:
            return False
        if consumed == self.lengthCheck:
            return True
        raise SyntaxError()
"""Class for parsing ASN.1"""
from compat import *
from codec import *
#Takes a byte array which has a DER TLV field at its head
class ASN1Parser:
    """Minimal DER TLV reader.

    Consumes the Type and Length of the field at the head of the supplied
    byte array and exposes its Value via self.value (and self.length).
    """
    def __init__(self, bytes):
        p = Parser(bytes)
        p.get(1) #skip Type

        #Get Length
        self.length = self._getASN1Length(p)

        #Get Value
        self.value = p.getFixBytes(self.length)

    #Assuming this is a sequence...
    def getChild(self, which):
        # Walk which+1 TLV fields inside our value and return the last
        # one walked (index `which`) as a new ASN1Parser.
        p = Parser(self.value)
        for x in range(which+1):
            markIndex = p.index
            p.get(1) #skip Type
            length = self._getASN1Length(p)
            p.getFixBytes(length)
        return ASN1Parser(p.bytes[markIndex : p.index])

    #Decode the ASN.1 DER length field
    def _getASN1Length(self, p):
        # Short form: a single byte <= 127 is the length itself.
        # Long form: low 7 bits give the byte count of the length that follows.
        firstLength = p.get(1)
        if firstLength<=127:
            return firstLength
        else:
            lengthLength = firstLength & 0x7F
            return p.get(lengthLength)
| Python |
"""cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# The flags set below (m2cryptoLoaded, cryptlibpyLoaded, gmpyLoaded,
# pycryptoLoaded) record which optional crypto backends are importable;
# the rest of the library consults them to pick implementations.
# Try to load M2Crypto/OpenSSL
try:
    from M2Crypto import m2
    m2cryptoLoaded = True

except ImportError:
    m2cryptoLoaded = False


# Try to load cryptlib
try:
    import cryptlib_py
    try:
        cryptlib_py.cryptInit()
    except cryptlib_py.CryptException, e:
        #If tlslite and cryptoIDlib are both present,
        #they might each try to re-initialize this,
        #so we're tolerant of that.
        if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
            raise
    cryptlibpyLoaded = True

except ImportError:
    cryptlibpyLoaded = False

#Try to load GMPY
try:
    import gmpy
    gmpyLoaded = True
except ImportError:
    gmpyLoaded = False

#Try to load pycrypto
try:
    import Crypto.Cipher.AES
    pycryptoLoaded = True
except ImportError:
    pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Select a getRandomBytes implementation, in order of preference:
# os.urandom -> cryptlib -> /dev/urandom -> Win32 CryptoAPI -> error stub.
# Get os.urandom PRNG
try:
    os.urandom(1)
    def getRandomBytes(howMany):
        return stringToBytes(os.urandom(howMany))
    prngName = "os.urandom"

# NOTE(review): bare except here also catches non-ImportError failures
# from the os.urandom(1) probe; kept as-is.
except:
    # Else get cryptlib PRNG
    if cryptlibpyLoaded:
        def getRandomBytes(howMany):
            randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
                                                       cryptlib_py.CRYPT_ALGO_AES)
            cryptlib_py.cryptSetAttribute(randomKey,
                                          cryptlib_py.CRYPT_CTXINFO_MODE,
                                          cryptlib_py.CRYPT_MODE_OFB)
            cryptlib_py.cryptGenerateKey(randomKey)
            bytes = createByteArrayZeros(howMany)
            cryptlib_py.cryptEncrypt(randomKey, bytes)
            return bytes
        prngName = "cryptlib"

    else:
        #Else get UNIX /dev/urandom PRNG
        try:
            devRandomFile = open("/dev/urandom", "rb")
            def getRandomBytes(howMany):
                return stringToBytes(devRandomFile.read(howMany))
            prngName = "/dev/urandom"
        except IOError:
            #Else get Win32 CryptoAPI PRNG
            try:
                import win32prng
                def getRandomBytes(howMany):
                    s = win32prng.getRandomBytes(howMany)
                    if len(s) != howMany:
                        raise AssertionError()
                    return stringToBytes(s)
                prngName ="CryptoAPI"
            except ImportError:
                #Else no PRNG :-(
                def getRandomBytes(howMany):
                    raise NotImplementedError("No Random Number Generator "\
                                              "available.")
                prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
    """Convert a big-endian sequence of byte values to a nonnegative integer.

    Returns 0 for an empty sequence. The Python 2-only long literals
    (`0L`/`1L`) were removed: ints auto-promote to longs in Python 2, so
    behavior is identical, and the code is forward-portable.
    """
    total = 0
    # Fold most-significant byte first: total = total*256 + byte.
    for byte in bytes:
        total = (total << 8) + byte
    return total
def numberToBytes(n):
    """Convert a nonnegative integer to a big-endian byte array.

    The result has exactly numBytes(n) entries (empty for n == 0).
    """
    howManyBytes = numBytes(n)
    bytes = createByteArrayZeros(howManyBytes)
    # Fill from the least-significant end, shifting n down a byte at a time.
    for count in range(howManyBytes-1, -1, -1):
        bytes[count] = int(n % 256)
        n >>= 8
    return bytes
def bytesToBase64(bytes):
    """Byte array -> base64 string."""
    return stringToBase64(bytesToString(bytes))
def base64ToBytes(s):
    """Base64 string -> byte array."""
    return stringToBytes(base64ToString(s))
def numberToBase64(n):
    """Integer -> base64 string of its big-endian byte form."""
    return bytesToBase64(numberToBytes(n))
def base64ToNumber(s):
    """Base64 string -> integer (big-endian byte interpretation)."""
    return bytesToNumber(base64ToBytes(s))
def stringToNumber(s):
    """String -> integer (big-endian byte interpretation)."""
    return bytesToNumber(stringToBytes(s))
def numberToString(s):
    """Integer -> string (the parameter is a number despite the name `s`)."""
    return bytesToString(numberToBytes(s))
def base64ToString(s):
    """Decode a base64 string, raising SyntaxError on malformed input.

    Note: uses Python 2 `except ..., e` syntax (the module predates
    `except ... as e`).
    """
    try:
        return base64.decodestring(s)
    except binascii.Error, e:
        raise SyntaxError(e)
    except binascii.Incomplete, e:
        raise SyntaxError(e)
def stringToBase64(s):
    """Base64-encode a string as a single line (embedded newlines stripped).

    Note: base64.encodestring is the Python 2 API (removed in Python 3.9).
    """
    return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    """Convert an OpenSSL MPI (4-byte length header + magnitude) to an int.

    Raises AssertionError if the sign bit of the first magnitude byte is
    set, i.e. the MPI encodes a negative number.
    """
    if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
        raise AssertionError()
    bytes = stringToBytes(mpi[4:])
    return bytesToNumber(bytes)
def numberToMPI(n):
    """Convert a non-negative number into an OpenSSL-format MPI string."""
    bytes = numberToBytes(n)
    ext = 0
    #If the high-order bit is going to be set,
    #add an extra byte of zeros
    #(numBits being a multiple of 8 means the top bit of the leading byte is
    #set, which OpenSSL would read as a negative value)
    if (numBits(n) & 0x7)==0:
        ext = 1
    length = numBytes(n) + ext
    #Prepend 4 length bytes (plus the optional zero pad byte)
    bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
    bytes[0] = (length >> 24) & 0xFF
    bytes[1] = (length >> 16) & 0xFF
    bytes[2] = (length >> 8) & 0xFF
    bytes[3] = length & 0xFF
    return bytesToString(bytes)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
    """Return how many bytes are needed to hold the integer n (0 -> 0)."""
    if n == 0:
        return 0
    # Round the bit count up to a whole number of bytes
    return int(math.ceil(numBits(n) / 8.0))
def hashAndBase64(s):
    """Return the base64-encoded SHA-1 digest of s."""
    return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce
    """Return a random base64 nonce of numChars characters (~6 bits each)."""
    raw = getRandomBytes(numChars)
    asString = "".join([chr(b) for b in raw])
    return stringToBase64(asString)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
    """Return a uniformly random number in [low, high).

    Draws enough random bytes to cover `high`, masks off the excess top
    bits, and rejection-samples until the value falls in range.
    """
    if low >= high:
        raise AssertionError()
    howManyBits = numBits(high)
    howManyBytes = numBytes(high)
    lastBits = howManyBits % 8
    while 1:
        bytes = getRandomBytes(howManyBytes)
        if lastBits:
            #Mask the partial top byte so the candidate isn't wildly above high
            bytes[0] = bytes[0] % (1 << lastBits)
        n = bytesToNumber(bytes)
        if n >= low and n < high:
            return n
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    big, small = max(a, b), min(a, b)
    while small:
        big, small = small, big % small
    return big
def lcm(a, b):
    """Return the least common multiple of a and b."""
    #This will break when python division changes, but we can't use // cause
    #of Jython
    return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
    """Return the multiplicative inverse of a modulo b, or 0 if none exists."""
    c, d = a, b
    uc, ud = 1, 0
    while c != 0:
        #This will break when python division changes, but we can't use //
        #cause of Jython
        q = d / c
        c, d = d-(q*c), c
        uc, ud = ud - (q * uc), uc
    #At this point d holds gcd(a, b); an inverse exists only when that is 1
    if d == 1:
        return ud % b
    return 0
if gmpyLoaded:
    #Fast path: delegate modular exponentiation to gmpy's C implementation
    def powMod(base, power, modulus):
        """Return base**power mod modulus (gmpy-accelerated)."""
        base = gmpy.mpz(base)
        power = gmpy.mpz(power)
        modulus = gmpy.mpz(modulus)
        result = pow(base, power, modulus)
        return long(result)
else:
    #Copied from Bryan G. Olson's post to comp.lang.python
    #Does left-to-right instead of pow()'s right-to-left,
    #thus about 30% faster than the python built-in with small bases
    def powMod(base, power, modulus):
        nBitScan = 5
        """ Return base**power mod modulus, using multi bit scanning
        with nBitScan bits at a time."""
        #TREV - Added support for negative exponents
        negativeResult = False
        if (power < 0):
            power *= -1
            negativeResult = True
        exp2 = 2**nBitScan
        mask = exp2 - 1
        # Break power into a list of digits of nBitScan bits.
        # The list is recursive so easy to read in reverse direction.
        nibbles = None
        while power:
            nibbles = int(power & mask), nibbles
            power = power >> nBitScan
        # Make a table of powers of base up to 2**nBitScan - 1
        lowPowers = [1]
        for i in xrange(1, exp2):
            lowPowers.append((lowPowers[i-1] * base) % modulus)
        # To exponentiate by the first nibble, look it up in the table
        nib, nibbles = nibbles
        prod = lowPowers[nib]
        # For the rest, square nBitScan times, then multiply by
        # base^nibble
        while nibbles:
            nib, nibbles = nibbles
            for i in xrange(nBitScan):
                prod = (prod * prod) % modulus
            if nib: prod = (prod * lowPowers[nib]) % modulus
        #TREV - Added support for negative exponents
        #(a negative exponent means the modular inverse of the positive result)
        if negativeResult:
            prodInv = invMod(prod, modulus)
            #Check to make sure the inverse is correct
            if (prod * prodInv) % modulus != 1:
                raise AssertionError()
            return prodInv
        return prod
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
    """Return a list of all primes below n (Sieve of Eratosthenes).

    Bug fix: the sweep previously ran over range(2, int(sqrt(n))), which
    excludes int(sqrt(n)) itself, so the square of the largest prime
    <= sqrt(n) survived as a false prime (e.g. 9 for n=10, 961 = 31**2 for
    n=1000).  The sweep now includes int(sqrt(n)).  The table is built with
    list(range(n)) so it is mutable on Python 3 as well.
    """
    sieve = list(range(n))
    for count in range(2, int(math.sqrt(n)) + 1):
        if sieve[count] == 0:
            continue                     # already struck out as composite
        x = sieve[count] * 2
        while x < len(sieve):
            sieve[x] = 0                 # strike out every multiple
            x += sieve[count]
    #Keep the surviving (non-zero) entries from 2 upwards
    sieve = [x for x in sieve[2:] if x]
    return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
    """Probabilistic primality test: trial division by the small-prime
    sieve, then `iterations` rounds of Rabin-Miller.

    NOTE(review): small n <= the first sieve prime return True immediately
    (e.g. n=1) — callers in this file only pass large candidates; confirm
    before reusing elsewhere.
    """
    #Trial division with sieve
    for x in sieve:
        if x >= n: return True
        if n % x == 0: return False
    #Passed trial division, proceed to Rabin-Miller
    #Rabin-Miller implemented per Ferguson & Schneier
    #Compute s, t for Rabin-Miller
    if display: print "*",
    s, t = n-1, 0
    while s % 2 == 0:
        s, t = s/2, t+1
    #Repeat Rabin-Miller x times
    a = 2 #Use 2 as a base for first iteration speedup, per HAC
    for count in range(iterations):
        v = powMod(a, s, n)
        if v==1:
            continue
        i = 0
        while v != n-1:
            if i == t-1:
                return False
            else:
                v, i = powMod(v, 2, n), i+1
        #Subsequent rounds use a random base
        a = getRandomNumber(2, n)
    return True
def getRandomPrime(bits, display=False):
    """Return a random prime of `bits` bits with the two top bits set.

    Candidates are kept congruent to 29 mod 30, so they are never divisible
    by 2, 3, or 5, then screened with isPrime().
    """
    if bits < 10:
        raise AssertionError()
    #The 1.5 ensures the 2 MSBs are set
    #Thus, when used for p,q in RSA, n will have its MSB set
    #
    #Since 30 is lcm(2,3,5), we'll set our test numbers to
    #29 % 30 and keep them there
    low = (2L ** (bits-1)) * 3/2
    high = 2L ** bits - 30
    p = getRandomNumber(low, high)
    p += 29 - (p % 30)
    while 1:
        if display: print ".",
        #Step by 30 to stay at 29 mod 30; resample if we run off the top
        p += 30
        if p >= high:
            p = getRandomNumber(low, high)
            p += 29 - (p % 30)
        if isPrime(p, display=display):
            return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
    """Return a random safe prime p = 2q+1 of `bits` bits (q also prime)."""
    if bits < 10:
        raise AssertionError()
    #The 1.5 ensures the 2 MSBs are set
    #Thus, when used for p,q in RSA, n will have its MSB set
    #
    #Since 30 is lcm(2,3,5), we'll set our test numbers to
    #29 % 30 and keep them there
    low = (2 ** (bits-2)) * 3/2
    high = (2 ** (bits-1)) - 30
    q = getRandomNumber(low, high)
    q += 29 - (q % 30)
    while 1:
        if display: print ".",
        q += 30
        if (q >= high):
            q = getRandomNumber(low, high)
            q += 29 - (q % 30)
        #Ideas from Tom Wu's SRP code
        #Do trial division on p and q before Rabin-Miller
        #(isPrime with iterations=0 performs only the cheap sieve screen)
        if isPrime(q, 0, display=display):
            p = (2 * q) + 1
            if isPrime(p, display=display):
                if isPrime(q, display=display):
                    return p
# ---- (concatenation artifact removed; the xmltools helper module follows) ----
"""Helper functions for XML.
This module has misc. helper functions for working with XML DOM nodes."""
import re
from compat import *
import os
if os.name != "java":
    #CPython: use the stdlib minidom parser and sax escaping utilities
    from xml.dom import minidom
    from xml.sax import saxutils
    def parseDocument(s):
        """Parse an XML string into a DOM Document."""
        return minidom.parseString(s)
else:
    #Jython: fall back to the JAXP DocumentBuilder
    from javax.xml.parsers import *
    import java
    builder = DocumentBuilderFactory.newInstance().newDocumentBuilder()
    def parseDocument(s):
        """Parse an XML string into a DOM Document."""
        stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes())
        return builder.parse(stream)
def parseAndStripWhitespace(s):
    """Parse the XML string s, strip formatting whitespace, and return the
    document element.  Any parser error is re-raised as SyntaxError."""
    try:
        element = parseDocument(s).documentElement
    except BaseException, e:
        raise SyntaxError(str(e))
    stripWhitespace(element)
    return element
#Goes through a DOM tree and removes whitespace besides child elements,
#as long as this whitespace is correctly tab-ified
def stripWhitespace(element, tab=0):
    """Recursively remove inter-element whitespace text nodes, verifying the
    document is exactly tab-indented (one extra tab per nesting level).

    Raises SyntaxError on empty elements or incorrectly formatted whitespace.
    """
    element.normalize()
    #Whitespace expected before this element's close tag / before each child
    lastSpacer = "\n" + ("\t"*tab)
    spacer = lastSpacer + "\t"
    #Zero children aren't allowed (i.e. <empty/>)
    #This makes writing output simpler, and matches Canonical XML
    if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython
        raise SyntaxError("Empty XML elements not allowed")
    #If there's a single child, it must be text context
    if element.childNodes.length==1:
        if element.firstChild.nodeType == element.firstChild.TEXT_NODE:
            #If it's an empty element, remove
            if element.firstChild.data == lastSpacer:
                element.removeChild(element.firstChild)
            return
        #If not text content, give an error
        elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE:
            raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
        else:
            raise SyntaxError("Unexpected node type in XML document")
    #Otherwise there's multiple child element
    child = element.firstChild
    while child:
        if child.nodeType == child.ELEMENT_NODE:
            stripWhitespace(child, tab+1)
            child = child.nextSibling
        elif child.nodeType == child.TEXT_NODE:
            #Text nodes between elements must be exactly the expected indent
            if child == element.lastChild:
                if child.data != lastSpacer:
                    raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            elif child.data != spacer:
                raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
            next = child.nextSibling
            element.removeChild(child)
            child = next
        else:
            raise SyntaxError("Unexpected node type in XML document")
def checkName(element, name):
    """Verify element is an ELEMENT_NODE and, if name is given, has that tag.

    Raises SyntaxError on mismatch; name=None checks the node type only.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Missing element: '%s'" % name)
    if name is not None and element.tagName != name:
        raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName))
def getChild(element, index, name=None):
    """Return the index-th child of element, optionally checking its tag.

    Raises SyntaxError if the child is missing or fails the name check.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    node = element.childNodes.item(index)
    if node is None:
        raise SyntaxError("Missing child: '%s'" % name)
    checkName(node, name)
    return node
def getChildIter(element, index):
    """Return a forward iterator over element's children, starting at index.

    The returned object's next() yields successive child nodes and then None
    when exhausted; checkEnd() raises SyntaxError if any children remain.
    """
    class _ChildIter:
        def __init__(self, element, index):
            self.element = element
            self.index = index
        def next(self):
            if self.index >= len(self.element.childNodes):
                return None
            node = self.element.childNodes.item(self.index)
            self.index += 1
            return node
        def checkEnd(self):
            if self.index != len(self.element.childNodes):
                raise SyntaxError("Too many elements under: '%s'" % self.element.tagName)
    return _ChildIter(element, index)
def getChildOrNone(element, index):
    """Return the index-th child node of element, or None if out of range."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getChild()")
    return element.childNodes.item(index)
def getLastChild(element, index, name=None):
    """Return the index-th child, requiring it to also be the last child.

    Raises SyntaxError if the child is missing, is not last, or fails the
    optional tag-name check.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getLastChild()")
    node = element.childNodes.item(index)
    if node is None:
        raise SyntaxError("Missing child: '%s'" % name)
    if node is not element.lastChild:
        raise SyntaxError("Too many elements under: '%s'" % element.tagName)
    checkName(node, name)
    return node
#Regular expressions for syntax-checking attribute and element content.
#All values are matched with re.match(), so patterns are implicitly anchored
#at the start; most end with \Z to anchor the end as well.
nsRegEx = "http://trevp.net/cryptoID\Z"
cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z"
urlRegEx = "http(s)?://.{1,100}\Z"
sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z"
base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z"
#NOTE(review): this pattern also matches the empty string — confirm intended
certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z"
keyRegEx = "[A-Z]\Z"
keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z"
dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z"
shortStringRegEx = ".{1,100}\Z"
exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z"
notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1
#NOTE(review): unlike the others this pattern is not \Z-anchored — confirm
booleanRegEx = "(true)|(false)"
def getReqAttribute(element, attrName, regEx=""):
    """Fetch a mandatory attribute, validate it, and strip it from element.

    Raises SyntaxError when the attribute is absent or fails the regex.
    The value is returned as a plain str (de-unicoded).
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getReqAttribute()")
    value = element.getAttribute(attrName)
    if not value:
        raise SyntaxError("Missing Attribute: " + attrName)
    if re.match(regEx, value) is None:
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
    element.removeAttribute(attrName)
    return str(value)  # de-unicode; this is needed for bsddb, for example
def getAttribute(element, attrName, regEx=""):
    """Fetch an optional attribute; validate, strip, and return it if set.

    Returns None when the attribute is absent or empty; raises SyntaxError
    when a present value fails the regex.
    """
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in getAttribute()")
    value = element.getAttribute(attrName)
    if not value:
        return None
    if re.match(regEx, value) is None:
        raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
    element.removeAttribute(attrName)
    return str(value)  # de-unicode; this is needed for bsddb, for example
def checkNoMoreAttributes(element):
    """Raise SyntaxError if any (unconsumed) attributes remain on element."""
    if element.nodeType != element.ELEMENT_NODE:
        raise SyntaxError("Wrong node type in checkNoMoreAttributes()")
    if element.attributes.length != 0:
        raise SyntaxError("Extra attributes on '%s'" % element.tagName)
def getText(element, regEx=""):
    """Return element's text content (validated against regEx) as a str.

    Raises SyntaxError if the element is empty, its first child is not a
    text node, or the text fails the regex.
    """
    textNode = element.firstChild
    if textNode is None:
        raise SyntaxError("Empty element '%s'" % element.tagName)
    if textNode.nodeType != textNode.TEXT_NODE:
        raise SyntaxError("Non-text node: '%s'" % element.tagName)
    if re.match(regEx, textNode.data) is None:
        raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data))
    return str(textNode.data)  # de-unicode; this is needed for bsddb, for example
#Function for adding tabs to a string
def indent(s, steps, ch="\t"):
    """Prefix every line of s with `steps` copies of `ch`.

    If s ends with a newline, no indentation is left dangling after it.

    Bug fixes: the original crashed on an empty string (indexed s[-1]), and
    with steps=0 the trailing-newline branch returned "" because of the
    s[:-len(tabs)] slice with len(tabs)==0.  Both branches otherwise did the
    same replacement, so they are now unified.
    """
    tabs = ch * steps
    if not s or not tabs:
        return s
    indented = tabs + s.replace("\n", "\n" + tabs)
    if s[-1] == "\n":
        # Drop the indentation added after the final newline
        indented = indented[:-len(tabs)]
    return indented
def escape(s):
    """XML-escape &, <, and > in s (thin wrapper over saxutils.escape)."""
    return saxutils.escape(s)
# ---- (concatenation artifact removed; the package __init__ module follows) ----
"""Toolkit for crypto and other stuff."""
#Public submodules of this package; consumed by 'from ... import *'.
__all__ = ["AES",
           "ASN1Parser",
           "cipherfactory",
           "codec",
           "Cryptlib_AES",
           "Cryptlib_RC4",
           "Cryptlib_TripleDES",
           #Bug fix: this entry read "cryptomath: cryptomath module", which
           #is not a valid module name and broke star-imports of the package
           "cryptomath",
           "dateFuncs",
           "hmac",
           "JCE_RSAKey",
           "compat",
           "keyfactory",
           "OpenSSL_AES",
           "OpenSSL_RC4",
           "OpenSSL_RSAKey",
           "OpenSSL_TripleDES",
           "PyCrypto_AES",
           "PyCrypto_RC4",
           "PyCrypto_RSAKey",
           "PyCrypto_TripleDES",
           "Python_AES",
           "Python_RC4",
           "Python_RSAKey",
           "RC4",
           "rijndael",
           "RSAKey",
           "TripleDES",
           "xmltools"]
# ---- (concatenation artifact removed; the compat module follows) ----
"""Miscellaneous functions to mask Python version differences."""
import sys
import os
#Refuse to run on interpreters older than 2.2; the shims below assume it.
if sys.version_info < (2,2):
    raise AssertionError("Python 2.2 or later required")
if sys.version_info < (2,3):
    #Python 2.2 backports of 2.3 builtins used elsewhere in this package.
    def enumerate(collection):
        """Backport of the 2.3 builtin enumerate()."""
        return zip(range(len(collection)), collection)

    class Set:
        """Minimal stand-in for the 2.3 set type.

        Elements are stored as keys of self.values; only the operations this
        package needs (add, discard, union, issubset, membership, iteration,
        truth testing) are implemented.
        """
        def __init__(self, seq=None):
            self.values = {}
            if seq:
                for e in seq:
                    self.values[e] = None
        def add(self, e):
            self.values[e] = None
        def discard(self, e):
            if e in self.values.keys():
                del(self.values[e])
        def union(self, s):
            ret = Set()
            for e in self.values.keys():
                ret.values[e] = None
            for e in s.values.keys():
                ret.values[e] = None
            return ret
        def issubset(self, other):
            for e in self.values.keys():
                if e not in other.values.keys():
                    return False
            return True
        def __nonzero__(self):
            return len(self.values.keys())
        def __contains__(self, e):
            return e in self.values.keys()
        def __iter__(self):
            #Bug fix: this previously read iter(set.values.keys()),
            #referencing the undefined name 'set' instead of this instance,
            #so iterating any Set raised NameError/AttributeError.
            return iter(self.values.keys())
if os.name != "java":
    #CPython / not-Jython: byte arrays are stdlib array-module arrays.
    import array
    def createByteArraySequence(seq):
        """Byte array from a sequence of ints in 0..255."""
        return array.array('B', seq)
    def createByteArrayZeros(howMany):
        """Zero-filled byte array of the given length."""
        return array.array('B', [0] * howMany)
    def concatArrays(a1, a2):
        """Concatenate two byte arrays."""
        return a1+a2
    def bytesToString(bytes):
        """Byte array -> binary string."""
        return bytes.tostring()
    def stringToBytes(s):
        """Binary string -> byte array."""
        bytes = createByteArrayZeros(0)
        bytes.fromstring(s)
        return bytes
    import math
    def numBits(n):
        """Number of significant bits in non-negative n (numBits(0) == 0)."""
        if n==0:
            return 0
        s = "%x" % n
        #Four bits per full hex digit below the leading one, plus however
        #many bits the leading hex digit itself needs.
        return ((len(s)-1)*4) + \
            {'0':0, '1':1, '2':2, '3':2,
             '4':3, '5':3, '6':3, '7':3,
             '8':4, '9':4, 'a':4, 'b':4,
             'c':4, 'd':4, 'e':4, 'f':4,
             }[s[0]]
        #(An unreachable math.floor(math.log(n, 2)) fallback that followed
        #the return above has been removed - it was dead code.)
    BaseException = Exception
    import sys
    import traceback
    def formatExceptionTrace(e):
        """Format the current exception's traceback as a string."""
        newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
        return newStr
else:
    #Jython 2.1 is missing lots of python 2.3 stuff,
    #which we have to emulate here:
    #NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS.
    #THIS CODE IS LEFT IN SO THAT ONE JYTHON UPDATES TO 2.2, IT HAS A
    #CHANCE OF WORKING AGAIN.
    import java
    import jarray
    def createByteArraySequence(seq):
        """Byte array (Java short[]) from a string or int sequence."""
        if isinstance(seq, type("")): #If it's a string, convert
            seq = [ord(c) for c in seq]
        return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
    def createByteArrayZeros(howMany):
        """Zero-filled byte array (Java short[]) of the given length."""
        return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
    def concatArrays(a1, a2):
        """Concatenate two byte arrays."""
        l = list(a1)+list(a2)
        return createByteArraySequence(l)
    #WAY TOO SLOW - MUST BE REPLACED------------
    def bytesToString(bytes):
        """Byte array -> binary string (element-by-element; slow)."""
        return "".join([chr(b) for b in bytes])
    def stringToBytes(s):
        """Binary string -> byte array (element-by-element; slow)."""
        bytes = createByteArrayZeros(len(s))
        for count, c in enumerate(s):
            bytes[count] = ord(c)
        return bytes
    #WAY TOO SLOW - MUST BE REPLACED------------
    def numBits(n):
        """Number of significant bits in n, via java.math.BigInteger."""
        if n==0:
            return 0
        n= 1L * n; #convert to long, if it isn't already
        return n.__tojava__(java.math.BigInteger).bitLength()
    #Adjust the string to an array of bytes
    def stringToJavaByteArray(s):
        """Binary string -> Java byte[] (values re-signed into -128..127)."""
        bytes = jarray.zeros(len(s), 'b')
        for count, c in enumerate(s):
            x = ord(c)
            if x >= 128: x -= 256
            bytes[count] = x
        return bytes
    BaseException = java.lang.Exception
    import sys
    import traceback
    def formatExceptionTrace(e):
        newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
        return newStr
"""Pure-Python RSA implementation."""
from cryptomath import *
import xmltools
from ASN1Parser import ASN1Parser
from RSAKey import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def hash(self):
s = self.writeXMLPublicKey('\t\t')
return hashAndBase64(s.strip())
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def write(self, indent=''):
if self.d:
s = indent+'<privateKey xmlns="http://trevp.net/rsa">\n'
else:
s = indent+'<publicKey xmlns="http://trevp.net/rsa">\n'
s += indent+'\t<n>%s</n>\n' % numberToBase64(self.n)
s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e)
if self.d:
s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d)
s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p)
s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q)
s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP)
s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ)
s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv)
s += indent+'</privateKey>'
else:
s += indent+'</publicKey>'
#Only add \n if part of a larger structure
if indent != '':
s += '\n'
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits/2, False)
q = getRandomPrime(bits/2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 3L #Needed to be long, for Java
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a <privateKey> or <publicKey>, or
PEM-encoded key."""
start = s.find("-----BEGIN PRIVATE KEY-----")
if start != -1:
end = s.find("-----END PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parsePKCS8(bytes)
else:
start = s.find("-----BEGIN RSA PRIVATE KEY-----")
if start != -1:
end = s.find("-----END RSA PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parseSSLeay(bytes)
raise SyntaxError("Missing PEM Prefix")
parsePEM = staticmethod(parsePEM)
def parseXML(s):
element = xmltools.parseAndStripWhitespace(s)
return Python_RSAKey._parseXML(element)
parseXML = staticmethod(parseXML)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
def _parseXML(element):
try:
xmltools.checkName(element, "privateKey")
except SyntaxError:
xmltools.checkName(element, "publicKey")
#Parse attributes
xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z")
xmltools.checkNoMoreAttributes(element)
#Parse public values (<n> and <e>)
n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx))
e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx))
d = 0
p = 0
q = 0
dP = 0
dQ = 0
qInv = 0
#Parse private values, if present
if element.childNodes.length>=3:
d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx))
p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx))
q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx))
dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx))
dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx))
qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx))
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseXML = staticmethod(_parseXML)
# ---- (concatenation artifact removed; the gdata App Engine module follows) ----
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import pickle
import atom.http_interface
import atom.token_store
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache
def run_on_appengine(gdata_service, store_tokens=True,
                     single_user_mode=False):
  """Modifies a GDataService object to allow it to run on App Engine.

  Fixes to the original docstring: "SetAuthTubToken" was a typo for
  SetAuthSubToken, and a couple of grammar slips ("it's", "assumes:").

  Args:
    gdata_service: An instance of AtomService, GDataService, or any
        of their subclasses which has an http_client member and a
        token_store member.
    store_tokens: Boolean, defaults to True. If True, the gdata_service
        will attempt to add each token to its token_store when
        SetClientLoginToken or SetAuthSubToken is called. If False
        the tokens will not automatically be added to the token_store.
    single_user_mode: Boolean, defaults to False. If True, the
        current_token member of gdata_service will be set when
        SetClientLoginToken or SetAuthSubToken is called. If set to True,
        the current_token is set in the gdata_service and anyone who
        accesses the object will use the same token.

  Note: If store_tokens is set to False and single_user_mode is set to
  False, all tokens will be ignored, since the library assumes the tokens
  should not be stored in the datastore and they should not be stored in
  the gdata_service object. This will make it impossible to make requests
  which require authorization.

  Returns:
    The same gdata_service instance, now using urlfetch for HTTP and the
    datastore-backed token store.
  """
  gdata_service.http_client = AppEngineHttpClient()
  gdata_service.token_store = AppEngineTokenStore()
  gdata_service.auto_store_tokens = store_tokens
  gdata_service.auto_set_current_token = single_user_mode
  return gdata_service
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client with the atom GenericHttpClient interface, backed by App
  Engine's urlfetch API."""
  def __init__(self, headers=None):
    self.debug = False
    self.headers = headers or {}
  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.
    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')
    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [_convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = _convert_data_part(data)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      # NOTE(review): unknown verbs fall through with method=None — confirm
      # urlfetch.Fetch's handling of method=None is the intended behavior.
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False))
def _convert_data_part(data):
if not data or isinstance(data, str):
return data
elif hasattr(data, 'read'):
# data is a file like object, so read it completely.
return data.read()
# The data object was not a file.
# Try to convert to a string and send the data.
return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by gdata.service
  methods.  (Docstring typos "resoinse"/"hhtplib" fixed.)
  """
  def __init__(self, urlfetch_response):
    # Wrap the body so callers can stream it with read()
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    self.reason = ''
  def read(self, length=None):
    """Read the whole remaining body, or up to `length` bytes of it."""
    if not length:
      return self.body.read()
    else:
      return self.body.read(length)
  def getheader(self, name, default=None):
    """Return the named header, falling back to its lowercase form.

    Bug fix: the original raised KeyError when neither casing was present;
    this now returns `default` (None), matching httplib's
    HTTPResponse.getheader contract.  The added `default` parameter is
    backward compatible.
    """
    if name in self.headers:
      return self.headers[name]
    return self.headers.get(name.lower(), default)
class TokenCollection(db.Model):
  """Datastore Model which associates auth tokens with the current user."""
  # One entity per user: 'user' identifies the owner; 'pickled_tokens' holds
  # a pickle.dumps()'d dict mapping scope URLs to token objects.
  user = db.UserProperty()
  pickled_tokens = db.BlobProperty()
class AppEngineTokenStore(atom.token_store.TokenStore):
  """Stores the user's auth tokens in the App Engine datastore.
  Tokens are only written to the datastore if a user is signed in (if
  users.get_current_user() returns a user object).
  """
  def __init__(self):
    pass
  def add_token(self, token):
    """Associates the token with the current user and stores it.
    If there is no current user, the token will not be stored.
    Returns:
      False if the token was not stored.
    """
    tokens = load_auth_tokens()
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    # Index the token under every scope it is valid for
    for scope in token.scopes:
      tokens[str(scope)] = token
    key = save_auth_tokens(tokens)
    if key:
      return True
    return False
  def find_token(self, url):
    """Searches the current user's collection of token for a token which can
    be used for a request to the url.
    Returns:
      The stored token which belongs to the current user and is valid for the
      desired URL. If there is no current user, or there is no valid user
      token in the datastore, a atom.http_interface.GenericToken is returned.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      # NOTE(review): atom.url is not imported at the top of this module
      # (only atom.http_interface and atom.token_store are); confirm atom.url
      # is loaded as a side effect of the atom package before this path runs.
      url = atom.url.parse_url(url)
    tokens = load_auth_tokens()
    if url in tokens:
      token = tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # Stored token no longer covers this scope; evict it
        del tokens[url]
        save_auth_tokens(tokens)
    # Fall back to any stored token whose scope covers the url
    for scope, token in tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()
  def remove_token(self, token):
    """Removes the token from the current user's collection in the datastore.
    Returns:
      False if the token was not removed, this could be because the token was
      not in the datastore, or because there is no current user.
    """
    token_found = False
    scopes_to_delete = []
    tokens = load_auth_tokens()
    # Collect matching scopes first; don't mutate the dict while iterating
    for scope, stored_token in tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del tokens[scope]
    if token_found:
      save_auth_tokens(tokens)
    return token_found
  def remove_all_tokens(self):
    """Removes all of the current user's tokens from the datastore."""
    save_auth_tokens({})
def save_auth_tokens(token_dict):
  """Associates the tokens with the current user and writes to the datastore.
  If there is no current user, the tokens are not written and this function
  returns None.
  Returns:
    The key of the datastore entity containing the user's tokens, or None if
    there was no current user.
  """
  if users.get_current_user() is None:
    return None
  user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get()
  if user_tokens:
    # Update the user's existing token row in place
    user_tokens.pickled_tokens = pickle.dumps(token_dict)
    return user_tokens.put()
  else:
    # First save for this user: create a fresh row
    user_tokens = TokenCollection(
        user=users.get_current_user(),
        pickled_tokens=pickle.dumps(token_dict))
    return user_tokens.put()
def load_auth_tokens():
  """Reads a dictionary of the current user's tokens from the datastore.
  If there is no current user (a user is not signed in to the app) or the user
  does not have any tokens, an empty dictionary is returned.
  """
  if users.get_current_user() is None:
    return {}
  user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get()
  if user_tokens:
    return pickle.loads(user_tokens.pickled_tokens)
  return {}
# ---- (concatenation artifact removed; a duplicate of the gdata App Engine module follows) ----
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import pickle
import atom.http_interface
import atom.token_store
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache
def run_on_appengine(gdata_service, store_tokens=True,
                     single_user_mode=False):
  """Modifies a GDataService object to allow it to run on App Engine.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any
        of their subclasses which has an http_client member and a
        token_store member.
    store_tokens: Boolean, defaults to True. If True, the gdata_service
        will attempt to add each token to its token_store when
        SetClientLoginToken or SetAuthSubToken is called. If False
        the tokens will not automatically be added to the
        token_store.
    single_user_mode: Boolean, defaults to False. If True, the current_token
        member of gdata_service will be set when
        SetClientLoginToken or SetAuthSubToken is called. If set
        to True, the current_token is set in the gdata_service
        and anyone who accesses the object will use the same
        token.

        Note: If store_tokens is set to False and
        single_user_mode is set to False, all tokens will be
        ignored, since the library assumes the tokens should not
        be stored in the datastore and they should not be stored
        in the gdata_service object. This will make it
        impossible to make requests which require authorization.

  Returns:
    The same gdata_service instance, now using urlfetch for HTTP and the
    datastore-backed token store.
  """
  # Plain sockets and file storage are unavailable on App Engine, so swap
  # in urlfetch- and datastore-backed implementations.
  gdata_service.http_client = AppEngineHttpClient()
  gdata_service.token_store = AppEngineTokenStore()
  gdata_service.auto_store_tokens = store_tokens
  gdata_service.auto_set_current_token = single_user_mode
  return gdata_service
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which routes all requests through App Engine's urlfetch API.

  Drop-in replacement for atom.http.HttpClient on App Engine, where raw
  sockets are not available.
  """

  def __init__(self, headers=None):
    self.debug = False
    # Headers supplied here are sent with every request made by this client.
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server; supports GET, POST, PUT, DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP verb, usually one of 'GET', 'POST', 'PUT',
          or 'DELETE'.
      url: The full URL for the request; a string or atom.url.Url.
      data: filestream, list of parts, or other object convertible to a
          string. Should be None for GET or DELETE. File-like objects are
          read completely; a list has each part converted and concatenated.
      headers: dict of strings. HTTP headers to send with this request.

    Returns:
      An HttpResponse wrapping the urlfetch result.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Flatten the payload into a single string.
    if not data:
      data_str = data
    elif isinstance(data, list):
      # A list of heterogeneous parts: stringify each and concatenate.
      data_str = ''.join([_convert_data_part(part) for part in data])
    else:
      data_str = _convert_data_part(data)

    # Supply Content-Length from the flattened payload unless the caller
    # already provided one.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))
    # Default the content type when the caller did not set one.
    all_headers.setdefault('Content-Type', 'application/atom+xml')

    # Map the HTTP verb onto the corresponding urlfetch constant; an
    # unrecognized verb maps to None (matching historical behavior).
    verb_map = {'GET': urlfetch.GET, 'POST': urlfetch.POST,
                'PUT': urlfetch.PUT, 'DELETE': urlfetch.DELETE}
    method = verb_map.get(operation)

    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False))
def _convert_data_part(data):
if not data or isinstance(data, str):
return data
elif hasattr(data, 'read'):
# data is a file like object, so read it completely.
return data.read()
# The data object was not a file.
# Try to convert to a string and send the data.
return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by gdata.service
  methods.  (Fixes the 'resoinse'/'hhtplib' docstring typos.)
  """

  def __init__(self, urlfetch_response):
    # Wrap the body in a file-like object so callers can stream it.
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    # urlfetch does not expose an HTTP reason phrase.
    self.reason = ''

  def read(self, length=None):
    """Reads up to length bytes of the body; all remaining bytes if None.

    Bug fix: a length of 0 now returns an empty string.  The previous
    truthiness test ('if not length') treated read(0) as read-everything.
    """
    if length is None:
      return self.body.read()
    return self.body.read(length)

  def getheader(self, name):
    """Returns the named header, falling back to its lowercase form.

    Raises KeyError when neither form is present (preserves the original
    behavior).  Replaces the Python-2-only dict.has_key with 'in'.
    """
    if name not in self.headers:
      return self.headers[name.lower()]
    return self.headers[name]
class TokenCollection(db.Model):
  """Datastore Model which associates auth tokens with the current user."""
  # The signed-in user who owns these tokens (one entity per user).
  user = db.UserProperty()
  # Pickled dict mapping scope strings to token objects; written by
  # save_auth_tokens and read back by load_auth_tokens.
  pickled_tokens = db.BlobProperty()
class AppEngineTokenStore(atom.token_store.TokenStore):
  """Stores the user's auth tokens in the App Engine datastore.

  Tokens are persisted only when a user is signed in, i.e. when
  users.get_current_user() returns a user object.
  """

  def __init__(self):
    pass

  def add_token(self, token):
    """Associates the token with the current user and stores it.

    If there is no current user, the token will not be stored.

    Returns:
      False if the token was not stored.
    """
    stored = load_auth_tokens()
    scopes = getattr(token, 'scopes', None)
    if not scopes:
      return False
    for scope in scopes:
      stored[str(scope)] = token
    # save_auth_tokens returns the datastore key on success, None otherwise.
    return bool(save_auth_tokens(stored))

  def find_token(self, url):
    """Searches the current user's collection of tokens for a token which
    can be used for a request to the url.

    Returns:
      The stored token which belongs to the current user and is valid for
      the desired URL. If there is no current user, or there is no valid
      user token in the datastore, a atom.http_interface.GenericToken is
      returned.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    stored = load_auth_tokens()
    if url in stored:
      candidate = stored[url]
      if candidate.valid_for_scope(url):
        return candidate
      # An exact-match token that is no longer valid gets purged.
      del stored[url]
      save_auth_tokens(stored)
    # Fall back to any remaining token whose scope covers this URL.
    for scope, candidate in stored.iteritems():
      if candidate.valid_for_scope(url):
        return candidate
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the current user's collection in the datastore.

    Returns:
      False if the token was not removed, this could be because the token
      was not in the datastore, or because there is no current user.
    """
    stored = load_auth_tokens()
    # Collect matching scopes first: mutating while iterating is unsafe.
    doomed = [scope for scope, existing in stored.iteritems()
              if existing == token]
    for scope in doomed:
      del stored[scope]
    if doomed:
      save_auth_tokens(stored)
    return bool(doomed)

  def remove_all_tokens(self):
    """Removes all of the current user's tokens from the datastore."""
    save_auth_tokens({})
def save_auth_tokens(token_dict):
  """Associates the tokens with the current user and writes to the datastore.

  If there is no current user, the tokens are not written and this function
  returns None.  (Fixes the 'there us no' docstring typo.)

  Args:
    token_dict: dict mapping scope strings to token objects.

  Returns:
    The key of the datastore entity containing the user's tokens, or None if
    there was no current user.
  """
  # Hoist the repeated users.get_current_user() and pickle.dumps calls.
  current_user = users.get_current_user()
  if current_user is None:
    return None
  pickled = pickle.dumps(token_dict)
  user_tokens = TokenCollection.all().filter('user =', current_user).get()
  if user_tokens:
    # Update the user's existing collection entity in place.
    user_tokens.pickled_tokens = pickled
  else:
    # First save for this user: create a new collection entity.
    user_tokens = TokenCollection(user=current_user, pickled_tokens=pickled)
  return user_tokens.put()
def load_auth_tokens():
  """Reads a dictionary of the current user's tokens from the datastore.

  If there is no current user (a user is not signed in to the app) or the
  user does not have any tokens, an empty dictionary is returned.
  """
  current_user = users.get_current_user()
  if current_user is None:
    return {}
  # At most one TokenCollection entity exists per user.
  collection = TokenCollection.all().filter('user =', current_user).get()
  if not collection:
    return {}
  return pickle.loads(collection.pickled_tokens)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package's modules adapt the gdata library to run in other environments
The first example is the appengine module which contains functions and
classes which modify a GDataService object to run on Google App Engine.
"""
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package's modules adapt the gdata library to run in other environments
The first example is the appengine module which contains functions and
classes which modify a GDataService object to run on Google App Engine.
"""
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Alexey Malashin
#
# Licensed under GNU License
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fooCMS package
Система управления содержанием на основе Google App Engine
для использования с языком python.
"""
__author__ = 'frozzzen@gmail.com (Alexey Malashin)'
__date__ = '28 January 2009'
__version__ = '$Revision: 00001 $'
__credits__ = 'for all'
__all__ = ['main'] | Python |
#!/usr/bin/env python
#
# Copyright (c) 2004, 2005 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Google nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The sitemap_gen.py script is written in Python 2.2 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
__usage__ = \
"""A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
--config=config.xml, specifies config file location
--help, displays usage message
--testing, specified when user is experimenting
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
if sys.hexversion < 0x02020000:
print 'This script requires Python 2.2 or later.'
print 'Currently run with version: %s' % sys.version
sys.exit(1)
import fnmatch
import glob
import gzip
import md5
import os
import re
import stat
import time
import types
import urllib
import urlparse
import xml.sax
# True and False were introduced in Python2.2.2
try:
testTrue=True
del testTrue
except NameError:
True=1
False=0
# Text encodings
ENC_ASCII = 'ASCII'
ENC_UTF8 = 'UTF-8'
ENC_IDNA = 'IDNA'
# Encodings that are functionally equivalent to 7-bit ASCII.
# Bug fix: a comma was missing after 'ISO646-US', which made Python
# silently concatenate it with 'ISO_646.IRV:1991' into one bogus entry,
# so neither name was ever matched.
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
                  'ANSI_X3.4-1986', 'CPASCII' ]
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Available Sitemap types
SITEMAP_TYPES = ['web', 'mobile', 'news']
# General Sitemap tags
GENERAL_SITEMAP_TAGS = ['loc', 'changefreq', 'priority', 'lastmod']
# News specific tags
NEWS_SPECIFIC_TAGS = ['keywords', 'publication_date', 'stock_tickers']
# News Sitemap tags
NEWS_SITEMAP_TAGS = GENERAL_SITEMAP_TAGS + NEWS_SPECIFIC_TAGS
# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000
# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
ACCESSLOG_CLF_PATTERN = re.compile(
r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
)
# Match patterns for lastmod attributes
DATE_PATTERNS = map(re.compile, [
r'^\d\d\d\d$',
r'^\d\d\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
])
# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
]
# XML formats
GENERAL_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
NEWS_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
SITEINDEX_ENTRY = \
' <sitemap>\n' \
' <loc>%(loc)s</loc>\n' \
' <lastmod>%(lastmod)s</lastmod>\n' \
' </sitemap>\n'
GENERAL_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
NEWS_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
NEWS_TAG_XML_PREFIX = ' <news:news>\n'
NEWS_TAG_XML_SUFFIX = ' </news:news>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on. Here's the gist:
# Each item in the list is a 6-tuple of items. The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
# 0 - schema
# 1 - netloc
# 2 - path
# 3 - query <-- EXCEPTION: specify a query map rather than a string
# 4 - fragment
# Additionally, add item 5:
# 5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
NOTIFICATION_SITES = [
('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap'),
]
class Error(Exception):
  """Root of this module's exception hierarchy.

  Custom exception types are used sparingly here; their main value is in
  signalling failures out of the SAX-based XML parsing code.
  """
  pass
#end class Error
class SchemaError(Error):
  """Failure to process an XML file according to the schema we know."""
  pass
#end class SchemaError
class Encoder:
  """
  Manages wide-character/narrow-character conversions for just about all
  text that flows into or out of the script.
  You should always use this class for string coercion, as opposed to
  letting Python handle coercions automatically. Reason: Python
  usually assumes ASCII (7-bit) as a default narrow character encoding,
  which is not the kind of data we generally deal with.
  General high-level methodologies used in sitemap_gen:
  [PATHS]
  File system paths may be wide or narrow, depending on platform.
  This works fine, just be aware of it and be very careful to not
  mix them. That is, if you have to pass several file path arguments
  into a library call, make sure they are all narrow or all wide.
  This class has MaybeNarrowPath() which should be called on every
  file system path you deal with.
  [URLS]
  URL locations are stored in Narrow form, already escaped. This has the
  benefit of keeping escaping and encoding as close as possible to the format
  we read them in. The downside is we may end up with URLs that have
  intermingled encodings -- the root path may be encoded in one way
  while the filename is encoded in another. This is obviously wrong, but
  it should hopefully be an issue hit by very few users. The workaround
  from the user level (assuming they notice) is to specify a default_encoding
  parameter in their config file.
  [OTHER]
  Other text, such as attributes of the URL class, configuration options,
  etc, are generally stored in Unicode for simplicity.
  """

  def __init__(self):
    self._user = None  # User-specified default encoding
    self._learned = []  # Learned default encodings
    self._widefiles = False  # File system can be wide

    # Can the file system be Unicode?
    try:
      self._widefiles = os.path.supports_unicode_filenames
    except AttributeError:
      try:
        # Older Pythons: treat any NT-family Windows as wide-capable.
        self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
      except AttributeError:
        pass

    # Try to guess a working default encoding, preferring the file system's
    # encoding, then the interpreter default; pure-ASCII answers are skipped
    # because they are useless as a narrowing target.
    try:
      encoding = sys.getfilesystemencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]
    except AttributeError:
      pass
    if not self._learned:
      encoding = sys.getdefaultencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]

    # If we had no guesses, start with some European defaults
    if not self._learned:
      self._learned = ENC_DEFAULT_LIST
  #end def __init__

  def SetUserEncoding(self, encoding):
    # Records the config file's default_encoding for later fallback use.
    self._user = encoding
  #end def SetUserEncoding

  def NarrowText(self, text, encoding):
    """ Narrow a piece of arbitrary text """
    if type(text) != types.UnicodeType:
      return text

    # Try the passed in preference; remember it as a learned default if
    # the encode succeeds.
    if encoding:
      try:
        result = text.encode(encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference; a LookupError means the configured encoding
    # name is bogus, so it is discarded permanently.
    if self._user:
      try:
        return text.encode(self._user)
      except UnicodeError:
        pass
      except LookupError:
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    # NOTE(review): the bare 'except' below swallows all exceptions, not
    # just UnicodeError/LookupError.
    while self._learned:
      try:
        return text.encode(self._learned[0])
      except:
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return text.encode(ENC_UTF8)
    except UnicodeError:
      pass

    # Something is seriously wrong if we get to here
    # Last resort: drop anything that cannot be represented in ASCII.
    return text.encode(ENC_ASCII, 'ignore')
  #end def NarrowText

  def MaybeNarrowPath(self, text):
    """ Paths may be allowed to stay wide """
    if self._widefiles:
      return text
    return self.NarrowText(text, None)
  #end def MaybeNarrowPath

  def WidenText(self, text, encoding):
    """ Widen a piece of arbitrary text """
    if type(text) != types.StringType:
      return text

    # Try the passed in preference; remember it if decoding succeeds.
    if encoding:
      try:
        result = unicode(text, encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference; discard it permanently if the name is bogus.
    if self._user:
      try:
        return unicode(text, self._user)
      except UnicodeError:
        pass
      except LookupError:
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    # NOTE(review): bare 'except' again swallows all exception types.
    while self._learned:
      try:
        return unicode(text, self._learned[0])
      except:
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return unicode(text, ENC_UTF8)
    except UnicodeError:
      pass

    # Getting here means it wasn't UTF-8 and we had no working default.
    # We really don't have anything "right" we can do anymore.
    output.Warn('Unrecognized encoding in text: %s' % text)
    if not self._user:
      output.Warn('You may need to set a default_encoding in your '
                  'configuration file.')
    return text.decode(ENC_ASCII, 'ignore')
  #end def WidenText
#end class Encoder

# Module-level singleton shared by the whole script.
encoder = Encoder()
class Output:
  """
  Exposes logging functionality, and tracks how many errors
  we have thus output.
  Logging levels should be used as thus:
  Fatal -- extremely sparingly
  Error -- config errors, entire blocks of user 'intention' lost
  Warn -- individual URLs lost
  Log(,0) -- Un-suppressable text that's not an error
  Log(,1) -- touched files, major actions
  Log(,2) -- parsing notes, filtered or duplicated URLs
  Log(,3) -- each accepted URL
  """

  def __init__(self):
    self.num_errors = 0  # Count of errors
    self.num_warns = 0  # Count of warnings
    self._errors_shown = {}  # Shown errors
    self._warns_shown = {}  # Shown warnings
    self._verbose = 0  # Level of verbosity
  #end def __init__

  def Log(self, text, level):
    """ Output a blurb of diagnostic text, if the verbose level allows it """
    if text:
      text = encoder.NarrowText(text, None)
      if self._verbose >= level:
        print text
  #end def Log

  def Warn(self, text):
    """ Output and count a warning. Suppress duplicate warnings. """
    if text:
      text = encoder.NarrowText(text, None)
      # Digest of the message is used to detect repeats cheaply.
      hash = md5.new(text).digest()
      if not self._warns_shown.has_key(hash):
        self._warns_shown[hash] = 1
        print '[WARNING] ' + text
      else:
        self.Log('(suppressed) [WARNING] ' + text, 3)
      # Counted even when suppressed, so totals reflect all occurrences.
      self.num_warns = self.num_warns + 1
  #end def Warn

  def Error(self, text):
    """ Output and count an error. Suppress duplicate errors. """
    if text:
      text = encoder.NarrowText(text, None)
      hash = md5.new(text).digest()
      if not self._errors_shown.has_key(hash):
        self._errors_shown[hash] = 1
        print '[ERROR] ' + text
      else:
        self.Log('(suppressed) [ERROR] ' + text, 3)
      self.num_errors = self.num_errors + 1
  #end def Error

  def Fatal(self, text):
    """ Output an error and terminate the program. """
    if text:
      text = encoder.NarrowText(text, None)
      print '[FATAL] ' + text
    else:
      print 'Fatal error.'
    sys.exit(1)
  #end def Fatal

  def SetVerbose(self, level):
    """ Sets the verbose level. """
    # Accepts ints or numeric strings; anything else falls through to Error.
    try:
      if type(level) != types.IntType:
        level = int(level)
      if (level >= 0) and (level <= 3):
        self._verbose = level
        return
    except ValueError:
      pass
    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
  #end def SetVerbose
#end class Output

# Module-level singleton used by every class in this script.
output = Output()
class URL(object):
  """ URL is a smart structure grouping together the properties we
  care about for a single web reference. """
  # __slots__ keeps per-instance memory low; a sitemap can hold 50K URLs.
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'

  def __init__(self):
    self.loc = None  # URL -- in Narrow characters
    self.lastmod = None  # ISO8601 timestamp of last modify
    self.changefreq = None  # Text term for update frequency
    self.priority = None  # Float between 0 and 1 (inc)
  #end def __init__

  def __cmp__(self, other):
    # URLs order by location only; other attributes are ignored.
    if self.loc < other.loc:
      return -1
    if self.loc > other.loc:
      return 1
    return 0
  #end def __cmp__

  def TrySetAttribute(self, attribute, value):
    """ Attempt to set the attribute to the value, with a pretty try
    block around it. """
    if attribute == 'loc':
      # Locations are escaped/encoded on the way in.
      self.loc = self.Canonicalize(value)
    else:
      try:
        setattr(self, attribute, value)
      except AttributeError:
        output.Warn('Unknown URL attribute: %s' % attribute)
  #end def TrySetAttribute

  def IsAbsolute(loc):
    """ Decide if the URL is absolute or not """
    if not loc:
      return False
    narrow = encoder.NarrowText(loc, None)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    # Absolute means it carries both a scheme and a host.
    if (not scheme) or (not netloc):
      return False
    return True
  #end def IsAbsolute
  IsAbsolute = staticmethod(IsAbsolute)

  def Canonicalize(loc):
    """ Do encoding and canonicalization on a URL string """
    if not loc:
      return loc

    # Let the encoder try to narrow it
    narrow = encoder.NarrowText(loc, None)

    # Escape components individually.
    # unr/sub are RFC 3986 "unreserved" (minus alphanumerics, which
    # urllib.quote never escapes) and "sub-delims" character classes.
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    unr = '-._~'
    sub = '!$&\'()*+,;='
    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
    path = urllib.quote(path, unr + sub + '%:@/')
    query = urllib.quote(query, unr + sub + '%:@/?')
    frag = urllib.quote(frag, unr + sub + '%:@/?')

    # Try built-in IDNA encoding on the netloc
    try:
      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
      for c in widenetloc:
        if c >= unichr(128):
          # Any non-ASCII character triggers IDNA re-encoding of the host.
          netloc = widenetloc.encode(ENC_IDNA)
          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
          break
    except UnicodeError:
      # urlsplit must have failed, based on implementation differences in the
      # library. There is not much we can do here, except ignore it.
      pass
    except LookupError:
      output.Warn('An International Domain Name (IDN) is being used, but this '
                  'version of Python does not have support for IDNA encoding. '
                  ' (IDNA support was introduced in Python 2.3) The encoding '
                  'we have used instead is wrong and will probably not yield '
                  'valid URLs.')
    bad_netloc = False
    if '%' in netloc:
      bad_netloc = True

    # Put it all back together
    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))

    # I let '%' through. Fix any that aren't pre-existing escapes.
    HEXDIG = '0123456789abcdefABCDEF'
    # NOTE(review): 'list' here shadows the builtin of the same name.
    list = narrow.split('%')
    narrow = list[0]
    del list[0]
    for item in list:
      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
        narrow = narrow + '%' + item
      else:
        # A lone '%' not followed by two hex digits gets escaped itself.
        narrow = narrow + '%25' + item

    # Issue a warning if this is a bad URL
    if bad_netloc:
      output.Warn('Invalid characters in the host or domain portion of a URL: '
                  + narrow)

    return narrow
  #end def Canonicalize
  Canonicalize = staticmethod(Canonicalize)

  def VerifyDate(self, date, metatag):
    """Verify the date format is valid (one of the ISO8601 DATE_PATTERNS)."""
    match = False
    if date:
      date = date.upper()
      for pattern in DATE_PATTERNS:
        match = pattern.match(date)
        if match:
          return True
      if not match:
        output.Warn('The value for %s does not appear to be in ISO8601 '
                    'format on URL: %s' % (metatag, self.loc))
    return False
  #end of VerifyDate

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType

    # Test (and normalize) the ref
    if not self.loc:
      output.Warn('Empty URL')
      return False
    if allow_fragment:
      self.loc = urlparse.urljoin(base_url, self.loc)
    if not self.loc.startswith(base_url):
      output.Warn('Discarded URL for not starting with the base_url: %s' %
                  self.loc)
      self.loc = None
      return False

    # Test the lastmod; an invalid date is dropped rather than failing the URL.
    if self.lastmod:
      if not self.VerifyDate(self.lastmod, "lastmod"):
        self.lastmod = None

    # Test the changefreq against the fixed vocabulary.
    if self.changefreq:
      match = False
      self.changefreq = self.changefreq.lower()
      for pattern in CHANGEFREQ_PATTERNS:
        if self.changefreq == pattern:
          match = True
          break
      if not match:
        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
                    ': %s' % (self.changefreq, self.loc))
        self.changefreq = None

    # Test the priority; must parse as a float in [0, 1].
    if self.priority:
      priority = -1.0
      try:
        priority = float(self.priority)
      except ValueError:
        pass
      if (priority < 0.0) or (priority > 1.0):
        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
                    'on URL: %s' % (self.priority, self.loc))
        self.priority = None

    return True
  #end def Validate

  def MakeHash(self):
    """ Provides a uniform way of hashing URLs """
    if not self.loc:
      return None
    # Trailing-slash and non-slash forms hash identically.
    if self.loc.endswith('/'):
      return md5.new(self.loc[:-1]).digest()
    return md5.new(self.loc).digest()
  #end def MakeHash

  def Log(self, prefix='URL', level=3):
    """ Dump the contents, empty or not, to the log. """
    out = prefix + ':'
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if not value:
        value = ''
      out = out + (' %s=[%s]' % (attribute, value))
    output.Log('%s' % encoder.NarrowText(out, None), level)
  #end def Log

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow and escape every value before it touches the XML stream.
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class URL
class NewsURL(URL):
  """ NewsURL is a subclass of URL with News-Sitemap specific properties. """
  # Slots must re-list the parent's names because URL also defines __slots__.
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority', 'publication_date', \
              'keywords', 'stock_tickers'

  def __init__(self):
    URL.__init__(self)
    self.publication_date = None  # ISO8601 timestamp of publication date
    self.keywords = None  # Text keywords
    self.stock_tickers = None  # Text stock
  #end def __init__

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this News URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType
    if not URL.Validate(self, base_url, allow_fragment):
      return False
    # An invalid publication_date is dropped rather than failing the URL.
    if not URL.VerifyDate(self, self.publication_date, "publication_date"):
      self.publication_date = None
    return True
  #end def Validate

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX

    # printed_news_tag indicates if news-specific metatags are present;
    # the <news:news> wrapper is emitted lazily on the first such tag.
    printed_news_tag = False
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        if attribute in NEWS_SPECIFIC_TAGS:
          if not printed_news_tag:
            printed_news_tag = True
            out = out + NEWS_TAG_XML_PREFIX
          out = out + (' <news:%s>%s</news:%s>\n' % (attribute, value, attribute))
        else:
          out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))

    if printed_news_tag:
      out = out + NEWS_TAG_XML_SUFFIX
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class NewsURL
class Filter:
  """
  A filter on the stream of URLs we find. A filter is, in essence,
  a wildcard applied to the stream. You can think of this as an
  operator that returns a tri-state when given a URL:
  True -- this URL is to be included in the sitemap
  None -- this URL is undecided
  False -- this URL is to be dropped from the sitemap
  """

  def __init__(self, attributes):
    self._wildcard = None  # Pattern for wildcard match
    self._regexp = None  # Pattern for regexp match
    self._pass = False  # "Drop" filter vs. "Pass" filter

    # ValidateAttributes is defined elsewhere in this script; it rejects
    # unknown attribute names on the FILTER config element.
    if not ValidateAttributes('FILTER', attributes,
                              ('pattern', 'type', 'action')):
      return

    # Check error count on the way in
    num_errors = output.num_errors

    # Fetch the attributes
    pattern = attributes.get('pattern')
    # NOTE(review): 'type' shadows the builtin of the same name here.
    type = attributes.get('type', 'wildcard')
    action = attributes.get('action', 'drop')
    if type:
      type = type.lower()
    if action:
      action = action.lower()

    # Verify the attributes
    if not pattern:
      output.Error('On a filter you must specify a "pattern" to match')
    elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
      output.Error('On a filter you must specify either \'type="wildcard"\' '
                   'or \'type="regexp"\'')
    elif (action != 'pass') and (action != 'drop'):
      output.Error('If you specify a filter action, it must be either '
                   '\'action="pass"\' or \'action="drop"\'')

    # Set the rule
    if action == 'drop':
      self._pass = False
    elif action == 'pass':
      self._pass = True

    if type == 'wildcard':
      self._wildcard = pattern
    elif type == 'regexp':
      try:
        self._regexp = re.compile(pattern)
      except re.error:
        output.Error('Bad regular expression: %s' % pattern)

    # Log the final results iff we didn't add any errors
    if num_errors == output.num_errors:
      output.Log('Filter: %s any URL that matches %s "%s"' %
                 (action, type, pattern), 2)
  #end def __init__

  def Apply(self, url):
    """ Process the URL, as above. """
    if (not url) or (not url.loc):
      return None

    if self._wildcard:
      if fnmatch.fnmatchcase(url.loc, self._wildcard):
        return self._pass
      return None

    if self._regexp:
      if self._regexp.search(url.loc):
        return self._pass
      return None

    # Neither pattern was configured (config error reported in __init__).
    assert False # unreachable
  #end def Apply
#end class Filter
class InputURL:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a single URL, manually specified in the config file.
  """

  def __init__(self, attributes):
    """ Capture one URL from config attributes; 'href' maps onto the
    URL object's 'loc' attribute, everything else maps by name. """
    self._url = None                        # The lonely URL

    if not ValidateAttributes('URL', attributes,
                              ('href', 'lastmod', 'changefreq', 'priority')):
      return

    url = URL()
    for name in attributes.keys():
      value = attributes[name]
      if name == 'href':
        name = 'loc'
      url.TrySetAttribute(name, value)

    if not url.loc:
      output.Error('Url entries must have an href attribute.')
      return

    self._url = url
    output.Log('Input: From URL "%s"' % self._url.loc, 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._url:
      return
    # allow_fragment=True: a hand-specified URL may carry a fragment.
    consumer(self._url, True)
  #end def ProduceURLs
#end class InputURL
class InputURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a text file with a list of URLs
  """

  def __init__(self, attributes):
    """ Validate the 'path' and 'encoding' attributes.  On any problem an
    error is logged and self._path stays None, making ProduceURLs a no-op. """
    self._path = None  # The file path
    self._encoding = None  # Encoding of that file

    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
      return

    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer.

    File format: one URL per line; the first space-separated column is the
    location, remaining columns are 'name=value' attribute pairs.  Lines
    that are blank or start with '#' are skipped.
    """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return

    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1

      # Strip comments and empty lines
      if self._encoding:
        # Decode to Unicode before stripping/splitting so multi-byte
        # encodings are handled correctly.
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue

      # Split the line on space
      url = URL()
      cols = line.split(' ')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()
      url.TrySetAttribute('loc', cols[0])

      # Extract attributes from the other columns
      for i in range(1,len(cols)):
        if cols[i]:
          try:
            (attr_name, attr_val) = cols[i].split('=', 1)
            url.TrySetAttribute(attr_name, attr_val)
          except ValueError:
            # Column had no '=' separator; warn and keep going.
            output.Warn('Line %d: Unable to parse attribute: %s' %
                        (linenum, cols[i]))

      # Pass it on
      consumer(url, False)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputURLList
class InputNewsURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a text file with a list of News URLs and their metadata
  """

  def __init__(self, attributes):
    """ Validate 'path', 'encoding' and 'tag_order'.  On any problem an
    error is logged and the input is left inert. """
    self._path = None  # The file path
    self._encoding = None  # Encoding of that file
    self._tag_order = []  # Order of URL metadata

    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding',
                              'tag_order')):
      return

    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    self._tag_order = attributes.get('tag_order')

    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')

    # parse tag_order into an array
    # tag_order_ascii created for more readable logging
    tag_order_ascii = []
    if self._tag_order:
      self._tag_order = self._tag_order.split(",")
      for i in range(0, len(self._tag_order)):
        element = self._tag_order[i].strip().lower()
        self._tag_order[i]= element
        tag_order_ascii.append(element.encode('ascii'))
      output.Log('Input: From URLLIST tag order is "%s"' % tag_order_ascii, 0)
    else:
      output.Error('News Urllist configuration file must contain tag_order '
                   'to define Sitemap metatags.')
      # BUGFIX: bail out here.  The loops below iterate self._tag_order,
      # which is None when tag_order was omitted, and would raise a
      # TypeError instead of the intended clean error message.
      return

    # verify all tag_order inputs are valid
    tag_order_dict = {}
    for tag in self._tag_order:
      tag_order_dict[tag] = ""
    if not ValidateAttributes('URLLIST', tag_order_dict,
                              NEWS_SITEMAP_TAGS):
      return

    # loc tag must be present
    loc_tag = False
    for tag in self._tag_order:
      if tag == 'loc':
        loc_tag = True
        break
    if not loc_tag:
      output.Error('News Urllist tag_order in configuration file '
                   'does not contain "loc" value: %s' % tag_order_ascii)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer.

    File format: one URL per line, columns separated by tabs; column i is
    assigned to the attribute named by tag_order[i].  Blank lines and
    lines starting with '#' are skipped.
    """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return

    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1

      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue

      # Split the line on tabs
      url = NewsURL()
      cols = line.split('\t')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()

      for i in range(0,len(cols)):
        if cols[i]:
          attr_value = cols[i]
          # Columns beyond the declared tag_order are silently ignored.
          if i < len(self._tag_order):
            attr_name = self._tag_order[i]
            try:
              url.TrySetAttribute(attr_name, attr_value)
            except ValueError:
              output.Warn('Line %d: Unable to parse attribute: %s' %
                          (linenum, cols[i]))

      # Pass it on
      consumer(url, False)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputNewsURLList
class InputDirectory:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a directory that acts as base for walking the filesystem.
  """

  def __init__(self, attributes, base_url):
    """ Validate 'path', 'url', 'default_file' and
    'remove_empty_directories'.  On any error self._path stays None so
    ProduceURLs is a no-op. """
    self._path = None  # The directory
    self._url = None  # The URL equivalent
    self._default_file = None
    self._remove_empty_directories = False

    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
                              'default_file', 'remove_empty_directories')):
      return

    # Prep the path -- it MUST end in a sep
    path = attributes.get('path')
    if not path:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    path = encoder.MaybeNarrowPath(path)
    if not path.endswith(os.sep):
      path = path + os.sep
    if not os.path.isdir(path):
      output.Error('Can not locate directory: %s' % path)
      return

    # Prep the URL -- it MUST end in a sep
    url = attributes.get('url')
    if not url:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    url = URL.Canonicalize(url)
    if not url.endswith('/'):
      url = url + '/'
    if not url.startswith(base_url):
      url = urlparse.urljoin(base_url, url)
      if not url.startswith(base_url):
        output.Error('The directory URL "%s" is not relative to the '
                     'base_url: %s' % (url, base_url))
        return

    # Prep the default file -- it MUST be just a filename
    file = attributes.get('default_file')
    if file:
      file = encoder.MaybeNarrowPath(file)
      if os.sep in file:
        output.Error('The default_file "%s" can not include path information.'
                     % file)
        file = None

    # Prep the remove_empty_directories -- default is false
    remove_empty_directories = attributes.get('remove_empty_directories')
    if remove_empty_directories:
      if (remove_empty_directories == '1') or \
         (remove_empty_directories.lower() == 'true'):
        remove_empty_directories = True
      elif (remove_empty_directories == '0') or \
           (remove_empty_directories.lower() == 'false'):
        remove_empty_directories = False
      # otherwise the user set a non-default value
      else:
        output.Error('Configuration file remove_empty_directories '
                     'value is not recognized. Value must be true or false.')
        return
    else:
      remove_empty_directories = False

    self._path = path
    self._url = url
    self._default_file = file
    self._remove_empty_directories = remove_empty_directories

    if file:
      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
                 % (path, url, file), 2)
    else:
      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
                 % (path, url), 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._path:
      return

    root_path = self._path
    root_URL = self._url
    root_file = self._default_file
    remove_empty_directories = self._remove_empty_directories

    def HasReadPermissions(path):
      """ Verifies a given path has read permissions. """
      stat_info = os.stat(path)
      mode = stat_info[stat.ST_MODE]
      if mode & stat.S_IREAD:
        return True
      else:
        return None

    def PerFile(dirpath, name):
      """
      Called once per file.
      Note that 'name' will occasionally be None -- for a directory itself
      """
      # Pull a timestamp
      url = URL()
      isdir = False
      try:
        if name:
          path = os.path.join(dirpath, name)
        else:
          path = dirpath
        isdir = os.path.isdir(path)
        time = None
        # For a directory, prefer the default file's mtime if it exists.
        if isdir and root_file:
          file = os.path.join(path, root_file)
          try:
            time = os.stat(file)[stat.ST_MTIME]
          except OSError:
            pass
        if not time:
          time = os.stat(path)[stat.ST_MTIME]
        url.lastmod = TimestampISO8601(time)
      except OSError:
        pass
      except ValueError:
        pass

      # Build a URL
      middle = dirpath[len(root_path):]
      if os.sep != '/':
        middle = middle.replace(os.sep, '/')
      if middle:
        middle = middle + '/'
      if name:
        middle = middle + name
        if isdir:
          middle = middle + '/'
      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))

      # Suppress default files.  (All the way down here so we can log it.)
      if name and (root_file == name):
        url.Log(prefix='IGNORED (default file)', level=2)
        return

      # Suppress directories when remove_empty_directories="true"
      try:
        if isdir:
          if HasReadPermissions(path):
            # BUGFIX: this used to compare the boolean flag against the
            # string 'true' ("remove_empty_directories == 'true'"), which
            # never matched, so empty directories were never suppressed.
            if remove_empty_directories and \
               len(os.listdir(path)) == 0:
              output.Log('IGNORED empty directory %s' % str(path), level=1)
              return
          elif path == self._path:
            output.Error('IGNORED configuration file directory input %s due '
                         'to file permissions' % self._path)
          else:
            output.Log('IGNORED files within directory %s due to file '
                       'permissions' % str(path), level=0)
      except OSError:
        pass
      except ValueError:
        pass

      consumer(url, False)
    #end def PerFile

    def PerDirectory(ignore, dirpath, namelist):
      """
      Called once per directory with a list of all the contained files/dirs.
      """
      ignore = ignore  # Avoid warnings of an unused parameter

      if not dirpath.startswith(root_path):
        output.Warn('Unable to decide what the root path is for directory: '
                    '%s' % dirpath)
        return

      for name in namelist:
        PerFile(dirpath, name)
    #end def PerDirectory

    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
    PerFile(self._path, None)
    os.path.walk(self._path, PerDirectory, None)
  #end def ProduceURLs
#end class InputDirectory
class InputAccessLog:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles access logs.  It's non-trivial in that we want to
  auto-detect log files in the Common Logfile Format (as used by Apache,
  for instance) and the Extended Log File Format (as used by IIS, for
  instance).
  """

  def __init__(self, attributes):
    """ Validate 'path' and 'encoding'; on any problem an error is logged
    and self._path stays None, making ProduceURLs a no-op. """
    self._path = None  # The file path
    self._encoding = None  # Encoding of that file
    # Format state: which format was detected, and the ELF field indexes
    # (-1 means "field not present in the #Fields: directive").
    self._is_elf = False  # Extended Log File Format?
    self._is_clf = False  # Common Logfile Format?
    self._elf_status = -1  # ELF field: '200'
    self._elf_method = -1  # ELF field: 'HEAD'
    self._elf_uri = -1  # ELF field: '/foo?bar=1'
    self._elf_urifrag1 = -1  # ELF field: '/foo'
    self._elf_urifrag2 = -1  # ELF field: 'bar=1'

    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
      return

    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Accesslog entries must have a "path" attribute.')
  #end def __init__

  def RecognizeELFLine(self, line):
    """ Recognize the Fields directive that heads an ELF file.

    Records the column index of each field of interest so GetELFLine can
    extract them later.  Returns True iff the line is a Fields directive.
    """
    if not line.startswith('#Fields:'):
      return False
    fields = line.split(' ')
    del fields[0]
    for i in range(0, len(fields)):
      field = fields[i].strip()
      if field == 'sc-status':
        self._elf_status = i
      elif field == 'cs-method':
        self._elf_method = i
      elif field == 'cs-uri':
        self._elf_uri = i
      elif field == 'cs-uri-stem':
        self._elf_urifrag1 = i
      elif field == 'cs-uri-query':
        self._elf_urifrag2 = i
    output.Log('Recognized an Extended Log File Format file.', 2)
    return True
  #end def RecognizeELFLine

  def GetELFLine(self, line):
    """ Fetch the requested URL from an ELF line.

    Returns the URL string, or None if the line should be skipped
    (non-200 status, non-GET/HEAD method, or missing URL fields).
    """
    fields = line.split(' ')
    count = len(fields)

    # Verify status was Ok
    if self._elf_status >= 0:
      if self._elf_status >= count:
        return None
      if not fields[self._elf_status].strip() == '200':
        return None

    # Verify method was HEAD or GET
    if self._elf_method >= 0:
      if self._elf_method >= count:
        return None
      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
        return None

    # Pull the full URL if we can
    if self._elf_uri >= 0:
      if self._elf_uri >= count:
        return None
      url = fields[self._elf_uri].strip()
      if url != '-':
        return url

    # Put together a fragmentary URL
    if self._elf_urifrag1 >= 0:
      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return None
      urlfrag1 = fields[self._elf_urifrag1].strip()
      urlfrag2 = None
      if self._elf_urifrag2 >= 0:
        urlfrag2 = fields[self._elf_urifrag2]
      if urlfrag1 and (urlfrag1 != '-'):
        # Re-attach the query string, if any, with '?'.
        if urlfrag2 and (urlfrag2 != '-'):
          urlfrag1 = urlfrag1 + '?' + urlfrag2
        return urlfrag1

    return None
  #end def GetELFLine

  def RecognizeCLFLine(self, line):
    """ Try to tokenize a logfile line according to CLF pattern and see if
    it works. """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    recognize = match and (match.group(1) in ('HEAD', 'GET'))
    if recognize:
      output.Log('Recognized a Common Logfile Format file.', 2)
    return recognize
  #end def RecognizeCLFLine

  def GetCLFLine(self, line):
    """ Fetch the requested URL from a CLF line.

    Returns the URL string, or None for non-GET/HEAD or unparsable lines.
    """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    if match:
      request = match.group(1)
      if request in ('HEAD', 'GET'):
        return match.group(2)
    return None
  #end def GetCLFLine

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
    if not file:
      return

    # Iterate lines
    for line in file.readlines():
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()

      # If we don't know the format yet, try them both
      if (not self._is_clf) and (not self._is_elf):
        self._is_elf = self.RecognizeELFLine(line)
        self._is_clf = self.RecognizeCLFLine(line)

      # Digest the line
      match = None
      if self._is_elf:
        match = self.GetELFLine(line)
      elif self._is_clf:
        match = self.GetCLFLine(line)
      if not match:
        continue

      # Pass it on
      url = URL()
      url.TrySetAttribute('loc', match)
      # allow_fragment=True: logged request URIs may include fragments.
      consumer(url, True)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputAccessLog
class FilePathGenerator:
  """
  This class generates filenames in a series, upon request.
  You can request any iteration number at any time, you don't
  have to go in order.

  Example of iterations for '/path/foo.xml.gz':
    0           --> /path/foo.xml.gz
    1           --> /path/foo1.xml.gz
    2           --> /path/foo2.xml.gz
    _index.xml  --> /path/foo_index.xml
  """

  def __init__(self):
    self.is_gzip = False  # Is this a GZIP file?
    self._path = None     # '/path/'
    self._prefix = None   # 'foo'
    self._suffix = None   # '.xml.gz'
  #end def __init__

  def Preload(self, path):
    """ Splits up a path into forms ready for recombination. """
    path = encoder.MaybeNarrowPath(path)

    # Get down to a base name
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False
    lenbase = len(base)

    # Recognize the extension (first matching candidate wins)
    lensuffix = 0
    for candidate in ('.xml', '.xml.gz', '.gz'):
      if base.endswith(candidate):
        lensuffix = len(candidate)
        self.is_gzip = candidate.endswith('.gz')
        break
    if not lensuffix:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False

    # Split the original path into directory / stem / extension
    lenpath = len(path)
    self._path = path[:lenpath - lenbase]
    self._prefix = path[lenpath - lenbase:lenpath - lensuffix]
    self._suffix = path[lenpath - lensuffix:]
    return True
  #end def Preload

  def GeneratePath(self, instance):
    """ Generates the iterations, as described above. """
    stem = self._path + self._prefix
    if type(instance) != types.IntType:
      # A string instance (e.g. '_index.xml') replaces the extension.
      return stem + instance
    if instance:
      return '%s%d%s' % (stem, instance, self._suffix)
    return stem + self._suffix
  #end def GeneratePath

  def GenerateURL(self, instance, root_url):
    """ Generates iterations, but as a URL instead of a path. """
    stem = root_url + self._prefix
    if type(instance) != types.IntType:
      result = stem + instance
    elif instance:
      result = '%s%d%s' % (stem, instance, self._suffix)
    else:
      result = stem + self._suffix
    return URL.Canonicalize(result)
  #end def GenerateURL

  def GenerateWildURL(self, root_url):
    """ Generates a wildcard that should match all our iterations """
    head = URL.Canonicalize(root_url + self._prefix)
    tail = URL.Canonicalize(head + self._suffix)[len(head):]
    return head + '*' + tail
  #end def GenerateWildURL
#end class FilePathGenerator
class PerURLStatistics:
  """ Keep track of some simple per-URL statistics, like file extension. """

  def __init__(self):
    self._extensions = {}  # Count of extension instances
  #end def __init__

  def _Tally(self, key):
    """ Bump the count stored under a single extension key. """
    if key in self._extensions:
      self._extensions[key] = self._extensions[key] + 1
    else:
      self._extensions[key] = 1
  #end def _Tally

  def Consume(self, url):
    """ Log some stats for the URL.  At the moment, that means extension. """
    if not (url and url.loc):
      return
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
    if not path:
      return

    # Recognize directories
    if path.endswith('/'):
      self._Tally('/')
      return

    # Strip to a filename
    slash = path.rfind('/')
    if slash >= 0:
      path = path[slash:]

    # Find extension
    dot = path.rfind('.')
    if dot > 0:
      self._Tally(path[dot:].lower())
    else:
      self._Tally('(no extension)')
  #end def Consume

  def Log(self):
    """ Dump out stats to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      keys = self._extensions.keys()
      keys.sort()
      for ext in keys:
        output.Log(' %7d %s' % (self._extensions[ext], ext), 1)
  #end def Log
class Sitemap(xml.sax.handler.ContentHandler):
"""
This is the big workhorse class that processes your inputs and spits
out sitemap files. It is built as a SAX handler for set up purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
self._in_Site_ever = False # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._sitemap_type = None # Sitemap type (web, mobile or news)
self._suppress = suppress_notify # Suppress notify of servers
#end def __init__
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Clean up the sitemap_type
if all_good:
match = False
# If sitemap_type is not specified, default to web sitemap
if not self._sitemap_type:
self._sitemap_type = 'web'
else:
self._sitemap_type = self._sitemap_type.lower()
for pattern in SITEMAP_TYPES:
if self._sitemap_type == pattern:
match = True
break
if not match:
output.Error('The "sitemap_type" value must be "web", "mobile" '
'or "news": %s' % self._sitemap_type)
all_good = False
output.Log('The Sitemap type is %s Sitemap.' % \
self._sitemap_type.upper(), 0)
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
# Ignore our out output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Determine what Sitemap header to use (News or General)
if self._sitemap_type == 'news':
sitemap_header = NEWS_SITEMAP_HEADER
else:
sitemap_header = GENERAL_SITEMAP_HEADER
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
basename = os.path.basename(filename);
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(sitemap_header)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
# Determine what Sitemap index header to use (News or General)
if self._sitemap_type == 'news':
sitemap_index_header = NEWS_SITEMAP_HEADER
else:
sitemap__index_header = GENERAL_SITEMAP_HEADER
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(sitemap_index_header)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
#end class ExceptionURLOpener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
output.Error('When attempting to access our generated Sitemap at the '
'following URL:\n %s\n we failed to read it. Please '
'verify the store_into path you specified in\n'
' your configuration file is web-accessable. Consult '
'the FAQ for more\n information.' % url)
output.Warn('Proceeding to notify with an unverifyable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 0)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify', 'sitemap_type')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
self._sitemap_type= attributes.get('sitemap_type')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
print type(attributes)
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
if self._sitemap_type == 'news':
self._inputs.append(InputNewsURLList(attributeset))
else:
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#end class Sitemap
def ValidateAttributes(tag, attributes, goodattributes):
  """ Makes sure 'attributes' does not contain any attribute not
  listed in 'goodattributes' """
  result = True
  for name in attributes.keys():
    if name not in goodattributes:
      output.Error('Unknown %s attribute: %s' % (tag, name))
      result = False
  return result
#end def ValidateAttributes
def ExpandPathAttribute(src, attrib):
  """ Given a dictionary of attributes, return a list of dictionaries
  with all the same attributes except for the one named attrib.
  That one, we treat as a file path and expand into all its possible
  variations. """
  # Do the path expansion.  On any error, just return the source dictionary.
  path = src.get(attrib)
  if not path:
    return [src]
  path = encoder.MaybeNarrowPath(path)
  matches = glob.glob(path)
  if not matches:
    return [src]

  # If this isn't actually a dictionary, make it one
  if type(src) != types.DictionaryType:
    converted = {}
    for key in src.keys():
      converted[key] = src[key]
    src = converted

  # Create one new dictionary per glob match
  expanded = []
  for match in matches:
    variant = src.copy()
    variant[attrib] = match
    expanded.append(variant)
  return expanded
#end def ExpandPathAttribute
def OpenFileForRead(path, logtext):
  """ Opens a text file, be it GZip or plain """
  # Returns a (frame, file) pair.  For a '.gz' path, 'frame' is the raw
  # binary file object and 'file' is the GzipFile layered over it -- the
  # caller must close both.  For a plain file only 'file' is set.
  # On IOError both can come back None after an error is logged.
  frame = None
  file = None
  if not path:
    return (frame, file)
  try:
    if path.endswith('.gz'):
      frame = open(path, 'rb')
      file = gzip.GzipFile(fileobj=frame, mode='rt')
    else:
      file = open(path, 'rt')
    if logtext:
      output.Log('Opened %s file: %s' % (logtext, path), 1)
    else:
      output.Log('Opened file: %s' % path, 1)
  except IOError:
    output.Error('Can not open file: %s' % path)
  return (frame, file)
#end def OpenFileForRead
def TimestampISO8601(t):
  """Seconds since epoch (1970-01-01) --> ISO 8601 time string."""
  # Break out the UTC struct_time first, then render it.
  utc = time.gmtime(t)
  return time.strftime('%Y-%m-%dT%H:%M:%SZ', utc)
#end def TimestampISO8601
def CreateSitemapFromFile(configpath, suppress_notify):
  """ Sets up a new Sitemap object from the specified configuration file. """
  # Returns the Sitemap on success, or None if any errors were logged
  # while parsing the configuration.
  # Remember error count on the way in
  num_errors = output.num_errors
  # Rev up SAX to parse the config
  sitemap = Sitemap(suppress_notify)
  try:
    output.Log('Reading configuration file: %s' % configpath, 0)
    xml.sax.parse(configpath, sitemap)
  except IOError:
    output.Error('Cannot read configuration file: %s' % configpath)
  except xml.sax._exceptions.SAXParseException, e:
    # _linenum/_colnum are private SAX fields, but give far better
    # diagnostics than the bare message.
    output.Error('XML error in the config file (line %d, column %d): %s' %
                 (e._linenum, e._colnum, e.getMessage()))
  except xml.sax._exceptions.SAXReaderNotAvailable:
    output.Error('Some installs of Python 2.2 did not include complete support'
                 ' for XML.\n Please try upgrading your version of Python'
                 ' and re-running the script.')
  # If we added any errors, return no sitemap
  if num_errors == output.num_errors:
    return sitemap
  return None
#end def CreateSitemapFromFile
def ProcessCommandFlags(args):
  """
  Parse command line flags per specified usage, pick off key, value pairs
  All flags of type "--key=value" will be processed as __flags[key] = value,
  "--option" will be processed as __flags[option] = option

  Returns the flags dictionary, or None on the first unparseable argument.
  """
  flags = {}
  rkeyval = r'--(?P<key>\S*)[=](?P<value>\S*)'  # --key=val
  roption = r'--(?P<option>\S*)'                # --key
  r = '(' + rkeyval + ')|(' + roption + ')'
  rc = re.compile(r)
  for a in args:
    try:
      rcg = rc.search(a).groupdict()
      # groupdict() always contains every named group; a group that did not
      # participate in the match maps to None.  The old has_key() tests were
      # therefore always true, and a plain "--option" argument used to add a
      # spurious flags[None] = None entry.  Test the values instead.
      if rcg['key'] is not None:
        flags[rcg['key']] = rcg['value']
      if rcg['option'] is not None:
        flags[rcg['option']] = rcg['option']
    except AttributeError:
      # rc.search() returned None: the argument is not a flag at all
      return None
  return flags
#end def ProcessCommandFlags
#
# __main__
#
if __name__ == '__main__':
  # Parse flags; print usage if parsing failed, --config is missing,
  # or --help was requested.
  flags = ProcessCommandFlags(sys.argv[1:])
  if not flags or not flags.has_key('config') or flags.has_key('help'):
    output.Log(__usage__, 0)
  else:
    # --testing suppresses the search-engine notification pings
    suppress_notify = flags.has_key('testing')
    sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
    if not sitemap:
      output.Log('Configuration file errors -- exiting.', 0)
    else:
      sitemap.Generate()
      output.Log('Number of errors: %d' % output.num_errors, 1)
      output.Log('Number of warnings: %d' % output.num_warns, 1)
# | Python |
#!/usr/bin/env python
#
# Copyright (c) 2004, 2005 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Google nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The sitemap_gen.py script is written in Python 2.2 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
# Command-line usage text.  Printed verbatim by __main__ when --help is
# given or the required --config flag is missing.
__usage__ = \
"""A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
--config=config.xml, specifies config file location
--help, displays usage message
--testing, specified when user is experimenting
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
# Bail out early on interpreters older than 2.2 -- the rest of the script
# relies on 2.2 features (e.g. new-style classes, SAX).
if sys.hexversion < 0x02020000:
  print 'This script requires Python 2.2 or later.'
  print 'Currently run with version: %s' % sys.version
  sys.exit(1)
import fnmatch
import glob
import gzip
import md5
import os
import re
import stat
import time
import types
import urllib
import urlparse
import xml.sax
# True and False were introduced in Python2.2.2
# On older interpreters merely referencing True raises NameError; in that
# case define integer stand-ins at module scope.
try:
  testTrue=True
  del testTrue
except NameError:
  True=1
  False=0
# Text encodings
ENC_ASCII = 'ASCII'
ENC_UTF8 = 'UTF-8'
ENC_IDNA = 'IDNA'
# Names the ASCII codec is known by.  BUGFIX: the list was missing a comma
# after 'ISO646-US', which silently concatenated it with the next literal
# into the bogus entry 'ISO646-USISO_646.IRV:1991' and dropped both names.
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
                  'ANSI_X3.4-1986', 'CPASCII' ]
# Fallback encodings to try when nothing can be learned from the system
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Available Sitemap types
SITEMAP_TYPES = ['web', 'mobile', 'news']
# General Sitemap tags
GENERAL_SITEMAP_TAGS = ['loc', 'changefreq', 'priority', 'lastmod']
# News specific tags
NEWS_SPECIFIC_TAGS = ['keywords', 'publication_date', 'stock_tickers']
# News Sitemap tags
NEWS_SITEMAP_TAGS = GENERAL_SITEMAP_TAGS + NEWS_SPECIFIC_TAGS
# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000
# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
# Matches Common Log Format lines with a 200 status code.
ACCESSLOG_CLF_PATTERN = re.compile(
    r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
    )
# Match patterns for lastmod attributes (the allowed ISO8601 subsets).
# Built with a list comprehension rather than map() so the result is
# unambiguously a reusable list.
DATE_PATTERNS = [re.compile(pattern) for pattern in [
    r'^\d\d\d\d$',
    r'^\d\d\d\d-\d\d$',
    r'^\d\d\d\d-\d\d-\d\d$',
    r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
    r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
    r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
    r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
    ]]
# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
    'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
    ]
# XML formats
# NOTE: these are raw output fragments -- whitespace inside the string
# literals is significant and is emitted verbatim into the sitemap files.
GENERAL_SITEINDEX_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<sitemapindex\n' \
  ' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
  ' http://www.sitemaps.org/schemas/sitemap/0.9/' \
  'siteindex.xsd">\n'
NEWS_SITEINDEX_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<sitemapindex\n' \
  ' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
  ' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
  ' http://www.sitemaps.org/schemas/sitemap/0.9/' \
  'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
# %-format entry; expects a mapping with 'loc' and 'lastmod' keys
SITEINDEX_ENTRY = \
  ' <sitemap>\n' \
  ' <loc>%(loc)s</loc>\n' \
  ' <lastmod>%(lastmod)s</lastmod>\n' \
  ' </sitemap>\n'
GENERAL_SITEMAP_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<urlset\n' \
  ' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
  ' http://www.sitemaps.org/schemas/sitemap/0.9/' \
  'sitemap.xsd">\n'
NEWS_SITEMAP_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<urlset\n' \
  ' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
  ' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
  ' http://www.sitemaps.org/schemas/sitemap/0.9/' \
  'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
NEWS_TAG_XML_PREFIX = ' <news:news>\n'
NEWS_TAG_XML_SUFFIX = ' </news:news>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on. Here's the gist:
# Each item in the list is a 6-tuple of items. The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
# 0 - schema
# 1 - netloc
# 2 - path
# 3 - query <-- EXCEPTION: specify a query map rather than a string
# 4 - fragment
# Additionally, add item 5:
# 5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
NOTIFICATION_SITES = [
  ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap'),
  ]
class Error(Exception):
  """
  Root of this module's exception hierarchy.  We rarely raise our own
  exception types here, but they are very handy during SAX XML parsing.
  """
  pass
#end class Error
class SchemaError(Error):
  """Raised when an XML file does not conform to the schema we know."""
  pass
#end class SchemaError
class Encoder:
  """
  Manages wide-character/narrow-character conversions for just about all
  text that flows into or out of the script.

  You should always use this class for string coercion, as opposed to
  letting Python handle coercions automatically. Reason: Python
  usually assumes ASCII (7-bit) as a default narrow character encoding,
  which is not the kind of data we generally deal with.

  General high-level methodologies used in sitemap_gen:

  [PATHS]
  File system paths may be wide or narrow, depending on platform.
  This works fine, just be aware of it and be very careful to not
  mix them. That is, if you have to pass several file path arguments
  into a library call, make sure they are all narrow or all wide.
  This class has MaybeNarrowPath() which should be called on every
  file system path you deal with.

  [URLS]
  URL locations are stored in Narrow form, already escaped. This has the
  benefit of keeping escaping and encoding as close as possible to the format
  we read them in. The downside is we may end up with URLs that have
  intermingled encodings -- the root path may be encoded in one way
  while the filename is encoded in another. This is obviously wrong, but
  it should hopefully be an issue hit by very few users. The workaround
  from the user level (assuming they notice) is to specify a default_encoding
  parameter in their config file.

  [OTHER]
  Other text, such as attributes of the URL class, configuration options,
  etc, are generally stored in Unicode for simplicity.
  """

  def __init__(self):
    self._user = None        # User-specified default encoding
    self._learned = []       # Learned default encodings
    self._widefiles = False  # File system can be wide

    # Can the file system be Unicode?
    try:
      self._widefiles = os.path.supports_unicode_filenames
    except AttributeError:
      try:
        # BUGFIX: compare the *platform* field of the version tuple against
        # VER_PLATFORM_WIN32_NT.  The original compared the whole tuple to
        # the integer constant, which is always False.
        self._widefiles = sys.getwindowsversion()[3] == os.VER_PLATFORM_WIN32_NT
      except AttributeError:
        pass

    # Try to guess a working default
    try:
      encoding = sys.getfilesystemencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]
    except AttributeError:
      pass

    if not self._learned:
      encoding = sys.getdefaultencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]

    # If we had no guesses, start with some European defaults
    if not self._learned:
      self._learned = ENC_DEFAULT_LIST
  #end def __init__

  def SetUserEncoding(self, encoding):
    """ Remember the user's preferred default encoding. """
    self._user = encoding
  #end def SetUserEncoding

  def NarrowText(self, text, encoding):
    """ Narrow a piece of arbitrary text """
    if type(text) != types.UnicodeType:
      return text

    # Try the passed in preference
    if encoding:
      try:
        result = text.encode(encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference
    if self._user:
      try:
        return text.encode(self._user)
      except UnicodeError:
        pass
      except LookupError:
        # Unknown codec: drop the bad user setting so we only warn once
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return text.encode(self._learned[0])
      except:
        # Deliberate best-effort: any failure disqualifies this default
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return text.encode(ENC_UTF8)
    except UnicodeError:
      pass

    # Something is seriously wrong if we get to here
    return text.encode(ENC_ASCII, 'ignore')
  #end def NarrowText

  def MaybeNarrowPath(self, text):
    """ Paths may be allowed to stay wide """
    if self._widefiles:
      return text
    return self.NarrowText(text, None)
  #end def MaybeNarrowPath

  def WidenText(self, text, encoding):
    """ Widen a piece of arbitrary text """
    if type(text) != types.StringType:
      return text

    # Try the passed in preference
    if encoding:
      try:
        result = unicode(text, encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference
    if self._user:
      try:
        return unicode(text, self._user)
      except UnicodeError:
        pass
      except LookupError:
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return unicode(text, self._learned[0])
      except:
        # Deliberate best-effort: any failure disqualifies this default
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return unicode(text, ENC_UTF8)
    except UnicodeError:
      pass

    # Getting here means it wasn't UTF-8 and we had no working default.
    # We really don't have anything "right" we can do anymore.
    output.Warn('Unrecognized encoding in text: %s' % text)
    if not self._user:
      output.Warn('You may need to set a default_encoding in your '
                  'configuration file.')
    return text.decode(ENC_ASCII, 'ignore')
  #end def WidenText
#end class Encoder
encoder = Encoder()
class Output:
  """
  Exposes logging functionality, and tracks how many errors
  we have thus output.
  Logging levels should be used as thus:
  Fatal -- extremely sparingly
  Error -- config errors, entire blocks of user 'intention' lost
  Warn -- individual URLs lost
  Log(,0) -- Un-suppressable text that's not an error
  Log(,1) -- touched files, major actions
  Log(,2) -- parsing notes, filtered or duplicated URLs
  Log(,3) -- each accepted URL
  """

  def __init__(self):
    self.num_errors = 0      # Count of errors
    self.num_warns = 0       # Count of warnings
    self._errors_shown = {}  # Shown errors
    self._warns_shown = {}   # Shown warnings
    self._verbose = 0        # Level of verbosity
  #end def __init__

  def Log(self, text, level):
    """ Output a blurb of diagnostic text, if the verbose level allows it """
    if text:
      text = encoder.NarrowText(text, None)
      if self._verbose >= level:
        print text
  #end def Log

  def Warn(self, text):
    """ Output and count a warning. Suppress duplicate warnings. """
    if text:
      text = encoder.NarrowText(text, None)
      # Duplicate detection is by MD5 digest of the narrowed message text
      hash = md5.new(text).digest()
      if not self._warns_shown.has_key(hash):
        self._warns_shown[hash] = 1
        print '[WARNING] ' + text
      else:
        self.Log('(suppressed) [WARNING] ' + text, 3)
      self.num_warns = self.num_warns + 1
  #end def Warn

  def Error(self, text):
    """ Output and count an error. Suppress duplicate errors. """
    if text:
      text = encoder.NarrowText(text, None)
      # Duplicate detection is by MD5 digest of the narrowed message text
      hash = md5.new(text).digest()
      if not self._errors_shown.has_key(hash):
        self._errors_shown[hash] = 1
        print '[ERROR] ' + text
      else:
        self.Log('(suppressed) [ERROR] ' + text, 3)
      self.num_errors = self.num_errors + 1
  #end def Error

  def Fatal(self, text):
    """ Output an error and terminate the program. """
    # Always exits, whether or not a message was supplied.
    if text:
      text = encoder.NarrowText(text, None)
      print '[FATAL] ' + text
    else:
      print 'Fatal error.'
    sys.exit(1)
  #end def Fatal

  def SetVerbose(self, level):
    """ Sets the verbose level. """
    try:
      if type(level) != types.IntType:
        level = int(level)
      if (level >= 0) and (level <= 3):
        self._verbose = level
        return
    except ValueError:
      pass
    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
  #end def SetVerbose
#end class Output
# Module-level singleton used by everything in this file for logging.
output = Output()
class URL(object):
  """ URL is a smart structure grouping together the properties we
  care about for a single web reference. """
  # __slots__ keeps per-instance memory low -- a site can have tens of
  # thousands of these.
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'

  def __init__(self):
    self.loc = None        # URL -- in Narrow characters
    self.lastmod = None    # ISO8601 timestamp of last modify
    self.changefreq = None # Text term for update frequency
    self.priority = None   # Float between 0 and 1 (inc)
  #end def __init__

  def __cmp__(self, other):
    # Order URLs purely by their location string (Python 2 comparison hook)
    if self.loc < other.loc:
      return -1
    if self.loc > other.loc:
      return 1
    return 0
  #end def __cmp__

  def TrySetAttribute(self, attribute, value):
    """ Attempt to set the attribute to the value, with a pretty try
    block around it. """
    if attribute == 'loc':
      self.loc = self.Canonicalize(value)
    else:
      # __slots__ makes setattr raise AttributeError on unknown names
      try:
        setattr(self, attribute, value)
      except AttributeError:
        output.Warn('Unknown URL attribute: %s' % attribute)
  #end def TrySetAttribute

  def IsAbsolute(loc):
    """ Decide if the URL is absolute or not """
    # Absolute means it carries both a scheme and a network location.
    if not loc:
      return False
    narrow = encoder.NarrowText(loc, None)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    if (not scheme) or (not netloc):
      return False
    return True
  #end def IsAbsolute
  IsAbsolute = staticmethod(IsAbsolute)

  def Canonicalize(loc):
    """ Do encoding and canonicalization on a URL string """
    if not loc:
      return loc

    # Let the encoder try to narrow it
    narrow = encoder.NarrowText(loc, None)

    # Escape components individually.  'unr' are RFC 3986 unreserved
    # characters, 'sub' the sub-delims; each component additionally keeps
    # the delimiters that are legal for it.
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    unr = '-._~'
    sub = '!$&\'()*+,;='
    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
    path = urllib.quote(path, unr + sub + '%:@/')
    query = urllib.quote(query, unr + sub + '%:@/?')
    frag = urllib.quote(frag, unr + sub + '%:@/?')

    # Try built-in IDNA encoding on the netloc
    try:
      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
      for c in widenetloc:
        if c >= unichr(128):
          netloc = widenetloc.encode(ENC_IDNA)
          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
          break
    except UnicodeError:
      # urlsplit must have failed, based on implementation differences in the
      # library. There is not much we can do here, except ignore it.
      pass
    except LookupError:
      output.Warn('An International Domain Name (IDN) is being used, but this '
                  'version of Python does not have support for IDNA encoding. '
                  ' (IDNA support was introduced in Python 2.3) The encoding '
                  'we have used instead is wrong and will probably not yield '
                  'valid URLs.')
    bad_netloc = False
    if '%' in netloc:
      bad_netloc = True

    # Put it all back together
    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))

    # I let '%' through. Fix any that aren't pre-existing escapes.
    HEXDIG = '0123456789abcdefABCDEF'
    list = narrow.split('%')
    narrow = list[0]
    del list[0]
    for item in list:
      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
        narrow = narrow + '%' + item
      else:
        narrow = narrow + '%25' + item

    # Issue a warning if this is a bad URL
    if bad_netloc:
      output.Warn('Invalid characters in the host or domain portion of a URL: '
                  + narrow)

    return narrow
  #end def Canonicalize
  Canonicalize = staticmethod(Canonicalize)

  def VerifyDate(self, date, metatag):
    """Verify the date format is valid"""
    # Returns True when 'date' matches one of the allowed ISO8601 forms;
    # otherwise warns and returns a false value.
    match = False
    if date:
      date = date.upper()
      for pattern in DATE_PATTERNS:
        match = pattern.match(date)
        if match:
          return True
      if not match:
        output.Warn('The value for %s does not appear to be in ISO8601 '
                    'format on URL: %s' % (metatag, self.loc))
        return False
  #end of VerifyDate

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType

    # Test (and normalize) the ref
    if not self.loc:
      output.Warn('Empty URL')
      return False
    if allow_fragment:
      self.loc = urlparse.urljoin(base_url, self.loc)
    if not self.loc.startswith(base_url):
      output.Warn('Discarded URL for not starting with the base_url: %s' %
                  self.loc)
      self.loc = None
      return False

    # Test the lastmod -- invalid values are dropped, not fatal
    if self.lastmod:
      if not self.VerifyDate(self.lastmod, "lastmod"):
        self.lastmod = None

    # Test the changefreq -- must be one of CHANGEFREQ_PATTERNS
    if self.changefreq:
      match = False
      self.changefreq = self.changefreq.lower()
      for pattern in CHANGEFREQ_PATTERNS:
        if self.changefreq == pattern:
          match = True
          break
      if not match:
        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
                    ': %s' % (self.changefreq, self.loc))
        self.changefreq = None

    # Test the priority -- must parse as a float in [0.0, 1.0]
    if self.priority:
      priority = -1.0
      try:
        priority = float(self.priority)
      except ValueError:
        pass
      if (priority < 0.0) or (priority > 1.0):
        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
                    'on URL: %s' % (self.priority, self.loc))
        self.priority = None

    return True
  #end def Validate

  def MakeHash(self):
    """ Provides a uniform way of hashing URLs """
    # A trailing slash is stripped so 'http://x/a' and 'http://x/a/'
    # hash identically.
    if not self.loc:
      return None
    if self.loc.endswith('/'):
      return md5.new(self.loc[:-1]).digest()
    return md5.new(self.loc).digest()
  #end def MakeHash

  def Log(self, prefix='URL', level=3):
    """ Dump the contents, empty or not, to the log. """
    out = prefix + ':'
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if not value:
        value = ''
      out = out + (' %s=[%s]' % (attribute, value))
    output.Log('%s' % encoder.NarrowText(out, None), level)
  #end def Log

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow and XML-escape every value before emitting it
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class URL
class NewsURL(URL):
  """ NewsURL is a subclass of URL with News-Sitemap specific properties. """
  # Re-lists the base slots plus the news-specific ones so getattr loops
  # over __slots__ see all attributes.
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority', 'publication_date', \
              'keywords', 'stock_tickers'

  def __init__(self):
    URL.__init__(self)
    self.publication_date = None # ISO8601 timestamp of publication date
    self.keywords = None         # Text keywords
    self.stock_tickers = None    # Text stock
  #end def __init__

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this News URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType
    if not URL.Validate(self, base_url, allow_fragment):
      return False
    # An invalid publication_date is dropped, not fatal
    if not URL.VerifyDate(self, self.publication_date, "publication_date"):
      self.publication_date = None
    return True
  #end def Validate

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX

    # printed_news_tag indicates if news-specific metatags are present
    printed_news_tag = False
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow and XML-escape every value before emitting it
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        if attribute in NEWS_SPECIFIC_TAGS:
          # News tags are wrapped in a single <news:news> element, opened
          # lazily on the first news-specific attribute
          if not printed_news_tag:
            printed_news_tag = True
            out = out + NEWS_TAG_XML_PREFIX
          out = out + (' <news:%s>%s</news:%s>\n' % (attribute, value, attribute))
        else:
          out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))

    if printed_news_tag:
      out = out + NEWS_TAG_XML_SUFFIX
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class NewsURL
class Filter:
  """
  A filter on the stream of URLs we find. A filter is, in essence,
  a wildcard applied to the stream. You can think of this as an
  operator that returns a tri-state when given a URL:
  True -- this URL is to be included in the sitemap
  None -- this URL is undecided
  False -- this URL is to be dropped from the sitemap
  """

  def __init__(self, attributes):
    self._wildcard = None # Pattern for wildcard match
    self._regexp = None   # Pattern for regexp match
    self._pass = False    # "Drop" filter vs. "Pass" filter
    if not ValidateAttributes('FILTER', attributes,
                              ('pattern', 'type', 'action')):
      return

    # Check error count on the way in
    num_errors = output.num_errors

    # Fetch the attributes
    pattern = attributes.get('pattern')
    type = attributes.get('type', 'wildcard')
    action = attributes.get('action', 'drop')
    if type:
      type = type.lower()
    if action:
      action = action.lower()

    # Verify the attributes
    if not pattern:
      output.Error('On a filter you must specify a "pattern" to match')
    elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
      output.Error('On a filter you must specify either \'type="wildcard"\' '
                   'or \'type="regexp"\'')
    elif (action != 'pass') and (action != 'drop'):
      output.Error('If you specify a filter action, it must be either '
                   '\'action="pass"\' or \'action="drop"\'')

    # Set the rule
    if action == 'drop':
      self._pass = False
    elif action == 'pass':
      self._pass = True

    if type == 'wildcard':
      self._wildcard = pattern
    elif type == 'regexp':
      try:
        self._regexp = re.compile(pattern)
      except re.error:
        output.Error('Bad regular expression: %s' % pattern)

    # Log the final results iff we didn't add any errors
    if num_errors == output.num_errors:
      output.Log('Filter: %s any URL that matches %s "%s"' %
                 (action, type, pattern), 2)
  #end def __init__

  def Apply(self, url):
    """ Process the URL, as above. """
    if (not url) or (not url.loc):
      return None

    if self._wildcard:
      if fnmatch.fnmatchcase(url.loc, self._wildcard):
        return self._pass
      return None

    if self._regexp:
      if self._regexp.search(url.loc):
        return self._pass
      return None

    # NOTE(review): a Filter whose __init__ failed validation has neither
    # pattern set and would trip this assert if Apply() were called on it.
    assert False # unreachable
  #end def Apply
#end class Filter
class InputURL:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a single URL, manually specified in the config file.
  """

  def __init__(self, attributes):
    self._url = None # The lonely URL
    if not ValidateAttributes('URL', attributes,
                              ('href', 'lastmod', 'changefreq', 'priority')):
      return

    # 'href' in the config maps onto the URL object's 'loc' attribute
    url = URL()
    for attr in attributes.keys():
      if attr == 'href':
        url.TrySetAttribute('loc', attributes[attr])
      else:
        url.TrySetAttribute(attr, attributes[attr])

    if not url.loc:
      output.Error('Url entries must have an href attribute.')
      return

    self._url = url
    output.Log('Input: From URL "%s"' % self._url.loc, 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # The second argument flags this URL as explicitly user-specified
    if self._url:
      consumer(self._url, True)
  #end def ProduceURLs
#end class InputURL
class InputURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a text file with a list of URLs
  """

  def __init__(self, attributes):
    self._path = None     # The file path
    self._encoding = None # Encoding of that file
    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
      return

    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return

    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1

      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue

      # Split the line on space; first column is the location, the rest
      # are key=value attribute pairs
      url = URL()
      cols = line.split(' ')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()
      url.TrySetAttribute('loc', cols[0])

      # Extract attributes from the other columns
      for i in range(1,len(cols)):
        if cols[i]:
          try:
            (attr_name, attr_val) = cols[i].split('=', 1)
            url.TrySetAttribute(attr_name, attr_val)
          except ValueError:
            output.Warn('Line %d: Unable to parse attribute: %s' %
                        (linenum, cols[i]))

      # Pass it on
      consumer(url, False)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputURLList
class InputNewsURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a text file with a list of News URLs and their metadata,
  one URL per line, columns separated by tabs in 'tag_order' order.
  """

  def __init__(self, attributes):
    self._path = None      # The file path
    self._encoding = None  # Encoding of that file
    self._tag_order = []   # Order of URL metadata
    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding', \
                              'tag_order')):
      return

    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    self._tag_order = attributes.get('tag_order')
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')

    # parse tag_order into an array
    # tag_order_ascii created for more readable logging
    tag_order_ascii = []
    if self._tag_order:
      self._tag_order = self._tag_order.split(",")
      for i in range(0, len(self._tag_order)):
        element = self._tag_order[i].strip().lower()
        self._tag_order[i] = element
        tag_order_ascii.append(element.encode('ascii'))
      output.Log('Input: From URLLIST tag order is "%s"' % tag_order_ascii, 0)
    else:
      output.Error('News Urllist configuration file must contain tag_order '
                   'to define Sitemap metatags.')
      # BUGFIX: stop here.  The original fell through with self._tag_order
      # still None, and the validation loops below then raised a TypeError
      # that masked the clean error message above.
      return

    # verify all tag_order inputs are valid
    tag_order_dict = {}
    for tag in self._tag_order:
      tag_order_dict[tag] = ""
    if not ValidateAttributes('URLLIST', tag_order_dict, \
                              NEWS_SITEMAP_TAGS):
      return

    # loc tag must be present
    loc_tag = False
    for tag in self._tag_order:
      if tag == 'loc':
        loc_tag = True
        break
    if not loc_tag:
      output.Error('News Urllist tag_order in configuration file '
                   'does not contain "loc" value: %s' % tag_order_ascii)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return

    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1

      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue

      # Split the line on tabs; column i maps onto self._tag_order[i]
      url = NewsURL()
      cols = line.split('\t')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()

      for i in range(0,len(cols)):
        if cols[i]:
          attr_value = cols[i]
          # Extra columns beyond tag_order are silently ignored
          if i < len(self._tag_order):
            attr_name = self._tag_order[i]
            try:
              url.TrySetAttribute(attr_name, attr_value)
            except ValueError:
              output.Warn('Line %d: Unable to parse attribute: %s' %
                          (linenum, cols[i]))

      # Pass it on
      consumer(url, False)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputNewsURLList
class InputDirectory:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a directory that acts as base for walking the filesystem.
  """
  def __init__(self, attributes, base_url):
    """ Validate the DIRECTORY configuration attributes.

    Args:
      attributes: SAX attribute collection for the DIRECTORY element
      base_url: the site's base URL; the directory URL must live under it
    """
    self._path = None # The directory
    self._url = None # The URL equivalent
    self._default_file = None
    self._remove_empty_directories = False
    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
                              'default_file', 'remove_empty_directories')):
      return
    # Prep the path -- it MUST end in a sep
    path = attributes.get('path')
    if not path:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    path = encoder.MaybeNarrowPath(path)
    if not path.endswith(os.sep):
      path = path + os.sep
    if not os.path.isdir(path):
      output.Error('Can not locate directory: %s' % path)
      return
    # Prep the URL -- it MUST end in a sep
    url = attributes.get('url')
    if not url:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    url = URL.Canonicalize(url)
    if not url.endswith('/'):
      url = url + '/'
    if not url.startswith(base_url):
      url = urlparse.urljoin(base_url, url)
      if not url.startswith(base_url):
        output.Error('The directory URL "%s" is not relative to the '
                     'base_url: %s' % (url, base_url))
        return
    # Prep the default file -- it MUST be just a filename
    file = attributes.get('default_file')
    if file:
      file = encoder.MaybeNarrowPath(file)
      if os.sep in file:
        output.Error('The default_file "%s" can not include path information.'
                     % file)
        file = None
    # Prep the remove_empty_directories flag -- default is false.  The
    # string form from the config is converted to a real boolean here.
    remove_empty_directories = attributes.get('remove_empty_directories')
    if remove_empty_directories:
      if (remove_empty_directories == '1') or \
         (remove_empty_directories.lower() == 'true'):
        remove_empty_directories = True
      elif (remove_empty_directories == '0') or \
           (remove_empty_directories.lower() == 'false'):
        remove_empty_directories = False
      # otherwise the user set a non-default value
      else:
        output.Error('Configuration file remove_empty_directories '
                     'value is not recognized.  Value must be true or false.')
        return
    else:
      remove_empty_directories = False
    self._path = path
    self._url = url
    self._default_file = file
    self._remove_empty_directories = remove_empty_directories
    if file:
      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
                 % (path, url, file), 2)
    else:
      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
                 % (path, url), 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._path:
      return
    root_path = self._path
    root_URL = self._url
    root_file = self._default_file
    remove_empty_directories = self._remove_empty_directories

    def HasReadPermissions(path):
      """ Verifies a given path has read permissions. """
      stat_info = os.stat(path)
      mode = stat_info[stat.ST_MODE]
      if mode & stat.S_IREAD:
        return True
      else:
        return None

    def PerFile(dirpath, name):
      """
      Called once per file.
      Note that 'name' will occasionally be None -- for a directory itself
      """
      # Pull a timestamp
      url = URL()
      isdir = False
      try:
        if name:
          path = os.path.join(dirpath, name)
        else:
          path = dirpath
        isdir = os.path.isdir(path)
        # 'mtime' was formerly named 'time', which shadowed the time module.
        mtime = None
        if isdir and root_file:
          # Prefer the timestamp of the directory's default file, if any
          file = os.path.join(path, root_file)
          try:
            mtime = os.stat(file)[stat.ST_MTIME]
          except OSError:
            pass
        if not mtime:
          mtime = os.stat(path)[stat.ST_MTIME]
        url.lastmod = TimestampISO8601(mtime)
      except OSError:
        pass
      except ValueError:
        pass
      # Build a URL
      middle = dirpath[len(root_path):]
      if os.sep != '/':
        middle = middle.replace(os.sep, '/')
      if middle:
        middle = middle + '/'
      if name:
        middle = middle + name
        if isdir:
          middle = middle + '/'
      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
      # Suppress default files.  (All the way down here so we can log it.)
      if name and (root_file == name):
        url.Log(prefix='IGNORED (default file)', level=2)
        return
      # Suppress directories when remove_empty_directories="true"
      try:
        if isdir:
          if HasReadPermissions(path):
            # BUG FIX: 'remove_empty_directories' is already a boolean here
            # (converted from its string form in __init__), so the previous
            # comparison against the string 'true' could never succeed and
            # empty directories were never suppressed.
            if remove_empty_directories and len(os.listdir(path)) == 0:
              output.Log('IGNORED empty directory %s' % str(path), level=1)
              return
          elif path == self._path:
            output.Error('IGNORED configuration file directory input %s due '
                         'to file permissions' % self._path)
          else:
            output.Log('IGNORED files within directory %s due to file '
                       'permissions' % str(path), level=0)
      except OSError:
        pass
      except ValueError:
        pass
      consumer(url, False)
    #end def PerFile

    def PerDirectory(ignore, dirpath, namelist):
      """
      Called once per directory with a list of all the contained files/dirs.
      """
      ignore = ignore # Avoid warnings of an unused parameter
      if not dirpath.startswith(root_path):
        output.Warn('Unable to decide what the root path is for directory: '
                    '%s' % dirpath)
        return
      for name in namelist:
        PerFile(dirpath, name)
    #end def PerDirectory

    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
    PerFile(self._path, None)
    os.path.walk(self._path, PerDirectory, None)
  #end def ProduceURLs
#end class InputDirectory
class InputAccessLog:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles access logs.  It's non-trivial in that we want to
  auto-detect log files in the Common Logfile Format (as used by Apache,
  for instance) and the Extended Log File Format (as used by IIS, for
  instance).
  """
  def __init__(self, attributes):
    # The ELF field indices below default to -1, meaning "column not seen".
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    self._is_elf = False # Extended Log File Format?
    self._is_clf = False # Common Logfile Format?
    self._elf_status = -1 # ELF field: '200'
    self._elf_method = -1 # ELF field: 'HEAD'
    self._elf_uri = -1 # ELF field: '/foo?bar=1'
    self._elf_urifrag1 = -1 # ELF field: '/foo'
    self._elf_urifrag2 = -1 # ELF field: 'bar=1'
    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Accesslog entries must have a "path" attribute.')
  #end def __init__
  def RecognizeELFLine(self, line):
    """ Recognize the Fields directive that heads an ELF file """
    if not line.startswith('#Fields:'):
      return False
    fields = line.split(' ')
    del fields[0]
    # Record the column position of each field we care about so later lines
    # can be parsed positionally by GetELFLine.
    for i in range(0, len(fields)):
      field = fields[i].strip()
      if field == 'sc-status':
        self._elf_status = i
      elif field == 'cs-method':
        self._elf_method = i
      elif field == 'cs-uri':
        self._elf_uri = i
      elif field == 'cs-uri-stem':
        self._elf_urifrag1 = i
      elif field == 'cs-uri-query':
        self._elf_urifrag2 = i
    output.Log('Recognized an Extended Log File Format file.', 2)
    return True
  #end def RecognizeELFLine
  def GetELFLine(self, line):
    """ Fetch the requested URL from an ELF line.  Returns None for lines
    that are not successful (status 200) HEAD/GET requests, or that carry
    no usable URI. """
    fields = line.split(' ')
    count = len(fields)
    # Verify status was Ok
    if self._elf_status >= 0:
      if self._elf_status >= count:
        return None
      if not fields[self._elf_status].strip() == '200':
        return None
    # Verify method was HEAD or GET
    if self._elf_method >= 0:
      if self._elf_method >= count:
        return None
      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
        return None
    # Pull the full URL if we can ('-' marks an absent value)
    if self._elf_uri >= 0:
      if self._elf_uri >= count:
        return None
      url = fields[self._elf_uri].strip()
      if url != '-':
        return url
    # Put together a fragmentary URL from the stem and query columns
    if self._elf_urifrag1 >= 0:
      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return None
      urlfrag1 = fields[self._elf_urifrag1].strip()
      urlfrag2 = None
      if self._elf_urifrag2 >= 0:
        urlfrag2 = fields[self._elf_urifrag2]
      if urlfrag1 and (urlfrag1 != '-'):
        if urlfrag2 and (urlfrag2 != '-'):
          urlfrag1 = urlfrag1 + '?' + urlfrag2
        return urlfrag1
    return None
  #end def GetELFLine
  def RecognizeCLFLine(self, line):
    """ Try to tokenize a logfile line according to CLF pattern and see if
    it works. """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    # NOTE: returns the truthy match-derived value rather than a strict bool;
    # callers only use it in boolean context.
    recognize = match and (match.group(1) in ('HEAD', 'GET'))
    if recognize:
      output.Log('Recognized a Common Logfile Format file.', 2)
    return recognize
  #end def RecognizeCLFLine
  def GetCLFLine(self, line):
    """ Fetch the requested URL from a CLF line """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    if match:
      request = match.group(1)
      if request in ('HEAD', 'GET'):
        return match.group(2)
    return None
  #end def GetCLFLine
  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
    if not file:
      return
    # Iterate lines
    for line in file.readlines():
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      # If we don't know the format yet, try them both.  The first line that
      # matches either format locks the detection in for the rest of the file.
      if (not self._is_clf) and (not self._is_elf):
        self._is_elf = self.RecognizeELFLine(line)
        self._is_clf = self.RecognizeCLFLine(line)
      # Digest the line
      match = None
      if self._is_elf:
        match = self.GetELFLine(line)
      elif self._is_clf:
        match = self.GetCLFLine(line)
      if not match:
        continue
      # Pass it on
      url = URL()
      url.TrySetAttribute('loc', match)
      consumer(url, True)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputAccessLog
class FilePathGenerator:
  """
  This class generates filenames in a series, upon request.
  You can request any iteration number at any time, you don't
  have to go in order.
  Example of iterations for '/path/foo.xml.gz':
    0           --> /path/foo.xml.gz
    1           --> /path/foo1.xml.gz
    2           --> /path/foo2.xml.gz
    _index.xml  --> /path/foo_index.xml
  """
  def __init__(self):
    self.is_gzip = False # Is this a GZIP file?
    self._path = None # '/path/'
    self._prefix = None # 'foo'
    self._suffix = None # '.xml.gz'
  #end def __init__

  def Preload(self, path):
    """ Splits a path into directory / prefix / suffix pieces ready for
    recombination.  Returns False (after logging an error) on bad input. """
    path = encoder.MaybeNarrowPath(path)
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False
    # Recognize a supported extension (order matters: '.xml' is tried first)
    matched = None
    for candidate in ('.xml', '.xml.gz', '.gz'):
      if base.endswith(candidate):
        matched = candidate
        break
    if not matched:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False
    self.is_gzip = matched.endswith('.gz')
    # Split the original path into the three recombinable pieces
    cut = len(path) - len(base)
    self._path = path[:cut]
    self._prefix = path[cut:len(path) - len(matched)]
    self._suffix = path[len(path) - len(matched):]
    return True
  #end def Preload

  def GeneratePath(self, instance):
    """ Generates the iterations, as described in the class docstring. """
    stem = self._path + self._prefix
    if type(instance) == types.IntType:
      if instance:
        return '%s%d%s' % (stem, instance, self._suffix)
      return stem + self._suffix
    return stem + instance
  #end def GeneratePath

  def GenerateURL(self, instance, root_url):
    """ Generates iterations, but as a URL instead of a path. """
    stem = root_url + self._prefix
    if type(instance) == types.IntType:
      if instance:
        result = '%s%d%s' % (stem, instance, self._suffix)
      else:
        result = stem + self._suffix
    else:
      result = stem + instance
    return URL.Canonicalize(result)
  #end def GenerateURL

  def GenerateWildURL(self, root_url):
    """ Generates a wildcard URL that should match all our iterations. """
    head = URL.Canonicalize(root_url + self._prefix)
    whole = URL.Canonicalize(head + self._suffix)
    tail = whole[len(head):]
    return head + '*' + tail
  #end def GenerateWildURL
#end class FilePathGenerator
class PerURLStatistics:
  """ Keep track of some simple per-URL statistics, like file extension. """
  def __init__(self):
    self._extensions = {} # Count of extension instances
  #end def __init__

  def _Count(self, key):
    """ Increment the tally for one extension key. """
    # dict.get avoids the double lookup the old has_key/else code did
    self._extensions[key] = self._extensions.get(key, 0) + 1
  #end def _Count

  def Consume(self, url):
    """ Log some stats for the URL.  At the moment, that means extension. """
    if url and url.loc:
      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
      if not path:
        return
      # Recognize directories
      if path.endswith('/'):
        self._Count('/')
        return
      # Strip to a filename
      i = path.rfind('/')
      if i >= 0:
        path = path[i:]
      # Find extension
      i = path.rfind('.')
      if i > 0:
        self._Count(path[i:].lower())
      else:
        self._Count('(no extension)')
  #end def Consume

  def Log(self):
    """ Dump out stats to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      # sorted() instead of mutating a keys() list also avoids shadowing
      # the 'set' builtin, which the old code did
      for ext in sorted(self._extensions.keys()):
        output.Log(' %7d  %s' % (self._extensions[ext], ext), 1)
  #end def Log
class Sitemap(xml.sax.handler.ContentHandler):
"""
This is the big workhorse class that processes your inputs and spits
out sitemap files. It is built as a SAX handler for set up purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
self._in_Site_ever = False # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._sitemap_type = None # Sitemap type (web, mobile or news)
self._suppress = suppress_notify # Suppress notify of servers
#end def __init__
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Clean up the sitemap_type
if all_good:
match = False
# If sitemap_type is not specified, default to web sitemap
if not self._sitemap_type:
self._sitemap_type = 'web'
else:
self._sitemap_type = self._sitemap_type.lower()
for pattern in SITEMAP_TYPES:
if self._sitemap_type == pattern:
match = True
break
if not match:
output.Error('The "sitemap_type" value must be "web", "mobile" '
'or "news": %s' % self._sitemap_type)
all_good = False
output.Log('The Sitemap type is %s Sitemap.' % \
self._sitemap_type.upper(), 0)
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
# Ignore our out output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Determine what Sitemap header to use (News or General)
if self._sitemap_type == 'news':
sitemap_header = NEWS_SITEMAP_HEADER
else:
sitemap_header = GENERAL_SITEMAP_HEADER
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
basename = os.path.basename(filename);
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(sitemap_header)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
# Determine what Sitemap index header to use (News or General)
if self._sitemap_type == 'news':
sitemap_index_header = NEWS_SITEMAP_HEADER
else:
sitemap__index_header = GENERAL_SITEMAP_HEADER
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(sitemap_index_header)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
#end class ExceptionURLOpener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
output.Error('When attempting to access our generated Sitemap at the '
'following URL:\n %s\n we failed to read it. Please '
'verify the store_into path you specified in\n'
' your configuration file is web-accessable. Consult '
'the FAQ for more\n information.' % url)
output.Warn('Proceeding to notify with an unverifyable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 0)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify', 'sitemap_type')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
self._sitemap_type= attributes.get('sitemap_type')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
print type(attributes)
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
if self._sitemap_type == 'news':
self._inputs.append(InputNewsURLList(attributeset))
else:
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#end class Sitemap
def ValidateAttributes(tag, attributes, goodattributes):
  """ Makes sure 'attributes' does not contain any attribute not
  listed in 'goodattributes'.  Logs an error for each unknown attribute
  and returns True only when every attribute is recognized. """
  unknown = [attr for attr in attributes.keys() if attr not in goodattributes]
  for attr in unknown:
    output.Error('Unknown %s attribute: %s' % (tag, attr))
  return not unknown
#end def ValidateAttributes
def ExpandPathAttribute(src, attrib):
  """ Given a dictionary of attributes, return a list of dictionaries
  with all the same attributes except for the one named attrib.
  That one, we treat as a file path and expand into all its possible
  variations. """
  # Do the path expansion.  On any error, just return the source dictionary.
  path = src.get(attrib)
  if not path:
    return [src]
  path = encoder.MaybeNarrowPath(path)
  matches = glob.glob(path)
  if not matches:
    return [src]
  # SAX attribute objects support get()/keys() but not copy(); convert any
  # non-dictionary into a plain dict first.
  if type(src) != types.DictionaryType:
    converted = {}
    for key in src.keys():
      converted[key] = src[key]
    src = converted
  # Create one new dictionary per globbed path
  expanded = []
  for match in matches:
    variant = src.copy()
    variant[attrib] = match
    expanded.append(variant)
  return expanded
#end def ExpandPathAttribute
def OpenFileForRead(path, logtext):
  """ Opens a text file for reading, transparently handling GZip.

  Returns a (frame, file) pair: 'frame' is the raw underlying stream when
  the file is gzipped (None otherwise), and 'file' is the object to read
  from -- both are None when the file could not be opened. """
  if not path:
    return (None, None)
  frame = None
  file = None
  try:
    if path.endswith('.gz'):
      frame = open(path, 'rb')
      file = gzip.GzipFile(fileobj=frame, mode='rt')
    else:
      file = open(path, 'rt')
    if logtext:
      output.Log('Opened %s file: %s' % (logtext, path), 1)
    else:
      output.Log('Opened file: %s' % path, 1)
  except IOError:
    output.Error('Can not open file: %s' % path)
  return (frame, file)
#end def OpenFileForRead
def TimestampISO8601(t):
  """Convert seconds since epoch (1970-01-01) to an ISO 8601 UTC string."""
  utc_tuple = time.gmtime(t)
  return time.strftime('%Y-%m-%dT%H:%M:%SZ', utc_tuple)
#end def TimestampISO8601
def CreateSitemapFromFile(configpath, suppress_notify):
  """ Sets up a new Sitemap object from the specified configuration file.
  Returns the Sitemap, or None when parsing produced any new errors. """
  # Remember the error count on the way in so we can detect new errors
  errors_before = output.num_errors
  # Rev up SAX to parse the config
  sitemap = Sitemap(suppress_notify)
  try:
    output.Log('Reading configuration file: %s' % configpath, 0)
    xml.sax.parse(configpath, sitemap)
  except IOError:
    output.Error('Cannot read configuration file: %s' % configpath)
  except xml.sax._exceptions.SAXParseException as e:
    output.Error('XML error in the config file (line %d, column %d): %s' %
                 (e._linenum, e._colnum, e.getMessage()))
  except xml.sax._exceptions.SAXReaderNotAvailable:
    output.Error('Some installs of Python 2.2 did not include complete support'
                 ' for XML.\n Please try upgrading your version of Python'
                 ' and re-running the script.')
  # If we added any errors, return no sitemap
  if errors_before == output.num_errors:
    return sitemap
  return None
#end def CreateSitemapFromFile
def ProcessCommandFlags(args):
  """
  Parse command line flags per specified usage, pick off key, value pairs
  All flags of type "--key=value" will be processed as __flags[key] = value,
  "--option" will be processed as __flags[option] = option

  Args:
    args: list of command-line argument strings (e.g. sys.argv[1:])
  Returns:
    dict of parsed flags, or None when any argument fails to match.
  """
  flags = {}
  rkeyval = r'--(?P<key>\S*)[=](?P<value>\S*)' # --key=val
  roption = r'--(?P<option>\S*)'               # --key
  rc = re.compile('(' + rkeyval + ')|(' + roption + ')')
  for a in args:
    try:
      rcg = rc.search(a).groupdict()
    except AttributeError:
      # search() returned None: the argument isn't a flag at all
      return None
    # BUG FIX: groupdict() always contains every named group (with None for
    # groups that did not participate in the match), so the old has_key()
    # tests were always true and recorded a bogus {None: None} entry for
    # every flag.  Test the values, not the presence of the keys.
    if rcg['key'] is not None:
      flags[rcg['key']] = rcg['value']
    elif rcg['option'] is not None:
      flags[rcg['option']] = rcg['option']
  return flags
#end def ProcessCommandFlags
#
# __main__
#
if __name__ == '__main__':
  flags = ProcessCommandFlags(sys.argv[1:])
  # Show usage when parsing failed, no config was given, or help was requested
  if not flags or 'config' not in flags or 'help' in flags:
    output.Log(__usage__, 0)
  else:
    suppress_notify = 'testing' in flags
    sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
    if not sitemap:
      output.Log('Configuration file errors -- exiting.', 0)
    else:
      sitemap.Generate()
      output.Log('Number of errors: %d' % output.num_errors, 1)
      output.Log('Number of warnings: %d' % output.num_warns, 1)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Админка
"""
__author__ = 'frozzzen@gmail.com (Alexey Malashin)'
import logging
from engine import settings
from engine import pagegen
from engine import backup
from engine.data import news
from engine.data import cites
from engine.data import pages
from engine.data import feedback
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import memcache
# enable info logging by the app
logging.getLogger().setLevel(logging.INFO)
class AdminHandler(webapp.RequestHandler):
  """Administration page handler."""

  def get(self, category):
    """Render the administration page for the requested section (GET).

    Args:
      self: the request handler instance
      category: the administration section path
    """
    if category == '/':
      category = ''
    category = '/admin' + category
    # Template parameters common to every admin page
    page_template_values = {
      'portal_title': 'Режим администрирования ' + settings.SITETITLE, # title
      'form_action': category # where the form should be submitted
    }
    # Pick the section-specific data to render
    if category == '/admin/news':
      page_template_values['news'] = news.GetNews(False)
    elif category == '/admin/cite':
      page_template_values['cites'] = cites.GetCites(False)
    elif category == '/admin/pages':
      page_template_values['pages'] = pages.GetPages()
    elif category == '/admin/feedback':
      page_template_values['msgs'] = feedback.GetMsgs()
    elif category == '/admin/cache':
      # Optionally flush memcache before showing its statistics
      if self.request.get('flush') == 'True':
        memcache.flush_all()
      page_template_values['cache'] = memcache.get_stats()
    pagegen.generatePage(self, category, page_template_values)

  def post(self, category):
    """Process an administration form submission (POST).

    Args:
      self: the request handler instance
      category: the administration section path
    """
    if category == '/news': # news management
      action = self.request.get('action')
      if action == 'add':
        news.AddNews(self.request.get('news'))
      elif action == 'delete': # remove the checked news items
        news.DelNews(self.request.get('existentnews', allow_multiple=True))
        logging.info('Admin: deleting news!')
    elif category == '/cite': # quote management
      action = self.request.get('action')
      if action == 'add':
        cites.AddCite(self.request.get('citeText'), self.request.get('citeAuthor'))
      elif action == 'delete': # remove the checked quotes
        cites.DelCites(self.request.get('existentcite', allow_multiple=True))
        logging.info('Admin: deleting cites')
    elif category == '/pages': # static page management
      action = self.request.get('action')
      if action == 'add':
        new_page = {'url' : self.request.get('pageURL'),
                    'title' : self.request.get('pageTitle'),
                    'description' : self.request.get('pageDescription'),
                    'content' : self.request.get('pageContent')}
        pages.AddPage(new_page)
      elif action == 'delete': # remove the checked page
        pages.DelPage(self.request.get('existentpage', allow_multiple=True))
        logging.info('Admin: deleting pages')
    elif category == '/feedback': # user feedback messages
      action = self.request.get('action')
      if action == 'delete': # remove the checked messages
        feedback.DelMsg(self.request.get('existentmsg', allow_multiple=True))
        logging.info('Admin: deleting messages')
    # Redirect back to the same admin section
    category = '/admin' + category
    self.redirect(category)
# URL routing: /admin/backup goes to the backup module's handler; any other
# /admin path is captured (the group becomes the section suffix) by
# AdminHandler.
application = webapp.WSGIApplication([('/admin/backup', backup.BackupHandler), ('/admin(.*)', AdminHandler)], debug = settings.DEBUG)
def main():
    """App Engine CGI entry point: run the WSGI application."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
class ProxyError(atom.http_interface.Error):
  """Raised when the proxy server returns a non-200 reply to CONNECT."""
  pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
  """Makes HTTP requests over httplib connections.

  Attributes:
    debug: bool When True, debuglevel is set on the httplib connection so
        the raw request and response are echoed.
    headers: dict Default HTTP headers sent with every request. Per-call
        headers passed to request() override these on conflict.
  """

  def __init__(self, headers=None):
    self.debug = False
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      The httplib response object from connection.getresponse().

    Raises:
      atom.http_interface.UnparsableUrlObject: if url is neither a string
          nor an atom.url.Url.
      atom.http_interface.ContentLengthRequired: if data is set, no
          Content-Length header was provided, and data is not a string so
          its length cannot be computed here.
    """
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    # Per-call headers take precedence over the client defaults.
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    connection = self._prepare_connection(url, all_headers)
    if self.debug:
      connection.debuglevel = 1
    # skip_host=True because the Host header is written explicitly below.
    connection.putrequest(operation, self._get_access_url(url),
        skip_host=True)
    connection.putheader('Host', url.host)
    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    if (url.protocol == 'https' and int(url.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      # Rewrite the buffered Host header line in place; _buffer is an
      # httplib implementation detail, hence the hasattr/isinstance guard.
      header_line = 'Host: %s:443' % url.host
      replacement_header_line = 'Host: %s' % url.host
      try:
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      if isinstance(data, types.StringTypes):
        all_headers['Content-Length'] = len(data)
      else:
        raise atom.http_interface.ContentLengthRequired('Unable to calculate '
            'the length of the data parameter. Specify a value for '
            'Content-Length')
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
    # Send the HTTP headers.
    for header_name in all_headers:
      connection.putheader(header_name, all_headers[header_name])
    connection.endheaders()
    # If there is data, send it in the request.
    if data:
      if isinstance(data, list):
        for data_part in data:
          _send_data_part(data_part, connection)
      else:
        _send_data_part(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()

  def _prepare_connection(self, url, headers):
    # Opens an httplib connection matching the URL's scheme and port.
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    if url.protocol == 'https':
      if not url.port:
        return httplib.HTTPSConnection(url.host)
      return httplib.HTTPSConnection(url.host, int(url.port))
    else:
      if not url.port:
        return httplib.HTTPConnection(url.host)
      return httplib.HTTPConnection(url.host, int(url.port))

  def _get_access_url(self, url):
    # The full URL string is used as the request target.
    return url.to_string()
class ProxiedHttpClient(HttpClient):
  """Performs an HTTP request through a proxy.

  The proxy settings are obtained from environment variables. The URL of the
  proxy server is assumed to be stored in the environment variables
  'https_proxy' and 'http_proxy' respectively. If the proxy server requires
  a Basic Auth authorization header, the username and password are expected to
  be in the 'proxy-username' or 'proxy_username' variable and the
  'proxy-password' or 'proxy_password' variable.

  After connecting to the proxy server, the request is completed as in
  HttpClient.request.
  """

  def _prepare_connection(self, url, headers):
    # Builds a connection that goes through the configured proxy: an SSL
    # tunnel (CONNECT) for https, a plain relay for http. Falls back to a
    # direct connection when no proxy variable is set.
    proxy_auth = _get_proxy_auth()
    if url.protocol == 'https':
      # destination is https
      proxy = os.environ.get('https_proxy')
      if proxy:
        # Set any proxy auth headers
        if proxy_auth:
          proxy_auth = 'Proxy-authorization: %s' % proxy_auth
        # Construct the proxy connect command.
        port = url.port
        if not port:
          port = '443'
        proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
        # Set the user agent to send to the proxy
        if headers and 'User-Agent' in headers:
          user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
        else:
          user_agent = ''
        # proxy_auth (when set) already ends with '\r\n'; the trailing
        # '\r\n' here is the blank line terminating the CONNECT request.
        proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        # Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        p_sock.sendall(proxy_pieces)
        response = ''
        # Wait for the full response.
        while response.find("\r\n\r\n") == -1:
          response += p_sock.recv(8192)
        # Second whitespace-separated token of the reply is the status code.
        p_status = response.split()[1]
        if p_status != str(200):
          raise ProxyError('Error status=%s' % str(p_status))
        # Trivial setup for ssl socket.
        # NOTE(review): socket.ssl and httplib.FakeSocket are pre-Python-2.6
        # APIs, and the tunneled server certificate is not validated here.
        ssl = socket.ssl(p_sock, None, None)
        fake_sock = httplib.FakeSocket(p_sock, ssl)
        # Initalize httplib and replace with the proxy socket.
        connection = httplib.HTTPConnection(proxy_url.host)
        connection.sock=fake_sock
        return connection
      else:
        # The request was HTTPS, but there was no https_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
    else:
      proxy = os.environ.get('http_proxy')
      if proxy:
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        if proxy_auth:
          headers['Proxy-Authorization'] = proxy_auth.strip()
        # Plain HTTP: the full request is simply sent to the proxy.
        return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
      else:
        # The request was HTTP, but there was no http_proxy set.
        return HttpClient._prepare_connection(self, url, headers)

  def _get_access_url(self, url):
    # Proxied requests must use the absolute URL as the request target.
    return url.to_string()
def _get_proxy_auth():
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
def _send_data_part(data, connection):
  """Sends one piece of a request body over an open connection.

  Args:
    data: str/unicode, a file-like object with a read() method, or any
        object convertible with str(). Strings are sent as-is; file-like
        objects are streamed in 100,000-byte chunks; anything else is
        stringified and sent in one call.
    connection: httplib connection whose headers have already been sent.
  """
  if isinstance(data, types.StringTypes):
    connection.send(data)
    return
  # Check to see if data is a file-like object that has a read method.
  elif hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while 1:
      binarydata = data.read(100000)
      if binarydata == '': break
      connection.send(binarydata)
    return
  else:
    # The data object was not a file.
    # Try to convert to a string and send the data.
    connection.send(str(data))
    return
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a TokenStore class which is designed to manage
auth tokens required for different services.
Each token is valid for a set of scopes which is the start of a URL. An HTTP
client will use a token store to find a valid Authorization header to send
in requests to the specified URL. If the HTTP client determines that a token
has expired or been revoked, it can remove the token from the store so that
it will not be used in future requests.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
SCOPE_ALL = 'http'
class TokenStore(object):
  """Manages Authorization tokens which will be sent in HTTP headers."""

  def __init__(self, scoped_tokens=None):
    # Maps scope (str URL prefix, see add_token) -> token object.
    self._tokens = scoped_tokens or {}

  def add_token(self, token):
    """Adds a new token to the store (replaces tokens with the same scope).

    Args:
      token: A subclass of http_interface.GenericToken. The token object is
          responsible for adding the Authorization header to the HTTP request.
          The scopes defined in the token are used to determine if the token
          is valid for a requested scope when find_token is called.

    Returns:
      True if the token was added, False if the token was not added because
      no scopes were provided.
    """
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    # A single token may be registered under several scopes; each scope is
    # stored under its string form.
    for scope in token.scopes:
      self._tokens[str(scope)] = token
    return True

  def find_token(self, url):
    """Selects an Authorization header token which can be used for the URL.

    Args:
      url: str or atom.url.Url or a list containing the same.
          The URL which is going to be requested. All
          tokens are examined to see if any scopes match the beginning
          of the URL. The first match found is returned.

    Returns:
      The token object which should execute the HTTP request. If there was
      no token for the url (the url did not begin with any of the token
      scopes available), then the atom.http_interface.GenericToken will be
      returned because the GenericToken calls through to the http client
      without adding an Authorization header.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    # NOTE(review): after parse_url, url is an atom.url.Url while the dict
    # keys are strings (see add_token); this exact-match branch only fires
    # if Url's __hash__/__eq__ are string-compatible -- confirm.
    if url in self._tokens:
      token = self._tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # The stored token no longer covers its own scope; drop it.
        del self._tokens[url]
    for scope, token in self._tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the token_store.

    This method is used when a token is determined to be invalid. If the
    token was found by find_token, but resulted in a 401 or 403 error stating
    that the token was invalid, then the token should be removed to prevent
    future use.

    Returns:
      True if a token was found and then removed from the token
      store. False if the token was not in the TokenStore.
    """
    token_found = False
    scopes_to_delete = []
    # Collect keys first, delete afterwards: removing entries while
    # iterating the dict would break the iteration.
    for scope, stored_token in self._tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del self._tokens[scope]
    return token_found

  def remove_all_tokens(self):
    # Drops every stored token.
    self._tokens = {}
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Atom elements.
Module objective: provide data classes for Atom constructs. These classes hide
the XML-ness of Atom and provide a set of native Python classes to interact
with.
Conversions to and from XML should only be necessary when the Atom classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert Atom classes to and from strings.
For more information on the Atom data model, see RFC 4287
(http://www.ietf.org/rfc/rfc4287.txt)
AtomBase: A foundation class on which Atom classes are built. It
handles the parsing of attributes and children which are common to all
Atom classes. By default, the AtomBase class translates all XML child
nodes into ExtensionElements.
ExtensionElement: Atom allows Atom objects to contain XML which is not part
of the Atom specification, these are called extension elements. If a
classes parser encounters an unexpected XML construct, it is translated
into an ExtensionElement instance. ExtensionElement is designed to fully
capture the information in the XML. Child nodes in an XML extension are
turned into ExtensionElements as well.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in Atom entities.
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_NAMESPACE = 'http://purl.org/atom/app#'
APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
# This encoding is used for converting strings before translating the XML
# into an object.
XML_STRING_ENCODING = 'utf-8'
# The desired string encoding for object members. set or monkey-patch to
# unicode if you want object members to be Python unicode strings, instead of
# encoded strings
MEMBER_STRING_ENCODING = 'utf-8'
#MEMBER_STRING_ENCODING = unicode
def CreateClassFromXMLString(target_class, xml_string, string_encoding=None):
  """Parses xml_string and builds an instance of target_class from it.

  Args:
    target_class: class The class to instantiate; must declare _tag and
        _namespace class variables matching the XML root element.
    xml_string: str A string of well-formed XML.
    string_encoding: str (optional) Encoding used to serialize a unicode
        xml_string before parsing; defaults to the module-level
        XML_STRING_ENCODING when None.

  Returns:
    A populated target_class instance, or None when the root element's tag
    or namespace does not match the target class.
  """
  encoding = string_encoding or XML_STRING_ENCODING
  if isinstance(xml_string, unicode) and encoding:
    xml_string = xml_string.encode(encoding)
  return _CreateClassFromElementTree(
      target_class, ElementTree.fromstring(xml_string))
def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None):
"""Instantiates the class and populates members according to the tree.
Note: Only use this function with classes that have _namespace and _tag
class members.
Args:
target_class: class The class which will be instantiated and populated
with the contents of the XML.
tree: ElementTree An element tree whose contents will be converted into
members of the new target_class instance.
namespace: str (optional) The namespace which the XML tree's root node must
match. If omitted, the namespace defaults to the _namespace of the
target class.
tag: str (optional) The tag which the XML tree's root node must match. If
omitted, the tag defaults to the _tag class member of the target
class.
Returns:
An instance of the target class - or None if the tag and namespace of
the XML tree's root node did not match the desired namespace and tag.
"""
if namespace is None:
namespace = target_class._namespace
if tag is None:
tag = target_class._tag
if tree.tag == '{%s}%s' % (namespace, tag):
target = target_class()
target._HarvestElementTree(tree)
return target
else:
return None
class ExtensionContainer(object):
  """Holds XML content that is not part of a class's known members.

  Unrecognized child elements are collected in extension_elements,
  unrecognized attributes in extension_attributes, and the node's text
  content in text. String members are stored encoded with the module-level
  MEMBER_STRING_ENCODING unless that is set to the unicode type itself.
  """

  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  # Three methods to create an object from an ElementTree
  def _HarvestElementTree(self, tree):
    # Fill in the instance members from the contents of the XML tree.
    for child in tree:
      self._ConvertElementTreeToMember(child)
    for attribute, value in tree.attrib.iteritems():
      self._ConvertElementAttributeToMember(attribute, value)
    # Encode the text string according to the desired encoding type. (UTF-8)
    if tree.text:
      if MEMBER_STRING_ENCODING is unicode:
        self.text = tree.text
      else:
        self.text = tree.text.encode(MEMBER_STRING_ENCODING)

  def _ConvertElementTreeToMember(self, child_tree, current_class=None):
    # Base behavior: every child element becomes an ExtensionElement.
    # AtomBase overrides this to map known tags onto typed members first.
    self.extension_elements.append(_ExtensionElementFromElementTree(
        child_tree))

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Encode the attribute value's string with the desired type Default UTF-8
    if value:
      if MEMBER_STRING_ENCODING is unicode:
        self.extension_attributes[attribute] = value
      else:
        self.extension_attributes[attribute] = value.encode(
            MEMBER_STRING_ENCODING)

  # One method to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    # Mirrors _HarvestElementTree: re-emits stored extensions and text,
    # decoding members back to unicode unless they already are.
    for child in self.extension_elements:
      child._BecomeChildElement(tree)
    for attribute, value in self.extension_attributes.iteritems():
      if value:
        if isinstance(value, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[attribute] = value
        else:
          # Decode the value from the desired encoding (default UTF-8).
          tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING)
    if self.text:
      if isinstance(self.text, unicode) or MEMBER_STRING_ENCODING is unicode:
        tree.text = self.text
      else:
        tree.text = self.text.decode(MEMBER_STRING_ENCODING)

  def FindExtensions(self, tag=None, namespace=None):
    """Searches extension elements for child nodes with the desired name.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all extensions in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    results = []
    # Four filter combinations: both, tag only, namespace only, neither.
    if tag and namespace:
      for element in self.extension_elements:
        if element.tag == tag and element.namespace == namespace:
          results.append(element)
    elif tag and not namespace:
      for element in self.extension_elements:
        if element.tag == tag:
          results.append(element)
    elif namespace and not tag:
      for element in self.extension_elements:
        if element.namespace == namespace:
          results.append(element)
    else:
      for element in self.extension_elements:
        results.append(element)
    return results
class AtomBase(ExtensionContainer):
  """Foundation for Atom classes; maps XML constructs to typed members.

  Subclasses declare _tag and _namespace and extend _children/_attributes
  so known XML children and attributes become typed members; everything
  else falls through to ExtensionContainer.
  """

  # Maps '{namespace}tag' -> (member_name, member_class or [member_class]).
  _children = {}
  # Maps XML attribute name -> member name.
  _attributes = {}

  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  def _ConvertElementTreeToMember(self, child_tree):
    # Find the element's tag in this class's list of child members
    if self.__class__._children.has_key(child_tree.tag):
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(_CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
            _CreateClassFromElementTree(member_class, child_tree))
    else:
      # Unknown tag: store it as a generic ExtensionElement.
      ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Find the attribute in this class's list of attributes.
    if self.__class__._attributes.has_key(attribute):
      # Find the member of this class which corresponds to the XML attribute
      # (lookup in current_class._attributes) and set this member to the
      # desired value (using self.__dict__).
      if value:
        # Encode the string to capture non-ascii characters (default UTF-8)
        if MEMBER_STRING_ENCODING is unicode:
          setattr(self, self.__class__._attributes[attribute], value)
        else:
          setattr(self, self.__class__._attributes[attribute],
              value.encode(MEMBER_STRING_ENCODING))
    else:
      # Unknown attribute: keep it in extension_attributes.
      ExtensionContainer._ConvertElementAttributeToMember(self, attribute,
          value)

  # Three methods to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
        self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        if isinstance(member, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[xml_attribute] = member
        else:
          tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING)
    # Lastly, call the ExtensionContainers's _AddMembersToElementTree to
    # convert any extension attributes.
    ExtensionContainer._AddMembersToElementTree(self, tree)

  def _BecomeChildElement(self, tree):
    """Appends this object to tree as a new child element.

    Note: Only for use with classes that have a _tag and _namespace class
    member. It is in AtomBase so that it can be inherited but it should
    not be called on instances of AtomBase.
    """
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = '{%s}%s' % (self.__class__._namespace,
        self.__class__._tag)
    self._AddMembersToElementTree(new_child)

  def _ToElementTree(self):
    """Converts this object to a standalone ElementTree element.

    Note, this method is designed to be used only with classes that have a
    _tag and _namespace. It is placed in AtomBase for inheritance but should
    not be called on this class.
    """
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
        self.__class__._tag))
    self._AddMembersToElementTree(new_tree)
    return new_tree

  def ToString(self, string_encoding='UTF-8'):
    """Converts the Atom object to a string containing XML."""
    return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding)

  def __str__(self):
    return self.ToString()
class Name(AtomBase):
  """The atom:name element."""
  _tag = 'name'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates a Name element.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def NameFromString(xml_string):
  """Deserializes an atom:name element from a string of XML."""
  parsed = CreateClassFromXMLString(Name, xml_string)
  return parsed
class Email(AtomBase):
  """The atom:email element."""
  _tag = 'email'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates an Email element.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def EmailFromString(xml_string):
  """Deserializes an atom:email element from a string of XML."""
  parsed = CreateClassFromXMLString(Email, xml_string)
  return parsed
class Uri(AtomBase):
  """The atom:uri element."""
  _tag = 'uri'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates a Uri element.

    Args:
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UriFromString(xml_string):
  """Deserializes an atom:uri element from a string of XML."""
  parsed = CreateClassFromXMLString(Uri, xml_string)
  return parsed
class Person(AtomBase):
  """Foundation for atom:author and atom:contributor.

  Carries a name, email address, and web page URI for an author or
  contributor to an Atom feed. Not meant to be instantiated directly;
  use Author or Contributor.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name)
  _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email)
  _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri)

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Initializes the members shared by author and contributor.

    Args:
      name: Name The person's name.
      email: Email The person's email address.
      uri: Uri The URI of the person's webpage.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text content of this element's XML node.
    """
    self.text = text
    self.name = name
    self.email = email
    self.uri = uri
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Author(Person):
  """The atom:author element; a required child of an Atom feed."""
  _tag = 'author'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Creates an Author element.

    Args:
      name: Name The author's name.
      email: Email The author's email address.
      uri: Uri The URI of the author's webpage.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text content of this element.
    """
    self.text = text
    self.name = name
    self.email = email
    self.uri = uri
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def AuthorFromString(xml_string):
  """Deserializes an atom:author element from a string of XML."""
  parsed = CreateClassFromXMLString(Author, xml_string)
  return parsed
class Contributor(Person):
  """The atom:contributor element."""
  _tag = 'contributor'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Creates a Contributor element.

    Args:
      name: Name The contributor's name.
      email: Email The contributor's email address.
      uri: Uri The URI of the contributor's webpage.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
      text: str The text content of this element.
    """
    self.text = text
    self.name = name
    self.email = email
    self.uri = uri
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ContributorFromString(xml_string):
  """Deserializes an atom:contributor element from a string of XML."""
  parsed = CreateClassFromXMLString(Contributor, xml_string)
  return parsed
class Link(AtomBase):
  """The atom:link element."""
  _tag = 'link'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'
  _attributes['type'] = 'type'
  _attributes['title'] = 'title'
  _attributes['length'] = 'length'
  _attributes['hreflang'] = 'hreflang'

  def __init__(self, href=None, rel=None, link_type=None, hreflang=None,
      title=None, length=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates a Link element.

    Args:
      href: str The href attribute of the link.
      rel: str The link relation.
      link_type: str Stored as self.type (the XML 'type' attribute).
      hreflang: str The language for the href.
      title: str The link's title.
      length: str The length of the href's destination.
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.length = length
    self.title = title
    self.hreflang = hreflang
    self.type = link_type
    self.rel = rel
    self.href = href
def LinkFromString(xml_string):
  """Deserializes an atom:link element from a string of XML."""
  parsed = CreateClassFromXMLString(Link, xml_string)
  return parsed
class Generator(AtomBase):
  """The atom:generator element."""
  _tag = 'generator'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['uri'] = 'uri'
  _attributes['version'] = 'version'

  def __init__(self, uri=None, version=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Creates a Generator element.

    Args:
      uri: str The generator's 'uri' attribute.
      version: str The generator's 'version' attribute.
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.version = version
    self.uri = uri
def GeneratorFromString(xml_string):
  """Deserializes an atom:generator element from a string of XML."""
  parsed = CreateClassFromXMLString(Generator, xml_string)
  return parsed
class Text(AtomBase):
  """Foundation for atom:title, summary, and the other Text constructs.

  This class should never be instantiated directly.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['type'] = 'type'

  def __init__(self, text_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes the members shared by Text constructs.

    Args:
      text_type: str Stored as self.type (the XML 'type' attribute).
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = text_type
class Title(Text):
  """The atom:title element."""
  _tag = 'title'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, title_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates a Title element.

    Args:
      title_type: str Stored as self.type (the XML 'type' attribute).
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = title_type
def TitleFromString(xml_string):
  """Deserializes an atom:title element from a string of XML."""
  parsed = CreateClassFromXMLString(Title, xml_string)
  return parsed
class Subtitle(Text):
  """The atom:subtitle element."""
  _tag = 'subtitle'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, subtitle_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates a Subtitle element.

    Args:
      subtitle_type: str Stored as self.type (the XML 'type' attribute).
      text: str The text content of this element.
      extension_elements: list ExtensionElement children of this element.
      extension_attributes: dict Additional XML attribute/value pairs.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = subtitle_type
def SubtitleFromString(xml_string):
  """Deserializes an atom:subtitle XML string into a Subtitle instance."""
  return CreateClassFromXMLString(Subtitle, xml_string)
class Rights(Text):
  """The atom:rights element."""
  _tag = 'rights'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, rights_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates an atom:rights element.

    Args:
      rights_type: string The value for this element's type attribute.
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    Text.__init__(self, text_type=rights_type, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
def RightsFromString(xml_string):
  """Deserializes an atom:rights XML string into a Rights instance."""
  return CreateClassFromXMLString(Rights, xml_string)
class Summary(Text):
  """The atom:summary element."""
  _tag = 'summary'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, summary_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates an atom:summary element.

    Args:
      summary_type: string The value for this element's type attribute.
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    Text.__init__(self, text_type=summary_type, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
def SummaryFromString(xml_string):
  """Deserializes an atom:summary XML string into a Summary instance."""
  return CreateClassFromXMLString(Summary, xml_string)
class Content(Text):
  """The atom:content element."""
  _tag = 'content'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  _attributes['src'] = 'src'

  def __init__(self, content_type=None, src=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Creates an atom:content element.

    Args:
      content_type: string The value for this element's type attribute.
      src: string The value for this element's src attribute.
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    Text.__init__(self, text_type=content_type, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
    self.src = src
def ContentFromString(xml_string):
  """Deserializes an atom:content XML string into a Content instance."""
  return CreateClassFromXMLString(Content, xml_string)
class Category(AtomBase):
  """The atom:category element"""
  _tag = 'category'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  # XML attributes exposed as Python attributes of the same name.
  _attributes['term'] = 'term'
  _attributes['scheme'] = 'scheme'
  _attributes['label'] = 'label'

  def __init__(self, term=None, scheme=None, label=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Constructor for Category

    Args:
      term: str The value of this element's term XML attribute.
      scheme: str The value of this element's scheme XML attribute.
      label: str The value of this element's label XML attribute.
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.term = term
    self.scheme = scheme
    self.label = label
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def CategoryFromString(xml_string):
  """Deserializes an atom:category XML string into a Category instance."""
  return CreateClassFromXMLString(Category, xml_string)
class Id(AtomBase):
  """The atom:id element."""
  _tag = 'id'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor for Id

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def IdFromString(xml_string):
  """Deserializes an atom:id XML string into an Id instance."""
  return CreateClassFromXMLString(Id, xml_string)
class Icon(AtomBase):
  """The atom:icon element."""
  _tag = 'icon'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor for Icon

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def IconFromString(xml_string):
  """Deserializes an atom:icon XML string into an Icon instance."""
  return CreateClassFromXMLString(Icon, xml_string)
class Logo(AtomBase):
  """The atom:logo element."""
  _tag = 'logo'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor for Logo

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def LogoFromString(xml_string):
  """Deserializes an atom:logo XML string into a Logo instance."""
  return CreateClassFromXMLString(Logo, xml_string)
class Draft(AtomBase):
  """The app:draft element which indicates if this entry should be public."""
  _tag = 'draft'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor for app:draft

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def DraftFromString(xml_string):
  """Deserializes an app:draft XML string into a Draft instance."""
  return CreateClassFromXMLString(Draft, xml_string)
class Control(AtomBase):
  """The app:control element indicating restrictions on publication.

  The APP control element may contain a draft element indicating whether or
  not this entry should be publicly available.
  """
  _tag = 'control'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft)

  def __init__(self, draft=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor for app:control

    Args:
      draft: Draft (optional) The app:draft child element.
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.draft = draft
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def ControlFromString(xml_string):
  """Deserializes an app:control XML string into a Control instance."""
  return CreateClassFromXMLString(Control, xml_string)
class Date(AtomBase):
  """A parent class for atom:updated, published, etc."""
  #TODO Add text to and from time conversion methods to allow users to set
  # the contents of a Date to a python DateTime object.
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Constructor shared by date-valued elements.

    Args:
      text: str The text data in this element (the date-time string).
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Updated(Date):
  """The atom:updated element."""
  _tag = 'updated'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates an atom:updated element.

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    Date.__init__(self, text=text, extension_elements=extension_elements,
        extension_attributes=extension_attributes)
def UpdatedFromString(xml_string):
  """Deserializes an atom:updated XML string into an Updated instance."""
  return CreateClassFromXMLString(Updated, xml_string)
class Published(Date):
  """The atom:published element."""
  _tag = 'published'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Creates an atom:published element.

    Args:
      text: str The text data in this element
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    Date.__init__(self, text=text, extension_elements=extension_elements,
        extension_attributes=extension_attributes)
def PublishedFromString(xml_string):
  """Deserializes an atom:published XML string into a Published instance."""
  return CreateClassFromXMLString(Published, xml_string)
class LinkFinder(object):
  """An "interface" providing methods to find link elements.

  Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links.

  This class is used as a mixin in Atom entries and feeds; the host class
  must provide a 'link' attribute holding a list of link objects.
  """

  def _FindFirstWithRel(self, rel_value):
    # Shared scan used by all the public accessors below.
    for candidate in self.link:
      if candidate.rel == rel_value:
        return candidate
    return None

  def GetSelfLink(self):
    """Returns the first link with rel 'self', or None if there is none."""
    return self._FindFirstWithRel('self')

  def GetEditLink(self):
    """Returns the first link with rel 'edit', or None if there is none."""
    return self._FindFirstWithRel('edit')

  def GetNextLink(self):
    """Returns the first link with rel 'next', or None if there is none."""
    return self._FindFirstWithRel('next')

  def GetLicenseLink(self):
    """Returns the first link with rel 'license', or None if there is none."""
    return self._FindFirstWithRel('license')

  def GetAlternateLink(self):
    """Returns the first link with rel 'alternate', or None if there is none."""
    return self._FindFirstWithRel('alternate')
class FeedEntryParent(AtomBase, LinkFinder):
  """A super class for atom:feed and entry, contains shared attributes"""
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  # List-valued children (e.g. [Author]) may appear multiple times.
  _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author])
  _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category])
  _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor])
  _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id)
  _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link])
  _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights)
  _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title)
  _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated)

  def __init__(self, author=None, category=None, contributor=None,
      atom_id=None, link=None, rights=None, title=None, updated=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructor for the common members of atom:feed and atom:entry.

    Args:
      author: list (optional) A list of Author instances.
      category: list (optional) A list of Category instances.
      contributor: list (optional) A list of Contributor instances.
      atom_id: Id (optional) The element's atom:id (stored as self.id).
      link: list (optional) A list of Link instances.
      rights: Rights (optional)
      title: Title (optional)
      updated: Updated (optional)
      text: str (optional) The text data in this element.
      extension_elements: list A list of ExtensionElement instances
      extension_attributes: dict A dictionary of attribute value string pairs
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.id = atom_id
    self.link = link or []
    self.rights = rights
    self.title = title
    self.updated = updated
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Source(FeedEntryParent):
  """The atom:source element"""
  _tag = 'source'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator)
  _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon)
  _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo)
  _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Constructor for Source

    Args:
      author: list (optional) A list of Author instances.
      category: list (optional) A list of Category instances.
      contributor: list (optional) A list of Contributor instances.
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The element's Id (stored as self.id).
      link: list (optional) A list of Link instances.
      logo: Logo (optional)
      rights: Rights (optional)
      subtitle: Subtitle (optional)
      title: Title (optional)
      updated: Updated (optional)
      text: str (optional) The text contents of the element's XML text node.
      extension_elements: list (optional) A list of ExtensionElement
          instances which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which
          are the values for additional XML attributes of this element.
    """
    # All members shared with atom:entry are handled by the parent class.
    FeedEntryParent.__init__(self, author=author, category=category,
        contributor=contributor, atom_id=atom_id, link=link, rights=rights,
        title=title, updated=updated, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
    self.generator = generator
    self.icon = icon
    self.logo = logo
    self.subtitle = subtitle
def SourceFromString(xml_string):
  """Deserializes an atom:source XML string into a Source instance."""
  return CreateClassFromXMLString(Source, xml_string)
class Entry(FeedEntryParent):
  """The atom:entry element"""
  _tag = 'entry'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content)
  _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published)
  _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source)
  _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary)
  _children['{%s}control' % APP_NAMESPACE] = ('control', Control)

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor for atom:entry

    Args:
      author: list A list of Author instances.
      category: list A list of Category instances.
      content: Content The entry's Content.
      contributor: list A list of Contributor instances.
      atom_id: Id The entry's Id element (stored as self.id).
      link: list A list of Link instances.
      published: Published The entry's Published element.
      rights: Rights The entry's Rights element.
      source: Source The entry's source element.
      summary: Summary The entry's summary element.
      control: Control The entry's app:control element which can be used to
          mark an entry as a draft which should not be publicly viewable.
      title: Title The entry's title element.
      updated: Updated The entry's updated element.
      text: str The text contents of the element's XML text node.
      extension_elements: list A list of ExtensionElement instances which
          are children of this element.
      extension_attributes: dict A dictionary of strings which are the
          values for additional XML attributes of this element.
    """
    # Members shared with atom:feed are handled by the parent class.
    FeedEntryParent.__init__(self, author=author, category=category,
        contributor=contributor, atom_id=atom_id, link=link, rights=rights,
        title=title, updated=updated, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
    self.content = content
    self.published = published
    self.source = source
    self.summary = summary
    self.control = control
def EntryFromString(xml_string):
  """Deserializes an atom:entry XML string into an Entry instance."""
  return CreateClassFromXMLString(Entry, xml_string)
class Feed(Source):
  """The atom:feed element"""
  _tag = 'feed'
  _namespace = ATOM_NAMESPACE
  _children = Source._children.copy()
  _attributes = Source._attributes.copy()
  _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry])

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructor for Feed

    Args:
      author: list (optional) A list of Author instances.
      category: list (optional) A list of Category instances.
      contributor: list (optional) A list of Contributor instances.
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The feed's Id (stored as self.id).
      link: list (optional) A list of Link instances.
      logo: Logo (optional)
      rights: Rights (optional)
      subtitle: Subtitle (optional)
      title: Title (optional)
      updated: Updated (optional)
      entry: list (optional) A list of the Entry instances contained in
          the feed.
      text: str (optional) The text contents of the element's XML text node.
      extension_elements: list (optional) A list of ExtensionElement
          instances which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which
          are the values for additional XML attributes of this element.
    """
    # Everything except the entry list is shared with atom:source.
    Source.__init__(self, author=author, category=category,
        contributor=contributor, generator=generator, icon=icon,
        atom_id=atom_id, link=link, logo=logo, rights=rights,
        subtitle=subtitle, title=title, updated=updated, text=text,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes)
    self.entry = entry or []
def FeedFromString(xml_string):
  """Deserializes an atom:feed XML string into a Feed instance."""
  return CreateClassFromXMLString(Feed, xml_string)
class ExtensionElement(object):
  """Represents extra XML elements contained in Atom classes."""

  def __init__(self, tag, namespace=None, attributes=None,
      children=None, text=None):
    """Constructor for ExtensionElement

    Args:
      namespace: string (optional) The XML namespace for this element.
      tag: string (optional) The tag (without the namespace qualifier) for
          this element. To reconstruct the full qualified name of the
          element, combine this tag with the namespace.
      attributes: dict (optional) The attribute value string pairs for the
          XML attributes of this element.
      children: list (optional) A list of ExtensionElements which represent
          the XML child nodes of this element.
      text: string (optional) The character data contained by this element.
    """
    self.namespace = namespace
    self.tag = tag
    self.attributes = attributes or {}
    self.children = children or []
    self.text = text

  def ToString(self):
    """Serializes this element (and its children) to an XML string."""
    element_tree = self._TransferToElementTree(ElementTree.Element(''))
    return ElementTree.tostring(element_tree, encoding="UTF-8")

  def _TransferToElementTree(self, element_tree):
    """Copies this element's tag, attributes, children, and text onto the
    given ElementTree element. Returns None if this element has no tag."""
    if self.tag is None:
      return None
    if self.namespace is not None:
      element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
    else:
      element_tree.tag = self.tag
    for key, value in self.attributes.iteritems():
      element_tree.attrib[key] = value
    for child in self.children:
      child._BecomeChildElement(element_tree)
    element_tree.text = self.text
    return element_tree

  def _BecomeChildElement(self, element_tree):
    """Converts this object into an etree element and adds it as a child node.

    Adds self to the ElementTree. This method is required to avoid verbose XML
    which constantly redefines the namespace.

    Args:
      element_tree: ElementTree._Element The element to which this object's
          XML will be added.
    """
    new_element = ElementTree.Element('')
    element_tree.append(new_element)
    self._TransferToElementTree(new_element)

  def FindChildren(self, tag=None, namespace=None):
    """Searches child nodes for objects with the desired tag/namespace.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all children in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    # A falsy tag or namespace (None or '') matches anything, which keeps
    # the original four-branch behavior in a single pass.
    results = []
    for element in self.children:
      if tag and element.tag != tag:
        continue
      if namespace and element.namespace != namespace:
        continue
      results.append(element)
    return results
def ExtensionElementFromString(xml_string):
  """Parses an XML string into an ExtensionElement tree."""
  element_tree = ElementTree.fromstring(xml_string)
  return _ExtensionElementFromElementTree(element_tree)
def _ExtensionElementFromElementTree(element_tree):
  """Recursively converts an ElementTree element into an ExtensionElement."""
  qualified_tag = element_tree.tag
  # ElementTree uses Clark notation '{namespace}tag'; split it back apart.
  namespace_part, closing_brace, local_part = qualified_tag.partition('}')
  if closing_brace:
    namespace = namespace_part[1:]
    tag = local_part
  else:
    namespace = None
    tag = qualified_tag
  extension = ExtensionElement(namespace=namespace, tag=tag)
  for key, value in element_tree.attrib.iteritems():
    extension.attributes[key] = value
  for child in element_tree:
    extension.children.append(_ExtensionElementFromElementTree(child))
  extension.text = element_tree.text
  return extension
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/1.2.2'
class Error(Exception):
  """Base exception for the atom HTTP interface module."""
  pass
class UnparsableUrlObject(Error):
  """Error subclass for URL objects/strings that cannot be parsed."""
  pass
class ContentLengthRequired(Error):
  """Error subclass signaling that a Content-Length was required."""
  pass
class HttpResponse(object):
  """Mirrors the httplib.HTTPResponse interface for HttpClient responses."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Constructor for an HttpResponse object.

    HttpResponse represents the server's response to an HTTP request from
    the client. The HttpClient.request method returns a httplib.HTTPResponse
    object and this HttpResponse class is designed to mirror the interface
    exposed by httplib.HTTPResponse.

    Args:
      body: A file like object, with a read() method. The body could also
          be a string, and the constructor will wrap it so that
          HttpResponse.read(self) will return the full string.
      status: The HTTP status code as an int. Example: 200, 201, 404.
      reason: The HTTP status message which follows the code. Example:
          OK, Created, Not Found
      headers: A dictionary containing the HTTP headers in the server's
          response. A common header in the response is Content-Length.
    """
    # Test against None rather than truthiness so an empty-string body is
    # wrapped too; previously read() on an empty body raised AttributeError.
    if body is not None:
      if hasattr(body, 'read'):
        self._body = body
      else:
        self._body = StringIO.StringIO(body)
    else:
      self._body = None
    if status is not None:
      self.status = int(status)
    else:
      self.status = None
    self.reason = reason
    self._headers = headers or {}

  def getheader(self, name, default=None):
    """Returns the value of the named response header, or default."""
    if name in self._headers:
      return self._headers[name]
    else:
      return default

  def read(self, amt=None):
    """Reads the body: all of it when amt is None, else up to amt bytes.

    Matches httplib semantics: read(0) returns an empty read rather than
    the whole body (the previous truthiness test conflated 0 with None).
    """
    if amt is None:
      return self._body.read()
    else:
      return self._body.read(amt)
class GenericHttpClient(object):
  """Wraps an HTTP client object and merges in persistent default headers."""
  # When True, HTTP debug information should be emitted.
  debug = False

  def __init__(self, http_client, headers=None):
    """
    Args:
      http_client: An object which provides a request method to make an HTTP
          request. The request method in GenericHttpClient performs a
          call-through to the contained HTTP client object.
      headers: A dictionary containing HTTP headers which should be included
          in every HTTP request. Common persistent headers include
          'User-Agent'.
    """
    self.http_client = http_client
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Delegates to the wrapped client, layering per-call headers over the
    persistent defaults."""
    merged_headers = dict(self.headers)
    if headers:
      merged_headers.update(headers)
    return self.http_client.request(operation, url, data=data,
        headers=merged_headers)

  def get(self, url, headers=None):
    """Performs an HTTP GET on url."""
    return self.request('GET', url, headers=headers)

  def post(self, url, data, headers=None):
    """Performs an HTTP POST of data to url."""
    return self.request('POST', url, data=data, headers=headers)

  def put(self, url, data, headers=None):
    """Performs an HTTP PUT of data to url."""
    return self.request('PUT', url, data=data, headers=headers)

  def delete(self, url, headers=None):
    """Performs an HTTP DELETE on url."""
    return self.request('DELETE', url, headers=headers)
class GenericToken(object):
  """Represents an Authorization token to be added to HTTP requests.

  Some Authorization headers include calculated fields (digital signatures
  for example) which are based on the parameters of the HTTP request, so
  the token is responsible for signing the request and adding the
  Authorization header. This base implementation adds nothing.
  """

  def perform_request(self, http_client, operation, url, data=None,
      headers=None):
    """Issues the request unchanged; no Authorization header is set."""
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.

    Since the generic token doesn't add an auth header, it is not valid
    for any scope.
    """
    return False
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
  """Creates a Url object which corresponds to the URL string.

  This method can accept partial URLs, but it will leave missing
  members of the Url unset.

  Args:
    url_string: str The (possibly partial) URL to parse.

  Returns:
    A Url instance populated from whichever components were present.
  """
  parts = urlparse.urlparse(url_string)
  url = Url()
  if parts[0]:
    url.protocol = parts[0]
  if parts[1]:
    # The netloc may carry an explicit port, e.g. 'host:8080'.
    host_parts = parts[1].split(':')
    if host_parts[0]:
      url.host = host_parts[0]
    if len(host_parts) > 1:
      url.port = host_parts[1]
  if parts[2]:
    url.path = parts[2]
  if parts[4]:
    # parts[4] is the query string; decode it into the params dict.
    param_pairs = parts[4].split('&')
    for pair in param_pairs:
      # Split on the first '=' only so values containing '=' are not
      # truncated (previously 'a=b=c' lost everything after 'b').
      pair_parts = pair.split('=', 1)
      if len(pair_parts) > 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = (
            urllib.unquote_plus(pair_parts[1]))
      elif len(pair_parts) == 1:
        url.params[urllib.unquote_plus(pair_parts[0])] = None
  return url
class Url(object):
  """Represents a URL and implements comparison logic.

  URL strings which are not identical can still be equivalent, so this object
  provides a better interface for comparing and manipulating URLs than
  strings. URL parameters are represented as a dictionary of strings, and
  defaults are used for the protocol (http) and port (80) if not provided.
  """
  def __init__(self, protocol=None, host=None, port=None, path=None,
      params=None):
    # NOTE(review): parse_url stores port as the string from the netloc,
    # while DEFAULT_PORT is an int — confirm callers normalize if needed.
    self.protocol = protocol
    self.host = host
    self.port = port
    self.path = path
    self.params = params or {}
  def to_string(self):
    """Reassembles the URL into a string via urlparse.urlunparse."""
    url_parts = ['', '', '', '', '', '']
    if self.protocol:
      url_parts[0] = self.protocol
    if self.host:
      if self.port:
        # netloc is 'host:port' when a port is set.
        url_parts[1] = ':'.join((self.host, str(self.port)))
      else:
        url_parts[1] = self.host
    if self.path:
      url_parts[2] = self.path
    if self.params:
      # Index 4 is the query component in the urlunparse tuple.
      url_parts[4] = self.get_param_string()
    return urlparse.urlunparse(url_parts)
  def get_param_string(self):
    """Returns the params dict encoded as a quoted 'k=v&k2=v2' string."""
    param_pairs = []
    for key, value in self.params.iteritems():
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)
  def get_request_uri(self):
    """Returns the path with the parameters escaped and appended."""
    param_string = self.get_param_string()
    if param_string:
      return '?'.join([self.path, param_string])
    else:
      return self.path
  def __cmp__(self, other):
    """Orders Urls component-by-component, substituting the defaults
    (http, port 80) when one side leaves protocol or port unset.

    Non-Url operands are compared by string form.
    """
    if not isinstance(other, Url):
      return cmp(self.to_string(), str(other))
    difference = 0
    # Compare the protocol
    if self.protocol and other.protocol:
      difference = cmp(self.protocol, other.protocol)
    elif self.protocol and not other.protocol:
      difference = cmp(self.protocol, DEFAULT_PROTOCOL)
    elif not self.protocol and other.protocol:
      difference = cmp(DEFAULT_PROTOCOL, other.protocol)
    if difference != 0:
      return difference
    # Compare the host
    difference = cmp(self.host, other.host)
    if difference != 0:
      return difference
    # Compare the port
    if self.port and other.port:
      difference = cmp(self.port, other.port)
    elif self.port and not other.port:
      difference = cmp(self.port, DEFAULT_PORT)
    elif not self.port and other.port:
      difference = cmp(DEFAULT_PORT, other.port)
    if difference != 0:
      return difference
    # Compare the path
    difference = cmp(self.path, other.path)
    if difference != 0:
      return difference
    # Compare the parameters
    return cmp(self.params, other.params)
  def __str__(self):
    return self.to_string()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
class AtomService(object):
"""Performs Atom Publishing Protocol CRUD operations.
The AtomService contains methods to perform HTTP CRUD operations.
"""
# Default values for members
port = 80
ssl = False
# Set the current_token to force the AtomService to use this token
# instead of searching for an appropriate token in the token_store.
current_token = None
auto_store_tokens = True
auto_set_current_token = True
def _get_override_token(self):
  """Getter backing the override_token property; returns current_token."""
  return self.current_token
def _set_override_token(self, token):
  """Setter backing the override_token property; stores current_token."""
  self.current_token = token
override_token = property(_get_override_token, _set_override_token)
def __init__(self, server=None, additional_headers=None,
application_name='', http_client=None, token_store=None):
"""Creates a new AtomService client.
Args:
server: string (optional) The start of a URL for the server
to which all operations should be directed. Example:
'www.google.com'
additional_headers: dict (optional) Any additional HTTP headers which
should be included with CRUD operations.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for a specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
self.http_client = http_client or atom.http.ProxiedHttpClient()
self.token_store = token_store or atom.token_store.TokenStore()
self.server = server
self.additional_headers = additional_headers or {}
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
application_name,)
# If debug is True, the HTTPConnection will display debug information
self._set_debug(False)
def _get_debug(self):
return self.http_client.debug
def _set_debug(self, value):
self.http_client.debug = value
debug = property(_get_debug, _set_debug,
doc='If True, HTTP debug information is printed.')
def use_basic_auth(self, username, password, scopes=None):
if username is not None and password is not None:
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
base_64_string = base64.encodestring('%s:%s' % (username, password))
token = BasicAuthToken('Basic %s' % base_64_string.strip(),
scopes=[atom.token_store.SCOPE_ALL])
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
return self.token_store.add_token(token)
return True
return False
def UseBasicAuth(self, username, password, for_proxy=False):
"""Sets an Authenticaiton: Basic HTTP header containing plaintext.
Deprecated, use use_basic_auth instead.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext.
Args:
username: str
password: str
"""
self.use_basic_auth(username, password)
def request(self, operation, url, data=None, headers=None,
url_params=None):
if isinstance(url, str):
if not url.startswith('http') and self.ssl:
url = atom.url.parse_url('https://%s%s' % (self.server, url))
elif not url.startswith('http'):
url = atom.url.parse_url('http://%s%s' % (self.server, url))
else:
url = atom.url.parse_url(url)
if url_params:
for name, value in url_params.iteritems():
url.params[name] = value
all_headers = self.additional_headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
content_length = CalculateDataLength(data)
if content_length:
all_headers['Content-Length'] = str(content_length)
# Find an Authorization token for this URL if one is available.
if self.override_token:
auth_token = self.override_token
else:
auth_token = self.token_store.find_token(url)
return auth_token.perform_request(self.http_client, operation, url,
data=data, headers=all_headers)
# CRUD operations
def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
"""Query the APP server with the given URI
The uri is the portion of the URI after the server value
(server example: 'www.google.com').
Example use:
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dicty (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse The server's response to the GET request.
"""
return self.request('GET', uri, data=None, headers=extra_headers,
url_params=url_params)
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Insert data into an APP server at the given URI.
Args:
data: string, ElementTree._Element, or something with a __str__ method
The XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the POST request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('POST', uri, data=data, headers=extra_headers,
url_params=url_params)
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the PUT request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('PUT', uri, data=data, headers=extra_headers,
url_params=url_params)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the DELETE request.
"""
return self.request('DELETE', uri, data=None, headers=extra_headers,
url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
  """Carries a pre-built Basic Auth header and applies it to requests."""

  def __init__(self, auth_header, scopes=None):
    """Creates a token used to add Basic Auth headers to HTTP requests.

    Args:
      auth_header: str The value for the Authorization header.
      scopes: list of str or atom.url.Url specifying the beginnings of URLs
          for which this token can be used. For example, if scopes contains
          'http://example.com/foo', then this token can be used for a request
          to 'http://example.com/foo/bar' but it cannot be used for a request
          to 'http://example.com/baz'
    """
    self.auth_header = auth_header
    self.scopes = scopes or []

  def perform_request(self, http_client, operation, url, data=None,
      headers=None):
    """Sets the Authorization header to the basic auth string.

    Any existing Authorization header in `headers` is overwritten, then the
    request is delegated to http_client.request.
    """
    if headers is None:
      headers = {'Authorization':self.auth_header}
    else:
      headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)

  def __str__(self):
    # The string form of the token is the raw Authorization header value.
    return self.auth_header

  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.

    A scope equal to atom.token_store.SCOPE_ALL matches any URL. Otherwise
    a scope matches when it equals the URL, or when host matches and the
    scope's path is empty or is a prefix of the URL's path. The order of
    the elif branches below is significant: a scope with a path never
    matches a URL without one (the `continue` case).
    """
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        return True
      elif scope.host == url.host and scope.path and not url.path:
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        return True
    return False
def PrepareConnection(service, full_uri):
  """Opens a connection to the server based on the full URI.

  This method is deprecated, instead use atom.http.HttpClient.request.

  Examines the target URI and the proxy settings, which are set as
  environment variables, to open a connection with the server. This
  connection is used to make an HTTP request.

  Args:
    service: atom.AtomService or a subclass. It must have a server string
        which represents the server host to which the request should be made.
        It may also have a dictionary of additional_headers to send in the
        HTTP request.
    full_uri: str Which is the target relative (lacks protocol and host) or
        absolute URL to be opened. Example:
        'https://www.google.com/accounts/ClientLogin' or
        'base/feeds/snippets' where the server is set to www.google.com.

  Returns:
    A tuple containing the httplib.HTTPConnection and the full_uri for the
    request.

  Raises:
    Exception: If the HTTPS proxy refuses the CONNECT request with a
        non-200 status.
  """
  deprecation('calling deprecated function PrepareConnection')
  (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
  if ssl:
    # Destination is https; tunnel through the proxy with CONNECT if one
    # is configured in the environment.
    proxy = os.environ.get('https_proxy')
    if proxy:
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                                   proxy_password))
        proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
            user_auth.strip()))
      else:
        proxy_authorization = ''
      proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
      user_agent = 'User-Agent: %s\r\n' % (
          service.additional_headers['User-Agent'])
      proxy_pieces = (proxy_connect + proxy_authorization + user_agent
                      + '\r\n')
      # Now connect, very simple recv and error checking.
      p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      p_sock.connect((p_server, p_port))
      p_sock.sendall(proxy_pieces)
      response = ''
      # Wait for the full response.
      while response.find("\r\n\r\n") == -1:
        response += p_sock.recv(8192)
      p_status = response.split()[1]
      if p_status != str(200):
        # Bug fix: the original statement was
        #   raise 'Error status=',str(p_status)
        # which raises a bare string (a long-invalid string exception) and
        # discards the status value. Raise a real exception instead.
        raise Exception('Error status=%s' % str(p_status))
      # Trivial setup for ssl socket (socket.ssl / httplib.FakeSocket are
      # Python 2 APIs).
      ssl = socket.ssl(p_sock, None, None)
      fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
      connection = httplib.HTTPConnection(server)
      connection.sock = fake_sock
      full_uri = partial_uri
    else:
      connection = httplib.HTTPSConnection(server, port)
      full_uri = partial_uri
  else:
    # Destination is plain http.
    proxy = os.environ.get('http_proxy')
    if proxy:
      # NOTE(review): this passes service.server (a string) where the https
      # branch passes service; ProcessUrl tolerates either — confirm intended.
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        UseBasicAuth(service, proxy_username, proxy_password, True)
      connection = httplib.HTTPConnection(p_server, p_port)
      if not full_uri.startswith("http://"):
        if full_uri.startswith("/"):
          full_uri = "http://%s%s" % (service.server, full_uri)
        else:
          full_uri = "http://%s/%s" % (service.server, full_uri)
    else:
      connection = httplib.HTTPConnection(server, port)
      full_uri = partial_uri
  return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
  """Sets an Authentication: Basic HTTP header containing plaintext.

  Deprecated, use AtomService.use_basic_auth instead.

  The username and password are base64 encoded and stored in the service's
  additional_headers dictionary so that the header is sent with every
  subsequent request. Note that the credentials travel in plaintext.

  Args:
    service: atom.AtomService or a subclass which has an
        additional_headers dict as a member.
    username: str
    password: str
    for_proxy: bool If True, set Proxy-Authorization instead of
        Authorization.
  """
  deprecation('calling deprecated function UseBasicAuth')
  credentials = base64.encodestring('%s:%s' % (username, password)).strip()
  if for_proxy:
    target_header = 'Proxy-Authorization'
  else:
    target_header = 'Authorization'
  service.additional_headers[target_header] = 'Basic %s' % (credentials,)
def ProcessUrl(service, url, for_proxy=False):
  """Derives (server, port, ssl, request_uri) from a URL and a service.

  If the URL has no host, the host (and optionally ssl/port) fall back to
  the service's members. This method is deprecated, use atom.url.parse_url
  instead.

  Returns:
    Tuple of (server, port, ssl, request_uri).
  """
  if not isinstance(url, atom.url.Url):
    url = atom.url.parse_url(url)

  server = url.host
  ssl = False
  port = 80

  if not server:
    # No host in the URL: fall back to the service's settings. `service`
    # may itself be a bare host string.
    server = getattr(service, 'server', service)
    if not url.protocol and hasattr(service, 'ssl'):
      ssl = service.ssl
    if hasattr(service, 'port'):
      port = service.port
  else:
    # The URL names a host; derive ssl and port from the URL itself.
    if url.protocol == 'https':
      ssl = True
    elif url.protocol == 'http':
      ssl = False
    if url.port:
      port = int(url.port)
    elif port == 80 and ssl:
      port = 443

  return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
  """Converts a dictionary of URL arguments into 'name=value' strings.

  This function is deprecated, use atom.url.Url instead.

  Args:
    url_parameters: dict of key-value pairs to convert, e.g.
        {'dry-run': 'true', 'foo': 'bar'} becomes
        ['dry-run=true', 'foo=bar']. May be None.
    escape_params: bool If True, keys and values are passed through
        urllib.quote_plus; if False they are only str()-converted.

  Returns:
    A list of 'name=value' strings ready to be joined with '&' into a URL
    query string.
  """
  # Pick the transformation applied to both names and values.
  transform = urllib.quote_plus if escape_params else str
  pairs = []
  for name, value in (url_parameters or {}).items():
    pairs.append('%s=%s' % (transform(name), transform(value)))
  return pairs
def BuildUri(uri, url_params=None, escape_params=True):
  """Appends escaped URL parameters to a URI string.

  This function is deprecated, use atom.url.Url instead.

  Args:
    uri: string The start of the desired URI; it may already contain URL
        parameters. Examples: '/base/feeds/snippets',
        '/base/feeds/snippets?bq=digital+camera'
    url_params: dict (optional) Additional URL parameters, e.g.
        {'max-results': '250'} becomes &max-results=250.
    escape_params: boolean (optional) If false, the calling code has already
        ensured that the query will form a valid URL; if true, keys and
        values are URL-escaped.

  Returns:
    string The uri with the escaped URL parameters appended.
  """
  parameter_list = DictionaryToParamList(url_params, escape_params)
  if not parameter_list:
    return uri
  query = '&'.join(parameter_list)
  if uri.find('?') != -1:
    # The uri already has URL parameters; append with '&'.
    return '%s&%s' % (uri, query)
  # No parameters yet; introduce them with '?'.
  return '%s?%s' % (uri, query)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

  This method is deprecated, use atom.http.HttpClient.request instead.

  Usage example, perform an HTTP GET on http://www.google.com/:
    import atom.service
    client = atom.service.AtomService()
    http_response = client.Get('http://www.google.com/')
  or you could set the client.server to 'www.google.com' and use the
  following:
    client.server = 'www.google.com'
    http_response = client.Get('/')

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one
        of 'GET', 'POST', 'PUT', or 'DELETE'
    data: ElementTree, filestream, list of parts, or other object which can
        be converted to a string. Should be set to None when performing a
        GET or PUT. If data is a file-like object which can be read, this
        method will read a chunk of 100K bytes at a time and send them. If
        the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent in
        the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.

  Returns:
    The httplib.HTTPResponse from connection.getresponse().
  """
  deprecation('call to deprecated function HttpRequest')
  full_uri = BuildUri(uri, url_params, escape_params)
  (connection, full_uri) = PrepareConnection(service, full_uri)
  if extra_headers is None:
    extra_headers = {}
  # Turn on debug mode if the debug member is set.
  if service.debug:
    connection.debuglevel = 1
  connection.putrequest(operation, full_uri)
  # If the list of headers does not include a Content-Length, attempt to
  # calculate it based on the data object.
  if (data and not service.additional_headers.has_key('Content-Length') and
      not extra_headers.has_key('Content-Length')):
    content_length = CalculateDataLength(data)
    if content_length:
      extra_headers['Content-Length'] = str(content_length)
  if content_type:
    extra_headers['Content-Type'] = content_type
  # Send the HTTP headers: first the service-wide ones, then the
  # per-request extras (sent after, so duplicates reach the wire twice
  # rather than being merged).
  if isinstance(service.additional_headers, dict):
    for header in service.additional_headers:
      connection.putheader(header, service.additional_headers[header])
  if isinstance(extra_headers, dict):
    for header in extra_headers:
      connection.putheader(header, extra_headers[header])
  connection.endheaders()
  # If there is data, send it in the request.
  if data:
    if isinstance(data, list):
      for data_part in data:
        __SendDataPart(data_part, connection)
    else:
      __SendDataPart(data, connection)
  # Return the HTTP Response from the server.
  return connection.getresponse()
def __SendDataPart(data, connection):
  """Sends one piece of the request body over the given connection.

  This method is deprecated, use atom.http._send_data_part.

  Strings are sent directly; ElementTree elements are serialized first;
  file-like objects are streamed in 100K chunks; anything else is sent as
  str(data).
  """
  # Bug fix: the original called the undefined name 'deprecated', raising a
  # NameError on every invocation; the module-level helper is 'deprecation'.
  deprecation('call to deprecated function __SendDataPart')
  if isinstance(data, str):
    # TODO add handling for unicode.
    connection.send(data)
    return
  elif ElementTree.iselement(data):
    connection.send(ElementTree.tostring(data))
    return
  # Check to see if data is a file-like object that has a read method.
  elif hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while 1:
      binarydata = data.read(100000)
      if binarydata == '': break
      connection.send(binarydata)
    return
  else:
    # The data object was not a file.
    # Try to convert to a string and send the data.
    connection.send(str(data))
    return
def CalculateDataLength(data):
  """Attempts to determine the length of the data to send.

  A length is only reported for plain strings and ElementTree elements;
  lists (multipart bodies) and file-like objects return None so the caller
  skips the Content-Length header.

  Args:
    data: object If this is not a string or ElementTree element this
        function will return None.

  Returns:
    int length in characters, or None when the length cannot be computed.
  """
  if isinstance(data, str):
    return len(data)
  if isinstance(data, list):
    # A list of parts; total size is not computed up front.
    return None
  if ElementTree.iselement(data):
    return len(ElementTree.tostring(data))
  if hasattr(data, 'read'):
    # If this is a file-like object, don't try to guess the length.
    return None
  return len(str(data))
def deprecation(message):
  """Issues a DeprecationWarning attributed to the deprecated call site."""
  # stacklevel=2 points the warning at the caller of the deprecated
  # function rather than at this helper.
  warnings.warn(message, category=DeprecationWarning, stacklevel=2)
# ---- file boundary: a new module (shebang + license header) begins below ----
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
class ProxyError(atom.http_interface.Error):
  """Raised when the HTTP proxy refuses or fails a tunnelled request."""
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
  """Makes HTTP requests directly to the target server using httplib."""

  def __init__(self, headers=None):
    # headers: dict of HTTP headers to send with every request made
    # through this client.
    self.debug = False
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      data: filestream, list of parts, or other object which can be
          converted to a string. Should be set to None when performing a GET
          or DELETE. If data is a file-like object which can be read, this
          method will read a chunk of 100K bytes at a time and send them. If
          the data is a list of parts to be sent, each part will be
          evaluated and sent.
      headers: dict of strings. HTTP headers which should be sent in the
          request, merged over this client's persistent headers.

    Returns:
      The httplib.HTTPResponse from connection.getresponse().

    Raises:
      atom.http_interface.UnparsableUrlObject: url is neither a string nor
          an atom.url.Url.
      atom.http_interface.ContentLengthRequired: data is set, no
          Content-Length header was given, and data is not a plain string.
    """
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    connection = self._prepare_connection(url, all_headers)
    if self.debug:
      connection.debuglevel = 1
    # skip_host=True so the Host header can be set explicitly below.
    connection.putrequest(operation, self._get_access_url(url),
        skip_host=True)
    connection.putheader('Host', url.host)
    # Overcome a bug in Python 2.4 and 2.5
    # httplib.HTTPConnection.putrequest adding
    # HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting the error message
    # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
    # The fix patches the raw header line inside the connection's private
    # _buffer list, so it is guarded by hasattr/isinstance checks.
    if (url.protocol == 'https' and int(url.port or 443) == 443 and
        hasattr(connection, '_buffer') and
        isinstance(connection._buffer, list)):
      header_line = 'Host: %s:443' % url.host
      replacement_header_line = 'Host: %s' % url.host
      try:
        connection._buffer[connection._buffer.index(header_line)] = (
            replacement_header_line)
      except ValueError:  # header_line missing from connection._buffer
        pass
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      if isinstance(data, types.StringTypes):
        all_headers['Content-Length'] = len(data)
      else:
        raise atom.http_interface.ContentLengthRequired('Unable to calculate '
            'the length of the data parameter. Specify a value for '
            'Content-Length')
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
    # Send the HTTP headers.
    for header_name in all_headers:
      connection.putheader(header_name, all_headers[header_name])
    connection.endheaders()
    # If there is data, send it in the request.
    if data:
      if isinstance(data, list):
        for data_part in data:
          _send_data_part(data_part, connection)
      else:
        _send_data_part(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()

  def _prepare_connection(self, url, headers):
    # Builds an HTTP or HTTPS connection from the url's protocol and port.
    # The headers argument is unused here; the ProxiedHttpClient subclass
    # consults it.
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringTypes):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    if url.protocol == 'https':
      if not url.port:
        return httplib.HTTPSConnection(url.host)
      return httplib.HTTPSConnection(url.host, int(url.port))
    else:
      if not url.port:
        return httplib.HTTPConnection(url.host)
      return httplib.HTTPConnection(url.host, int(url.port))

  def _get_access_url(self, url):
    # The URL placed in the request line; the full string form of the url.
    return url.to_string()
class ProxiedHttpClient(HttpClient):
  """Performs an HTTP request through a proxy.

  The proxy settings are obtained from environment variables. The URL of the
  proxy server is assumed to be stored in the environment variables
  'https_proxy' and 'http_proxy' respectively. If the proxy server requires
  a Basic Auth authorization header, the username and password are expected
  to be in the 'proxy-username' or 'proxy_username' variable and the
  'proxy-password' or 'proxy_password' variable.

  After connecting to the proxy server, the request is completed as in
  HttpClient.request.
  """
  def _prepare_connection(self, url, headers):
    # Returns a connection to the destination host. For https destinations
    # a CONNECT tunnel is opened through the proxy by hand; for http the
    # request simply goes to the proxy host. Falls back to a direct
    # connection (HttpClient) when no proxy variable is set.
    proxy_auth = _get_proxy_auth()
    if url.protocol == 'https':
      # destination is https
      proxy = os.environ.get('https_proxy')
      if proxy:
        # Set any proxy auth headers
        if proxy_auth:
          proxy_auth = 'Proxy-authorization: %s' % proxy_auth
        # Construct the proxy connect command.
        port = url.port
        if not port:
          port = '443'
        proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
        # Set the user agent to send to the proxy
        if headers and 'User-Agent' in headers:
          user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
        else:
          user_agent = ''
        proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        # Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        p_sock.sendall(proxy_pieces)
        response = ''
        # Wait for the full response.
        while response.find("\r\n\r\n") == -1:
          response += p_sock.recv(8192)
        p_status = response.split()[1]
        if p_status != str(200):
          raise ProxyError('Error status=%s' % str(p_status))
        # Trivial setup for ssl socket (socket.ssl and httplib.FakeSocket
        # are Python 2 APIs).
        ssl = socket.ssl(p_sock, None, None)
        fake_sock = httplib.FakeSocket(p_sock, ssl)
        # Initalize httplib and replace with the proxy socket.
        connection = httplib.HTTPConnection(proxy_url.host)
        connection.sock=fake_sock
        return connection
      else:
        # The request was HTTPS, but there was no https_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
    else:
      proxy = os.environ.get('http_proxy')
      if proxy:
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        if proxy_auth:
          headers['Proxy-Authorization'] = proxy_auth.strip()
        return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
      else:
        # The request was HTTP, but there was no http_proxy set.
        return HttpClient._prepare_connection(self, url, headers)

  def _get_access_url(self, url):
    # When going through a proxy the request line must carry the absolute
    # URL, so return the full string form.
    return url.to_string()
def _get_proxy_auth():
  """Builds the Basic auth credential string for the proxy, if configured.

  Reads the 'proxy-username'/'proxy_username' and
  'proxy-password'/'proxy_password' environment variables.

  Returns:
    A string of the form 'Basic <base64>\\r\\n' suitable for inclusion in a
    proxy CONNECT request, or '' when no proxy username is configured.
  """
  username = os.environ.get('proxy-username')
  if not username:
    username = os.environ.get('proxy_username')
  password = os.environ.get('proxy-password')
  if not password:
    password = os.environ.get('proxy_password')
  if not username:
    return ''
  credentials = base64.encodestring('%s:%s' % (username, password))
  return 'Basic %s\r\n' % (credentials.strip())
def _send_data_part(data, connection):
  """Sends one piece of the request body over the connection.

  Strings are sent as-is, file-like objects are streamed in 100K chunks,
  and anything else is sent as str(data).
  """
  if isinstance(data, types.StringTypes):
    connection.send(data)
    return
  # Check to see if data is a file-like object that has a read method.
  if hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while True:
      chunk = data.read(100000)
      if chunk == '':
        break
      connection.send(chunk)
    return
  # The data object was not a string or a file; fall back to str().
  connection.send(str(data))
# ---- file boundary: a new module (shebang + license header) begins below ----
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/1.2.2'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass
class UnparsableUrlObject(Error):
  """Raised when a URL cannot be parsed into its component parts."""
  pass
class ContentLengthRequired(Error):
  """Raised when a request needs a Content-Length that cannot be computed."""
  pass
class HttpResponse(object):
  """Mirrors the httplib.HTTPResponse interface for a server reply.

  HttpResponse represents the server's response to an HTTP request from
  the client. HttpClient.request returns an httplib.HTTPResponse, and this
  class exposes the same status/reason/getheader/read interface so the two
  can be used interchangeably by higher level code.
  """

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Builds a response from its parts.

    Args:
      body: A file-like object with a read() method, or a plain string
          which will be wrapped so that read() returns the full string.
      status: The HTTP status code, converted to an int. Example: 200.
      reason: The HTTP status message which follows the code. Example:
          OK, Created, Not Found.
      headers: dict of HTTP headers from the server's response. A common
          header in the response is Content-Length.
    """
    if body:
      if hasattr(body, 'read'):
        self._body = body
      else:
        # Wrap plain strings so read() works uniformly.
        self._body = StringIO.StringIO(body)
    else:
      self._body = None
    if status is None:
      self.status = None
    else:
      self.status = int(status)
    self.reason = reason
    self._headers = headers or {}

  def getheader(self, name, default=None):
    """Returns the named response header, or default when it is absent."""
    return self._headers.get(name, default)

  def read(self, amt=None):
    """Reads the body; amt, when given and nonzero, limits the bytes read."""
    if amt:
      return self._body.read(amt)
    return self._body.read()
class GenericHttpClient(object):
  """Delegates HTTP requests to a wrapped client object.

  Subclasses are used in AtomService and GDataService to make requests to
  the server; swapping the contained http_client member lets the service
  issue HTTP requests through different mechanisms.
  """
  debug = False

  def __init__(self, http_client, headers=None):
    """
    Args:
      http_client: An object which provides a request method to make an HTTP
          request. The request method in GenericHttpClient performs a
          call-through to the contained HTTP client object.
      headers: A dictionary containing HTTP headers which should be included
          in every HTTP request. Common persistent headers include
          'User-Agent'.
    """
    self.http_client = http_client
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Issues the request with per-call headers merged over the defaults."""
    merged_headers = dict(self.headers)
    if headers:
      merged_headers.update(headers)
    return self.http_client.request(operation, url, data=data,
        headers=merged_headers)

  def get(self, url, headers=None):
    """Performs an HTTP GET."""
    return self.request('GET', url, headers=headers)

  def post(self, url, data, headers=None):
    """Performs an HTTP POST with the given body."""
    return self.request('POST', url, data=data, headers=headers)

  def put(self, url, data, headers=None):
    """Performs an HTTP PUT with the given body."""
    return self.request('PUT', url, data=data, headers=headers)

  def delete(self, url, headers=None):
    """Performs an HTTP DELETE."""
    return self.request('DELETE', url, headers=headers)
class GenericToken(object):
  """Represents an Authorization token to be added to HTTP requests.

  Some Authorization headers include calculated fields (digital signatures
  for example) which depend on the parameters of the HTTP request, so the
  token object is responsible for signing the request and attaching the
  Authorization header. This base implementation adds nothing.
  """

  def perform_request(self, http_client, operation, url, data=None,
      headers=None):
    """Issues the request unchanged; no Authorization header is added."""
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    """Reports whether this token authorizes access to the given URL.

    The generic token adds no auth header, so it is valid for no scope.
    """
    return False
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
class AtomService(object):
  """Performs Atom Publishing Protocol CRUD operations.

  The AtomService contains methods to perform HTTP CRUD operations.
  """
  # Default values for members
  port = 80
  ssl = False
  # Set current_token to force the AtomService to use this token
  # instead of searching for an appropriate token in the token_store.
  current_token = None
  # When True, tokens created by use_basic_auth are stored in token_store
  # and/or set as current_token automatically.
  auto_store_tokens = True
  auto_set_current_token = True

  def _get_override_token(self):
    return self.current_token

  def _set_override_token(self, token):
    self.current_token = token

  # override_token is an alias for current_token.
  override_token = property(_get_override_token, _set_override_token)

  def __init__(self, server=None, additional_headers=None,
      application_name='', http_client=None, token_store=None):
    """Creates a new AtomService client.

    Args:
      server: string (optional) The start of a URL for the server
          to which all operations should be directed. Example:
          'www.google.com'
      additional_headers: dict (optional) Any additional HTTP headers which
          should be included with CRUD operations.
      application_name: str (optional) Identifies the application; used to
          build the User-Agent header.
      http_client: An object responsible for making HTTP requests using a
          request method. If none is provided, a new instance of
          atom.http.ProxiedHttpClient will be used.
      token_store: Keeps a collection of authorization tokens which can be
          applied to requests for a specific URLs. Critical methods are
          find_token based on a URL (atom.url.Url or a string), add_token,
          and remove_token.
    """
    self.http_client = http_client or atom.http.ProxiedHttpClient()
    self.token_store = token_store or atom.token_store.TokenStore()
    self.server = server
    self.additional_headers = additional_headers or {}
    self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
        application_name,)
    # If debug is True, the HTTPConnection will display debug information
    self._set_debug(False)

  def _get_debug(self):
    return self.http_client.debug

  def _set_debug(self, value):
    self.http_client.debug = value

  debug = property(_get_debug, _set_debug,
      doc='If True, HTTP debug information is printed.')

  def use_basic_auth(self, username, password, scopes=None):
    """Creates a BasicAuthToken for the credentials and registers it.

    Args:
      username: str
      password: str
      scopes: list (optional) URL prefixes for which the token is valid.
          Defaults to [atom.token_store.SCOPE_ALL].

    Returns:
      True when a token was created (and stored, if auto_store_tokens is
      set); False when username or password was None.
    """
    if username is not None and password is not None:
      if scopes is None:
        scopes = [atom.token_store.SCOPE_ALL]
      base_64_string = base64.encodestring('%s:%s' % (username, password))
      # Bug fix: honor the caller-supplied scopes. The previous code
      # normalized scopes above and then ignored it, always passing
      # [atom.token_store.SCOPE_ALL] to BasicAuthToken.
      token = BasicAuthToken('Basic %s' % base_64_string.strip(),
          scopes=scopes)
      if self.auto_set_current_token:
        self.current_token = token
      if self.auto_store_tokens:
        return self.token_store.add_token(token)
      return True
    return False

  def UseBasicAuth(self, username, password, for_proxy=False):
    """Sets an Authentication: Basic HTTP header containing plaintext.

    Deprecated, use use_basic_auth instead.

    The username and password are base64 encoded and added to an HTTP header
    which will be included in each request. Note that your username and
    password are sent in plaintext.

    Args:
      username: str
      password: str
      for_proxy: bool (ignored; kept for backward compatibility)
    """
    self.use_basic_auth(username, password)

  def request(self, operation, url, data=None, headers=None,
      url_params=None):
    """Performs an HTTP call to the server.

    Relative URL strings are made absolute using self.server and self.ssl,
    url_params are merged into the URL's parameters, a Content-Length
    header is computed when possible, and the request is delegated to the
    authorization token that covers the URL.
    """
    if isinstance(url, str):
      if not url.startswith('http') and self.ssl:
        url = atom.url.parse_url('https://%s%s' % (self.server, url))
      elif not url.startswith('http'):
        url = atom.url.parse_url('http://%s%s' % (self.server, url))
      else:
        url = atom.url.parse_url(url)
    if url_params:
      for name, value in url_params.iteritems():
        url.params[name] = value
    all_headers = self.additional_headers.copy()
    if headers:
      all_headers.update(headers)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      content_length = CalculateDataLength(data)
      if content_length:
        all_headers['Content-Length'] = str(content_length)
    # Find an Authorization token for this URL if one is available.
    if self.override_token:
      auth_token = self.override_token
    else:
      auth_token = self.token_store.find_token(url)
    return auth_token.perform_request(self.http_client, operation, url,
        data=data, headers=all_headers)

  # CRUD operations
  def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
    """Query the APP server with the given URI.

    The uri is the portion of the URI after the server value
    (server example: 'www.google.com').

    Example use:
      To perform a query against Google Base, set the server to
      'base.google.com' and set the uri to '/base/feeds/...', where ... is
      your query. For example, to find snippets for all digital cameras uri
      should be set to: '/base/feeds/snippets?bq=digital+camera'

    Args:
      uri: string The query in the form of a URI. Example:
          '/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dict (optional) Extra HTTP headers to be included
          in the GET request. These headers are in addition to
          those stored in the client's additional_headers property.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the query. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) Accepted for backward
          compatibility; escaping is handled by atom.url.

    Returns:
      httplib.HTTPResponse The server's response to the GET request.
    """
    return self.request('GET', uri, data=None, headers=extra_headers,
        url_params=url_params)

  def Post(self, data, uri, extra_headers=None, url_params=None,
      escape_params=True, content_type='application/atom+xml'):
    """Insert data into an APP server at the given URI.

    Args:
      data: string, ElementTree._Element, or something with a __str__ method
          The XML to be sent to the uri.
      uri: string The location (feed) to which the data should be inserted.
          Example: '/base/feeds/items'.
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) Accepted for backward
          compatibility; escaping is handled by atom.url.
      content_type: str (optional) The MIME type of the request body; sent
          as the Content-Type header when set.

    Returns:
      httplib.HTTPResponse Server's response to the POST request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('POST', uri, data=data, headers=extra_headers,
        url_params=url_params)

  def Put(self, data, uri, extra_headers=None, url_params=None,
      escape_params=True, content_type='application/atom+xml'):
    """Updates an entry at the given URI.

    Args:
      data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
          XML containing the updated data.
      uri: string A URI indicating entry to which the update will be applied.
          Example: '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type,
          Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) Accepted for backward
          compatibility; escaping is handled by atom.url.
      content_type: str (optional) The MIME type of the request body; sent
          as the Content-Type header when set.

    Returns:
      httplib.HTTPResponse Server's response to the PUT request.
    """
    if extra_headers is None:
      extra_headers = {}
    if content_type:
      extra_headers['Content-Type'] = content_type
    return self.request('PUT', uri, data=data, headers=extra_headers,
        url_params=url_params)

  def Delete(self, uri, extra_headers=None, url_params=None,
      escape_params=True):
    """Deletes the entry at the given URI.

    Args:
      uri: string The URI of the entry to be deleted. Example:
          '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
          The client automatically sets the Content-Type and
          Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
          in the URI. These are translated into query arguments
          in the form '&dict_key=value&...'.
          Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) Accepted for backward
          compatibility; escaping is handled by atom.url.

    Returns:
      httplib.HTTPResponse Server's response to the DELETE request.
    """
    return self.request('DELETE', uri, data=None, headers=extra_headers,
        url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
  """Adds a pre-built HTTP Basic Auth Authorization header to requests."""
  def __init__(self, auth_header, scopes=None):
    """Creates a token used to add Basic Auth headers to HTTP requests.

    Args:
      auth_header: str The value for the Authorization header.
      scopes: list of str or atom.url.Url specifying the beginnings of URLs
          for which this token can be used. For example, if scopes contains
          'http://example.com/foo', then this token can be used for a request to
          'http://example.com/foo/bar' but it cannot be used for a request to
          'http://example.com/baz'
    """
    self.auth_header = auth_header
    self.scopes = scopes or []
  def perform_request(self, http_client, operation, url, data=None,
      headers=None):
    """Sets the Authorization header to the basic auth string."""
    # Note: when the caller supplies a headers dict it is mutated in place.
    if headers is None:
      headers = {'Authorization':self.auth_header}
    else:
      headers['Authorization'] = self.auth_header
    return http_client.request(operation, url, data=data, headers=headers)
  def __str__(self):
    return self.auth_header
  def valid_for_scope(self, url):
    """Tells the caller if the token authorizes access to the desired URL.
    """
    # String URLs are parsed so host/path comparisons below work.
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    for scope in self.scopes:
      # SCOPE_ALL means the token applies to every URL.
      if scope == atom.token_store.SCOPE_ALL:
        return True
      if isinstance(scope, (str, unicode)):
        scope = atom.url.parse_url(scope)
      if scope == url:
        return True
      # Check the host and the path, but ignore the port and protocol.
      elif scope.host == url.host and not scope.path:
        return True
      elif scope.host == url.host and scope.path and not url.path:
        continue
      elif scope.host == url.host and url.path.startswith(scope.path):
        return True
    return False
def PrepareConnection(service, full_uri):
  """Opens a connection to the server based on the full URI.

  This method is deprecated, instead use atom.http.HttpClient.request.

  Examines the target URI and the proxy settings, which are set as
  environment variables, to open a connection with the server. This
  connection is used to make an HTTP request.

  Args:
    service: atom.AtomService or a subclass. It must have a server string
        which represents the server host to which the request should be made.
        It may also have a dictionary of additional_headers to send in the
        HTTP request.
    full_uri: str Which is the target relative (lacks protocol and host) or
        absolute URL to be opened. Example:
        'https://www.google.com/accounts/ClientLogin' or
        'base/feeds/snippets' where the server is set to www.google.com.

  Returns:
    A tuple containing the httplib.HTTPConnection and the full_uri for the
    request.

  Raises:
    Exception: if a proxy CONNECT handshake returns a non-200 status.
  """
  deprecation('calling deprecated function PrepareConnection')
  (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
  if ssl:
    # destination is https
    proxy = os.environ.get('https_proxy')
    if proxy:
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                                   proxy_password))
        proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
            user_auth.strip()))
      else:
        proxy_authorization = ''
      proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
      user_agent = 'User-Agent: %s\r\n' % (
          service.additional_headers['User-Agent'])
      proxy_pieces = (proxy_connect + proxy_authorization + user_agent
                       + '\r\n')
      # Now connect: a very simple recv loop with error checking.
      p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      p_sock.connect((p_server, p_port))
      p_sock.sendall(proxy_pieces)
      response = ''
      # Wait for the full response.
      while response.find("\r\n\r\n") == -1:
        response += p_sock.recv(8192)
      p_status = response.split()[1]
      if p_status != str(200):
        # Bug fix: the original used `raise 'Error status=',str(p_status)`,
        # a string exception, which is a TypeError in Python 2.6+.
        raise Exception('Error status=%s' % str(p_status))
      # Trivial setup for ssl socket.
      # NOTE(review): socket.ssl and httplib.FakeSocket are long-deprecated
      # APIs kept here only because this whole helper is deprecated.
      ssl = socket.ssl(p_sock, None, None)
      fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
      connection = httplib.HTTPConnection(server)
      connection.sock = fake_sock
      full_uri = partial_uri
    else:
      connection = httplib.HTTPSConnection(server, port)
      full_uri = partial_uri
  else:
    # destination is http
    proxy = os.environ.get('http_proxy')
    if proxy:
      # NOTE(review): this branch passes service.server (a string) to
      # ProcessUrl while the https branch passes service itself — confirm
      # the asymmetry is intentional before changing it.
      (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True)
      proxy_username = os.environ.get('proxy-username')
      if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
      proxy_password = os.environ.get('proxy-password')
      if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
      if proxy_username:
        UseBasicAuth(service, proxy_username, proxy_password, True)
      connection = httplib.HTTPConnection(p_server, p_port)
      if not full_uri.startswith("http://"):
        if full_uri.startswith("/"):
          full_uri = "http://%s%s" % (service.server, full_uri)
        else:
          full_uri = "http://%s/%s" % (service.server, full_uri)
    else:
      connection = httplib.HTTPConnection(server, port)
      full_uri = partial_uri
  return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
  """Adds a plaintext Basic auth header to every request the service makes.

  Deprecated, use AtomService.use_basic_auth instead.

  The username and password are base64 encoded and stored in the service's
  additional_headers dictionary so the header is included in each request.
  Note that the credentials are effectively sent in plaintext.

  Args:
    service: atom.AtomService or a subclass which has an
        additional_headers dict as a member.
    username: str
    password: str
    for_proxy: bool When True the value is stored under
        'Proxy-Authorization' instead of 'Authorization'.
  """
  deprecation('calling deprecated function UseBasicAuth')
  credentials = base64.encodestring('%s:%s' % (username, password)).strip()
  if for_proxy:
    header_name = 'Proxy-Authorization'
  else:
    header_name = 'Authorization'
  service.additional_headers[header_name] = 'Basic %s' % (credentials,)
def ProcessUrl(service, url, for_proxy=False):
  """Processes a passed URL.  If the URL does not begin with https?, then
  the default value for server is used.

  This method is deprecated, use atom.url.parse_url instead.

  Returns a (server, port, ssl, request_uri) tuple. When the URL omits a
  host, the host / ssl / port fall back to attributes of service (or to
  service itself when it is a bare host string).
  """
  if not isinstance(url, atom.url.Url):
    url = atom.url.parse_url(url)
  server = url.host
  ssl = False
  port = 80
  if not server:
    # The URL is relative: take host, ssl and port from the service object.
    if hasattr(service, 'server'):
      server = service.server
    else:
      # service may itself be the server host string.
      server = service
    if not url.protocol and hasattr(service, 'ssl'):
      ssl = service.ssl
    if hasattr(service, 'port'):
      port = service.port
  else:
    # The URL is absolute: derive ssl and port from the URL itself.
    if url.protocol == 'https':
      ssl = True
    elif url.protocol == 'http':
      ssl = False
    if url.port:
      port = int(url.port)
    elif port == 80 and ssl:
      # No explicit port on an https URL: use the https default.
      port = 443
  return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
  """Convert a dictionary of URL arguments into a URL parameter string.

  This function is deprecated, use atom.url.Url instead.

  Args:
    url_parameters: The dictionary of key-value pairs which will be converted
        into URL parameters. For example,
        {'dry-run': 'true', 'foo': 'bar'}
        will become ['dry-run=true', 'foo=bar'].
    escape_params: bool If True (the default), keys and values are escaped
        with urllib.quote_plus; otherwise they are passed through str().

  Returns:
    A list which contains a string for each key-value pair. The strings are
    ready to be incorporated into a URL by using '&'.join([] + parameter_list)
  """
  # Pick the transformation applied to both keys and values.
  transform = urllib.quote_plus if escape_params else str
  return ['%s=%s' % (transform(param), transform(value))
          for param, value in (url_parameters or {}).items()]
def BuildUri(uri, url_params=None, escape_params=True):
  """Converts a uri string and a collection of parameters into a URI.

  This function is deprecated, use atom.url.Url instead.

  Args:
    uri: string The start of the desired URI. This string can already contain
        URL parameters. Examples: '/base/feeds/snippets',
        '/base/feeds/snippets?bq=digital+camera'
    url_params: dict (optional) Additional URL parameters to be included
        in the query. These are translated into query arguments
        in the form '&dict_key=value&...'.
        Example: {'max-results': '250'} becomes &max-results=250
    escape_params: boolean (optional) If false, the calling code has already
        ensured that the query will form a valid URL (all
        reserved characters have been escaped). If true, this
        method will escape the query and any URL parameters
        provided.

  Returns:
    string The URI consisting of the escaped URL parameters appended to the
    initial uri string.
  """
  # Prepare URL parameters for inclusion into the GET request.
  parameter_list = DictionaryToParamList(url_params, escape_params)
  if not parameter_list:
    return uri
  query = '&'.join(parameter_list)
  if '?' in uri:
    # The uri already carries URL parameters; append after a new '&'.
    return '%s&%s' % (uri, query)
  # No '?' yet, so separate the uri from its new query string with one.
  return '%s?%s' % (uri, query)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

  This method is deprecated, use atom.http.HttpClient.request instead.

  Usage example, perform and HTTP GET on http://www.google.com/:
    import atom.service
    client = atom.service.AtomService()
    http_response = client.Get('http://www.google.com/')
  or you could set the client.server to 'www.google.com' and use the
  following:
    client.server = 'www.google.com'
    http_response = client.Get('/')

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one of
        'GET', 'POST', 'PUT', or 'DELETE'
    data: ElementTree, filestream, list of parts, or other object which can be
        converted to a string.
        Should be set to None when performing a GET or PUT.
        If data is a file-like object which can be read, this method will read
        a chunk of 100K bytes at a time and send them.
        If the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent
        in the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.
  """
  deprecation('call to deprecated function HttpRequest')
  full_uri = BuildUri(uri, url_params, escape_params)
  (connection, full_uri) = PrepareConnection(service, full_uri)
  if extra_headers is None:
    extra_headers = {}
  # Turn on debug mode if the debug member is set.
  if service.debug:
    connection.debuglevel = 1
  connection.putrequest(operation, full_uri)
  # If the list of headers does not include a Content-Length, attempt to
  # calculate it based on the data object.
  if (data and not service.additional_headers.has_key('Content-Length') and
      not extra_headers.has_key('Content-Length')):
    content_length = CalculateDataLength(data)
    if content_length:
      extra_headers['Content-Length'] = str(content_length)
  # NOTE(review): Content-Type is sent even when data is None as long as
  # content_type is truthy — confirm this is intended for GET/DELETE.
  if content_type:
    extra_headers['Content-Type'] = content_type
  # Send the HTTP headers.
  if isinstance(service.additional_headers, dict):
    for header in service.additional_headers:
      connection.putheader(header, service.additional_headers[header])
  if isinstance(extra_headers, dict):
    for header in extra_headers:
      connection.putheader(header, extra_headers[header])
  connection.endheaders()
  # If there is data, send it in the request.
  if data:
    if isinstance(data, list):
      for data_part in data:
        __SendDataPart(data_part, connection)
    else:
      __SendDataPart(data, connection)
  # Return the HTTP Response from the server.
  return connection.getresponse()
def __SendDataPart(data, connection):
  """Sends one piece of a request body over the connection.

  This method is deprecated, use atom.http._send_data_part.

  Strings are sent as-is, ElementTree elements are serialized first,
  file-like objects are streamed in 100K chunks, and anything else is
  converted with str().
  """
  # Bug fix: the original called the nonexistent name 'deprecated'; the
  # helper defined in this module is 'deprecation'.
  deprecation('call to deprecated function __SendDataPart')
  if isinstance(data, str):
    #TODO add handling for unicode.
    connection.send(data)
    return
  elif ElementTree.iselement(data):
    connection.send(ElementTree.tostring(data))
    return
  # Check to see if data is a file-like object that has a read method.
  elif hasattr(data, 'read'):
    # Read the file and send it a chunk at a time.
    while 1:
      binarydata = data.read(100000)
      if binarydata == '': break
      connection.send(binarydata)
    return
  else:
    # The data object was not a file.
    # Try to convert to a string and send the data.
    connection.send(str(data))
    return
def CalculateDataLength(data):
  """Attempts to determine the length of the data to send.

  This method will respond with a length only if the data is a string or
  an ElementTree element; for lists (multipart bodies) and file-like
  objects the length is unknown and None is returned.

  Args:
    data: object If this is not a string or ElementTree element this
        function will return None.
  """
  if isinstance(data, str):
    return len(data)
  if isinstance(data, list):
    # A list of parts; total size is not known here.
    return None
  if ElementTree.iselement(data):
    return len(ElementTree.tostring(data))
  if hasattr(data, 'read'):
    # If this is a file-like object, don't try to guess the length.
    return None
  return len(str(data))
def deprecation(message):
  """Emits a DeprecationWarning attributed to the deprecated caller."""
  # stacklevel=2 points the warning at the caller of the deprecated
  # function rather than at this helper itself.
  warnings.warn(message, category=DeprecationWarning, stacklevel=2)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
  """Creates a Url object which corresponds to the URL string.

  This method can accept partial URLs, but it will leave missing
  members of the Url unset.
  """
  (scheme, netloc, path, _params, query, _fragment) = urlparse.urlparse(
      url_string)
  url = Url()
  if scheme:
    url.protocol = scheme
  if netloc:
    # NOTE(review): a plain ':' split — assumes no userinfo and no IPv6
    # literal in the authority; confirm callers never pass those.
    host_parts = netloc.split(':')
    if host_parts[0]:
      url.host = host_parts[0]
    if len(host_parts) > 1:
      url.port = host_parts[1]
  if path:
    url.path = path
  if query:
    for pair in query.split('&'):
      pieces = pair.split('=')
      key = urllib.unquote_plus(pieces[0])
      if len(pieces) > 1:
        # Only the first value is kept: 'a=b=c' yields params['a'] == 'b'.
        url.params[key] = urllib.unquote_plus(pieces[1])
      else:
        url.params[key] = None
  return url
class Url(object):
  """Represents a URL and implements comparison logic.

  URL strings which are not identical can still be equivalent, so this object
  provides a better interface for comparing and manipulating URLs than
  strings. URL parameters are represented as a dictionary of strings, and
  defaults are used for the protocol (http) and port (80) if not provided.
  """
  def __init__(self, protocol=None, host=None, port=None, path=None,
               params=None):
    # protocol: scheme such as 'http'; host: domain or address; port: str or
    # int; path: request path; params: dict of query parameters.
    self.protocol = protocol
    self.host = host
    self.port = port
    self.path = path
    self.params = params or {}
  def to_string(self):
    """Builds the full URL string with the query parameters escaped."""
    url_parts = ['', '', '', '', '', '']
    if self.protocol:
      url_parts[0] = self.protocol
    if self.host:
      if self.port:
        url_parts[1] = ':'.join((self.host, str(self.port)))
      else:
        url_parts[1] = self.host
    if self.path:
      url_parts[2] = self.path
    if self.params:
      # Index 4 is the query component in the urlunparse 6-tuple.
      url_parts[4] = self.get_param_string()
    return urlparse.urlunparse(url_parts)
  def get_param_string(self):
    """Returns the quoted 'key=value' pairs joined with '&'."""
    param_pairs = []
    for key, value in self.params.iteritems():
      param_pairs.append('='.join((urllib.quote_plus(key),
          urllib.quote_plus(str(value)))))
    return '&'.join(param_pairs)
  def get_request_uri(self):
    """Returns the path with the parameters escaped and appended."""
    # NOTE(review): assumes path is set whenever params exist; a None path
    # with params would fail in '?'.join — confirm callers guarantee this.
    param_string = self.get_param_string()
    if param_string:
      return '?'.join([self.path, param_string])
    else:
      return self.path
  def __cmp__(self, other):
    # Python 2 three-way comparison. Missing protocol/port values are
    # treated as the module defaults (http, 80) so equivalent URLs compare
    # equal. NOTE(review): no __hash__ is defined, so equal Urls still hash
    # by identity and won't collide as dict keys — confirm intended.
    if not isinstance(other, Url):
      # Non-Url values are compared by string form.
      return cmp(self.to_string(), str(other))
    difference = 0
    # Compare the protocol
    if self.protocol and other.protocol:
      difference = cmp(self.protocol, other.protocol)
    elif self.protocol and not other.protocol:
      difference = cmp(self.protocol, DEFAULT_PROTOCOL)
    elif not self.protocol and other.protocol:
      difference = cmp(DEFAULT_PROTOCOL, other.protocol)
    if difference != 0:
      return difference
    # Compare the host
    difference = cmp(self.host, other.host)
    if difference != 0:
      return difference
    # Compare the port
    if self.port and other.port:
      difference = cmp(self.port, other.port)
    elif self.port and not other.port:
      difference = cmp(self.port, DEFAULT_PORT)
    elif not self.port and other.port:
      difference = cmp(DEFAULT_PORT, other.port)
    if difference != 0:
      return difference
    # Compare the path
    difference = cmp(self.path, other.path)
    if difference != 0:
      return difference
    # Compare the parameters
    return cmp(self.params, other.params)
  def __str__(self):
    return self.to_string()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a TokenStore class which is designed to manage
auth tokens required for different services.
Each token is valid for a set of scopes which is the start of a URL. An HTTP
client will use a token store to find a valid Authorization header to send
in requests to the specified URL. If the HTTP client determines that a token
has expired or been revoked, it can remove the token from the store so that
it will not be used in future requests.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
SCOPE_ALL = 'http'
class TokenStore(object):
  """Manages Authorization tokens which will be sent in HTTP headers.

  Tokens are stored in a dict keyed by str(scope); each scope string is
  the prefix of the URLs for which the token is valid.
  """
  def __init__(self, scoped_tokens=None):
    # Mapping of str(scope) -> token object. An optional pre-built mapping
    # may be supplied; otherwise the store starts empty.
    self._tokens = scoped_tokens or {}
  def add_token(self, token):
    """Adds a new token to the store (replaces tokens with the same scope).

    Args:
      token: A subclass of http_interface.GenericToken. The token object is
          responsible for adding the Authorization header to the HTTP request.
          The scopes defined in the token are used to determine if the token
          is valid for a requested scope when find_token is called.

    Returns:
      True if the token was added, False if the token was not added because
      no scopes were provided.
    """
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    for scope in token.scopes:
      self._tokens[str(scope)] = token
    return True
  def find_token(self, url):
    """Selects an Authorization header token which can be used for the URL.

    Args:
      url: str or atom.url.Url or a list containing the same.
          The URL which is going to be requested. All tokens are examined
          to see if any scopes match the beginning of the URL. The first
          match found is returned.

    Returns:
      The token object which should execute the HTTP request. If there was
      no token for the url (the url did not begin with any of the token
      scopes available), then the atom.http_interface.GenericToken will be
      returned because the GenericToken calls through to the http client
      without adding an Authorization header.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    # NOTE(review): _tokens is keyed by str(scope), but here the parsed Url
    # object itself is used as the lookup key. This exact-match branch only
    # fires if atom.url.Url hashes/compares equal to the stored string key —
    # confirm against atom.url; otherwise only the scan below finds tokens.
    if url in self._tokens:
      token = self._tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # Token exists for this exact key but is no longer valid; drop it.
        del self._tokens[url]
    # Fall back to scanning every stored token for a scope match.
    for scope, token in self._tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()
  def remove_token(self, token):
    """Removes the token from the token_store.

    This method is used when a token is determined to be invalid. If the
    token was found by find_token, but resulted in a 401 or 403 error stating
    that the token was invalid, then the token should be removed to prevent
    future use.

    Returns:
      True if a token was found and then removed from the token
      store. False if the token was not in the TokenStore.
    """
    token_found = False
    scopes_to_delete = []
    # Collect matching scopes first; deleting while iterating a dict is
    # not allowed.
    for scope, stored_token in self._tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del self._tokens[scope]
    return token_found
  def remove_all_tokens(self):
    # Resets the store to an empty mapping.
    self._tokens = {}
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Atom elements.
Module objective: provide data classes for Atom constructs. These classes hide
the XML-ness of Atom and provide a set of native Python classes to interact
with.
Conversions to and from XML should only be necessary when the Atom classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert Atom classes to and from strings.
For more information on the Atom data model, see RFC 4287
(http://www.ietf.org/rfc/rfc4287.txt)
AtomBase: A foundation class on which Atom classes are built. It
handles the parsing of attributes and children which are common to all
Atom classes. By default, the AtomBase class translates all XML child
nodes into ExtensionElements.
ExtensionElement: Atom allows Atom objects to contain XML which is not part
of the Atom specification, these are called extension elements. If a
class's parser encounters an unexpected XML construct, it is translated
into an ExtensionElement instance. ExtensionElement is designed to fully
capture the information in the XML. Child nodes in an XML extension are
turned into ExtensionElements as well.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in Atom entities.
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_NAMESPACE = 'http://purl.org/atom/app#'
APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
# This encoding is used for converting strings before translating the XML
# into an object.
XML_STRING_ENCODING = 'utf-8'
# The desired string encoding for object members. set or monkey-patch to
# unicode if you want object members to be Python unicode strings, instead of
# encoded strings
MEMBER_STRING_ENCODING = 'utf-8'
#MEMBER_STRING_ENCODING = unicode
def CreateClassFromXMLString(target_class, xml_string, string_encoding=None):
  """Parses XML text and builds an instance of target_class from it.

  Args:
    target_class: class The class to instantiate and populate. It must
        define _tag and _namespace class variables.
    xml_string: str Valid XML whose root element should match the target
        class's tag and namespace.
    string_encoding: str (optional) Encoding applied to a unicode
        xml_string before parsing. Defaults to XML_STRING_ENCODING when
        omitted.

  Returns:
    A populated instance of target_class, or None if the root element's
    tag and namespace did not match those of the target class.
  """
  target_encoding = string_encoding or XML_STRING_ENCODING
  if target_encoding and isinstance(xml_string, unicode):
    xml_string = xml_string.encode(target_encoding)
  return _CreateClassFromElementTree(target_class,
      ElementTree.fromstring(xml_string))
def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None):
"""Instantiates the class and populates members according to the tree.
Note: Only use this function with classes that have _namespace and _tag
class members.
Args:
target_class: class The class which will be instantiated and populated
with the contents of the XML.
tree: ElementTree An element tree whose contents will be converted into
members of the new target_class instance.
namespace: str (optional) The namespace which the XML tree's root node must
match. If omitted, the namespace defaults to the _namespace of the
target class.
tag: str (optional) The tag which the XML tree's root node must match. If
omitted, the tag defaults to the _tag class member of the target
class.
Returns:
An instance of the target class - or None if the tag and namespace of
the XML tree's root node did not match the desired namespace and tag.
"""
if namespace is None:
namespace = target_class._namespace
if tag is None:
tag = target_class._tag
if tree.tag == '{%s}%s' % (namespace, tag):
target = target_class()
target._HarvestElementTree(tree)
return target
else:
return None
class ExtensionContainer(object):
  """Holds XML constructs which are not part of a class's declared members.

  Unclaimed child nodes are stored as ExtensionElement instances and
  unclaimed attributes as raw name/value pairs. String members are encoded
  according to the module-level MEMBER_STRING_ENCODING setting.
  """
  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    # ExtensionElement children not matched by a subclass member.
    self.extension_elements = extension_elements or []
    # Mapping of attribute name -> value for undeclared XML attributes.
    self.extension_attributes = extension_attributes or {}
    # The element's text node content.
    self.text = text
  # Three methods to create an object from an ElementTree
  def _HarvestElementTree(self, tree):
    """Populates this instance from an ElementTree node's children,
    attributes, and text."""
    # Fill in the instance members from the contents of the XML tree.
    for child in tree:
      self._ConvertElementTreeToMember(child)
    for attribute, value in tree.attrib.iteritems():
      self._ConvertElementAttributeToMember(attribute, value)
    # Encode the text string according to the desired encoding type. (UTF-8)
    # When MEMBER_STRING_ENCODING is the unicode type itself, members are
    # kept as unicode strings instead of being encoded.
    if tree.text:
      if MEMBER_STRING_ENCODING is unicode:
        self.text = tree.text
      else:
        self.text = tree.text.encode(MEMBER_STRING_ENCODING)
  def _ConvertElementTreeToMember(self, child_tree, current_class=None):
    """Stores a child XML node as a generic ExtensionElement."""
    self.extension_elements.append(_ExtensionElementFromElementTree(
        child_tree))
  def _ConvertElementAttributeToMember(self, attribute, value):
    """Stores an XML attribute in the extension_attributes mapping."""
    # Encode the attribute value's string with the desired type Default UTF-8
    if value:
      if MEMBER_STRING_ENCODING is unicode:
        self.extension_attributes[attribute] = value
      else:
        self.extension_attributes[attribute] = value.encode(
            MEMBER_STRING_ENCODING)
  # One method to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    """Writes extension elements, extension attributes, and text into the
    given ElementTree node."""
    for child in self.extension_elements:
      child._BecomeChildElement(tree)
    for attribute, value in self.extension_attributes.iteritems():
      if value:
        if isinstance(value, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[attribute] = value
        else:
          # Decode the value from the desired encoding (default UTF-8).
          tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING)
    if self.text:
      if isinstance(self.text, unicode) or MEMBER_STRING_ENCODING is unicode:
        tree.text = self.text
      else:
        tree.text = self.text.decode(MEMBER_STRING_ENCODING)
  def FindExtensions(self, tag=None, namespace=None):
    """Searches extension elements for child nodes with the desired name.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all extensions in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    results = []
    if tag and namespace:
      for element in self.extension_elements:
        if element.tag == tag and element.namespace == namespace:
          results.append(element)
    elif tag and not namespace:
      for element in self.extension_elements:
        if element.tag == tag:
          results.append(element)
    elif namespace and not tag:
      for element in self.extension_elements:
        if element.namespace == namespace:
          results.append(element)
    else:
      for element in self.extension_elements:
        results.append(element)
    return results
class AtomBase(ExtensionContainer):
  """A foundation class on which Atom element classes are built.

  Subclasses declare their XML contract through the _children and
  _attributes class dictionaries; this class supplies the generic
  conversion logic between ElementTree nodes and instance members.
  Unclaimed XML is delegated to ExtensionContainer.
  """
  _children = {}
  _attributes = {}
  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
  def _ConvertElementTreeToMember(self, child_tree):
    """Assigns a child XML node to the matching declared member, or stores
    it as an extension element when no member matches."""
    # Find the element's tag in this class's list of child members.
    # (dict.has_key is deprecated; 'in' is the supported membership test.)
    if child_tree.tag in self.__class__._children:
      # One lookup instead of two redundant dictionary accesses.
      member_name, member_class = self.__class__._children[child_tree.tag]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(_CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                _CreateClassFromElementTree(member_class, child_tree))
    else:
      ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
  def _ConvertElementAttributeToMember(self, attribute, value):
    """Assigns an XML attribute value to the matching declared member, or
    stores it as an extension attribute when no member matches."""
    # Find the attribute in this class's list of attributes.
    if attribute in self.__class__._attributes:
      # Find the member of this class which corresponds to the XML attribute
      # (lookup in current_class._attributes) and set this member to the
      # desired value (using self.__dict__).
      if value:
        # Encode the string to capture non-ascii characters (default UTF-8)
        if MEMBER_STRING_ENCODING is unicode:
          setattr(self, self.__class__._attributes[attribute], value)
        else:
          setattr(self, self.__class__._attributes[attribute],
                  value.encode(MEMBER_STRING_ENCODING))
    else:
      ExtensionContainer._ConvertElementAttributeToMember(self, attribute,
          value)
  # Three methods to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    """Writes declared members, declared attributes, and any extensions
    into the given ElementTree node."""
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
        self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        if isinstance(member, unicode) or MEMBER_STRING_ENCODING is unicode:
          tree.attrib[xml_attribute] = member
        else:
          tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING)
    # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
    # convert any extension attributes.
    ExtensionContainer._AddMembersToElementTree(self, tree)
  def _BecomeChildElement(self, tree):
    """Appends this object to the given tree as a new child element.

    Note: Only for use with classes that have a _tag and _namespace class
    member. It is in AtomBase so that it can be inherited but it should
    not be called on instances of AtomBase.
    """
    new_child = ElementTree.Element('')
    tree.append(new_child)
    new_child.tag = '{%s}%s' % (self.__class__._namespace,
        self.__class__._tag)
    self._AddMembersToElementTree(new_child)
  def _ToElementTree(self):
    """Returns a new ElementTree element representing this object.

    Note, this method is designed to be used only with classes that have a
    _tag and _namespace. It is placed in AtomBase for inheritance but should
    not be called on this class.
    """
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
        self.__class__._tag))
    self._AddMembersToElementTree(new_tree)
    return new_tree
  def ToString(self, string_encoding='UTF-8'):
    """Converts the Atom object to a string containing XML."""
    return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding)
  def __str__(self):
    return self.ToString()
class Name(AtomBase):
  """Represents the atom:name element."""
  _tag = 'name'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Name.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def NameFromString(xml_string):
  """Deserializes a Name element from a string of XML."""
  return CreateClassFromXMLString(Name, xml_string)
class Email(AtomBase):
  """Represents the atom:email element."""
  _tag = 'email'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an Email element.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def EmailFromString(xml_string):
  """Deserializes an Email element from a string of XML."""
  return CreateClassFromXMLString(Email, xml_string)
class Uri(AtomBase):
  """Represents the atom:uri element."""
  _tag = 'uri'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Uri element.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UriFromString(xml_string):
  """Deserializes a Uri element from a string of XML."""
  return CreateClassFromXMLString(Uri, xml_string)
class Person(AtomBase):
  """Base class from which atom:author and atom:contributor extend.

  Carries the name, email address, and web page URI describing a person
  associated with an Atom feed or entry. The constructor is provided for
  illustrative purposes; you should not need to instantiate a Person.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name)
  _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email)
  _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri)
  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Initializes the shared person fields.

    Args:
      name: Name (optional) The person's name element.
      email: Email (optional) The person's email element.
      uri: Uri (optional) The URI of the person's webpage.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
      text: str (optional) The element's text content.
    """
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.uri = uri
    self.email = email
    self.name = name
class Author(Person):
  """Represents the atom:author element.

  An author is a required element in Feed.
  """
  _tag = 'author'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()
  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Initializes an Author.

    Args:
      name: Name (optional) The author's name element.
      email: Email (optional) The author's email element.
      uri: Uri (optional) The URI of the author's webpage.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
      text: str (optional) The element's text content.
    """
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.uri = uri
    self.email = email
    self.name = name
def AuthorFromString(xml_string):
  """Deserializes an Author element from a string of XML."""
  return CreateClassFromXMLString(Author, xml_string)
class Contributor(Person):
  """Represents the atom:contributor element."""
  _tag = 'contributor'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()
  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Initializes a Contributor.

    Args:
      name: Name (optional) The contributor's name element.
      email: Email (optional) The contributor's email element.
      uri: Uri (optional) The URI of the contributor's webpage.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
      text: str (optional) The element's text content.
    """
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.uri = uri
    self.email = email
    self.name = name
def ContributorFromString(xml_string):
  """Deserializes a Contributor element from a string of XML."""
  return CreateClassFromXMLString(Contributor, xml_string)
class Link(AtomBase):
  """Represents the atom:link element and its standard XML attributes."""
  _tag = 'link'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'
  _attributes['type'] = 'type'
  _attributes['title'] = 'title'
  _attributes['length'] = 'length'
  _attributes['hreflang'] = 'hreflang'
  def __init__(self, href=None, rel=None, link_type=None, hreflang=None,
      title=None, length=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Link.

    Args:
      href: str (optional) The href attribute of the link.
      rel: str (optional) The link's rel attribute.
      link_type: str (optional) Stored as the link's type attribute.
      hreflang: str (optional) The language for the href.
      title: str (optional) The link's title attribute.
      length: str (optional) The length of the href's destination.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.length = length
    self.title = title
    self.hreflang = hreflang
    self.type = link_type
    self.rel = rel
    self.href = href
def LinkFromString(xml_string):
  """Deserializes a Link element from a string of XML."""
  return CreateClassFromXMLString(Link, xml_string)
class Generator(AtomBase):
  """Represents the atom:generator element."""
  _tag = 'generator'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['uri'] = 'uri'
  _attributes['version'] = 'version'
  def __init__(self, uri=None, version=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Initializes a Generator.

    Args:
      uri: str (optional) The generator's uri attribute.
      version: str (optional) The generator's version attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.version = version
    self.uri = uri
def GeneratorFromString(xml_string):
  """Deserializes a Generator element from a string of XML."""
  return CreateClassFromXMLString(Generator, xml_string)
class Text(AtomBase):
  """Base class from which atom:title, summary, etc. extend.

  This class should never be instantiated directly.
  """
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['type'] = 'type'
  def __init__(self, text_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes the shared text-construct fields.

    Args:
      text_type: str (optional) Stored as the element's type attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = text_type
class Title(Text):
  """Represents the atom:title element."""
  _tag = 'title'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  def __init__(self, title_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Title.

    Args:
      title_type: str (optional) Stored as the element's type attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = title_type
def TitleFromString(xml_string):
  """Deserializes a Title element from a string of XML."""
  return CreateClassFromXMLString(Title, xml_string)
class Subtitle(Text):
  """Represents the atom:subtitle element."""
  _tag = 'subtitle'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  def __init__(self, subtitle_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Subtitle.

    Args:
      subtitle_type: str (optional) Stored as the element's type attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = subtitle_type
def SubtitleFromString(xml_string):
  """Deserializes a Subtitle element from a string of XML."""
  return CreateClassFromXMLString(Subtitle, xml_string)
class Rights(Text):
  """Represents the atom:rights element."""
  _tag = 'rights'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  def __init__(self, rights_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Rights element.

    Args:
      rights_type: str (optional) Stored as the element's type attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = rights_type
def RightsFromString(xml_string):
  """Deserializes a Rights element from a string of XML."""
  return CreateClassFromXMLString(Rights, xml_string)
class Summary(Text):
  """Represents the atom:summary element."""
  _tag = 'summary'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  def __init__(self, summary_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Summary.

    Args:
      summary_type: str (optional) Stored as the element's type attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = summary_type
def SummaryFromString(xml_string):
  """Deserializes a Summary element from a string of XML."""
  return CreateClassFromXMLString(Summary, xml_string)
class Content(Text):
  """Represents the atom:content element."""
  _tag = 'content'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  _attributes['src'] = 'src'
  def __init__(self, content_type=None, src=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Initializes a Content element.

    Args:
      content_type: str (optional) Stored as the element's type attribute.
      src: str (optional) The element's src attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.src = src
    self.type = content_type
def ContentFromString(xml_string):
  """Deserializes a Content element from a string of XML."""
  return CreateClassFromXMLString(Content, xml_string)
class Category(AtomBase):
  """Represents the atom:category element."""
  _tag = 'category'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['term'] = 'term'
  _attributes['scheme'] = 'scheme'
  _attributes['label'] = 'label'
  def __init__(self, term=None, scheme=None, label=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Initializes a Category.

    Args:
      term: str (optional) The category's term attribute.
      scheme: str (optional) The category's scheme attribute.
      label: str (optional) The category's label attribute.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.label = label
    self.scheme = scheme
    self.term = term
def CategoryFromString(xml_string):
  """Deserializes a Category element from a string of XML."""
  return CreateClassFromXMLString(Category, xml_string)
class Id(AtomBase):
  """Represents the atom:id element."""
  _tag = 'id'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an Id.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def IdFromString(xml_string):
  """Deserializes an Id element from a string of XML."""
  return CreateClassFromXMLString(Id, xml_string)
class Icon(AtomBase):
  """Represents the atom:icon element."""
  _tag = 'icon'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an Icon.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def IconFromString(xml_string):
  """Deserializes an Icon element from a string of XML."""
  return CreateClassFromXMLString(Icon, xml_string)
class Logo(AtomBase):
  """Represents the atom:logo element."""
  _tag = 'logo'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes a Logo.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def LogoFromString(xml_string):
  """Deserializes a Logo element from a string of XML."""
  return CreateClassFromXMLString(Logo, xml_string)
class Draft(AtomBase):
  """Represents the app:draft element indicating publication status."""
  _tag = 'draft'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an app:draft element.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def DraftFromString(xml_string):
  """Deserializes a Draft element from a string of XML."""
  return CreateClassFromXMLString(Draft, xml_string)
class Control(AtomBase):
  """Represents the app:control element describing publication restrictions.

  The APP control element may contain a draft element indicating whether
  or not this entry should be publicly available.
  """
  _tag = 'control'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft)
  def __init__(self, draft=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an app:control element.

    Args:
      draft: Draft (optional) The contained app:draft element.
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.draft = draft
def ControlFromString(xml_string):
  """Deserializes a Control element from a string of XML."""
  return CreateClassFromXMLString(Control, xml_string)
class Date(AtomBase):
  """Base class for date-valued elements such as atom:updated."""
  # TODO: add conversions between the text content and Python datetime
  # objects so callers can work with real time values.
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
class Updated(Date):
  """Represents the atom:updated element."""
  _tag = 'updated'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()
  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Initializes an Updated element.

    Args:
      text: str (optional) The element's text content.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Extra XML attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UpdatedFromString(xml_string):
  """Deserializes an Updated element from a string of XML."""
  return CreateClassFromXMLString(Updated, xml_string)
class Published(Date):
"""The atom:published element."""
_tag = 'published'
_namespace = ATOM_NAMESPACE
_children = Date._children.copy()
_attributes = Date._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
"""Constructor for Published
Args:
text: str The text data in the this element
extension_elements: list A list of ExtensionElement instances
extension_attributes: dict A dictionary of attribute value string pairs
"""
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def PublishedFromString(xml_string):
    """Deserialize an atom:published element from an XML string."""
    parsed = CreateClassFromXMLString(Published, xml_string)
    return parsed
class LinkFinder(object):
    """An "interface" providing methods to find link elements.

    Entry elements often contain multiple links which differ in the rel
    attribute or content type. Often, developers are interested in a specific
    type of link so this class provides methods to find specific classes of
    links.

    This class is used as a mixin in Atom entries and feeds; the including
    class must provide a ``link`` attribute holding a list of link objects,
    each with a ``rel`` attribute.
    """

    def _FindFirstWithRel(self, rel):
        """Return the first link whose rel attribute equals rel, or None.

        Shared implementation for all the Get*Link accessors below, which
        previously repeated the same scan loop.
        """
        for a_link in self.link:
            if a_link.rel == rel:
                return a_link
        return None

    def GetSelfLink(self):
        """Find the first link with rel set to 'self'.

        Returns:
            An atom.Link or None if none of the links had rel equal to 'self'.
        """
        return self._FindFirstWithRel('self')

    def GetEditLink(self):
        """Return the first link with rel set to 'edit', or None."""
        return self._FindFirstWithRel('edit')

    def GetNextLink(self):
        """Return the first link with rel set to 'next', or None."""
        return self._FindFirstWithRel('next')

    def GetLicenseLink(self):
        """Return the first link with rel set to 'license', or None."""
        return self._FindFirstWithRel('license')

    def GetAlternateLink(self):
        """Return the first link with rel set to 'alternate', or None."""
        return self._FindFirstWithRel('alternate')
class FeedEntryParent(AtomBase, LinkFinder):
    """Shared structure for atom:feed and atom:entry elements."""
    _children = AtomBase._children.copy()
    _attributes = AtomBase._attributes.copy()
    _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author])
    _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category])
    _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor])
    _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id)
    _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link])
    _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights)
    _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title)
    _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated)

    def __init__(self, author=None, category=None, contributor=None,
                 atom_id=None, link=None, rights=None, title=None, updated=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Initialize the children common to feeds and entries.

        Args:
            author, category, contributor, link: lists (optional) of the
                corresponding repeating child elements; default to empty lists.
            atom_id: Id (optional) the element's atom:id child.
            rights, title, updated: optional singleton child elements.
            text: str (optional) text content of the element's XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) extra XML attribute values.
        """
        # Singleton children.
        self.id = atom_id
        self.rights = rights
        self.title = title
        self.updated = updated
        self.text = text
        # Repeating children fall back to fresh empty lists.
        self.author = author if author else []
        self.category = category if category else []
        self.contributor = contributor if contributor else []
        self.link = link if link else []
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}
class Source(FeedEntryParent):
    """The atom:source element."""
    _tag = 'source'
    _namespace = ATOM_NAMESPACE
    _children = FeedEntryParent._children.copy()
    _attributes = FeedEntryParent._attributes.copy()
    _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator)
    _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon)
    _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo)
    _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle)

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create an atom:source element.

        Args:
            author, category, contributor, link: lists (optional) of the
                corresponding repeating child elements; default to empty lists.
            generator, icon, logo, rights, subtitle, title, updated: optional
                singleton child elements.
            atom_id: Id (optional) the element's atom:id child.
            text: str (optional) text content of the element's XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) extra XML attribute values.
        """
        # Singleton children.
        self.generator = generator
        self.icon = icon
        self.id = atom_id
        self.logo = logo
        self.rights = rights
        self.subtitle = subtitle
        self.title = title
        self.updated = updated
        self.text = text
        # Repeating children fall back to fresh empty lists.
        self.author = author if author else []
        self.category = category if category else []
        self.contributor = contributor if contributor else []
        self.link = link if link else []
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}
def SourceFromString(xml_string):
    """Deserialize an atom:source element from an XML string."""
    parsed = CreateClassFromXMLString(Source, xml_string)
    return parsed
class Entry(FeedEntryParent):
    """The atom:entry element."""
    _tag = 'entry'
    _namespace = ATOM_NAMESPACE
    _children = FeedEntryParent._children.copy()
    _attributes = FeedEntryParent._attributes.copy()
    _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content)
    _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published)
    _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source)
    _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary)
    _children['{%s}control' % APP_NAMESPACE] = ('control', Control)

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, control=None, title=None, updated=None,
                 extension_elements=None, extension_attributes=None, text=None):
        """Create an atom:entry element.

        Args:
            author, category, contributor, link: lists (optional) of the
                corresponding repeating child elements; default to empty lists.
            content, published, rights, source, summary, title, updated:
                optional singleton child elements.
            control: optional app:control element, used e.g. to mark an entry
                as a draft which should not be publicly viewable.
            atom_id: Id (optional) the entry's atom:id child.
            text: str (optional) text content of the element's XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) extra XML attribute values.
        """
        # Singleton children.
        self.content = content
        self.id = atom_id
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        self.text = text
        # Repeating children fall back to fresh empty lists.
        self.author = author if author else []
        self.category = category if category else []
        self.contributor = contributor if contributor else []
        self.link = link if link else []
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}
def EntryFromString(xml_string):
    """Deserialize an atom:entry element from an XML string."""
    parsed = CreateClassFromXMLString(Entry, xml_string)
    return parsed
class Feed(Source):
    """The atom:feed element."""
    _tag = 'feed'
    _namespace = ATOM_NAMESPACE
    _children = Source._children.copy()
    _attributes = Source._attributes.copy()
    _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None, entry=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Create an atom:feed element.

        Args:
            author, category, contributor, link, entry: lists (optional) of
                the corresponding repeating child elements; default to empty
                lists (entry holds the feed's Entry instances).
            generator, icon, logo, rights, subtitle, title, updated: optional
                singleton child elements.
            atom_id: Id (optional) the feed's atom:id child.
            text: str (optional) text content of the element's XML text node.
            extension_elements: list (optional) ExtensionElement children.
            extension_attributes: dict (optional) extra XML attribute values.
        """
        # Singleton children.
        self.generator = generator
        self.icon = icon
        self.id = atom_id
        self.logo = logo
        self.rights = rights
        self.subtitle = subtitle
        self.title = title
        self.updated = updated
        self.text = text
        # Repeating children fall back to fresh empty lists.
        self.author = author if author else []
        self.category = category if category else []
        self.contributor = contributor if contributor else []
        self.link = link if link else []
        self.entry = entry if entry else []
        self.extension_elements = extension_elements if extension_elements else []
        self.extension_attributes = extension_attributes if extension_attributes else {}
def FeedFromString(xml_string):
    """Deserialize an atom:feed element from an XML string."""
    parsed = CreateClassFromXMLString(Feed, xml_string)
    return parsed
class ExtensionElement(object):
    """Represents extra XML elements contained in Atom classes."""

    def __init__(self, tag, namespace=None, attributes=None,
            children=None, text=None):
        """Constructor for ExtensionElement

        Args:
            namespace: string (optional) The XML namespace for this element.
            tag: string (optional) The tag (without the namespace qualifier) for
                this element. To reconstruct the full qualified name of the
                element, combine this tag with the namespace.
            attributes: dict (optional) The attribute value string pairs for the
                XML attributes of this element.
            children: list (optional) A list of ExtensionElements which
                represent the XML child nodes of this element.
            text: string (optional) The text content of this element.
        """
        self.namespace = namespace
        self.tag = tag
        self.attributes = attributes or {}
        self.children = children or []
        self.text = text

    def ToString(self):
        """Serialize this element (and its subtree) to an XML string."""
        # The placeholder Element('') gets its real tag assigned inside
        # _TransferToElementTree.
        element_tree = self._TransferToElementTree(ElementTree.Element(''))
        return ElementTree.tostring(element_tree, encoding="UTF-8")

    def _TransferToElementTree(self, element_tree):
        # Copy this object's tag, attributes, children and text onto the given
        # ElementTree element.  Returns None if this element has no tag.
        if self.tag is None:
            return None
        if self.namespace is not None:
            # Qualified tag in Clark notation: '{namespace}tag'.
            element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
        else:
            element_tree.tag = self.tag
        # Python 2 dict iteration; would need .items() under Python 3.
        for key, value in self.attributes.iteritems():
            element_tree.attrib[key] = value
        for child in self.children:
            child._BecomeChildElement(element_tree)
        element_tree.text = self.text
        return element_tree

    def _BecomeChildElement(self, element_tree):
        """Converts this object into an etree element and adds it as a child node.

        Adds self to the ElementTree. This method is required to avoid verbose XML
        which constantly redefines the namespace.

        Args:
            element_tree: ElementTree._Element The element to which this object's
                XML will be added.
        """
        # Append an empty child first, then fill it in place.
        new_element = ElementTree.Element('')
        element_tree.append(new_element)
        self._TransferToElementTree(new_element)

    def FindChildren(self, tag=None, namespace=None):
        """Searches child nodes for objects with the desired tag/namespace.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all children in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        Args:
            tag: str (optional) The desired tag
            namespace: str (optional) The desired namespace

        Returns:
            A list of elements whose tag and/or namespace match the parameters
            values
        """
        results = []
        # Only direct children are searched (no recursion).
        if tag and namespace:
            for element in self.children:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.children:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.children:
                if element.namespace == namespace:
                    results.append(element)
        else:
            # No filter: return every child.
            for element in self.children:
                results.append(element)
        return results
def ExtensionElementFromString(xml_string):
    """Parse an XML string into an ExtensionElement tree."""
    parsed_tree = ElementTree.fromstring(xml_string)
    return _ExtensionElementFromElementTree(parsed_tree)
def _ExtensionElementFromElementTree(element_tree):
    """Recursively convert an ElementTree element into an ExtensionElement."""
    element_tag = element_tree.tag
    # A namespace-qualified tag looks like '{namespace-uri}local-name'.
    if '}' in element_tag:
        brace = element_tag.index('}')
        namespace = element_tag[1:brace]
        tag = element_tag[brace + 1:]
    else:
        namespace = None
        tag = element_tag
    extension = ExtensionElement(namespace=namespace, tag=tag)
    # Copy attributes wholesale, then convert each child recursively.
    extension.attributes.update(element_tree.attrib)
    for child in element_tree:
        extension.children.append(_ExtensionElementFromElementTree(child))
    extension.text = element_tree.text
    return extension
| Python |
#!/usr/bin/python
import glob, os, sys, string as s
# cc files from ocr-* folders are compiled into libocropus by default
# extra directories for libocropus
extradirs = """
""".split()
# extra files which should not go to libocropus
exclude = """
""".split()
# optional files
# NOTE(review): this rebinding discards the exclusion list built just above;
# presumably intentional (no exclusions at the moment) -- confirm.
exclude = []
def print_header():
    """Print the license/credits banner for the generated Makefile.am."""
    # Python 2 print statement; the triple-quoted banner is emitted verbatim.
    print """# Copyright 2008 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project: OCRopus - the open source document analysis and OCR system
# File: Makefile.am
# Purpose: building OCRopus
# Responsible: kofler
# Reviewer:
# Primary Repository: http://ocropus.googlecode.com/svn/trunk/
# Web Sites: www.iupr.org, www.dfki.de
"""
# Emit the generated Makefile.am on stdout (Python 2 print statements; a
# trailing comma suppresses the newline so several values share one line).
print_header()
print """
# first build this (libocropus)
SUBDIRS = .
# the folder where all ocropus headers will be installed
ocropusincludedir=$(includedir)/ocropus
AM_CPPFLAGS = -I$(srcdir)/include -I$(srcdir)/ocr-utils \
-I@iulibheaders@
AM_LDFLAGS =
AM_CXXFLAGS = $(CXXFLAGS) -Wall -Wno-sign-compare -Wno-write-strings -Wno-deprecated
lib_LIBRARIES = libocropus.a
"""
### libocropus
print "# the default files to compile into libocropus"
print "libocropus_a_SOURCES = ",
# gather files from ocr- folders (skip excluded files, program entry points
# and test drivers)
for cc in glob.glob("ocr-*/*.cc"):
    if cc in exclude: continue
    if os.path.basename(cc).startswith("main-"): continue
    if os.path.basename(cc).startswith("test-"): continue
    print "$(srcdir)/" + cc,
# gather files from extra folders, e.g. ext/voronoi
for d in extradirs:
    for cc in glob.glob(d + "/*.cc"):
        if cc in exclude: continue
        if os.path.basename(cc).startswith("main-"): continue
        if os.path.basename(cc).startswith("test-"): continue
        print "$(srcdir)/" + cc,
print """
# folders for installing models and words
modeldir=${datadir}/ocropus/models
worddir=${datadir}/ocropus/words
# install the data
model_DATA = $(srcdir)/data/models/*
word_DATA = $(srcdir)/data/words/*
"""
# optional stuff (conditional automake fragments)
print
print "noinst_PROGRAMS = "
print
print "if use_gsl"
print " AM_CPPFLAGS += -DHAVE_GSL"
#print " AM_LDFLAGS += -lgsl -lblas"
print "endif"
print
print "if use_leptonica"
print " AM_CPPFLAGS += -I@leptheaders@ -DHAVE_LEPTONICA"
print "endif"
print
print
# headers installed into $(includedir)/ocropus
print "ocropusinclude_HEADERS = ",
for h in glob.glob("include/*.h"):
    print "$(srcdir)/" + h,
for h in glob.glob("ocr-utils/*.h"):
    print "$(srcdir)/" + h,
print
print
# binaries, which are also installed
# ([:-3] strips ".cc"; '-' is not valid in automake variable names)
binaries = glob.glob("commands/*.cc")
print "bin_PROGRAMS = " + s.join(" " + os.path.basename(b)[:-3] for b in binaries)
for b in binaries:
    bName = os.path.basename(b)[:-3].replace('-','_')
    print bName + "_SOURCES = $(srcdir)/" + b
    print bName + "_LDADD = libocropus.a"
    print
# gather all main-* files
mains = glob.glob("*/main-*.cc")
# name the resulting binaries (strip folder and suffix)
print "noinst_PROGRAMS += " + s.join(" " + os.path.basename(m)[:-3] for m in mains)
for m in mains:
    mName = os.path.basename(m)[:-3].replace('-','_')
    print mName + "_SOURCES = $(srcdir)/" + m
    print mName + "_LDADD = libocropus.a"
    print
# gather all test-* files
tests = glob.glob("*/test-*.cc")
tests += glob.glob("*/tests/test-*.cc")
# name the resulting binaries (strip folder and suffix)
print "check_PROGRAMS = " + s.join(" " + os.path.basename(t)[:-3] for t in tests)
for t in tests:
    tName = os.path.basename(t)[:-3].replace('-','_')
    print tName + "_SOURCES = $(srcdir)/" + t
    print tName + "_LDADD = libocropus.a"
    # the trailing backslash continues the CPPFLAGS line onto the next print
    print tName + "_CPPFLAGS = -I$(srcdir)/include -I$(srcdir)/ocr-utils \\"
    print "-I@iulibheaders@ -I@colibheaders@"
# run all test-* binaries with make check
print
print "check:"
print ' @echo "# running tests"'
for t in tests:
    print " $(srcdir)/" + os.path.basename(t)[:-3] + " $(srcdir)/data/testimages"
print """
# run check-style everytime and give a hint about make check
all:
	$(srcdir)/utilities/check-style -f $(srcdir)
	@echo
	@echo "Use 'make check' to run tests!"
	@echo
"""
| Python |
# -*- python -*-
# vi: ft=python
# Copyright 2008 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project: OCRopus - the open source document analysis and OCR system
# File: SConstruct
# Purpose: building OCRopus
# Responsible: tmb
# Reviewer:
# Primary Repository: http://ocropus.googlecode.com/svn/trunk/
# Web Sites: www.iupr.org, www.dfki.de
import os
print "Currently supported OS version: Ubuntu 10.04"
EnsureSConsVersion(0,97)
from SCons.Script import *
import os,sys,string,re
from glob import glob
################################################################
### ocropus source files
################################################################
# Regenerate version.cc, then collect the library sources, excluding
# per-binary entry points (main-*) and test drivers (test-*/bigtest-*).
os.system("./generate_version_cc.sh ./version.cc")
sources = glob("ocr-*/*.cc") + ["version.cc"]
exclude = r'.*/(main|test|bigtest)-.*\.cc'
sources = [f for f in sources if not re.search(exclude,f)]
headers = glob("*/*.h")
################################################################
### command line options
################################################################
# Build variables can be overridden on the command line or in custom.py.
opts = Variables('custom.py')
opts.Add('opt', 'Compiler flags for optimization/debugging', "-O2")
opts.Add('warn', 'Compiler flags for warnings',
    "-Wall -Wno-sign-compare -Wno-write-strings -Wno-unknown-pragmas "+
    " -D__warn_unused_result__=__far__"+
    " -D_BACKWARD_BACKWARD_WARNING_H=1")
### path options
opts.Add(PathVariable('prefix', 'The installation root for OCRopus ', "/usr/local"))
opts.Add(PathVariable('iulib', 'The installation root of iulib', "/usr/local"))
opts.Add(PathVariable('destdir', 'Destination root directory', "", PathVariable.PathAccept))
opts.Add(PathVariable('leptonica', 'The installation root of leptonica', "/usr/local"))
opts.Add(BoolVariable('gsl', "use GSL-dependent features", "no"))
opts.Add(BoolVariable('omp', "use OpenMP", "yes"))
opts.Add(BoolVariable('lept', "use Leptonica", "no"))
opts.Add(BoolVariable('sqlite3', "use sqlite3", "yes"))
opts.Add(BoolVariable('test', "Run some tests after the build", "no"))
opts.Add(BoolVariable('style', 'Check style', "no"))
# Install locations; ${...} strings are expanded by SCons substitution.
destdir = "${destdir}"
prefix = "${prefix}"
incdir = prefix+"/include/ocropus"
libdir = prefix+"/lib"
datadir = prefix+"/share/ocropus"
bindir = prefix+"/bin"
scriptsdir = datadir + '/scripts'
env = Environment(options=opts)
# Bake the data directories into the binaries as quoted string macros.
env.Append(CXXFLAGS=["-DDATADIR='\""+datadir+"\"'"])
env.Append(CXXFLAGS=["-DDEFAULT_DATA_DIR='\""+datadir+"/models"+"\"'"])
env.Append(CXXFLAGS=["-DDEFAULT_EXT_DIR='\""+datadir+"/extensions"+"\"'"])
env.Append(CXXFLAGS=["-g","-fPIC"])
env.Append(CXXFLAGS=env["opt"])
env.Append(CXXFLAGS=env["warn"])
conf = Configure(env)
Help(opts.GenerateHelpText(env))
# Dead code: advisory messages about optimization flags, disabled via `if 0`.
if 0:
    if "-DUNSAFE" in env["opt"]:
        print "WARNING: do not compile with -DUNSAFE except for benchmarking or profiling"
    if re.search(r'-O[234]',env["opt"]):
        print "NOTE: compile with high optimization only for production use"
    else:
        print "NOTE: compiling for development (slower but safer)"
################################################################
### libraries
################################################################
### iulib (mandatory dependency; asserts abort configuration if missing)
env.Append(LIBPATH=["${iulib}/lib"])
env.Append(CPPPATH=["${iulib}/include"])
env.Append(LIBS=["iulib"])
assert conf.CheckLibWithHeader("iulib","iulib/iulib.h","C++");
assert conf.CheckHeader("colib/colib.h",language="C++")
# dl (do we need this?)
env.Append(LIBS=["dl"])
assert conf.CheckLib('dl')
### TIFF, JPEG, PNG
env.Append(LIBS=["tiff","jpeg","png","gif"])
assert conf.CheckLib('gif')
assert conf.CheckLib('tiff')
assert conf.CheckLib('jpeg')
assert conf.CheckLib('png')
# sources = [s for s in sources if not "/fst" in s]
### SDL (include if it's there in case iulib needs it)
if conf.CheckLibWithHeader('SDL', 'SDL/SDL.h', 'C'):
    if conf.CheckLibWithHeader('SDL_gfx', 'SDL/SDL_gfxPrimitives.h', 'C'):
        env.Append(LIBS=["SDL","SDL_gfx"])
### Leptonica (optional; header location differs per install method)
if env["lept"]:
    env.Append(CPPPATH='${leptonica}/include')
    env.Append(LIBS=["lept"])
    env.Append(CPPDEFINES=['HAVE_LEPTONICA'])
    if conf.CheckLibWithHeader('lept', ['stdlib.h', 'stdio.h', 'leptonica/allheaders.h'], 'C'):
        # This happens if you install it with apt-get install libleptonica-dev.
        env.Append(CPPPATH=['/usr/include/leptonica',
            '/usr/local/include/leptonica',
            '${leptonica}/include/leptonica'])
    elif conf.CheckLibWithHeader('lept', ['stdlib.h', 'stdio.h', 'liblept/allheaders.h'], 'C'):
        # This happens if you install from a tarball.
        env.Append(CPPPATH=['/usr/include/liblept',
            '/usr/local/include/liblept',
            '${leptonica}/include/liblept'])
    else:
        # And this probably doesn't happen unless you manually specify the path.
        assert conf.CheckLibWithHeader('lept', ['stdlib.h', 'stdio.h', 'allheaders.h'], 'C')
else:
    # Leptonica disabled: drop sources that depend on it.
    sources = [s for s in sources if not "leptonica" in s]
if env["sqlite3"]:
    assert conf.CheckLibWithHeader("sqlite3","sqlite3.h","C");
    env.Append(CPPDEFINES=['HAVE_SQLITE3'])
    env.Append(LIBS=["sqlite3"])
### gsl
if env["gsl"]:
    env.Append(CPPDEFINES=['HAVE_GSL'])
    env.Append(LIBS=["gsl","blas"])
# enable OpenMP for high optimization
if env["omp"]:
    env.Append(CXXFLAGS=["-fopenmp"])
    env.Append(LINKFLAGS=["-fopenmp"])
conf.Finish()
################################################################
### main targets
################################################################
env.Prepend(CPPPATH=glob("ocr-*"))
env.Prepend(LIBPATH=['.'])
# libocropus = env.StaticLibrary('libocropus.a',sources)
libocropus = env.SharedLibrary('libocropus',sources)
# env.Prepend(LIBS=[File("libocropus.so")])
################################################################
### install
################################################################
env.Install(destdir+libdir,libocropus)
env.Install(destdir+datadir + '/models', glob('data/models/*'))
env.Install(destdir+datadir + '/words', glob('data/words/*'))
for header in headers: env.Install(destdir+incdir,header)
# Gzipped models are decompressed straight into the install tree.
for file in glob('data/models/*.gz'):
    base = re.sub(r'\.gz$','',file)
    base = re.sub(r'^[./]*data/','',base)
    base = destdir+datadir+"/"+base
    env.Command(base,file,"gunzip -9v < %s > %s" % (file,base))
    env.Alias('install',base)
env.Alias('install',destdir+bindir)
env.Alias('install',destdir+libdir)
env.Alias('install',destdir+incdir)
env.Alias('install',destdir+datadir)
################################################################
### commands
################################################################
# Command-line binaries link against the shared library with an rpath into
# the iulib install tree.
penv = env.Clone()
penv.Append(LIBS=[File("libocropus.so")])
penv.Append(CCFLAGS=["-Xlinker","-rpath=${iulib}/lib"])
penv.Append(LINKFLAGS=["-Xlinker","-rpath=${iulib}/lib"])
for cmd in glob("commands/*.cc"):
    penv.Program(cmd,LIBS=File("libocropus.so"))
    penv.Install(destdir+bindir,re.sub('.cc$','',cmd))
################################################################
### unit tests
################################################################
if env["test"]:
    # A "test passes" marker file is touched only when the binary exits 0.
    test_builder = Builder(action='$SOURCE && touch $TARGET',
        suffix = '.passed',
        src_suffix = '')
    env.Append(BUILDERS={'Test':test_builder})
    for cmd in Glob("*/test-*.cc")+Glob("*/test*/test-*.cc"):
        cmd = str(cmd)
        penv.Program(cmd)
        print cmd
        cmd = re.sub('.cc$','',cmd)
        penv.Test(cmd)
################################################################
### style checking
################################################################
if env["style"]:
    os.system("utilities/check-style -f ocr-*")
| Python |
#!/usr/bin/python
# Build a directory-level #include dependency graph and render it with dot.
import sys,os,re,string,os.path,glob
# Guard: the script is meant to run from the ocropus top level.
# NOTE(review): this only fires when ocroscript/ is missing AND ocr-layout/
# exists; a directory where both are missing slips through -- confirm whether
# `not (isdir('ocroscript') and isdir('ocr-layout'))` was intended.
if not os.path.isdir("ocroscript") and os.path.isdir("ocr-layout"):
    sys.stderr.write("must run this script from the top directory of ocropus")
    sys.exit(1)
dirs = {}
edges = []
# Map each header/source base name to its directory, and record an edge
# (included file, including directory) for every local-style #include.
# (xreadlines is Python 2 only.)
files = glob.glob("*/*.h") + glob.glob("*/*.cc")
for file in files:
    dir = os.path.dirname(file)
    base = os.path.basename(file)
    dirs[base] = dir
    for line in open(file,"r").xreadlines():
        match = re.search(r'^#\s*include\s+"(.*?)"',line)
        if match:
            include = match.group(1)
            edges.append((include,dir))
            # print include,dir
# Collapse file-level edges into directory-to-directory edges, remembering
# which includes produced each edge.  Includes whose source directory is
# unknown (system or external headers) are dropped.
dedges = {}
for include,dir in edges:
    sdir = dirs.get(include,None)
    if sdir:
        l = dedges.get((sdir,dir),[])
        if not include in l:
            dedges[(sdir,dir)] = l+[include]
            # print sdir,"->",dir,"(",include,")"
# Emit the graph in Graphviz dot syntax.
stream = open("_deps.dot","w")
stream.write("""
digraph "Include Dependencies"
{
rankdir = "LR";
size = "8,20";
ratio = compress;
""")
# (note: `iter` shadows the builtin of the same name)
iter = dedges.keys()
for sdir,dir in iter:
    stream.write('"%s"'%sdir+"->"+'"%s"'%dir+";\n")
    # stream.write('"%s"'%sdir+"->"+'"%s"'%dir+" [label=\"%s\"];\n"%dedges[(sdir,dir)])
stream.write("""
}
""")
stream.close()
# Render to PNG with Graphviz.
out = "doc/includes/include-hierarchy.png"
os.system("dot -o %s -Tpng _deps.dot"%out)
sys.stderr.write("output in %s\n"%out)
| Python |
#!/usr/bin/python
# Report symbols that are defined (text section) in more than one static
# library -- a common source of link-order surprises.
import os,sys,re,string,glob
# Sanity check: the archives must already exist, i.e. the build must have run.
if len(glob.glob("*/*.a"))<10:
    sys.stderr.write("not enough libraries; you probably need to build ocropus first")
    sys.exit(255)
sources = {}
nsymbols = 0
# Scan all archives with nm (-o: prefix file name, -C: demangle) and map each
# defined text symbol ("T") to the list of files defining it.
for line in os.popen("nm -o -C */*.a","r").readlines():
    line = line[:-1]
    fields = line.split(None,3)
    if len(fields)!=3: continue
    source,kind,symbol = fields
    # strip the trailing ":<hex>" component nm appends to the file name
    source = re.sub(":[0-9a-zA-Z]*$","",source)
    if kind!="T": continue
    nsymbols += 1
    # skip duplicates of the same (symbol, source) pair
    if [s for s in sources.get(symbol,[]) if s==source]!=[]: continue
    # print [source,symbol]
    sources[symbol]= sources.get(symbol,[]) + [source]
# Report every symbol that appears in more than one file.
for symbol in sources:
    if len(sources[symbol])>1:
        print symbol,"is multiply defined:"
        for source in sources[symbol]: print " ",source
print "checked",nsymbols,"symbols"
| Python |
#!/usr/bin/python
# Copyright 2008 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project:
# File: check-am.py
# Purpose: identify files which are not handled in OCRopus automake
# Responsible: kofler
# Reviewer:
# Primary Repository:
# Web Sites: www.iupr.org, www.dfki.de
import os, sys, glob
# verify we are in the right folder, i.e. OCRopus top-level
if not os.path.exists('ocr-utils') or not os.path.exists('ocroscript'):
    print >> sys.stderr
    print >> sys.stderr, "This script must be run from the OCRopus top-level folder!"
    print >> sys.stderr
    exit(1)
if not os.path.exists('Makefile.am'):
    print >> sys.stderr
    print >> sys.stderr, "Makefile.am not found!"
    print >> sys.stderr
    # NOTE(review): unlike the folder check above, there is no exit() here or
    # in the next check; the script will crash later when it open()s the
    # missing file -- confirm whether exit(1) was intended.
if not os.path.exists('ocroscript/Makefile.am'):
    print >> sys.stderr
    print >> sys.stderr, "ocroscript Makefile.am not found!"
    print >> sys.stderr
def output(files, kind=""):
    """
    Produce some helpful output for maintaining automake.

    Args:
        files: list of file names that were not found in the Makefile.am text.
        kind: short label describing the category ("cc", "h", "pkg", ...).
    """
    if len(files) > 0:
        print
        print "These", kind, "files are not handled:"
        for src in files:
            print src
        print "---"
    else:
        print
        print "OK, all", kind, "files are handled."
# get all ocr-* and additional folders with sources
dirs = [ d for d in glob.glob('ocr-*') if os.path.isdir(d) ]
dirs.append('ext/voronoi')
# switch to this later
#pkgs = [ p for p in glob.glob('*/*.pkg') ]
# get all cc and h files
ccs = []
for d in dirs:
    ccs += glob.glob(d+"/*.cc")
hs = []
for d in dirs:
    hs += glob.glob(d+"/*.h")
# get all pkg files in ocroscript
pkgs = [ p for p in os.listdir('ocroscript') if p.endswith('.pkg') ]
# get all ocroscript sources
ocroccs = [ c for c in os.listdir('ocroscript') if c.endswith('.cc') ]
# read automake file
amfile = open('Makefile.am')
am = amfile.read()
amfile.close()
# read ocroscript automake file
osamfile = open('ocroscript/Makefile.am')
osam = osamfile.read()
osamfile.close()
# identify missing cc files, also mains and tests
# ("handled" is a plain substring test against the Makefile.am text)
missingccs = []
missingmains = []
missingtests = []
for src in ccs:
    if src not in am:
        if "main-" in src:
            missingmains.append(src)
        elif "test-" in src:
            missingtests.append(src)
        else:
            missingccs.append(src)
# identify missing h files
missinghs = []
for h in hs:
    if h not in am:
        missinghs.append(h)
# identify missing pkg files
missingpkgs = []
for p in pkgs:
    if p not in osam:
        missingpkgs.append(p)
# identify missing cc files for ocroscript
missingocroccs = []
for src in ocroccs:
    if src not in osam:
        missingocroccs.append(src)
print
print "Please remember: This script only checks if files are handled at all."
print "It does NOT check whether they are handled correctly!"
# output maintainance information for cc, h, main- and test- files
output(missingccs, "cc")
output(missinghs, "h")
output(missingpkgs, "pkg")
output(missingocroccs, "ocroscript cc")
output(missingmains, "main")
#output(missingtests, "test")
#print "dirs", dirs
#print "ccs", ccs
#print "hs", hs
#print pkgs
#print am
#!/usr/bin/python
import os,sys,re,string
def die(s):
    """Write the message to stderr and abort the script with exit status 1."""
    sys.stderr.write(s)
    raise SystemExit(1)
def run(s):
    """Echo a shell command to stderr, execute it, and return its exit status."""
    sys.stderr.write("# %s\n" % s)
    return os.system(s)
def tick(s):
    """Run a shell command and return its entire standard output as a string."""
    pipe = os.popen(s, "r")
    return pipe.read()
# Last argument is the source file under inspection; everything before it is
# passed through to gcc.  (Note: `file` shadows the Python 2 builtin.)
file = sys.argv[-1]
rest = sys.argv[1:-1]

# Scratch files used for the trial compilations.
temp = "___temp___.cc"
tempo = "___temp___.o"

def clean():
    """Remove the scratch files, silently ignoring any failure."""
    for scratch in (temp, tempo):
        try:
            os.remove(scratch)
        except:
            pass
# Baseline: copy the file aside, compile it unchanged, and checksum the
# resulting object file.  (string.join is Python 2 only.)
run("cp %s %s"%(file,temp))
if run("gcc %s -c %s"%(string.join(rest),temp))!=0:
    die("simple compile fails")
checksum = tick("md5sum %s"%tempo)
# Record the indices of all #include lines in the original source.
lines = open(file,"r").readlines()
includes = []
for i in range(len(lines)):
    if re.search(r'^\s*#\s*include',lines[i]):
        includes.append(i)
def write_skipping(file, lines, skips, soft=0):
    """Write lines to file, marking the lines whose indices are in skips.

    Hard mode (soft false): the skipped line is commented out with a leading
    '// '.  Soft mode: the line is kept, with a ' // NOT NEEDED' marker
    appended at the end of that line.

    Args:
        file: path of the output file.
        lines: list of source lines, each normally ending in a newline.
        skips: collection of line indices to mark.
        soft: when true, annotate instead of commenting out.
    """
    stream = open(file, "w")
    for i, line in enumerate(lines):
        if i in skips:
            if not soft:
                stream.write("// " + line)
            else:
                # BUG FIX: the marker used to be appended after the trailing
                # newline ("line + ' // NOT NEEDED'"), so it landed at the
                # start of the FOLLOWING line and corrupted the output.
                # Insert it before the newline so it annotates the skipped
                # line itself.
                stream.write(line.rstrip("\n") + " // NOT NEEDED\n")
        else:
            stream.write(line)
    stream.close()
# Trial-remove each #include in turn: recompile with it commented out and, if
# the object file's md5 is unchanged, record the include as removable.
skips = []
for k in range(len(includes)):
    clean()
    testcase = []  # unused
    write_skipping("___temp___.cc",lines,skips+[includes[k]])
    if run("gcc %s -c %s"%(string.join(rest),temp))==0:
        nchecksum = tick("md5sum %s"%tempo)
        if checksum!=nchecksum:
            # compiles, but produces different code -- keep the include
            print "WARNING checksum changed for ",lines[includes[k]][:-1]
        else:
            print "REMOVING",lines[includes[k]][:-1]
            skips.append(includes[k])
clean()
# Write the cleaned file next to the original; with fixincludes=modify in the
# environment, replace the original (keeping a .before-fix-includes backup).
# fixstyle=soft switches write_skipping to annotate-only mode.
if skips!=[]:
    write_skipping(file+".new",lines,skips,os.getenv("fixstyle")=="soft")
    if os.getenv("fixincludes")=="modify":
        os.rename(file,file+".before-fix-includes")
        os.rename(file+".new",file)
| Python |
#!/usr/bin/python
# Copyright 2007 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project:
# File:
# Purpose: simple style checker (run from top of project)
# Responsible: tmb
# Reviewer:
# Primary Repository:
# Web Sites: www.iupr.org, www.dfki.de
# FIXME add checks for include file ordering
# FIXME add checks for function length
# Command-line handling for the style checker (Python 2 script).
import os,re,getopt,sys
from os.path import join, getsize
# BUG FIX: 'l' was missing from the getopt string even though -l is
# documented in the help text below and read via options.has_key("-l"),
# so line-length checking could never be enabled.
optlist,args = getopt.getopt(sys.argv[1:],'hclsdfor:e:')
options = {}
for k,v in optlist: options[k] = v
linelength = options.has_key("-l")    # -l: enable line-length checks
deprecation = options.has_key("-d")   # -d: silence DEPRECATE warnings
fixmes = not options.has_key("-f")    # -f: silence FIXME reports
suppress = options.has_key("-s")      # -s: ignore CODE-OK-- suppressions
copyright = options.has_key("-c")     # NOTE: shadowed by a local in check()
oldstyle = options.has_key("-o")      # -o: flag old C-style comments
want_author = options.get("-r",None)
if want_author: want_author = want_author.lower()
want_svnauthor = options.get("-e",None)
# BUG FIX: this previously assigned to want_author, clobbering the -r
# filter and leaving want_svnauthor itself un-lowercased (so -e could
# never match the lowercased svn author computed in check()).
if want_svnauthor: want_svnauthor = want_svnauthor.lower()
help = """
usage: %s [-c] [-s] [-f] [-o] [-r responsible] [-e last_edited_by] dir...
-l: line length
-c: missing headers/copyrights
-f: FIXME lines
-o: old style C comments
-d: deprecation
-s: report problems despite suppress warning directives (CODE-OK--username)
-r: name: report problems only for files with Responsible: name
-e: name: report problems only for files last checked in by name
(Some options turn on a feature, others turn it off.)
""" %sys.argv[0]
if options.has_key("-h"):
    sys.stderr.write(help)
    sys.exit(0)
if args==[]:
    args = ["."]
# all remaining arguments must be directories
for arg in args:
    if not os.path.isdir(arg):
        sys.stderr.write("%s: not a directory\n"%arg)
        sys.stderr.write("\n")
        sys.stderr.write(help)
        sys.exit(1)
# set per-file by check(); read by warn()/warnonce()
author = None
svnauthor = None
def warn(file,line,message):
    """Report one problem in grep-friendly file:line:message form, tagged
    with the file's Responsible: author and last svn committer (module
    globals maintained by check())."""
    sys.stdout.write("%s:%d:%s [resp: %s, edit: %s]\n"%(file,line,message,author,svnauthor))
# Files already reported by warnonce(); maps path -> 1.
warned = {}
def warnonce(file,line,message):
    """Like warn(), but emits at most one report per file."""
    if file in warned:
        return
    warned[file] = 1
    sys.stdout.write("%s:%d:%s [resp: %s, edit: %s]\n"%(file,line,message,author,svnauthor))
def check(arg):
    """Walk the tree under arg and run all enabled style checks on every
    C/C++/pkg source file, reporting problems via warn()/warnonce().

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; verify nesting (especially the oldstyle/dead-code section and
    the final copyright checks) against the original file.
    """
    global author
    global svnauthor
    for root,dirs,files in os.walk(arg):
        # skip vendored/external/generated trees
        if re.search(r'(/.svn|/EXTERNAL|/ext|/doc|/data|ocr-voronoi|/CLEAN-ME-UP|/.deps)(/|$)',root): continue
        if root is '.':
            # at the top level, only recurse into the ocr* subprojects
            for d in dirs:
                if d.startswith('ocr'):
                    check(d)
            break
        #print "checking", root
        sources = [file for file in files if re.search("\.(c|cc|cpp|pkg|h)$",file)]
        for source in sources:
            # skip test/scratch/backup files
            if re.search(r'(^|/)(test|try|#|.)-',source): continue
            # don't check tolua-generated sources
            if source.endswith('.cc') and \
               os.path.exists(os.path.join(root,
                                           re.sub(r'\.cc$', '.pkg', source))):
                continue
            if source=="version.cc": continue
            path = os.path.join(root,source)
            author = None
            svnauthor = None
            # we do check for english output of svn info
            # hence we need to ensure we get it
            info = os.popen("LANG=C svn info '%s' 2>&1"%path).read(10000)
            m = re.search(r'Last Changed Author:\s+(\S+)',info)
            if m: svnauthor = m.group(1)
            # inside a working copy, skip files svn knows nothing about
            if os.path.isdir(".svn") and svnauthor==None: continue
            if svnauthor: svnauthor = svnauthor.lower()
            if want_svnauthor and want_svnauthor!=svnauthor: continue
            # only the first 10000 bytes are scanned for the header info
            info = open(path).read(10000)
            m = re.search(r'Responsible:\s+([a-zA-Z0-9_-]+)',info)
            if m: author = m.group(1).lower()
            if want_author and want_author!=author: continue
            lnumber = 0
            stream = open(path,"r")
            # bitmask of the three required copyright header lines
            copyright = 0
            # bitmask: 1 = saw #ifndef guard, 2 = saw matching #define
            includeguard = 0
            includeguard_name = "###"
            modeline = 0
            info_responsible = 0
            for line in stream.readlines():
                lnumber += 1
                if fixmes:
                    m = re.search(r'FIXME.*',line)
                    if m: warn(path,lnumber,m.group(0))
                if not deprecation:
                    m = re.search(r'#if.*DEPRECATE.*',line)
                    if m: warn(path,lnumber,m.group(0))
                    m = re.search(r'DEPRECATE *;',line)
                    if m: warn(path,lnumber,m.group(0))
                if not suppress:
                    # lines carrying a CODE-OK--user tag are exempt
                    if re.search(r'CODE-OK--[a-z]+',line): continue
                # formatting
                if linelength and len(line)>132:
                    warn(path,lnumber,"line too long")
                if re.search(r'^\s+#',line):
                    warn(path,lnumber,"preprocessor directive doesn't start at beginning")
                if "\t" in line:
                    warnonce(path,lnumber,'file contains tabs')
                # C++ style
                if oldstyle:
                    if not re.search(r'\.pkg$',source) and (re.search(r'^\s*/\*[^*]',line) or re.search(r'^( | )\* ',line)):
                        warn(path,lnumber,"old style comment")
                if re.search(r'^\s*#if\s+0',line):
                    warnonce(path,lnumber,"dead code")
                if re.search(r'^\s*#if\s*\(\s*0\s*\)',line):
                    warnonce(path,lnumber,"dead code")
                if re.search(r'^( | | | | | )({|if|for|while|void|int|float|})',line):
                    warnonce(path,lnumber,"wrong indentation (should be multiple of 4)")
                if re.search(r'[a-z0-9]+\s+\(.*,(?i)',line) and \
                   re.search(r'^ ',line) and \
                   not re.search(r'//|"|/\*|printf|\b(if|for|return|while|sizeof)\b',line):
                    warn(path,lnumber,'space before argument list')
                if re.search(r'template\s*[<].*\(.*\).*\{',line):
                    warn(path,lnumber,'template on same line as function definition')
                # headers
                if re.search(r'-\*- C\+\+ -\*-',line): modeline = 1
                if re.search(r'^\s*#include.*<string>',line): warn(path,lnumber,'consider using strbuf instead of string')
                elif re.search(r'^\s*#include.*<limits>',line): warn(path,lnumber,'consider using C99 equivalent')
                elif re.search(r'^\s*#include.*<[a-z]*stream>',line): warn(path,lnumber,'use stdio instead of iostream')
                elif re.search(r'^\s*#include.*<[a-z]+>',line): warn(path,lnumber,'C++ library reference')
                if re.search(r'Copyright [0-9]+ Deutsches Forschungszentrum',line): copyright |= 1
                if re.search(r'You may not use this file except under the terms of the accompanying license',line): copyright |= 2
                if re.search(r'WITHOUT WARRANTIES',line): copyright |= 4
                m = re.search(r'Responsible: (\S*)',line)
                if m:
                    responsible = m.group(1)
                    if not re.match(r'[a-z][a-z](?i)',responsible):
                        warnonce(path,lnumber,'no responsible author')
                    info_responsible = 1
                m = re.search(r'#ifndef\s+([a-zA-Z0-9_]+)',line)
                if m:
                    includeguard |= 1
                    includeguard_name = m.group(1)
                if re.search(r'#define\s+'+includeguard_name,line):
                    includeguard |= 2
            stream.close()
            # NOTE(review): the inner copyright!=7 test is redundant under
            # "not copyright" (only value 0 reaches it) -- possibly the
            # outer condition was meant to be copyright!=7; verify intent.
            if not copyright:
                if copyright!=7:
                    warn(path,1,'insufficient copyright header')
            elif not info_responsible:
                warn(path,1,'no Responsible: header')
            if re.search(r'\.h$',path) and includeguard!=3: warn(path,1,'missing include guard')
# Run the checker over every directory given on the command line.
for arg in args:
    check(arg)
print "all checks completed"
| Python |
#!/usr/bin/python
# Copyright 2007 Deutsches Forschungszentrum fuer Kuenstliche Intelligenz
# or its licensors, as applicable.
#
# You may not use this file except under the terms of the accompanying license.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Project:
# File:
# Purpose: simple consistency checks for Jamfiles (run from top of project)
# Responsible: tmb
# Reviewer:
# Primary Repository:
# Web Sites: www.iupr.org, www.dfki.de
import os,re,getopt,sys
from os.path import join, getsize
def search(r,s):
    """re.search wrapper that also stashes the match object in the module
    global 'groups', so a caller can test the result and then pull capture
    groups out without re-matching."""
    global groups
    match = re.search(r,s)
    groups = match
    return match
def warn(s):
    # Report a Jamfile problem in file:line:message form, followed by the
    # offending line (globals are maintained by the scanning loop below).
    global jamfile,nr,line
    print "%s:%d:%s"%(jamfile,nr,s)
    print "  ",line
# NOTE(review): indentation reconstructed from a whitespace-stripped source;
# verify nesting against the original file.
# Scan every Jamfile in the tree and cross-check its SubDir/ImportDir
# declarations and referenced source files against the filesystem.
for jamfile in os.popen("find . -name Jamfile -print","r").readlines():
    jamfile = jamfile[:-1]
    features = []
    nr = 0
    for line in open(jamfile,"r").readlines():
        jamdir = re.sub(r'/[^/]*$','',jamfile)
        jamdir = re.sub(r'^\./','',jamdir)
        nr += 1
        line = line[:-1]
        if search(r'SubDir\s+TOP\s+(\S+(\s+\S+)*)\s*;',line):
            # NOTE(review): list += string extends character-by-character;
            # harmless here only because 'features' is never read
            features += "subdir"
            dir = groups.group(1)
            dir = re.sub(r'\s+','/',dir)
            # the SubDir path must match the Jamfile's actual directory
            if dir!=jamdir: warn("bad SubDir: "+dir+" wanted: "+jamdir)
        elif jamfile!="./Jamfile" and search("SubDir",line):
            warn("bad SubDir syntax")
        elif search(r'ImportDir\s+TOP\s+(\S+(\s+\S+)*)\s*;',line):
            features += "importdir"
            dir = groups.group(1)
            dir = re.sub(r'\s+','/',dir)
            if not os.path.isdir(dir): warn("bad ImportDir: "+dir+" wanted: "+jamdir)
        elif search("ImportDir",line):
            warn("bad ImportDir syntax")
        elif search(r'[^>]\b([a-zA-Z0-9_-]+\.(?:cc|h))\b',line):
            # every .cc/.h mentioned must exist next to the Jamfile
            for file in groups.groups():
                path = jamdir+"/"+file
                if not os.path.exists(path):
                    warn("file does not exist: "+path)
print "all checks completed"
| Python |
#!/usr/bin/python
import sys,os,re,string,os.path,glob
# Must run from the top of an ocropus checkout (sanity-checked via a
# directory that always exists there).
if not os.path.exists("ocr-utils"):
    sys.stderr.write("must be run from top level of ocropus installation")
    sys.exit(1)
# all headers and implementation files, one directory level down
sources = glob.glob("*/*.h") + glob.glob("*/*.cc")
class UnionFind:
    """Disjoint-set forest over arbitrary hashable objects, with path
    compression and union by weight.  Objects are mapped internally to
    small integer ids."""
    def __init__(self):
        self.num_weights = {}       # root id -> size of its set
        self.parent_pointers = {}   # id -> parent id (roots point to self)
        self.num_to_objects = {}    # id -> object
        self.objects_to_num = {}    # object -> id
        # kept from the original: instance attribute only; does not
        # change repr() of instances
        self.__repr__ = self.__str__
    def insert_objects(self, objects):
        """Register each object in the iterable as its own singleton set."""
        for obj in objects:
            self.find(obj);
    def find(self, object):
        """Return the representative object of object's set; unknown
        objects are inserted as fresh singletons first."""
        if object not in self.objects_to_num:
            fresh = len(self.objects_to_num)
            self.num_weights[fresh] = 1
            self.objects_to_num[object] = fresh
            self.num_to_objects[fresh] = object
            self.parent_pointers[fresh] = fresh
            return object
        # climb to the root, recording the path
        path = [self.objects_to_num[object]]
        root = self.parent_pointers[path[-1]]
        while root != path[-1]:
            path.append(root)
            root = self.parent_pointers[root]
        # path compression: repoint everything on the path at the root
        for node in path:
            self.parent_pointers[node] = root
        return self.num_to_objects[root]
    def union(self, object1, object2):
        """Merge the sets containing object1 and object2 (no-op if they
        are already in the same set); the heavier root wins."""
        root1 = self.find(object1)
        root2 = self.find(object2)
        if root1 == root2:
            return
        id1 = self.objects_to_num[root1]
        id2 = self.objects_to_num[root2]
        w1 = self.num_weights[id1]
        w2 = self.num_weights[id2]
        if w1 < w2:
            id1, id2 = id2, id1
            w1, w2 = w2, w1
        self.num_weights[id1] = w1+w2
        del self.num_weights[id2]
        self.parent_pointers[id2] = id1
    def __str__(self):
        """Render the partition as comma-separated member lists.
        (Python 2 only: uses xrange/itervalues, as the original did.)"""
        sets = {}
        for i in xrange(len(self.objects_to_num)):
            sets[i] = []
        for obj in self.objects_to_num:
            sets[self.objects_to_num[self.find(obj)]].append(obj)
        return ', '.join([repr(members) for members in sets.itervalues() if members])
def unique(l):
    """Return the distinct elements of l as dict keys (order unspecified)."""
    return {k: 1 for k in l}.keys()
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Extract (derived, base) inheritance edges from all C++ sources by
# regex-scanning class/struct declarations, then group the edges into
# connected components with a union-find.
edges = []
for file in sources:
    text = open(file,"r").read()
    # strip comments and newlines so declarations can be matched flat
    text = re.sub(r'/\*.*?\*/',' ',text)
    text = re.sub(r'//.*?\n',' ',text)
    text = re.sub("\n"," ",text)
    print file
    for cls in re.findall(r'(?:\s|^)((?:class|struct)\s[^.*/!]*?)[{;]',text):
        print cls
        m = re.search(r'(?:class|struct)\s+(\S+)\s*:\s*(.*)',cls)
        if m:
            derived = m.group(1).strip()
            for base in re.split(r'\s*,\s*',m.group(2)):
                # normalize away access/virtual specifiers and colib::
                base = re.sub(r'\s*public\s*','',base.strip())
                base = re.sub(r'colib::','',base)
                derived = re.sub(r'colib::','',derived)
                base = re.sub(r'\s*virtual\s*','',base)
                derived = re.sub(r'\s*virtual\s*','',derived)
                edges.append((derived,base))
                sys.stderr.write("%s: %s %s\n"%(file,derived,base))
edges = unique(edges)
objects = unique([k for k,v in edges] + [v for k,v in edges])
uf = UnionFind()
for derived,base in edges:
    uf.find(derived)
    uf.find(base)
    uf.union(derived,base)
# bucket edges by the representative of their connected component
sets = {}
for derived,base in edges:
    key = uf.find(derived)
    sets[key] = sets.get(key,[]) + [(derived,base)]
dir = "doc/hierarchy"
def graph(stream,name,edges):
    """Write a graphviz digraph called `name` to stream, with one
    directed edge per (derived, base) pair."""
    header = """
digraph "%s"
{
rankdir = "LR";
size = "8,20";
ratio = compress;
""" % name
    stream.write(header)
    for derived,base in edges:
        stream.write('"%s"->"%s";\n' % (derived,base))
    stream.write("""
}
""")
# Emit one .dot/.png diagram per connected component plus an index page.
# Requires graphviz's "dot" on PATH and the doc/hierarchy directory to exist.
html = open(dir+"/index.html","w")
count = 1
for key in sets.keys():
    set = sets[key]
    prefix = dir+("/graph%03d"%count)
    stream = open(prefix+".dot","w")
    graph(stream,"graph%d"%count,set)
    stream.close()
    os.system("dot -Tpng %s.dot -o %s.png"%(prefix,prefix))
    html.write("<h2>Diagram %d</h2>\n"%count)
    html.write("<p>\n")
    html.write("<img src='graph%03d.png'\n"%count)
    html.write("<p>\n")
    count += 1
html.close()
| Python |
#!/usr/bin/python
import os,string,glob,sys,getopt,re
script = sys.argv[0]
assert os.path.exists(script)
def uptodate(source=None,target=None):
    """Make-style freshness test: truthy iff target exists and is strictly
    newer than source.  Returns 0 (rebuild needed) when target is missing."""
    if os.path.exists(target):
        return os.path.getmtime(source) < os.path.getmtime(target)
    return 0
# Return the given keyword arguments as a plain dict (shorthand used to
# build the parameter table below with identifier-style keys).
def keywords(**keywords): return keywords
# Parameters exported into hocr-extract-g1000's environment by run():
# extract per-character "ocr_cinfo" elements, short tokens only,
# filtered through the system dictionary.
extraction_params = keywords(
    element="ocr_cinfo",
    max_lines=4000,
    min_len=2,
    max_len=10,
    regex=r'^([A-Za-z][a-z-]*[,.;?!-]?|[0-9]+)$',
    dict="/usr/share/dict/words")
# character-recognizer network used by align-transcription below
nnet_file = "/usr/local/share/ocropus/models/neural-net-file.nn"
def base(s):
    """Strip a trailing filename extension (a dot followed by word
    characters) from s; return s unchanged if there is none."""
    m = re.search(r'\.\w+$',s)
    if m:
        return s[:m.start()]
    return s
def run(command,*args,**keywords):
    # Run command with the given arguments, waiting for completion; each
    # keyword argument is exported (stringified) into the child's
    # environment -- this is how ocroscript tools receive parameters.
    print "#",command,string.join(args)
    env = os.environ.copy()
    for k in keywords: env[k] = str(keywords[k])
    os.spawnvpe(os.P_WAIT,command,[command]+list(args),env)
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Per-volume pipeline: binarize pages, extract line images from the hOCR
# ground truth, then align each line image with its transcription.
# Steps are skipped when their outputs are newer than script+inputs.
for vol in sys.argv[1:]:
    # perform page-level binarization
    for page in glob.glob(vol+"/Image_????.JPEG"):
        if uptodate(script,base(page)+".png") and uptodate(page,base(page)+".png"):
            continue
        run("ocroscript","sauvola",
            page,
            base(page)+".png",
            sauvola_k=0.2)
    # extract line elements from the pages
    sample = glob.glob(vol+"/????/????.png")
    if sample==[] or not uptodate(script,sample[0]):
        run("hocr-extract-g1000",
            vol+"/hOCR.html",
            vol+"/Image_????.png",
            vol+"/%04d/%04d",
            **extraction_params)
    # align each line with its corresponding transcription
    lines = glob.glob(vol+"/????/????.png")
    print len(lines),"lines to be processed"
    for line in lines:
        # .costs is the final per-line output of the alignment
        if uptodate(source=base(line)+".png",target=base(line)+".costs"):
            continue
        run("ocroscript","line-clean",
            line,
            base(line)+".clean.png")
        run("ocroscript","align-transcription","--cut",
            base(line)+".clean.png",
            base(line)+".txt",
            base(line)+".cseg.png",
            base(line)+".rseg.png",
            base(line)+".costs",
            bpnet=nnet_file)
| Python |
#!/usr/bin/python
# these are possible justifications; please stick to one of them
# Accepted justifications for exceeding the complexity limits.
STANDARD = "it's a textbook algorithm"
APIDOCS = "it's based on a sample from the API documentation of the corresponding library"
PERFORMANCE = "strict inlining is demonstrably necessary for performance"
STABLE = "we don't expect to be making any changes to this code"
DEBUGGING = "code used for debugging"
# (location-pattern, justification, responsible person) triples; functions
# whose pmccabe location matches the pattern are exempt from the limits.
exceptions = [
    (r'quicksort',STANDARD,"tmb"),
    (r'rowsort',STANDARD,"tmb"),
    (r'io_png',APIDOCS,"tmb"),
    (r'io_pbm',STABLE,"tmb"),
    (r'debug_array',DEBUGGING,"tmb"),
    (r'/commands/',DEBUGGING,"tmb"),
    (r'seg-cuts',STABLE,"tmb"),
    ]
################################################################
# nothing configurable below this line
################################################################
# Command-line handling for the pmccabe-based complexity checker.
import sys,os,string,re,getopt
# BUG FIX: -r takes an argument, so it needs a trailing colon in the
# getopt string; with plain 'jr' the value was never captured and
# options.get("-r") could only ever yield the empty string.
optlist,args = getopt.getopt(sys.argv[1:],'jr:')
options = {}
for k,v in optlist: options[k] = v
justifications = options.has_key("-j")   # -j: list the justified exceptions
want_author = options.get("-r",None)     # -r name: restrict to one author
if want_author: want_author = want_author.lower()
if os.system("pmccabe -V > /dev/null")!=0:
    sys.stderr.write("you must install the pmccabe tool (apt-get install pmccabe)")
    sys.exit(1)
maxlines = 150   # maximum acceptable function length
maxcyc1 = 15     # maximum acceptable cyclomatic complexity
temp = ".___temp___.cc"
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Run pmccabe over every C++ source and report functions exceeding the
# complexity/length limits, unless they match a justified exception.
for file in os.popen("find . -name '*.cc' -o -name '*.h'","r").readlines():
    file = file[:-1]
    # open each of the source files
    source = open(file,"r").read()
    # check the responsible person
    m = re.search(r'Responsible:\s+([a-zA-Z0-9_-]+)',source)
    if m:
        author = m.group(1).lower()
    else:
        author = "nobody"
    if want_author and want_author!=author: continue
    # pmccabe dies on templates and namespaces, so we remove those
    if re.search('tolua_.*_open',source): continue
    source = re.sub(r'template *\<.*?\>',r'',source)
    source = re.sub(r'namespace[ a-zA-Z0-9_]*{',r'',source)
    stream = open(temp,"w"); stream.write(source); stream.close()
    # read the output from pmccabe and try to find bad cases
    for line in os.popen("pmccabe %s 2> .pmccabe.errs"%temp):
        # artifact of the namespace stripping above
        if "too many }'s" in line: continue
        line = line[:-1]
        line = line.replace(temp,file)
        fields = string.split(line,"\t")
        try:
            cyc1,cyc,nstat,where,nlines,location = fields
        except:
            sys.stderr.write("OOPS %s\n"%line)
            continue
        cyc1=int(cyc1)
        nlines=int(nlines)
        # rewrite "file(123): " into grep-friendly "file:123:"
        location = re.sub(r'\(([0-9]+)\)\s*:\s*',r':\1:',location)
        if cyc1>maxcyc1 or nlines>maxlines:
            exception = None
            for e in exceptions:
                pattern,reason,person = e
                # NOTE(review): match-object > 0 works only in Python 2's
                # mixed-type comparison rules
                if re.search(pattern,location)>0:
                    exception = e
            if not justifications:
                # print everything that is not an exception
                if not exception:
                    sys.stdout.write("%s cyc=%d nlines=%d [%s]\n"%(location,cyc1,nlines,author))
            else:
                # print all the justifications
                if exception:
                    pattern,reason,person = exception
                    sys.stdout.write("%s cyc=%d nlines=%d [%s] %s\n"%(location,cyc1,nlines,person,reason))
| Python |
#!/usr/bin/python
# extract lines from Google 1000 book sample
import string,re,sys,os,shutil,glob
import xml.sax as sax
from PIL import Image
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
usage = """
... hocr image_pattern output_prefix
Process Google 1000 books volumes and prepares line or word images
for alignment using OCRopus.
Run ocroscript align-... Volume_0000/0000/0000.{png,txt}
Arguments:
hocr: hocr source file
image_pattern: either a glob pattern that results in a list
of image files in order, or @filename for a file containing
a list of image files in order; DON'T FORGET TO QUOTE THIS
output_pattern: output images are of the form
output_pattern%(pageno,lineno)
Environment Variables:
element="ocr_line": which element to extract; ocrx_word and
ocr_cinfo are also useful
regex=".": the text for any transcription must match this pattern
dict=None: a dictionary; if provided, all the words in any line
that's output by the program must occur in the dictionary
min_len=20: minimum length of text for which lines are output
max_len=50: maximum length of text for which lines are output
max_lines=1000000: maximum number of lines output
pad=2: pad the bounding box by this many pixels prior to extraction
output_format=png: format for line image files
"""
# exactly three positional arguments are required (plus argv[0])
if len(sys.argv)!=4:
    sys.stderr.write(usage)
    print "args:",sys.argv
    sys.exit(1)
exe,hocr,image_pattern,output_pattern = sys.argv
# "@file" means: read the (sorted) image list from a file, one per line
if image_pattern[0]=="@":
    image_list = open(image_pattern[1:]).readlines()
    image_list = [s[:-1] for s in image_list]
    image_list.sort()
else:
    image_list = glob.glob(image_pattern)
    image_list.sort()
if not os.path.exists(hocr):
    sys.stderr.write(hocr+": not found")
    sys.exit(1)
# tunables come from the environment (see usage above)
element = os.getenv("element","ocr_line")
regex = os.getenv("regex",".")
min_len = int(os.getenv("min_len","5"))
max_len = int(os.getenv("max_len","50"))
dict = None
dictfile = os.getenv("dict")
max_lines = int(os.getenv("max_lines","1000000"))
pad = int(os.getenv("pad","2"))
output_format = os.getenv("output_format","png")
# optional word filter: load the dictionary as a lowercase membership map
if dictfile:
    stream = open(dictfile,"r")
    words = stream.read().split()
    stream.close()
    dict = {}
    for word in words: dict[word.lower()] = 1
    print "[read %d words from %s]\n"%(len(words),dictfile)
def check_dict(dict,s):
    """Return 1 if every whitespace-separated word of s occurs
    (case-insensitively) in dict, else 0.  A None/empty dict accepts
    everything; empty words are ignored."""
    if not dict:
        return 1
    for word in re.split(r'\s+',s):
        if word and not dict.get(word.lower()):
            return 0
    return 1
def write_string(file,text):
    # Write a unicode string to file as UTF-8 (Python 2 idiom: encode()
    # yields a byte string which is written through a text-mode handle).
    stream = open(file,"w")
    stream.write(text.encode("utf-8"))
    stream.close()
def get_prop(title,name):
    """Return the value of property `name` from an hOCR title attribute.

    title is a semicolon-separated property list such as
    "bbox 0 0 100 20; textangle 0"; the text after the property name is
    returned, or None when the property is absent.
    """
    props = title.split(';')
    for prop in props:
        fields = prop.split(None,1)
        # BUG FIX: the old tuple unpacking crashed with ValueError on a
        # valueless entry (e.g. a stray ";foo;"); skip such entries.
        if len(fields) < 2:
            continue
        (key,args) = fields
        if key==name: return args
    return None
class docHandler(sax.handler.ContentHandler):
    """SAX handler that walks an hOCR document, crops the configured
    element's bounding box out of the matching page image, and writes the
    image plus ground-truth text per line.  Relies on the module-level
    configuration (element, regex, min_len, ..., image_list).

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; verify nesting against the original file.
    """
    def __init__(self):
        self.element = element
        self.regex = regex
    def startDocument(self):
        self.total = 0
        self.pageno = -1
        self.text = None
        self.depth = 0
        # depth at which the currently-collected element started (-1: none)
        self.start = -1
        self.copied = {}
    def endDocument(self):
        pass
    def startElement(self,name,attrs):
        self.depth += 1
        # pages advance through image_list in document order
        if attrs.get("class","")=="ocr_page":
            self.lineno = -1
            self.pageno += 1
            self.page = image_list[self.pageno]
            self.image = Image.open(self.page)
        if attrs.get("class","")==self.element:
            self.lineno += 1
            props = attrs.get("title","")
            self.bbox = get_prop(props,"bbox")
            self.start = self.depth
            self.text = u""
    def endElement(self,name):
        if self.depth==self.start:
            # emit only transcriptions passing the length/regex/dict filters
            if len(self.text)>=min_len and \
               len(self.text)<=max_len and \
               re.match(self.regex,self.text) and \
               check_dict(dict,self.text):
                print self.page,self.bbox,self.text.encode("utf-8")
                w,h = self.image.size
                x0,y0,x1,y1 = [int(s) for s in self.bbox.split()]
                assert y0<y1 and x0<x1 and x1<=w and y1<=h
                # pad the box, clamped to the page
                x0 = max(0,x0-pad)
                y0 = max(0,y0-pad)
                x1 = min(w,x1+pad)
                y1 = min(h,y1+pad)
                limage = self.image.crop((x0,y0,x1,y1))
                base = output_pattern%(self.pageno,self.lineno)
                basedir = os.path.dirname(base)
                if not os.path.exists(basedir): os.makedirs(basedir)
                limage.save(base+"."+output_format)
                write_string(base+".gt.txt",self.text)
                write_string(base+".bbox",self.bbox)
                self.total += 1
                if self.total>=max_lines: sys.exit(0)
            self.text = None
            self.start = -1
        self.depth -= 1
    def characters(self,content):
        # accumulate text only while inside a tracked element
        if self.text!=None:
            self.text += content
# Normalize the hOCR through tidy into XHTML, then stream it through the
# SAX handler above.
parser = sax.make_parser()
parser.setContentHandler(docHandler())
stream = os.popen("tidy -q -wrap 9999 -asxhtml < %s 2> /tmp/tidy_errs"%hocr,"r")
parser.parse(stream)
| Python |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import string,re,sys,os,shutil,glob
import xml.sax as sax
from PIL import Image
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
usage = """
... hocr image_pattern output_prefix
Arguments:
xml_pattern: either a glob pattern that results in a list
of xml files, or @filename for a file containing
a list of image files in order; DON'T FORGET TO QUOTE THIS
image_pattern: either a glob pattern that results in a list
of image files, or @filename for a file containing
a list of image files in order; DON'T FORGET TO QUOTE THIS
output_pattern: output images are of the form
output_pattern%(pageno,lineno)
Environment Variables:
element="ln": which element to extract; ln or wd
regex=".": the text for any transcription must match this pattern
dict=None: a dictionary; if provided, all the words in any line
that's output by the program must occur in the dictionary
min_len=5: minimum length of text for which lines are output
max_len=200: maximum length of text for which lines are output
max_lines=1000000: maximum number of lines output
pad=2: pad the bounding box by this many pixels prior to extraction
output_format=png: format for line image files
"""
if len(sys.argv)!=4:
    sys.stderr.write(usage)
    print "args:",sys.argv
    sys.exit(1)
exe,xml_pattern,image_pattern,output_pattern = sys.argv
# "@file" means: read the (sorted) list from a file, one entry per line
if image_pattern[0]=="@":
    image_list = open(image_pattern[1:]).readlines()
    image_list = [s[:-1] for s in image_list]
    image_list.sort()
else:
    image_list = glob.glob(image_pattern)
    image_list.sort()
if xml_pattern[0]=="@":
    xml_list = open(xml_pattern[1:]).readlines()
    xml_list = [s[:-1] for s in xml_list]
    xml_list.sort()
else:
    xml_list = glob.glob(xml_pattern)
    xml_list.sort()
# the lists are paired positionally, so they must be the same length
if len(image_list) != len(xml_list):
    print "different number of images and xml files"
    sys.exit(-1);
# tunables come from the environment (see usage above)
element = os.getenv("element","ln")
regex = os.getenv("regex",".")
min_len = int(os.getenv("min_len","5"))
max_len = int(os.getenv("max_len","200"))
dict = None
dictfile = os.getenv("dict")
max_lines = int(os.getenv("max_lines","1000000"))
pad = int(os.getenv("pad","2"))
output_format = os.getenv("output_format","png")
# optional word filter: load the dictionary as a lowercase membership map
if dictfile:
    stream = open(dictfile,"r")
    words = stream.read().split()
    stream.close()
    dict = {}
    for word in words: dict[word.lower()] = 1
    # print "[read %d words from %s]\n"%(len(words),dictfile)
def check_dict(dict,s):
    """Return 1 if every word of s (maximal runs of word characters)
    occurs case-insensitively in dict, else 0.  A None/empty dict accepts
    everything; empty fragments are ignored."""
    if not dict:
        return 1
    for word in re.split(r'\W+',s):
        if word and not dict.get(word.lower()):
            return 0
    return 1
def write_string(file,text):
    # Write a unicode string to file as UTF-8 (Python 2 idiom: encode()
    # yields a byte string which is written through a text-mode handle).
    stream = open(file,"w")
    stream.write(text.encode("utf-8"))
    stream.close()
def get_prop(title,name):
    """Return the value of property `name` from a semicolon-separated
    property list (e.g. an hOCR title attribute); None when absent."""
    props = title.split(';')
    for prop in props:
        fields = prop.split(None,1)
        # BUG FIX: the old tuple unpacking crashed with ValueError on a
        # valueless entry; skip such entries instead.
        if len(fields) < 2:
            continue
        (key,args) = fields
        if key==name: return args
    return None
class DocHandler(sax.handler.ContentHandler):
    """SAX handler for one page: crops each "ln" (line) or "wd" (word)
    element's bounding box out of the page image and writes the image and
    ground-truth text.  Configuration comes from module-level globals.

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; verify nesting against the original file.
    """
    def __init__(self, pageno, image):
        self.element = element
        self.regex = regex
        self.pageno = pageno
        self.image = image
        # matches whitespace-only character data (ignored below)
        self.p = re.compile("^( )*$")
    def startDocument(self):
        self.lineno = 0
        self.total = 0
        self.text = None
    def endDocument(self):
        pass
    def startElement(self,name,attrs):
        if name == self.element:
            # "ln" and "wd" carry their bounding box in different attributes
            if name == "ln":
                self.l = int(attrs.get("lx",""))
                self.t = int(attrs.get("ly",""))
                self.r = int(attrs.get("rx",""))
                self.b = int(attrs.get("ry",""))
            if name == "wd":
                self.l = int(attrs.get("l",""))
                self.t = int(attrs.get("t",""))
                self.r = int(attrs.get("r",""))
                self.b = int(attrs.get("b",""))
            self.text = u""
            self.lineno += 1
    def endElement(self,name):
        if name == self.element:
            # emit only transcriptions passing the length/regex/dict filters
            if len(self.text) >= min_len and \
               len(self.text) <= max_len and \
               re.match(self.regex,self.text) and \
               check_dict(dict,self.text):
                w,h = self.image.size
                # pad the box, clamped to the page
                x0 = max(0,self.l-pad)
                y0 = max(0,self.t-pad)
                x1 = min(w,self.r+pad)
                y1 = min(h,self.b+pad)
                limage = self.image.crop((x0,y0,x1,y1))
                base = output_pattern%(self.pageno, self.lineno)
                basedir = os.path.dirname(base)
                if not os.path.exists(basedir):
                    os.makedirs(basedir)
                imgFile = base + "." + output_format
                txtFile = base+".gt.txt"
                limage.save(imgFile)
                write_string(txtFile, self.text)
                #write_string(base+".bbox",self.bbox)
                print x0, y0, x1, y1
                print imgFile
                print txtFile
                print self.text.encode("utf-8")
                print
                self.total += 1
                if self.total >= max_lines:
                    sys.exit(0)
            self.text = None
    def characters(self, content):
        # join non-blank character chunks with single spaces
        if self.text != None and not self.p.match(content):
            if self.text != u"":
                self.text += " "
            self.text += content
# Echo the effective configuration, then process each (xml, image) pair;
# pages are numbered from 1.
print "element: " , element
print "regex: " , regex
print "min_len: " , min_len
print "max_len: " , max_len
print "max_lines: " , max_lines
print "pad: " , pad
print "dict: " , dict
print "output_format: " , output_format
for i in range(len(image_list)):
    #stream = os.popen("tidy -q -wrap 9999 -asxhtml < %s 2> /tmp/tidy_errs" % xml_list[i],"r")
    pageno = i+1
    print "\n\n%04d" % pageno, xml_list[i], image_list[i]
    parser = sax.make_parser()
    parser.setContentHandler(DocHandler(pageno, Image.open(image_list[i])))
    stream = open(xml_list[i],"r")
    parser.parse(stream)
    stream.close()
| Python |
#!/usr/bin/python
import os,sys,os.path,glob,re,string,random
from pylab import *
# FIXME
# -- fix boxplot to use list-of-list representation (currently broken)
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Read "cost <page> <cost> <value>" records (stdin or a file argument),
# group values by cost, and boxplot value distributions per cost with
# successively simpler fallbacks for older matplotlib versions.
stream = sys.stdin
if len(sys.argv)>1: stream = open(sys.argv[1],"r")
data = []
for line in stream.xreadlines():
    f = line.split()
    if len(f)>0 and f[0]=="cost":
        cost = float(f[2])
        value = float(f[3])
        data.append((cost,value))
# histogram: cost -> list of observed values
hists = {}
for cost,value in data:
    hists.setdefault(cost,[]).append(value)
keys = hists.keys()
keys.sort()
values = [array(hists[key]) for key in keys]
for value in values: value.shape = (-1,1)
try:
    boxplot(x=values,positions=keys)
except:
    sys.stderr.write("(list boxplot failed, trying array)\n")
    try:
        values = concatenate(values,1) # FIXME -- we shouldn't need this
        boxplot(x=values,positions=keys)
    except:
        sys.stderr.write("(array boxplot failed, trying simple boxplot)\n")
        boxplot(x=values)
show()
| Python |
#!/usr/bin/python
import os,sys,os.path,glob,re,string,random
# path to the ocropus driver binary, relative to this script's CWD
ocropus = "../ocropus-cmd/ocropus"
# characters OUTSIDE this set are stripped before comparing OCR output
simp_re = re.compile(r'[^a-zA-Z0-9,.+=/*?!-]+')
def die(s):
    """Print a fatal error on stderr and abort with exit status 1."""
    sys.stderr.write("FATAL: %s\n" % (s,))
    sys.exit(1)
def ensure(s):
    """Abort (via die) unless the path s exists."""
    if os.path.exists(s):
        return
    die("%s: file not found" % s)
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Run ocropus line recognition over every line image that has a .txt
# ground truth, reporting recognizer error messages ("badmsg") and
# mismatching transcriptions ("badocr").
ensure(ocropus)
directory = "../data/lines"
if len(sys.argv)>1: directory = sys.argv[1]
lines = glob.glob(directory+"/*.png")
lines = [line for line in lines if os.path.exists(os.path.splitext(line)[0]+".txt")]
sys.stderr.write("evaluating the %d lines with ground truth in %s\n" % (len(lines),directory))
print "directory",directory
for line in lines:
    root,ext = os.path.splitext(line)
    truth = open(root+".txt","r").read()
    cmd = "env hocr=0 remove_hyphens=0 %s linerec %s 2> _err" % (ocropus,line)
    ocr = os.popen(cmd,"r").read()
    if os.path.getsize("_err")>2: # we allow two characters because something outputs a newline
        err = open("_err","r").read()
        err = re.sub(r'\n',' ',err)
        err = err[:40]
        sys.stdout.write("badmsg %s\t%s\n" % (line,err))
    # compare case- and punctuation-insensitively
    simp_truth = simp_re.sub('',truth).lower()
    simp_ocr = simp_re.sub('',ocr).lower()
    if simp_truth != simp_ocr:
        sys.stdout.write("badocr %s\t%s\t%s\n" % (line,simp_truth,simp_ocr))
    sys.stdout.flush()
| Python |
#!/usr/bin/python
import os,sys,os.path,glob,re,string,random
# FIXME
# -- better documentation
# -- better command line error checking
# -- use temporary file names instead of _truth, _ocr
# -- more informative error messages
# -- record machine, processing times
# -- get rid of ./distance
# -- output data into sqlite database
# -- use MD5 checksums on executables, page images
# paths to the ocropus driver and the edit-distance tool
ocropus = "../ocropus-cmd/ocropus"
distance = "../ocropus-cmd/ocr-distance"
# characters OUTSIDE this set are stripped before comparison
simp_re = re.compile(r'[^a-zA-Z0-9,.+=/*?! \n-]+')
def simplify(s):
    """Normalize text for OCR comparison: drop unusual characters, trim
    leading whitespace, and collapse each whitespace run to one newline."""
    s = simp_re.sub('',s)
    s = re.sub(r'^\s+','',s)
    return re.sub(r'\s+','\n',s)
def die(s):
    """Print a fatal error on stderr and abort with exit status 1."""
    sys.stderr.write("FATAL: %s\n" % (s,))
    sys.exit(1)
def ensure(s):
    """Abort (via die) unless the path s exists."""
    if os.path.exists(s):
        return
    die("%s: file not found" % s)
def compare_ocr(truth,ocr,cost):
    """Compare ground truth and OCR output with the external ocr-distance
    tool at the given cost parameter and return its numeric result.

    Simplified texts are staged in temporary files _truth/_ocr in the
    current directory (removed afterwards).
    """
    truth_simp = simplify(truth)
    ocr_simp = simplify(ocr)
    stream = open("_truth","w")
    stream.write(truth_simp)
    stream.close()
    stream = open("_ocr","w")
    stream.write(ocr_simp)
    stream.close()
    stream = os.popen("%s _truth _ocr %s" % (distance,cost))
    # drop the trailing newline from the tool's single-line output
    result = stream.read()[:-1]
    stream.close()
    os.unlink("_truth")
    os.unlink("_ocr")
    return float(result)
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Run full-page OCR over every page image with a .txt ground truth and
# record the edit distance at a range of cost parameters.
ensure(ocropus)
ensure(distance)
data = []
directory = "../data/pages"
if len(sys.argv)>1: directory = sys.argv[1]
pages = glob.glob(directory+"/*.png")
pages = [page for page in pages if os.path.exists(os.path.splitext(page)[0]+".txt")]
sys.stderr.write("evaluating the %d pages with ground truth in %s\n" % (len(pages),directory))
print "directory",directory
for page in pages:
    sys.stderr.write("=== %s ===\n" % page)
    print "page",page
    root,ext = os.path.splitext(page)
    truth = open(root+".txt","r").read()
    cmd = "env hocr=0 remove_hyphens=0 %s ocr %s" % (ocropus,page)
    ocr = os.popen(cmd,"r").read()
    # cost sweep: 1, then 5..50 in steps of 5
    for cost in [1]+range(5,51,5):
        result = compare_ocr(truth,ocr,cost)
        print "cost",page,cost,result
        data.append((cost,result))
    sys.stdout.flush()
| Python |
#!/usr/bin/python
import sys,os
# NOTE(review): indentation reconstructed from a whitespace-stripped source.
# Collect "pick key=value" parameter lines and "... rate <result> ..."
# result lines from the given log files and print an R-style data table
# (one row per result, NA for parameters a row never picked).
vars = {}
results = []
for file in sys.argv[1:]:
    current = {}
    for line in open(file).readlines():
        f = line.split()
        if len(f)>1 and f[0]=="pick":
            key,value = f[1].split("=")
            # "m:s" values are split into two columns, key_m and key_s
            if len(value.split(":"))==2:
                m,s = value.split(":")
                current[key+"_m"] = m
                vars[key+"_m"] = 1
                current[key+"_s"] = s
                vars[key+"_s"] = 1
            else:
                current[key] = value
                vars[key] = 1
        elif len(f)>4 and f[1]=="rate":
            # snapshot the current parameter picks for this result row
            row = current.copy()
            row["result"] = f[2]
            results.append(row)
vars = sorted(vars.keys())
vars = [var for var in vars if var!="result"]
print "result",
for var in vars: print var,
print
for row in results:
    print row["result"],
    for var in vars:
        print row.get(var,"NA"),
    print
| Python |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import urllib2, zlib, gzip, StringIO, Image, threading
import sys, zipfile, os, os.path, re, tempfile, datetime, shutil
import xml.dom.minidom as dom
# number of parallel conversion threads used by convertThreaded()
nThreads = 8
def getImages(siteRoot, siteName, dir=None):
    """Download a volume's page-image zip from archive.org into dir
    (a fresh temp dir when None), trying the _tif variant first and
    falling back to _jp2 on a 404.  Returns (zip path, "_tif"/"_jp2").
    Exits the process if neither variant is available."""
    global logFile
    if dir == None:
        dir = tempfile.mkdtemp()
    try:
        link = siteRoot + "/" + siteName + "_tif.zip"
        print("try " + link)
        f = urllib2.urlopen(link)
        logFile.write(link+"\n")
        imagesType = "_tif"
    except urllib2.URLError, e:
        print("error " + str(e.code))
        if e.code == 404:
            try:
                link = siteRoot + "/" + siteName + "_jp2.zip"
                print("try " + link)
                f = urllib2.urlopen(link)
                logFile.write(link+"\n")
                imagesType = "_jp2"
            except urllib2.URLError, e1:
                print("error " + str(e1.code))
                sys.exit()
    imagesZipFilename = dir+"/"+siteName+imagesType+".zip"
    # NOTE(review): zip data is binary; mode 'w' should probably be 'wb'
    imagesZipFile = open(imagesZipFilename, 'w')
    imagesZipFile.write(f.read())
    imagesZipFile.close()
    return imagesZipFilename, imagesType
def getAbbyy(link, zipFilename=None):
global logFile
try:
zipLink = link+".gz"
print("try " + zipLink)
f = urllib2.urlopen(zipLink)
logFile.write(zipLink+"\n")
abbyyZip = f.read()
return gzip.GzipFile(fileobj=StringIO.StringIO(abbyyZip)).read()
except urllib2.URLError, e:
print("error " + str(e.code))
if e.code == 404:
try:
zipLink = link+".zip"
print("try " + zipLink)
f = urllib2.urlopen(zipLink)
logFile.write(zipLink + "\n")
if zipFilename == None:
zipFile, zipFilename = tempfile.mkstemp(dir="/scratch")
else:
zipFile = os.open(zipFilename, "w")
zipFile.write(f.read())
zipFile.close()
zfobj = zipfile.ZipFile(zipFilename)
xml = zfobj.read(zfobj.namelist()[0])
os.remove(zipFilename)
return xml
except urllib2.URLError, e:
print("error " + str(e.code))
sys.exit()
def convertOne(inFile, directory, cmd, f, t, dst=None):
    # Convert directory/inFile (suffix f) to the suffix-t output file,
    # either next to the input or under dst, by running the shell command
    # template cmd % (input, output); output of the tool is discarded.
    # NOTE(review): cmd is interpolated into a shell string -- unsafe for
    # untrusted file names.
    inFile = os.path.join(directory, inFile)
    if dst == None:
        outFile = inFile[:-len(f)] + t;
    else:
        outFile = dst + "/" + inFile.split("/")[-1][:-len(f)] + t
    logFile.write(inFile + " => " + outFile + "\n")
    os.system(cmd % (inFile, outFile) + " 2> /dev/null > /dev/null");
class Convert(threading.Thread):
    """Worker thread: converts its list of input files one by one via
    convertOne(), writing a carriage-return progress display to stdout."""
    def __init__ (self, directory, inFiles, cmd, f, t, descr, dst=None):
        threading.Thread.__init__(self)
        self.directory = directory
        self.inFiles = inFiles   # file names (relative to directory)
        self.cmd = cmd           # shell command template, see convertOne()
        self.f = f               # source suffix
        self.t = t               # target suffix
        self.descr = descr       # progress-display label
        self.dst = dst           # optional output directory
    def run(self):
        if(len(self.inFiles) > 0):
            sys.stdout.write("\r"+self.descr+"...")
            n = len(self.inFiles)*1.0
            i = 0.0
            for inFile in self.inFiles:
                convertOne(inFile, self.directory, self.cmd, self.f, self.t, self.dst)
                i += 100.0
                sys.stdout.write("\r%s...%.2f%%" %(self.descr, i/n))
                sys.stdout.flush()
            sys.stdout.write("\r%s...%.2f%%\n" %(self.descr, i/n))
def convertThreaded(directory, cmd, f, t, descr, n, dst=None):
    """Convert every *f*-suffixed file in *directory* to *t* using *n*
    Convert worker threads; files are dealt out round-robin."""
    buckets = [[] for _ in range(n)]
    matched = 0
    for name in os.listdir(directory):
        # pick files with the source extension, skipping ones that already
        # carry the target extension
        if name[-len(f):] == f and name[-len(t):] != t:
            buckets[matched % n].append(name)
            matched += 1
    workers = [Convert(directory, buckets[k], cmd, f, t, descr, dst)
               for k in range(n)]
    for worker in workers:
        worker.start()
    # block until the whole batch is done
    for worker in workers:
        worker.join()
def unzip(file, dir=None):
    """Extract the zip archive *file* into *dir* and return the directory.

    When dir is None a fresh temporary directory under /scratch is created.
    """
    print("unzip " + file)
    if dir == None:
        dir = tempfile.mkdtemp(dir="/scratch")
    zfobj = zipfile.ZipFile(file)
    for name in zfobj.namelist():
        target = os.path.join(dir, name)
        if name.endswith('/'):
            # explicit directory entry
            if not os.path.isdir(target):
                os.makedirs(target)
        else:
            # BUG FIX: the original created only the first path component
            # (name.split('/')[0]) and swallowed errors with a bare except,
            # so entries nested more than one level deep failed to extract.
            parent = os.path.dirname(target)
            if parent and not os.path.isdir(parent):
                os.makedirs(parent)
            outfile = open(target, 'wb')
            try:
                outfile.write(zfobj.read(name))
            finally:
                outfile.close()
    return dir
def rename(srcDir, dstDir):
    """Move every entry of srcDir into dstDir, stripping everything up to
    and including the first underscore from the file name (names without an
    underscore are kept as-is)."""
    for name in os.listdir(srcDir):
        stripped = name[name.find('_') + 1:]
        src = os.path.join(srcDir, name)
        dst = os.path.join(dstDir, stripped)
        if src != dst:
            shutil.move(src, dst)
# Main pipeline: download an Internet Archive book (ABBYY OCR + page images),
# cut the OCR into per-page character bounding-box files, convert the images
# and run page segmentation / model training.
# Usage: script.py <siteRoot-url> <output-parent-dir>
siteRoot = sys.argv[1]
siteRoot = siteRoot.strip("/")
# last path component doubles as the item/book name
siteName = siteRoot.split("/")[-1]
outDir = sys.argv[2] + "/" + siteName
try:
    os.mkdir(outDir)
except:
    # directory probably exists already; reuse it
    pass
logFile = open(outDir + "/log.txt", "w")
logFile.write(datetime.datetime.now().isoformat() + "\n")
logFile.write(siteRoot + "\n")
# fetch the full ABBYY OCR XML (string) for the book
abbyyXml = getAbbyy(siteRoot + "/" + siteName + "_abbyy", outDir)
sys.stdout.write("\rcut xml into pages...")
sys.stdout.flush()
# split the document into one XML fragment per <page> element
abbyyXmls = abbyyXml.split("<page")[1:]
for i in range(len(abbyyXmls)):
    abbyyXmls[i] = "<page" + abbyyXmls[i]
# drop any trailer after the final closing tag
abbyyXmls[-1] = abbyyXmls[-1].split("</page>")[0] + "</page>"
# free the (potentially huge) full document string
abbyyXml = 0
numPages = len(abbyyXmls);
try:
    for pageNo in range(numPages):
        bboxes = ""
        page = dom.parseString(abbyyXmls[pageNo])
        # one "x0,y0,x1,y1,char" line per recognized character
        for line in page.getElementsByTagName("line"):
            for char in line.getElementsByTagName("charParams"):
                x0 = int(char.getAttribute("l"))
                x1 = int(char.getAttribute("r"))
                y0 = int(char.getAttribute("t"))
                y1 = int(char.getAttribute("b"))
                bboxes += "%d,%d,%d,%d,%c\n" % (x0, y0, x1, y1, char.childNodes[0].data)
        # pages are numbered 0001.bbox.txt, 0002.bbox.txt, ...
        path = outDir+"/%04d.bbox.txt" % (pageNo+1)
        logFile.write(path + "\n")
        bboxFile = open(path, "w")
        bboxFile.write(bboxes.encode("utf-8"))
        bboxFile.close()
        pageNo += 1
        sys.stdout.write("\rcut xml into pages...%.2f%%" % (pageNo*100.0/(numPages*1.0)))
        sys.stdout.flush()
    sys.stdout.write("\rcut xml into pages...%.2f%%\n" % (pageNo*100.0/(numPages*1.0)))
except Exception , ex:
    # NOTE(review): tmpDstDir is never defined in this script -- this cleanup
    # line would itself raise NameError; presumably a leftover from an older
    # version. Confirm and remove or define tmpDstDir.
    os.system("rm -rf " + tmpDstDir)
    logFile.close()
    raise
# download and unpack the page images (jp2 or tif zip)
imagesZipFile, imagesType = getImages(siteRoot, siteName, outDir)
imagesTmpDir = unzip(imagesZipFile, outDir) + "/" + siteName + imagesType
#os.remove(imagesZipFile)
# flatten "<site>_NNNN.jp2" names into outDir as "NNNN.jp2"
rename(imagesTmpDir, outDir);
# NOTE(review): nThreads is not defined in this excerpt -- presumably set
# earlier in the file.
convertThreaded(outDir, "j2k_to_image -i %s -o %s", ".jp2", ".tif", "convert jp2 to tif", nThreads)
# convertThreaded(imagesDir, "convert %s %s", ".tif", ".png", "convert tif to png", nThreads, outDir)
convertThreaded(outDir, "./pageseg %s %s", ".tif", ".bbox.txt", "pageseg", nThreads)
# finally train the segmentation model on the prepared pages
os.system("/usr/local/bin/ocropus trainseg " + siteName + ".model " + outDir);
| Python |
'''
Created on 03.11.2009
@author: anamariastoica
'''
from sna.crawler.crawler import CommunityCrawler
import sna.db.dbaccess as dbaccess
import sna.db.services as snaserv
import urllib
# Default avatar URL assigned to every crawled delicious user (external image).
defAvatar = 'http://wire.wiscnet.net/wp-content/uploads/2009/04/delicious-60x60.png'
class DeliciousCrawler(CommunityCrawler):
    '''
    Crawls delicious and updates social network and tagged resources to repository
    '''

    def __init__(self):
        self.service = snaserv.DELICIOUS
        # imported lazily so the module can load without the dependency
        import deliciousapi
        self.dapi = deliciousapi.DeliciousAPI()
        self.userBaseUri = 'http://delicious.com/%s' # % username
        self.tagBaseUri = 'http://delicious.com/tag/%s' # % tagname
        CommunityCrawler.__init__(self)

    @staticmethod
    def getname():
        # BUG FIX: 'self' is undefined inside a @staticmethod; return the
        # module-level service constant (the same value __init__ assigns).
        return snaserv.DELICIOUS

    @staticmethod
    def getDefaultAvatar():
        return defAvatar

    @staticmethod
    def factory(user, depth=2, last_added=None, verbose=False):
        '''Build a fully configured DeliciousCrawler for *user*.'''
        delicrawl = DeliciousCrawler()
        delicrawl.setStartUserId(user)
        delicrawl.setMaxLevel(depth)
        delicrawl.setLastAdded(last_added)
        delicrawl.setVerbose(verbose)
        return delicrawl

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist the user's profile, bookmarks, tags and contacts.
        Returns the number of added resources.'''
        username = user_metadata.username
        userUri = self.userBaseUri % username
        resourceUri = None
        added = 0
        # add user to repository
        dbaccess.User.addToDB(userUri, self.service, username,
                              DeliciousCrawler.getDefaultAvatar(), udate=udate)
        # add resources, tags and relations between them to repository
        for resourceUri,tags,resourceTitle,_,_ in user_metadata.bookmarks:
            # add bookmark resource to repository
            dbaccess.Bookmark.addToDB(resourceUri, resourceTitle, udate=udate)
            added += 1
            for tag in tags:
                # don't add system:unfiled tag to repository
                if tag == 'system:unfiled':
                    continue
                tagUri = self.tagBaseUri % urllib.quote(tag.encode('utf-8'))
                # add tag to repository
                dbaccess.Tag.addToDB(tagUri, tag, udate=udate)
                # add user, resource, tag relation to repository
                dbaccess.UserResourceTag.addToDB(userUri, resourceUri, tagUri, udate=udate)
        # add user social network relations to repository
        for u in user_ntw['knows']:
            knownUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(knownUserUri, self.service, u,
                                  DeliciousCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(userUri, knownUserUri, udate=udate)
        # return number of added resources
        return added

    def fetchUserMetadata(self, username):
        return self.dapi.get_user(username)

    def fetchUserNetwork(self, username):
        '''Return {'knows': [...], 'fans': [...]}; each list degrades to
        empty when the API payload is missing or malformed.'''
        ntw = self.dapi.get_network(username)
        try:
            ntw_knows = [u[0] for u in ntw[0]]
        except:
            ntw_knows = []
        try:
            ntw_fans = [u[1] for u in ntw[1]]
        except:
            ntw_fans = []
        return {'knows': ntw_knows, 'fans': ntw_fans}

    def fetchUser(self, username):
        return (self.fetchUserMetadata(username), self.fetchUserNetwork(username))
if __name__ == '__main__':
    # Smoke test: one-level crawl of the delicious network starting from a
    # fixed account (hits the live service; needs network access).
    # Delicious crawler
    delicrawl = DeliciousCrawler()
    delicrawl.setStartUserId('anamaria0509')
    delicrawl.setMaxLevel(1)
    delicrawl.crawlUserNetwork()
| Python |
'''
Created on 03.11.2009
@author: anamariastoica
'''
from sna.crawler.crawler import CommunityCrawler
import sna.db.dbaccess as dbaccess
import sna.db.services as snaserv
import urllib
import feedparser
# Default avatar URL assigned to every crawled YouTube user (external image).
defAvatar = 'http://www.christmastree.org/youtube.jpg'
class YouTubeCrawler(CommunityCrawler):
    '''
    YouTube Crawler
    '''

    def __init__(self):
        self.service = snaserv.YOUTUBE
        # imported lazily so the module can load without the dependency
        import gdata.youtube.service
        self.ytapi = gdata.youtube.service.YouTubeService()
        self.ytapi.ssl = False
        self.userBaseUri = 'http://gdata.youtube.com/feeds/api/users/%s' # % username
        self.tagBaseUri = 'http://www.youtube.com/results?search_query=%s&search=tag' # ??? % tag
        self.videoBaseUri = 'http://gdata.youtube.com/feeds/api/videos/%s' # % videoID
        self.videosFeedBaseUri = 'http://gdata.youtube.com/feeds/api/users/%s/uploads' # % username
        self.subscriptionsFeedBaseUri = 'http://gdata.youtube.com/feeds/api/users/%s/subscriptions' # % username
        self.contactsFeedBaseUri = 'http://gdata.youtube.com/feeds/api/users/%s/contacts' # % username
        CommunityCrawler.__init__(self)

    @staticmethod
    def getname():
        # BUG FIX: 'self' is undefined inside a @staticmethod; return the
        # module-level service constant (the same value __init__ assigns).
        return snaserv.YOUTUBE

    @staticmethod
    def getDefaultAvatar():
        return defAvatar

    @staticmethod
    def factory(user, depth=2, last_added=None, verbose=False):
        '''Build a fully configured YouTubeCrawler for *user*.'''
        ytcrawl = YouTubeCrawler()
        ytcrawl.setStartUserId(user)
        ytcrawl.setMaxLevel(depth)
        ytcrawl.setLastAdded(last_added)
        ytcrawl.setVerbose(verbose)
        return ytcrawl

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist the user's profile, uploaded videos, tags and
        subscriptions. Returns the number of added resources.'''
        userUri = user_metadata['profile']['id']
        username = user_metadata['profile']['username']
        resourceUri = None
        added = 0
        # add user to repository
        dbaccess.User.addToDB(userUri, self.service, username, YouTubeCrawler.getDefaultAvatar(), udate=udate)
        # add resources, tags and relations between them to repository
        for entry in user_metadata['videos'].entry:
            # add resource to repository
            resourceUri = entry.id.text
            # entry.media.title.text # rdfs.label | dc.title # entry.media.payer.url / entry.GetSwfUrl() (watch page / flash player URL)
            dbaccess.Video.addToDB(resourceUri, entry.title.text, udate=udate)
            added += 1
            # ROBUSTNESS FIX: videos without keywords have .text == None,
            # which made the unconditional split() raise AttributeError
            keywords = entry.media.keywords.text
            tags = keywords.split(", ") if keywords else []
            for tag in tags:
                tagUri = self.tagBaseUri % urllib.quote(tag.encode('utf-8'))
                # add tag to repository
                dbaccess.Tag.addToDB(tagUri, tag, udate=udate)
                # add user, resource, tag relation to repository
                dbaccess.UserResourceTag.addToDB(userUri, resourceUri, tagUri, udate=udate)
        # add user social network relations to repository
        for u in user_ntw['knows']:
            knownUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(knownUserUri, self.service, u, YouTubeCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(userUri, knownUserUri, udate=udate)
        return added

    def fetchUser(self, username):
        '''Fetch the profile stub, uploads feed and subscription list.'''
        userUri = self.userBaseUri % username
        videosFeedUri = self.videosFeedBaseUri % username
        # get profile data ---> could be also fetched from YouTube :
        # user_entry = self.ytapi.GetYouTubeUserEntry(username='username')
        profile_feed = {'username': username, 'id': userUri}
        # get video data
        videos_feed = self.ytapi.GetYouTubeVideoFeed(videosFeedUri)
        # create user_metadata object
        user_metadata = {'profile': profile_feed, 'videos': videos_feed}
        # get user subscriptions
        subscription_uri = self.subscriptionsFeedBaseUri % username
        contacts = []
        doc = feedparser.parse(subscription_uri)
        for entry in doc.entries:
            contacts.append(entry.yt_username)
        # create user social network object
        user_ntw = {'knows': contacts}
        return (user_metadata, user_ntw)
if __name__ == '__main__':
    # Smoke test against the live YouTube API (needs network access).
    # YouTube crawler
    ytcrawl = YouTubeCrawler()
    ytcrawl.setStartUserId('anamaria0509')
    # NOTE(review): this second call overwrites the start user set just
    # above -- presumably the first line is leftover debugging.
    ytcrawl.setStartUserId('HDCYT')
    ytcrawl.setMaxLevel(1)
    ytcrawl.crawlUserNetwork()
| Python |
'''
Created on April 9, 2009
@author: anamariastoica
'''
import sna.db.dbaccess as dbaccess
import time
import threading
class CommunityCrawler(object):
    '''
    Kind of an abstract class for Social Network Crawlers
    Gets resources and friends up to a certain level
    '''

    def __init__(self, max_level=2):
        self.max_level = max_level
        # verbosity is off unless setVerbose() is called
        self.verbose = False

    def setStartUserId(self, start_user_id):
        self.start_user_id = start_user_id

    def setMaxLevel(self, max_level):
        self.max_level = max_level

    def setLastAdded(self, last_added):
        self.last_added = last_added

    def setVerbose(self, verbose):
        # BUG FIX: every concrete crawler factory calls setVerbose(), but
        # this method did not exist, raising AttributeError.
        self.verbose = verbose

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist one user's data; implemented by concrete crawlers.
        Must return the number of resources added.'''
        raise NotImplementedError()

    def fetchUser(self, username):
        '''Return (user_metadata, network_dict); concrete crawlers override.'''
        return (None, {'knows':[], 'fans':[]})

    def getUserUri(self, username):
        # userBaseUri is provided by the concrete crawler subclass
        return self.userBaseUri % username

    def crawlUserNetwork(self, user_id=None, max_level=0, start_time=None):
        '''
        Breadth-first crawl of the social graph starting at *user_id*
        (defaults to the configured start user), visiting users up to
        *max_level* hops away. Returns the total number of resources added.

        user_id - an id to uniquely identify a user (can be a username or userid)
        '''
        user_id = user_id or self.start_user_id
        max_level = max_level or self.max_level
        queue = [user_id]           # init queue
        visited = {user_id: 0}      # user -> distance from the start user
        added = 0                   # set number of added resources
        while queue:
            v = queue.pop(0)        # pop next user node
            level = visited[v]      # get their level
            # get user metadata and social network
            user_metadata, user_ntw = self.fetchUser(v)
            # update database with user data, tagged resources and social network relations
            a = self.updateDatabase(user_metadata, user_ntw, udate=start_time)
            added += a
            # explore following nodes still within range
            for w in user_ntw['knows']:
                if w not in visited and level < max_level:
                    queue.append(w)
                    visited[w] = level + 1
        return added
class CommunityCrawlerThread(threading.Thread):
    '''
    Runs one community crawler in its own thread.

    The wrapped crawler MUST already have its starting user id set. The
    crawl's return value is stored in ``self.result`` after run() finishes.
    '''

    def __init__(self, communityCrawler, start_time=None):
        threading.Thread.__init__(self)
        self.communityCrawler = communityCrawler
        self.start_time = start_time
        self.result = None

    def run(self):
        crawl = self.communityCrawler.crawlUserNetwork
        self.result = crawl(start_time=self.start_time)
class CrawlNetworks():
    '''
    Creates a thread for each of the crawlers and runs them concurrently
    '''

    def __init__(self, crawlers):
        self.crawlers = crawlers

    def crawl(self, start_time=None):
        # spawn one worker thread per crawler
        workers = []
        for crawler in self.crawlers:
            workers.append(CommunityCrawlerThread(crawler, start_time=start_time))
        # run all threads
        for worker in workers:
            worker.start()
        # wait for all threads to finish
        for worker in workers:
            worker.join()
if __name__ == '__main__':
    # Demo: build one crawler per supported service and run them in parallel
    # (hits the live services; needs network access).
    # create crawlers using user network accounts data
    # Delicious crawler
    from sna.crawler.deliciouscrawler import DeliciousCrawler
    delicrawl = DeliciousCrawler()
    delicrawl.setStartUserId('anamaria0509')
    delicrawl.setMaxLevel(2)
    # Flickr crawler
    from sna.crawler.flickrcrawler import FlickrCrawler
    # NOTE(review): API keys/secrets are hard-coded below; they should live
    # in configuration, not in source control.
    params = {'api_key': 'ac91a445a4223af2ceafb06ae50f9a25'}
    fcrawl = FlickrCrawler(params)
    fcrawl.setStartUserId('anamaria stoica')
    fcrawl.setMaxLevel(2)
    # YouTube crawler
    from sna.crawler.youtubecrawler import YouTubeCrawler
    ytcrawl = YouTubeCrawler()
    ytcrawl.setStartUserId('anamaria0509')
    ytcrawl.setMaxLevel(2)
    # SlideShare crawler
    from sna.crawler.slidesharecrawler import SlideShareCrawler
    params = {'api_key': 'hGB0A4by', 'secret_key': '3qjmDPUM'}
    sscrawl = SlideShareCrawler(params)
    sscrawl.setStartUserId('anamaria0509')
    sscrawl.setMaxLevel(2)
    # all crawlers list
    crawlers = [ delicrawl, fcrawl, ytcrawl, sscrawl ]
    # NOTE(review): time.clock() measures CPU time on Unix, so the reported
    # duration can badly understate wall-clock time (and it is deprecated).
    t1 = time.clock()
    CrawlNetworks(crawlers).crawl()
    t2 = time.clock()
    print 'Finished in %d seconds' % (t2-t1)
    print len(dbaccess.db)
| Python |
'''
Created on 03.11.2009
@author: anamariastoica
'''
from sna.crawler.crawler import CommunityCrawler
import sna.db.dbaccess as dbaccess
import sna.db.services as snaserv
import urllib
# Default avatar URL assigned to every crawled SlideShare user (external image).
defAvatar = 'http://press.slideshare.net/wp-content/uploads/2008/12/slideshare_550x150.png'
class SlideShareCrawler(CommunityCrawler):
    '''
    Crawls SlideShare and updates social network and tagged resources to repository
    '''

    def __init__(self, params):
        self.service = snaserv.SLIDESHARE
        # imported lazily so the module can load without the dependency
        import slideshareapi
        self.ssapi = slideshareapi.SlideShareAPI(params)
        self.userBaseUri = 'http://www.slideshare.net/%s' # % username
        self.tagBaseUri = 'http://www.slideshare.net/tag/%s' # % tagname
        CommunityCrawler.__init__(self)

    @staticmethod
    def getname():
        # BUG FIX: 'self' is undefined inside a @staticmethod; return the
        # module-level service constant (the same value __init__ assigns).
        return snaserv.SLIDESHARE

    @staticmethod
    def getDefaultAvatar():
        return defAvatar

    @staticmethod
    def factory(user, depth=2, last_added=None, verbose=False):
        '''Build a fully configured SlideShareCrawler for *user*.'''
        # SECURITY(review): hard-coded API credentials; these belong in
        # configuration, not source control.
        params = {'api_key': 'hGB0A4by', 'secret_key': '3qjmDPUM'}
        sscrawl = SlideShareCrawler(params)
        sscrawl.setStartUserId(user)
        sscrawl.setMaxLevel(depth)
        sscrawl.setLastAdded(last_added)
        sscrawl.setVerbose(verbose)
        return sscrawl

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist the user's profile, slideshows, tags and contacts.
        Returns the number of added resources.'''
        username = user_metadata.User.name
        userUri = self.userBaseUri % username
        resourceUri = None
        added = 0
        # add user to repository
        dbaccess.User.addToDB(userUri, self.service, username,
                              SlideShareCrawler.getDefaultAvatar(), udate=udate)
        # add resources, tags and relations between them to repository
        for s in user_metadata.User.Slideshow:
            resourceUri = s.Permalink
            # add slideshow resource to repository; the title is recovered
            # from the permalink slug ("my-talk" -> "my talk")
            dbaccess.Document.addToDB(resourceUri,
                resourceUri.rpartition("/")[2].replace("-", " "), udate=udate)
            added += 1
            tags = s.Tags or 'system:unfiled'
            for tag in tags.split(" "):
                tagUri = self.tagBaseUri % urllib.quote(tag.encode('utf-8'))
                # add tag to repository
                dbaccess.Tag.addToDB(tagUri, tag, udate=udate)
                # add user, resource, tag relation to repository
                dbaccess.UserResourceTag.addToDB(userUri, resourceUri, tagUri, udate=udate)
        # add user social network relations to repository
        for u in user_ntw['knows']:
            knownUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(knownUserUri, self.service, u,
                                  SlideShareCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(userUri, knownUserUri, udate=udate)
        return added

    def fetchUserMetadata(self, username):
        return self.ssapi.get_slideshow_by_user(username)

    def fetchUserNetwork(self, username):
        ntw = self.ssapi.get_user_contacts(username)
        ntw_knows = [c.Username for c in ntw.Contacts.Contact]
        return {'knows': ntw_knows, 'fans': []}

    def fetchUser(self, username):
        return (self.fetchUserMetadata(username), self.fetchUserNetwork(username))
if __name__ == '__main__':
    # Smoke test against the live SlideShare API (needs network access).
    # SECURITY(review): hard-coded API credentials in source control.
    # SlideShare crawler
    params = {'api_key': 'hGB0A4by', 'secret_key': '3qjmDPUM'}
    sscrawl = SlideShareCrawler(params)
    sscrawl.setStartUserId('anamaria0509')
    sscrawl.setMaxLevel(1)
    sscrawl.crawlUserNetwork()
| Python |
'''
Created on 03.11.2009
@author: anamariastoica
'''
from sna.crawler.crawler import CommunityCrawler
import sna.db.dbaccess as dbaccess
import sna.db.services as snaserv
import urllib
import urllib2
import re
# Default avatar URL assigned to every crawled twitter user (external image).
defAvatar = 'http://careernetwork.msu.edu/wp-content/themes/cspMSU_v4.1/_images/twitter-logo.png'
class TwitterCrawler(CommunityCrawler):
    '''
    Crawls twitter and updates social network and tagged resources to repository
    '''

    def __init__(self, params):
        self.service = snaserv.TWITTER
        # imported lazily so the module can load without the dependency
        import twython
        self.twapi = twython.core.setup(username=params['username'], password=params['password'])
        self.userBaseUri = 'http://twitter.com/%s' # % user screen_name
        self.tagBaseUri = 'http://twitter.com/#search?q=#%s' # % trend key word
        self.tweetBaseUri = 'http://twitter.com/%s/status/%s' # % (user screen_name, status id)
        CommunityCrawler.__init__(self)

    @staticmethod
    def getname():
        # BUG FIX: 'self' is undefined inside a @staticmethod; return the
        # module-level service constant (the same value __init__ assigns).
        return snaserv.TWITTER

    @staticmethod
    def getDefaultAvatar():
        return defAvatar

    @staticmethod
    def factory(user, params=None, depth=2, last_added=None, verbose=False):
        '''Build a fully configured TwitterCrawler for *user*.'''
        # Twitter crawler
        # SECURITY(review): these hard-coded credentials override the params
        # argument; they belong in configuration, not source control.
        params = {'username':'ltfll', 'password':'socialnetassistant'}
        twcrawl = TwitterCrawler(params)
        twcrawl.setStartUserId(user)
        twcrawl.setMaxLevel(depth)
        twcrawl.setLastAdded(last_added)
        twcrawl.setVerbose(verbose)
        return twcrawl

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist the user's profile, tweets, trends, referenced urls and
        social relations. Returns the number of added resources.'''
        username = user_metadata['username']
        userUri = self.userBaseUri % username
        resourceUri = None
        added = 0
        # add user to repository
        dbaccess.User.addToDB(userUri, self.service, username,
                              TwitterCrawler.getDefaultAvatar(), udate=udate)
        # add resources, tags and relations between them to repository
        for resource in user_metadata['data']:
            # add tweet resource to repository
            resourceId = resource['id']
            resourceTitle = resource['text']
            resourceUri = self.tweetBaseUri % (username, resourceId)
            dbaccess.Tweet.addToDB(resourceUri, resourceTitle, udate=udate)
            added += 1
            # add tag(trend) relations to repository
            for tag in self.parseTags(resourceTitle):
                tagUri = self.tagBaseUri % urllib.quote(tag.encode('utf-8'))
                # add tag to repository
                dbaccess.Tag.addToDB(tagUri, tag, udate=udate)
                # add user, resource, tag relation to repository
                dbaccess.UserResourceTag.addToDB(userUri, resourceUri, tagUri, udate=udate)
            # add url relations between the tweet and urls contained
            urls = map(self.getRealUrl, self.parseUrls(resourceTitle))
            for url in urls:
                dbaccess.Bookmark.addToDB(url, None)
                dbaccess.Tweet.addReferenceToDB(resourceUri, url)
        # add friends (user knows them)
        for u in user_ntw['knows']:
            knownUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(knownUserUri, self.service, u,
                                  TwitterCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(userUri, knownUserUri, udate=udate)
        # add followers (they know the user)
        for u in user_ntw['fans']:
            otherUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(otherUserUri, self.service, u,
                                  TwitterCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(otherUserUri, userUri, udate=udate)
        # return number of added resources
        return added

    def fetchUserMetadata(self, username):
        # get user tweets (only last 20)
        data = self.twapi.getUserTimeline(screen_name=username)
        return { 'username': username, 'data': data}

    def _fetchScreenNames(self, fetchIDs, username):
        '''Page through an id feed (friends or followers, selected by the
        *fetchIDs* API callable) and resolve the collected ids to screen
        names in chunks of 100 (the bulkUserLookup maximum).'''
        chunk_size = 100 # max returned by bulkUserLookup
        ids, cursor = [], -1
        while True:
            f = fetchIDs(screen_name=username, cursor=cursor)
            ids.extend(f['ids'])
            if f['next_cursor'] == cursor or len(f['ids']) == 0:
                break
            cursor = f['next_cursor']
        # get screen names from ids in chunks of 100
        chunks = [ids[i:i + chunk_size] for i in range(0, len(ids), chunk_size)]
        screen_names = []
        for chunk in chunks:
            screen_names.extend([userdata['screen_name']
                                 for userdata in self.twapi.bulkUserLookup(ids=chunk)])
        return screen_names

    def fetchUserNetwork(self, username):
        # the paging/lookup logic for friends and followers was duplicated
        # inline; it is now shared through _fetchScreenNames
        ntw_knows = self._fetchScreenNames(self.twapi.getFriendsIDs, username)
        ntw_fans = self._fetchScreenNames(self.twapi.getFollowersIDs, username)
        return {'knows': ntw_knows, 'fans': ntw_fans}

    def fetchUser(self, username):
        return (self.fetchUserMetadata(username), self.fetchUserNetwork(username))

    def parseTags(self, text):
        '''
        Parses trends from tweet: '#trend #tre#nd ## #trend,two #tren%^& smth,#da #1dc #100 #1dc$ #100^',
        finds trends: trend, tre, trend, tren, da, 1dc, 100, 1dc, 100.
        Text parameter MUST be UNICODE encoded: u'some text'
        '''
        return re.findall('(?<=[\W^#]#)\w+', ' ' + text, re.UNICODE)

    def parseUrls(self, text):
        '''Returns a list of urls from the text.
        '''
        return re.findall('http://\S+', text)

    def getRealUrl(self, url):
        '''Returns the redirected url if *url* parameter is a shortened url (a redirect was produced),
        or itself if not.
        '''
        # BUG FIX: the original raised urllib2.HTTPError with the wrong
        # arguments when geturl() returned None (which itself raised a
        # TypeError that the bare except masked); fall back to the input
        # url directly instead.
        try:
            realurl = urllib2.urlopen(url).geturl()
            if realurl is None:
                realurl = url
        except Exception:
            realurl = url
        return realurl
if __name__ == '__main__':
    # Smoke test against the live Twitter API (needs network access).
    # Twitter crawler
    # SECURITY(review): hard-coded account credentials in source control.
    params = {'username':'ltfll', 'password':'socialnetassistant'}
    twcrawl = TwitterCrawler(params)
    twcrawl.setVerbose(True)
    twcrawl.setStartUserId('anamariast')
    twcrawl.setMaxLevel(1)
    twcrawl.crawlUserNetwork()
| Python |
'''
Created on 03.11.2009
@author: anamariastoica
'''
from sna.crawler.crawler import CommunityCrawler
import sna.db.dbaccess as dbaccess
import sna.db.services as snaserv
import urllib
# Default avatar URL assigned to every crawled Flickr user (external image).
defAvatar = 'http://wire.wiscnet.net/wp-content/uploads/2009/04/flickr-logo.png'
class FlickrCrawler(CommunityCrawler):
    '''
    Crawls Flickr and updates social network and tagged resources to repository
    '''

    def __init__(self, params):
        self.service = snaserv.FLICKR
        # imported lazily so the module can load without the dependency
        import flickrapi
        self.fapi = flickrapi.FlickrAPI(params['api_key'], format='etree')
        self.userBaseUri = 'http://www.flickr.com/people/%s' # % user_id
        self.tagBaseUri = 'http://www.flickr.com/photos/tags/%s' # % tagname
        self.photoBaseUri = 'http://www.flickr.com/photos/%s/%s' # % (user_id, photo_id)
        CommunityCrawler.__init__(self)

    @staticmethod
    def getname():
        # BUG FIX: 'self' is undefined inside a @staticmethod; return the
        # module-level service constant (the same value __init__ assigns).
        return snaserv.FLICKR

    @staticmethod
    def getDefaultAvatar():
        return defAvatar

    @staticmethod
    def factory(user, depth=2, last_added=None, verbose=False):
        '''Build a fully configured FlickrCrawler for *user*.'''
        # SECURITY(review): hard-coded API key; it belongs in configuration,
        # not source control.
        params = {'api_key': 'ac91a445a4223af2ceafb06ae50f9a25'}
        fcrawl = FlickrCrawler(params)
        fcrawl.setStartUserId(user)
        fcrawl.setMaxLevel(depth)
        fcrawl.setLastAdded(last_added)
        fcrawl.setVerbose(verbose)
        return fcrawl

    def updateDatabase(self, user_metadata, user_ntw, udate=None):
        '''Persist the user's profile, public photos, tags and contacts.
        Returns the number of added resources.'''
        nsid = user_metadata['nsid']
        userUri = self.userBaseUri % nsid
        resourceUri = None
        added = 0
        # add user to repository
        dbaccess.User.addToDB(userUri, self.service, nsid,
                              FlickrCrawler.getDefaultAvatar(), udate=udate)
        # add resources, tags and relations between them to repository
        res = user_metadata['photos_data']
        photos = res.find('photos').findall('photo')
        for photo in photos:
            resourceUri = self.photoBaseUri % (nsid, photo.attrib['id'])
            # add image to repository
            dbaccess.Image.addToDB(resourceUri, photo.attrib['title'], udate=udate)
            added += 1
            tags = photo.attrib['tags']
            if not tags: tags = 'system:unfiled' # if no tags, put 'system:unfiled'
            for tag in tags.split(" "):
                tag = tag.encode('utf-8')
                tagUri = self.tagBaseUri % urllib.quote(tag)
                # add tag to repository
                dbaccess.Tag.addToDB(tagUri, tag, udate=udate)
                # add user, resource, tag relation to repository
                dbaccess.UserResourceTag.addToDB(userUri, resourceUri, tagUri, udate=udate)
        # add user social network relations to repository
        for u in user_ntw['knows']:
            knownUserUri = self.userBaseUri % u
            dbaccess.User.addToDB(knownUserUri, self.service, u,
                                  FlickrCrawler.getDefaultAvatar(), udate=udate)
            dbaccess.User.addToDBUserKnowsUser(userUri, knownUserUri, udate=udate)
        # return number of added resources
        return added

    def fetchUserMetadata(self, nsid):
        # get public images for user id
        res2 = self.fapi.people_getPublicPhotos(user_id=nsid, extras='tags')
        if res2.attrib['stat'] != 'ok':
            return None
        return {'nsid': nsid, 'photos_data': res2}

    def fetchUserNetwork(self, nsid):
        res = self.fapi.contacts_getPublicList(user_id=nsid)
        ntw_knows = [c.attrib['nsid'] for c in res.find('contacts').findall('contact')]
        return {'knows': ntw_knows, 'fans': []}

    def fetchUser(self, nsid):
        return (self.fetchUserMetadata(nsid), self.fetchUserNetwork(nsid))

    def crawlUserNetwork(self, user_id=None, max_level=0, last_added=None, start_time=None):
        '''
        user_id - an id to uniquely identify a user (can be a username or userid)

        last_added is accepted for backward compatibility but unused.
        '''
        user_id = user_id or self.start_user_id
        # get associated user id for the username
        res1 = self.fapi.people_findByUsername(username=user_id)
        nsid = res1.find('user').attrib['nsid']
        # BUG FIX: the base crawlUserNetwork has no 'last_added' parameter,
        # so forwarding it raised TypeError on every Flickr crawl.
        return CommunityCrawler.crawlUserNetwork(self, user_id=nsid,
            max_level=max_level, start_time=start_time)
if __name__ == '__main__':
    # Smoke test against the live Flickr API (needs network access).
    # SECURITY(review): hard-coded API key in source control.
    # Flickr crawler
    params = {'api_key': 'ac91a445a4223af2ceafb06ae50f9a25'}
    fcrawl = FlickrCrawler(params)
    fcrawl.setStartUserId('anamaria stoica')
    fcrawl.setMaxLevel(1)
    fcrawl.crawlUserNetwork()
| Python |
'''
Created on April 21, 2009
@author: anamariastoica
'''
import urllib
import urllib2
import hashlib
import time
import sna.etc.xml2dict as xml2dict
class SlideShareAPI(object):
    """Minimal client for the SlideShare REST API (slideshows and contacts)."""

    def __init__(self, params):
        """
        params dictionary must have at least api_key, and secret_key values
        """
        self.service_url = {
            'slideshow_by_user' : 'http://www.slideshare.net/api/1/get_slideshow_by_user',
            'get_user_contacts' : 'http://www.slideshare.net/api/2/get_user_contacts',
        }
        self.params = params

    def get_slideshow_by_user(self, username_for):
        """
        Method to get all slideshows created by an user
        Requires: username_for
        """
        data = self.make_call('slideshow_by_user', username_for=username_for)
        try:
            data.User.Slideshow
        except:
            # no slideshows (or error payload): normalize to an empty list
            data.User['Slideshow'] = []
        return data

    def get_user_contacts(self, username_for):
        """Return the user's contacts, normalized so that
        Contacts.Contact is always a list (possibly empty)."""
        data = self.make_call('get_user_contacts', username_for=username_for)
        try:
            if not isinstance(data.Contacts.Contact, list):
                data.Contacts.Contact = [data.Contacts.Contact]
        except:
            data.Contacts.Contact = []
        return data

    def make_call(self, service_type, **args):
        """
        Makes the api call
        """
        params = self.get_ss_params(**args)
        data = urllib2.urlopen(self.service_url[service_type], params).read()
        json = self.parsexml(data)
        return self.return_data(json)

    def get_ss_params(self, **args):
        """
        Method which returns the parameters required for an api call:
        api_key, timestamp and the SHA1 request hash mandated by SlideShare.
        """
        ts = int(time.time())
        tmp_params_dict = {
            'api_key' : self.params['api_key'],
            'ts' : ts,
            'hash' : hashlib.sha1(self.params['secret_key'] + str(ts)).hexdigest()
        }
        # Add method specific parameters to the dict
        for arg in args:
            if args[arg] and isinstance(args[arg], str):
                tmp_params_dict[arg]=args[arg]
        ss_params = urllib.urlencode(tmp_params_dict)
        return ss_params

    def parsexml(self, xml):
        return xml2dict.XML2Dict().fromstring(xml)

    def return_data(self, json):
        """
        Method to trap slideshare error messages and return data if there are no errors
        """
        if json and hasattr(json, 'SlideShareServiceError'):
            # BUG FIX: a bare object() cannot take attributes (the original
            # raised AttributeError here); return a placeholder exposing the
            # attributes callers read (User, Contacts) instead.
            class _EmptyResult(object):
                pass
            data = _EmptyResult()
            data.User = {}
            data.Contacts = _EmptyResult()
            return data
        return json
if __name__ == '__main__':
    # Demo run against the live SlideShare API (needs network access).
    # SECURITY(review): hard-coded API credentials in source control.
    sshparams = {'api_key': 'hGB0A4by', 'secret_key': '3qjmDPUM'}
    sshapi = SlideShareAPI(sshparams)
    slides = sshapi.get_slideshow_by_user('vladposea')
    print slides
    print slides.User.name # username
    for ssh in slides.User.Slideshow: # slideshow list
        print ssh.Permalink
        # NOTE(review): the crawler uses 'system:unfiled' as the fallback
        # tag; 'system:undefined' here looks like an inconsistency.
        tags = ssh.Tags or 'system:undefined'
        print tags.split(" ")
    contacts = sshapi.get_user_contacts('stagiipebune')
    for c in contacts.Contacts.Contact:
        print c.Username
| Python |
"""
Thunder Chen<nkchenz@gmail.com> 2007.9.1
"""
try:
import xml.etree.ElementTree as ET
except:
import cElementTree as ET # for 2.4
from object_dict import object_dict
import re
class XML2Dict(object):
    """Convert XML documents into nested object_dict structures."""

    def __init__(self):
        pass

    def _parse_node(self, node):
        """Recursively convert an ElementTree element into an object_dict.

        Element text is stored under 'value', attributes become nested
        {'value': ...} entries, and repeated child tags are collected into
        a list.
        """
        node_tree = object_dict()
        # Save attrs and text, hope there will not be a child with same name
        if node.text:
            node_tree.value = node.text
        for (k,v) in node.attrib.items():
            k,v = self._namespace_split(k, object_dict({'value':v}))
            node_tree[k] = v
        # Save children (list(node) instead of the removed getchildren())
        for child in list(node):
            tag, tree = self._namespace_split(child.tag, self._parse_node(child))
            if tag not in node_tree: # the first time, so store it in dict
                node_tree[tag] = tree
                continue
            old = node_tree[tag]
            if not isinstance(old, list):
                node_tree.pop(tag)
                node_tree[tag] = [old] # multi times, so change old dict to a list
            node_tree[tag].append(tree) # add the new one
        return node_tree

    def _namespace_split(self, tag, value):
        """
        Split the tag '{http://cs.sfsu.edu/csc867/myscheduler}patients'
        ns = http://cs.sfsu.edu/csc867/myscheduler
        name = patients
        """
        # raw string avoids escape warnings; FIX: dropped the stray debug
        # 'print tag' (a Python-2-only statement) that polluted stdout
        result = re.compile(r"\{(.*)\}(.*)").search(tag)
        if result:
            # stash the namespace on the value and strip it from the tag
            value.namespace, tag = result.groups()
        return (tag, value)

    def parse(self, file):
        """parse a xml file to a dict"""
        # FIX: the file handle was never closed
        f = open(file, 'r')
        try:
            return self.fromstring(f.read())
        finally:
            f.close()

    def fromstring(self, s):
        """parse a string"""
        t = ET.fromstring(s)
        root_tag, root_tree = self._namespace_split(t.tag, self._parse_node(t))
        return object_dict({root_tag: root_tree})
if __name__ == '__main__':
    # Self-test: parse a small document and exercise attribute access and
    # repeated-tag list promotion.
    s = """<?xml version="1.0" encoding="utf-8" ?>
<result>
<count n="1">10</count>
<data><id>491691</id><name>test</name></data>
<data><id>491692</id><name>test2</name></data>
<data><id>503938</id><name>hello, world</name></data>
</result>"""
    xml = XML2Dict()
    r = xml.fromstring(s)
    from pprint import pprint
    pprint(r)
    print r.result.count.value
    print r.result.count.n
    # repeated <data> tags are collected into a list
    for data in r.result.data:
        print data.id, data.name
    # NOTE(review): expects an XML file literally named 'a' in the working
    # directory; raises IOError when it is absent.
    pprint(xml.parse('a'))
| Python |
"""
object_dict
nkchenz@gmail.com 2007
Provided as-is; use at your own risk; no warranty; no promises; enjoy!
"""
class object_dict(dict):
    """object view of dict, you can
    >>> a = object_dict()
    >>> a.fish = 'fish'
    >>> a['fish']
    'fish'
    >>> a['water'] = 'water'
    >>> a.water
    'water'
    >>> a.test = {'value': 1}
    >>> a.test2 = object_dict({'name': 'test2', 'value': 2})
    >>> a.test, a.test2.name, a.test2.value
    (1, 'test2', 2)
    """

    def __init__(self, initd=None):
        if initd is None:
            initd = {}
        dict.__init__(self, initd)

    def __getattr__(self, item):
        # BUG FIX: a missing key must raise AttributeError, not KeyError --
        # otherwise hasattr()/getattr(obj, name, default) break (KeyError is
        # not caught by hasattr at all on Python 3).
        try:
            d = self.__getitem__(item)
        except KeyError:
            raise AttributeError(item)
        # if value is the only key in object, you can omit it
        if isinstance(d, dict) and 'value' in d and len(d) == 1:
            return d['value']
        else:
            return d

    def __setattr__(self, item, value):
        self.__setitem__(item, value)
def _test():
    """Run the module's doctests (the examples in object_dict's docstring)."""
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()
| Python |
'''
Created on May 28, 2009
@author: anamariastoica
'''
import ana.dbaccess as db
from pysparse import spmatrix
import numpy as np
class FolkRank(object):
'''
FolkRank algorithm
'''
    def __init__(self, urt=None, nu=0, nr=0, nt=0):
        '''
        Initializes the adjacency matrix A, and number of users, resources and tags
        - urt = [(u,r,t)] - list of tuples linking users to resources and tags, where:
            - 0 < u < nu
            - 0 < r < nr
            - 0 < t < nt
            - u, r, t are integers
        - nu = number of users
        - nr = number of resources
        - nt = number of tags
        '''
        # number of users, resources and tags
        self.nu = nu
        self.nr = nr
        self.nt = nt
        if urt == None:
            urt = []
        # init the adjacency matrix from the (user, resource, tag) triples
        self.initA(urt=urt, nu=nu, nr=nr, nt=nt)
        # normalize A so computeW can use it as a random-walk operator
        self.normalizeA()
    def initA(self, urt=None, nu=0, nr=0, nt=0):
        '''
        Create the relation matrix A
        A is an adjacency matrix for the graph, where:
            - graph nodes are either users, resources or tags)
            - edges link either users and tags, users and resources or resources and tags

        NOTE(review): the nu/nr/nt parameters are accepted but ignored; the
        counts stored by __init__ (self.nu/self.nr/self.nt) are used instead.
        '''
        # beginning positions for users, resources and tags in the matrix
        self.u_beg = 0
        self.r_beg = self.nu
        self.t_beg = self.nu + self.nr
        if urt == None:
            urt = []
        # total number of entities --> gives the size of the matrix
        self.n = self.nu + self.nr + self.nt
        # create the relation matrix A (pysparse linked-list sparse matrix)
        self.A = spmatrix.ll_mat(self.n, self.n)
        # create adjacency matrix in the graph: each (u, r, t) triple adds a
        # symmetric edge to all three pairwise connections; repeated triples
        # accumulate as integer edge weights
        for u,r,t in urt:
            i = self.u_beg + u
            j = self.r_beg + r
            k = self.t_beg + t
            self.A[i,j] = self.A[j,i] = self.A[i,j] + 1
            self.A[i,k] = self.A[k,i] = self.A[i,k] + 1
            self.A[k,j] = self.A[j,k] = self.A[k,j] + 1
def normalizeA(self):
'''
Normalize matrix A on rows
'''
# compute sum of rows in sum vector
v = np.ones(self.n)
sum = np.zeros(self.n)
self.A.matvec(v, sum) # sum = A*v
D = spmatrix.ll_mat(self.n, self.n)
for i in range(self.n):
if sum[i] != 0:
D[i,i] = 1.0/sum[i]
else:
D[i,i] = 1
self.A = spmatrix.matrixmultiply(self.A, D)
def computeP(self, tagid):
'''
Compute the preference vector
'''
high_w = 0.6
low_w = (1.0 - high_w)/self.n
p = np.multiply(np.ones(self.n), low_w)
p[self.t_beg + tagid] = high_w
return p
def computeW(self, p, d, no_steps, Ap, eps=0):
'''
Computes the weight using the random surfer model. The weight is spread
it as follows:
w <- dAw + (1-d)p
, where A is the row-stochastic version of the adjacency matrix of GF
p is the random surfer component
d, 0 <= d <= 1 constant which influences the random surfer
'''
w = np.ones(self.n, '|f8')
w = np.multiply(w, float(1.0 / self.n)) # w <- [1/n 1/n ... 1/n]
w_tmp = np.zeros(self.n)
f = open('weights%f.txt' % d, 'w')
f.write('P %d, SUM = %f : %s\n' % (no_steps, np.sum(p), str(p)))
f.write('%d, SUM = %f : %s\n' % (no_steps, np.sum(w), str(w)))
while no_steps>0:
Ap.matvec(w, w_tmp) # w_tmp <- A*w
w = np.add(np.multiply(w_tmp, d), np.multiply(p, (1 - d))) # w <- d*w_tmp + (1-d)*p
no_steps = no_steps - 1
f.write('%d, SUM = %f : %s\n' % (no_steps, np.sum(w), str(w)))
return w
def search(self, tagid):
# compute preference vector
p = self.computeP(tagid)
no_steps = 50
d = 1
w0 = self.computeW(p, d, no_steps, self.A.to_csr())
d = 0.75
w1 = self.computeW(p, d, no_steps, self.A.to_csr())
w = np.add(w1, np.multiply(w0, -1))
bec_u = []
bec_r = []
bec_t = []
bec = []
for i in xrange(self.n):
if i < self.r_beg:
bec_u.append([w[i], i])
elif i < self.t_beg:
bec_r.append([w[i], i - self.r_beg])
else:
bec_t.append([w[i], i - self.t_beg])
bec_u.sort(key=lambda a: -a[0])
bec_r.sort(key=lambda a: -a[0])
bec_t.sort(key=lambda a: -a[0])
bec.append(bec_u)
bec.append(bec_r)
bec.append(bec_t)
return bec
def searchResourcesByTags(self, stags):
return self.search(15)
def writeResultsToFile(filename, res, users=None, resources=None, tags=None):
    '''Write ranked users, resources and tags to *filename*.

    - res: [user_ratings, resource_ratings, tag_ratings], each a list of
      [rating, id] pairs as produced by FolkRank.search
    - users / resources / tags: lookup objects whose .users / .resources /
      .tags map a local id back to its URI
    '''
    # debug size summary; guarded so the default None arguments no longer
    # crash before any output is written
    print(len(res))
    if users is not None:
        print('users %d == %d' % (len(users.users), len(res[0])))
    if resources is not None:
        print('resources %d == %d' % (len(resources.resources), len(res[1])))
    if tags is not None:
        print('tags %d == %d' % (len(tags.tags), len(res[2])))
    # open() instead of the deprecated file() builtin; the with-block closes
    # the handle even when a lookup below raises
    with open(filename, 'w') as f:
        f.write('\n\n\nTOP USERS\n\n\n')
        for rat, id in res[0]:
            f.write('[%2.6f, %d, %s]\n' % (rat, id, users.users[id]))
        f.write('\n\n\nTOP RESOURCES\n\n\n')
        for rat, id in res[1]:
            f.write('[%2.6f, %d, %s]\n' % (rat, id, resources.resources[id]))
        f.write('\n\n\nTOP TAGS\n\n\n')
        for rat, id in res[2]:
            f.write('[%2.6f, %d, %s]\n' % (rat, id, tags.tags[id]))
if __name__ == '__main__':
    # End-to-end driver: load the folksonomy from the repository, dump it,
    # run FolkRank for two tag ids and write the rankings to text files.
    # create users, resources and tag objects
    users = db.Users()
    resources = db.Resources()
    tags = db.Tags()
    # fetch users, resources and tags from repository
    users.fetchFromDB()
    resources.fetchFromDB()
    tags.fetchFromDB()
    # create user-tag-resource relations object
    urt = db.UserResourceTag(users, resources, tags)
    # fetch relations from db
    urt.fetchFromDB()
    # print data
    # NOTE(review): f is opened here and never closed; relies on process exit.
    f = open('res_repr.txt', 'w')
    f.write('\n\n\nUSERS\n\n\n')
    f.write(repr(users))
    f.write('\n\n\nRESOURCES\n\n\n')
    f.write(repr(resources))
    f.write('\n\n\nTAAAAGS\n\n\n')
    f.write(repr(tags))
    f.write('\n\n\nRELATIONS\n\n\n')
    for (u, r, t) in urt.urt:
        f.write('(%d, %d, %d)\n' % (u, r, t))
    # init folk rank algorithm
    fr = FolkRank(
        urt=urt.urt,
        nu=urt.getUsersNumber(),
        nr=urt.getResourcesNumber(),
        nt=urt.getTagsNumber()
    )
    # dump the adjacency matrix in MatrixMarket format for inspection
    fr.A.export_mtx('rez.txt', 2)
    # search tags list
    # NOTE(review): stags is never used below -- leftover from an earlier API?
    stags = []
    print 'Starting search for tags : %d - %s' % (139, urt.tags.tags[139])
    res = fr.searchResourcesByTags(139)
    writeResultsToFile('ratings139.txt', res, users=users, resources=resources, tags=tags)
    print 'Starting search for tags : %d - %s' % (25, urt.tags.tags[25])
    res = fr.searchResourcesByTags(25)
    writeResultsToFile('ratings25.txt', res, users=users, resources=resources, tags=tags)
    print 'All done !'
| Python |
'''
Created on April 9, 2009
@author: anamariastoica
'''
from rdfalchemy.sparql.sesame2 import SesameGraph
from rdflib import Namespace, Literal, URIRef, BNode
from datetime import date
import dbaccess_cfg as config
import time
import urllib
import sna.db.services as snaserv
import sys
reload(sys)
# HACK: re-enable and call sys.setdefaultencoding (deleted by site.py at
# startup) so str() on unicode URIs/labels below does not raise
# UnicodeEncodeError.  Python 2 only; this pattern is strongly discouraged.
sys.setdefaultencoding('utf-8')
# import the sesame repository
db = SesameGraph(config.rep)
# namespaces used for every triple written or queried in this module
BOOKMARK = Namespace("http://www.w3.org/2002/01/bookmark#")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
RDF = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
RDFS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
SCOT = Namespace("http://scot-project.org/scot/ns#")
VIDEO = Namespace("http://digitalbazaar.com/media/video#")
TAGS = Namespace("http://www.holygoat.co.uk/owl/redwood/0.1/tags/")
DC = Namespace("http://purl.org/dc/elements/1.1/")
SIOC = Namespace("http://rdfs.org/sioc/types#")
OWL = Namespace("http://www.w3.org/2002/07/owl#")
def dbAdd(triplet, msg=None, tries=0):
    '''Adds triplet to repository and prints a message.

    Retries up to config.TRIES times, sleeping config.WAIT seconds between
    attempts; gives up silently after that.
    - triplet: (subject, predicate, object) triple to store
    - msg: optional debug message printed before the add
    - tries: internal recursion counter; callers should leave it at 0
    '''
    if tries < config.TRIES:
        try:
            if msg is not None:
                print(msg)
            db.add(triplet)
        # narrowed from a bare except so KeyboardInterrupt/SystemExit escape
        except Exception:
            print('Error adding to database')
            time.sleep(config.WAIT)
            dbAdd(triplet, msg, tries + 1)
def dbAddReification(statement, reifs, msg=None):
'''Adding reification for the statement. The additional info is present in reifs
dictionary, with the keys being the predicate, and values the objects.
'''
print 'adding reification', reifs
s, p, o = statement
reifUri = '%s/%s/%s' % (str(s), urllib.quote(str(p)), urllib.quote(str(o)))
print reifUri
dbAdd((URIRef(reifUri), RDF.type, RDF.Statement),
'db.add((%s, %s, %s)' % (URIRef(reifUri), RDF.type, RDF.Statement))
dbAdd((URIRef(reifUri), RDF.subject, s),
'db.add((%s, %s, %s)' % (URIRef(reifUri), RDF.subject, s))
dbAdd((URIRef(reifUri), RDF.predicate, p),
'db.add((%s, %s, %s)' % (URIRef(reifUri), RDF.predicate, p))
dbAdd((URIRef(reifUri), RDF.object, o),
'db.add((%s, %s, %s)' % (URIRef(reifUri), RDF.object, o))
for pred, obj in reifs:
dbAdd((URIRef(reifUri), pred, obj),
'db.add((%s, %s, %s)' % (URIRef(reifUri), pred, obj))
def dbAddCreatedDate(resourceUri, udate):
    '''Add DC.created date relation to resourceUri only if it doesn't already exist.
    '''
    subject = URIRef(resourceUri)
    # probe for an existing dc:created triple; any hit means we skip the add
    already_dated = False
    for _ in db.triples((subject, DC.created, None)):
        already_dated = True
        break
    if not already_dated:
        dbAdd((subject, DC.created, Literal(udate)),
              'db.add((%s, %s, %s)' % (subject, DC.created, Literal(udate)))
class User:
    '''
    Fetches Users from repository.

    Instance state after setUsers/fetchFromDB:
    - self.users: list of user URIs (strings)
    - self.dict: URI -> position in self.users
    The static methods operate directly on the module-level `db` graph.
    '''
    def setUsers(self, users):
        # Replace the user list and rebuild the URI -> index dictionary.
        self.users = users
        self.dict = {}
        count = 0
        for u in users:
            self.dict[u] = count
            count = count + 1
    def fetchFromDB(self):
        # Load every foaf:Person subject into self.users / self.dict.
        self.users = []
        self.dict = {}
        # get triples from repository
        triples = db.triples((None, RDF.type, FOAF.Person))
        # create list of URIs and a dictionary
        count = 0;
        for t in triples:
            u_uri = str(t[0]) # get user URI
            self.users.append(u_uri) # add to list
            self.dict[u_uri] = count # add to dictionary
            count = count + 1
    @staticmethod
    def addToDB(userUri, account, accountName, userAvatar, t=0, udate=None):
        # Insert a foaf:Person with avatar, account and identifier triples,
        # then stamp a creation date if one is not already present.
        dbAdd((URIRef(userUri), RDF.type, FOAF.Person),
            'db.add((%s, %s, %s))' % (URIRef(userUri), RDF.type, FOAF.Person))
        dbAdd((URIRef(userUri), FOAF.image, URIRef(userAvatar)),
            'db.add((%s, %s, %s))' % (URIRef(userUri), FOAF.image, URIRef(userAvatar)))
        # add account details
        accountUri = snaserv.serviceUri[account]
        dbAdd((URIRef(userUri), FOAF.account, URIRef(accountUri)),
            'db.add((%s, %s, %s))' % (URIRef(userUri), FOAF.account, URIRef(accountUri)))
        # NOTE(review): the triple stores Literal(accountName) while the debug
        # message formats URIRef(accountName) -- looks like a copy-paste slip
        # in the message only; confirm before changing.
        dbAdd((URIRef(userUri), DC.identifier, Literal(accountName)),
            'db.add((%s, %s, %s))' % (URIRef(userUri), DC.identifier, URIRef(accountName)))
        if not udate: udate = date.today()
        dbAddCreatedDate(userUri, udate)
    @staticmethod
    def addToDBUserKnowsUser(userUri1, userUri2, t=0, udate=None):
        # Assert userUri1 foaf:knows userUri2, reified with a creation date.
        statement = (URIRef(userUri1), FOAF.knows, URIRef(userUri2))
        dbAdd(statement, 'addUserKnowsUserRelation(%s, %s)' % (userUri1, userUri2))
        dbAddReification(statement, [(DC.created, Literal(udate))])
    @staticmethod
    def fetchUsersFromNetwork(account):
        '''
        account - account name, one of db.services.DELICIOUS, FLICKR, etc.
        Returns the list of user URIs registered for that network.
        '''
        account_uri = snaserv.serviceUri[account]
        q = 'SELECT ?u WHERE { \
            ?u rdf:type foaf:Person . \
            ?u foaf:account <%s> \
            }' % account_uri
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(q, initNs=ns)
        # create list
        users_uri = [str(r[0]) for r in res]
        return users_uri
    @staticmethod
    def fetchUser(username, account):
        '''username - user name from the account
        account - account name, one of db.services.DELICIOUS, FLICKR, etc.
        Returns the matching user URI, or None when not found.
        '''
        accountUri = snaserv.serviceUri[account]
        q = 'SELECT ?u WHERE { \
            ?u rdf:type foaf:Person . \
            ?u dc:identifier "%s" . \
            ?u foaf:account <%s> \
            }' % (username, accountUri)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(q, initNs=ns)
        # take the first result only
        u_uri = None
        for r in res:
            u_uri = str(r[0]) # get resource URI
            break
        return u_uri
    @staticmethod
    def fetchFriends(userUri):
        # URIs this user foaf:knows (outgoing edges).
        friendUris = []
        for t in db.triples((URIRef(userUri), FOAF.knows, None)):
            friendUris.append(str(t[2]))
        return friendUris
    @staticmethod
    def fetchCommonFriends(user_uri1, user_uri2):
        # Users known by BOTH given users.
        sparql_query = 'SELECT DISTINCT ?u WHERE { \
            ?u rdf:type foaf:Person . \
            <%s> foaf:knows ?u. \
            <%s> foaf:knows ?u.\
            }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(sparql_query, initNs=ns)
        # create list
        friends_uri = [str(r[0]) for r in res]
        return friends_uri
    @staticmethod
    def fetchUnionFriends(user_uri1, user_uri2):
        # Users known by EITHER given user.
        sparql_query = 'SELECT DISTINCT ?u WHERE { { \
            ?u rdf:type foaf:Person . \
            <%s> foaf:knows ?u. } \
            UNION { \
            ?u rdf:type foaf:Person .\
            <%s> foaf:knows ?u.\
            } }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(sparql_query, initNs=ns)
        # create list
        friends_uri = [str(r[0]) for r in res]
        return friends_uri
    @staticmethod
    def fetchFollowers(userUri):
        # URIs that foaf:know this user (incoming edges).
        followerUris = []
        for t in db.triples((None, FOAF.knows, URIRef(userUri))):
            followerUris.append(str(t[0]))
        return followerUris
    @staticmethod
    def fetchCommonFollowers(user_uri1, user_uri2):
        # Users that know BOTH given users.
        sparql_query = 'SELECT DISTINCT ?u WHERE { \
            ?u rdf:type foaf:Person . \
            ?u foaf:knows <%s>. \
            ?u foaf:knows <%s>.\
            }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(sparql_query, initNs=ns)
        # create list
        followers_uri = [str(r[0]) for r in res]
        return followers_uri
    @staticmethod
    def fetchUnionFollowers(user_uri1, user_uri2):
        # Users that know EITHER given user.
        sparql_query = 'SELECT DISTINCT ?u WHERE { { \
            ?u rdf:type foaf:Person . \
            ?u foaf:knows <%s>. } \
            UNION { \
            ?u rdf:type foaf:Person .\
            ?u foaf:knows <%s>.\
            } }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, dc=DC)
        res = db.query(sparql_query, initNs=ns)
        # create list
        followers_uri = [str(r[0]) for r in res]
        return followers_uri
    @staticmethod
    def is_friend_with(userUri, otherUserUri):
        # True when a userUri foaf:knows otherUserUri triple exists.
        is_friend = False
        for t in db.triples((URIRef(userUri), FOAF.knows, URIRef(otherUserUri))):
            is_friend = True
            break
        return is_friend
    def getUsers(self):
        return self.users
    def getDict(self):
        return self.dict
    def getNumber(self):
        return len(self.users)
    def __repr__(self):
        # List users in insertion (index) order, one per line.
        users_sorted = [(v,k) for k,v in self.dict.items()]
        users_sorted.sort()
        s = "<Users> total = %s\n" % (self.getNumber(),)
        for (v,k) in users_sorted:
            s += "\t%s %s\n" % (v, k)
        return s
class Tag:
    '''
    Fetches Tags from repository.

    Instance state after setTags/fetchFromDB:
    - self.tags: list of tag URIs (strings)
    - self.dict: URI -> position in self.tags
    '''
    def __init__(self):
        pass
    def setTags(self, tags):
        '''Set the tag URI list and rebuild the URI -> index dictionary.'''
        self.tags = tags
        self.dict = {}
        count = 0
        for t in tags:
            self.dict[t] = count
            count = count + 1
    def fetchFromDB(self):
        '''Load every tags:Tag subject into self.tags / self.dict.'''
        self.tags = []
        self.dict = {}
        # get triples from repository
        triples = db.triples((None, RDF.type, TAGS.Tag))
        # create list of URIs and a dictionary
        count = 0
        try:
            for t in triples:
                u_uri = str(t[0]) # get tag URI
                self.tags.append(u_uri) # add to list
                self.dict[u_uri] = count # add to dictionary
                count = count + 1
        # narrowed from a bare except: a failed read aborts the iteration but
        # keeps whatever was already fetched (and no longer traps Ctrl-C)
        except Exception:
            print('Error reading record')
    @staticmethod
    def addToDB(tagUri, tagName, t=0, udate=None):
        '''Insert a tags:Tag with its name; stamp a creation date if missing.'''
        dbAdd((URIRef(tagUri), RDF.type, TAGS.Tag))
        dbAdd((URIRef(tagUri), TAGS.tagName, Literal(tagName)))
        if not udate: udate = date.today()
        dbAddCreatedDate(tagUri, udate)
    def getTags(self):
        return self.tags
    def getDict(self):
        return self.dict
    def getNumber(self):
        return len(self.tags)
    def __repr__(self):
        # List tags in insertion (index) order, one per line.
        tags_sorted = [(v,k) for k,v in self.dict.items()]
        tags_sorted.sort()
        s = "<Tags> total = %s\n" % (self.getNumber(),)
        for (v,k) in tags_sorted:
            s += "\t%s %s\n" % (v, k)
        return s
class Resource:
    '''
    Fetches Resources from repository.

    Base class for the per-network resource types below; subclasses override
    fetchFromDB to use one of the canned per-type queries.
    '''
    def __init__(self):
        # canned SPARQL queries: all resource types, plus one per network
        self.q_all = 'SELECT ?r WHERE { {?r rdf:type bookmark:Bookmark} \
            UNION {?r rdf:type foaf:Image} \
            UNION {?r rdf:type video:Recording} \
            UNION {?r rdf:type sioc:Microblog} \
            UNION {?r rdf:type foaf:Document} }'
        self.q_del = 'SELECT ?r WHERE { ?r rdf:type bookmark:Bookmark}'
        self.q_img = 'SELECT ?r WHERE {?r rdf:type foaf:Image}'
        self.q_yt = 'SELECT ?r WHERE {?r rdf:type video:Recording}'
        self.q_tw = 'SELECT ?r WHERE {?r rdf:type sioc:Microblog}'
        self.q_doc = 'SELECT ?r WHERE {?r rdf:type foaf:Document}'
    def setResources(self, resources):
        # Replace the resource list and rebuild the URI -> index dictionary.
        self.resources = resources
        self.dict = {}
        count = 0
        for r in resources:
            self.dict[r] = count
            count = count + 1
    def fetchFromDB(self):
        # Load all resources regardless of type.
        self.fetchFromDB_query(self.q_all)
    def fetchFromDB_query(self, query):
        # Run *query* and fill self.resources / self.dict from its results.
        self.resources = []
        self.dict = {}
        # get resources from repository
        ns = dict(rdf=RDF, bookmark=BOOKMARK, video=VIDEO, foaf=FOAF, sioc=SIOC)
        res = db.query(query, initNs=ns)
        # create list of URIs and a dictionary
        count = 0;
        for r in res:
            u_uri = str(r[0]) # get resource URI
            self.resources.append(u_uri) # add to list
            self.dict[u_uri] = count # add to dictionary
            #print r
            #print u_uri
            count = count + 1
    def getResources(self):
        return self.resources
    def getDict(self):
        return self.dict
    def getNumber(self):
        return len(self.resources)
    def __repr__(self):
        # List resources in insertion (index) order, one per line.
        resources_sorted = [(v,k) for k,v in self.dict.items()]
        resources_sorted.sort()
        s = "<Resources> total = %s\n" % (self.getNumber(),)
        for (v,k) in resources_sorted:
            s += "\t%s %s\n" % (v, k)
        return s
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        # Insert title/identifier triples; stamp a creation date if missing.
        # if no date provided, assume current date
        if not udate: udate = date.today()
        if resourceTitle is not None:
            dbAdd((URIRef(resourceUri), DC['title'], Literal(resourceTitle)),
                'db.add((%s, %s, %s))' % (URIRef(resourceUri), DC['title'], Literal(resourceTitle)))
        dbAdd((URIRef(resourceUri), DC.identifier, Literal(resourceUri)),
            'db.add((%s, %s, %s))' % (URIRef(resourceUri), DC.identifier, Literal(resourceUri)))
        dbAddCreatedDate(resourceUri, udate)
    @staticmethod
    def unify(resourceUri, otherResourceUri):
        '''Making resourceUri OWL:sameAs the otherResourceUri (representing the same resource).
        '''
        dbAdd((URIRef(resourceUri), OWL['sameAs'], URIRef(otherResourceUri)),
            'db.add((%s, %s, %s))' % (URIRef(resourceUri), OWL['sameAs'], URIRef(otherResourceUri)))
    @staticmethod
    def unify_all(resourceUris):
        '''Unify each resource with eachother.
        Iterates all ordered pairs, so sameAs is asserted in both directions.
        '''
        for ruri in resourceUris:
            for oruri in resourceUris:
                if ruri != oruri:
                    Resource.unify(ruri, oruri)
class Bookmark(Resource):
    '''Delicious bookmark resources (bookmark:Bookmark).'''
    def fetchFromDB(self):
        # restrict the generic fetch to bookmark-typed resources
        self.fetchFromDB_query(self.q_del)
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        subject = URIRef(resourceUri)
        dbAdd((subject, RDF.type, BOOKMARK.Bookmark),
              'db.add((%s, %s, %s))' % (subject, RDF.type, BOOKMARK.Bookmark))
        Resource.addToDB(resourceUri, resourceTitle, udate=udate)
class Image(Resource):
    '''Flickr image resources (foaf:Image).'''
    def fetchFromDB(self):
        # restrict the generic fetch to image-typed resources
        self.fetchFromDB_query(self.q_img)
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        subject = URIRef(resourceUri)
        dbAdd((subject, RDF.type, FOAF.Image),
              'adding image %s resource to repository' % resourceUri)
        Resource.addToDB(resourceUri, resourceTitle, udate=udate)
class Video(Resource):
    '''YouTube video resources (video:Recording).'''
    def fetchFromDB(self):
        # restrict the generic fetch to video-typed resources
        self.fetchFromDB_query(self.q_yt)
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        subject = URIRef(resourceUri)
        dbAdd((subject, RDF.type, VIDEO.Recording),
              'adding video %s resource to repository' % resourceUri)
        Resource.addToDB(resourceUri, resourceTitle, udate=udate)
class Document(Resource):
    '''SlideShare document resources (foaf:Document).'''
    def fetchFromDB(self):
        # restrict the generic fetch to document-typed resources
        self.fetchFromDB_query(self.q_doc)
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        subject = URIRef(resourceUri)
        dbAdd((subject, RDF.type, FOAF.Document),
              'adding document %s resource to repository' % resourceUri)
        Resource.addToDB(resourceUri, resourceTitle, udate=udate)
class Tweet(Resource):
    '''Twitter microblog resources (sioc:Microblog).'''
    def fetchFromDB(self):
        # restrict the generic fetch to microblog-typed resources
        self.fetchFromDB_query(self.q_tw)
    @staticmethod
    def addToDB(resourceUri, resourceTitle, t=0, udate=None):
        subject = URIRef(resourceUri)
        dbAdd((subject, RDF.type, SIOC.Microblog),
              'adding tweet %s resource to repository' % resourceUri)
        Resource.addToDB(resourceUri, resourceTitle, udate=udate)
    @staticmethod
    def addReferenceToDB(resourceUri, referenceUri, t=0, udate=None):
        # a tweet may point at another resource via rdfs:seeAlso
        dbAdd((URIRef(resourceUri), RDFS.seeAlso, URIRef(referenceUri)),
              'adding reference %s for tweet %s resource to repository' % (referenceUri, resourceUri))
class UserResourceTag:
    '''
    Fetch User Resource Tag relations from repository.

    After fetchFromDB*, self.urt is a list of integer triples
    (user_index, resource_index, tag_index) resolved through the dicts of
    the users/resources/tags objects given to the constructor.
    '''
    def __init__(self, users, resources, tags):
        # the three lookup objects must already be fetched for the
        # URI -> index resolution in setRelations/fetchFromDB_query to work
        self.users = users
        self.resources = resources
        self.tags = tags
        # canned SPARQL queries: all taggings, plus one per resource type
        self.q_all = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
        self.q_all2 = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            {{?r rdf:type bookmark:Bookmark } UNION {?r rdf:type video:Recording}} . \
            ?tagging tags:taggedBy ?u . \
            ?u rdf:type foaf:Person . \
            ?tagging tags:associatedTag ?t . \
            ?t rdf:type tags:Tag . }'
        self.q_del = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?r rdf:type bookmark:Bookmark . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
        self.q_yt = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?r rdf:type video:Recording . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
        self.q_img = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?r rdf:type foaf:Image . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
        self.q_doc = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?r rdf:type foaf:Document . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
        self.q_tw = 'SELECT ?u ?r ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedResource ?r . \
            ?r rdf:type sioc:Microblog . \
            ?tagging tags:taggedBy ?u . \
            ?tagging tags:associatedTag ?t . }'
    def setRelations(self, relations=[], urt=[]):
        # Set self.urt either directly (urt: integer triples) or by
        # resolving URI triples (relations) through the lookup dicts.
        # NOTE(review): mutable default arguments -- harmless here because the
        # defaults are never mutated, but fragile; consider None defaults.
        self.urt = []
        if urt:
            self.urt = urt
        elif relations:
            for u_uri, r_uri, t_uri in relations:
                u_no, r_no, t_no = self.users.dict[str(u_uri)], self.resources.dict[str(r_uri)], self.tags.dict[str(t_uri)]
                self.urt.append((u_no, r_no, t_no))
    def fetchFromDB(self, log=False):
        # Load all taggings, regardless of resource type.
        self.fetchFromDB_query(self.q_all)
    def fetchFromDB_query(self, query, log=False):
        # Run *query* and resolve each (u, r, t) URI triple to indices.
        self.urt = []
        # query the repository
        ns = dict(tags=TAGS, rdf=RDF, bookmark=BOOKMARK, video=VIDEO, foaf=FOAF, sioc=SIOC)
        relations = db.query(query, initNs=ns)
        # convert to integer representation of the URIs
        if log: f = open('relatii.txt', 'w')
        for u_uri, r_uri, t_uri in relations:
            try:
                u_no, r_no, t_no = self.users.dict[str(u_uri)], self.resources.dict[str(r_uri)], self.tags.dict[str(t_uri)]
                self.urt.append((u_no, r_no, t_no))
                if log:
                    f.write("(%s, %s, %s)\n" % (u_uri, r_uri, t_uri))
                    f.write("(%s, %s, %s)\n" % (u_no, r_no, t_no))
            except:
                # NOTE(review): bare except silently drops any relation whose
                # URIs are missing from the lookup dicts -- deliberate
                # best-effort filtering, but it also hides real errors.
                pass
        if log: f.close()
    @staticmethod
    def addToDB(userUri, resourceUri, tagUri, t=0, udate=None):
        # Materialise one tags:Tagging node linking user, resource and tag.
        # tagging resource
        taggingUri = '%s/user/%s' % (resourceUri, userUri)
        tagging = URIRef(taggingUri)
        dbAdd((tagging, RDF.type, TAGS.Tagging))
        dbAdd((tagging, TAGS.taggedResource, URIRef(resourceUri)))
        dbAdd((tagging, TAGS.associatedTag, URIRef(tagUri)))
        dbAdd((tagging, TAGS.taggedBy, URIRef(userUri)))
        # inverse relations for resource
        #db.add((URIRef(resourceUri), TAGS.tag, tagging))
        #db.add((URIRef(resourceUri), TAGS.taggedWithTag, URIRef(tagUri)))
        #db.add((URIRef(tagUri), TAGS.isTagOf, URIRef(resourceUri)))
        if not udate: udate = date.today()
        dbAddCreatedDate(taggingUri, udate)
    @staticmethod
    def fetchUserTags(user_uri):
        # All distinct tag URIs this user has ever applied.
        tags_query = 'SELECT DISTINCT ?t WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedBy <%s> . \
            ?tagging tags:associatedTag ?t . }' % \
            user_uri
        # get tags from repository
        ns = dict(rdf=RDF, tags=TAGS)
        results = db.query(tags_query, initNs=ns)
        return [str(result[0]) for result in results]
    @staticmethod
    def fetchUnionUsersTags(user_uri1, user_uri2):
        # Distinct tags used by EITHER of the two users.
        sparql_query = 'SELECT DISTINCT ?t WHERE { { \
            ?tagging1 rdf:type tags:Tagging . \
            ?tagging1 tags:taggedBy <%s>. \
            ?tagging1 tags:associatedTag ?t.\
            } \
            UNION { \
            ?tagging2 rdf:type tags:Tagging .\
            ?tagging2 tags:taggedBy <%s>. \
            ?tagging2 tags:associatedTag ?t. \
            } }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, tags=TAGS)
        results = db.query(sparql_query, initNs=ns)
        # create list
        return [str(result[0]) for result in results]
    @staticmethod
    def fetchUserResources(user_uri):
        # All distinct resource URIs this user has tagged.
        resources_query = 'SELECT DISTINCT ?r WHERE { \
            ?tagging rdf:type tags:Tagging . \
            ?tagging tags:taggedBy <%s> . \
            ?tagging tags:taggedResource ?r . }' % \
            user_uri
        # get resources from repository
        ns = dict(rdf=RDF, tags=TAGS)
        results = db.query(resources_query, initNs=ns)
        return [str(result[0]) for result in results]
    @staticmethod
    def fetchUnionUsersResources(user_uri1, user_uri2):
        # Distinct resources tagged by EITHER of the two users.
        sparql_query = 'SELECT DISTINCT ?r WHERE { { \
            ?tagging1 rdf:type tags:Tagging . \
            ?tagging1 tags:taggedBy <%s>. \
            ?tagging1 tags:taggedResource ?r.\
            } \
            UNION { \
            ?tagging2 rdf:type tags:Tagging .\
            ?tagging2 tags:taggedBy <%s>. \
            ?tagging2 tags:taggedResource ?r. \
            } }' % (user_uri1, user_uri2)
        # get resources from repository
        ns = dict(rdf=RDF, foaf=FOAF, tags=TAGS)
        results = db.query(sparql_query, initNs=ns)
        # create list
        return [str(result[0]) for result in results]
    def getUsersNumber(self):
        return self.users.getNumber()
    def getResourcesNumber(self):
        return self.resources.getNumber()
    def getTagsNumber(self):
        return self.tags.getNumber()
    def __repr__(self):
        s = "<UserResourceTagRelation> total = %d\n" % len(self.urt)
        for (u, r, t) in self.urt:
            s += "(%s , %s, %s)\n" % (u, r, t)
        return s
def init_db():
    '''Creates online Accounts (foaf:Account) for the services: Flickr, Slideshare, Delicious, etc.
    Must be called on an empty repository.
    '''
    for service_name in snaserv.services:
        service_uri = snaserv.serviceUri[service_name]
        dbAdd((URIRef(service_uri), RDF.type, FOAF.OnlineAccount),
              'adding service %s with type: %s' % (service_uri, FOAF.OnlineAccount))
if __name__ == '__main__':
    # Ad-hoc smoke test: query a known delicious user's resources.
    # NOTE(review): f is opened but never written to or closed -- all the
    # dump code below is commented out; the handle is a leftover.
    f = open('../logs/log_dbaccess_resource_repr.log', 'w')
    #f.write('\n\n\nUSERS\n\n\n')
    #u = User()
    #u.fetchFromDB()
    #f.write(repr(u))
    #f.write('\n\n\nRESOURCES\n\n\n')
    #r = Resource()
    #r.fetchFromDB()
    #f.write(repr(r))
    #print r.dict
    #f.write('\n\n\nTAAAAGS\n\n\n')
    #t = Tag()
    #t.fetchFromDB()
    #f.write(repr(t))
    #urt = UserResourceTag(u, r, t)
    #urt.fetchFromDB()
    user_uri = User.fetchUser("vladposea", "delicious")
    resources = UserResourceTag.fetchUserResources(user_uri)
    print "Number of resources " + str(len(resources))
    # NOTE(review): crashes with IndexError when the user has no resources.
    print "Resource sample: " + resources[0]
| Python |
# services
# service identifiers used as account-type keys throughout the crawler/db code
DELICIOUS = 'delicious'
FLICKR = 'flickr'
SLIDESHARE = 'slideshare'
TWITTER = 'twitter'
YOUTUBE = 'youtube'
# NOTE(review): FLICKR is defined above and present in serviceUri below, but
# it is absent from this list (used e.g. by init_db) -- confirm whether
# Flickr was dropped on purpose.
services = [DELICIOUS, SLIDESHARE, TWITTER, YOUTUBE]
# service URIs
serviceUri = {
    DELICIOUS: 'http://delicious.com/',
    FLICKR: 'http://www.flickr.com/',
    SLIDESHARE: 'http://www.slideshare.net/',
    TWITTER: 'http://twitter.com/',
    YOUTUBE: 'http://www.youtube.com/'
}
| Python |
# Sesame repository endpoint used by sna.db.dbaccess
rep = 'http://localhost:8080/openrdf-sesame/repositories/sna_study'
# number of attempts for a repository write before giving up (see dbAdd)
TRIES = 3
# seconds to wait between retry attempts
WAIT = 3
| Python |
import sys
from sna.db.dbaccess import User, Bookmark, Image, Document, Tweet, Video, \
Resource, Tag, db, UserResourceTag
from sna.study import network
from sna.db.services import DELICIOUS, FLICKR, SLIDESHARE, TWITTER, YOUTUBE
# key for the generic resource type (any resource, regardless of network)
DEFAULT = 'DEFAULT'
# maps each service identifier to a fetcher instance for its resource type
RESOURCE_TYPES = {
    DELICIOUS: Bookmark(),
    FLICKR: Image(),
    SLIDESHARE: Document(),
    TWITTER: Tweet(),
    YOUTUBE: Video(),
    DEFAULT: Resource(),
}
def get_total_no_users():
    '''Return the total number of foaf:Person users in the repository.

    BUG FIX: ``fetchFromDB()`` returns None, so the previous call chain
    ``len(User().fetchFromDB().users)`` always raised AttributeError.
    '''
    u = User()
    u.fetchFromDB()
    return u.getNumber()
def get_total_no_resources(rtype=DEFAULT):
    '''Return the number of resources of the given service type.

    - rtype: a key of RESOURCE_TYPES; None or DEFAULT count all resources.
    BUG FIX: ``fetchFromDB()`` returns None, so the previous call chain
    ``len(r.fetchFromDB().resources)`` always raised AttributeError.
    '''
    r = RESOURCE_TYPES[rtype or DEFAULT]
    r.fetchFromDB()
    return r.getNumber()
def get_total_no_tags():
    '''Return the total number of tags:Tag entries in the repository.

    BUG FIX: ``fetchFromDB()`` returns None, so the previous call chain
    ``len(Tag().fetchFromDB().tags)`` always raised AttributeError.
    '''
    t = Tag()
    t.fetchFromDB()
    return t.getNumber()
def get_total_no_urt(rtype=None, urt=None):
    '''Return the number of (user, resource, tag) taggings for a service type.

    - rtype: a service key selecting the per-network query (None -> all)
    - urt: an already-initialised UserResourceTag to reuse; when omitted,
      users/resources/tags are fetched from the repository first
    '''
    if not urt:
        u, r, t = User(), Resource(), Tag()
        u.fetchFromDB()
        r.fetchFromDB()
        t.fetchFromDB()
        # BUG FIX: fresh, empty User()/Resource()/Tag() objects used to be
        # passed here, so every relation failed the URI->index lookup in
        # fetchFromDB_query and the count was always 0.
        urt = UserResourceTag(u, r, t)
    if rtype == DELICIOUS:
        q = urt.q_del
    elif rtype == FLICKR:
        q = urt.q_img
    elif rtype == SLIDESHARE:
        q = urt.q_doc
    elif rtype == TWITTER:
        q = urt.q_tw
    elif rtype == YOUTUBE:
        q = urt.q_yt
    else:
        q = urt.q_all # all
    urt.fetchFromDB_query(q)
    return len(urt.urt)
def user_LCC(username, account):
    '''Local Clustering Coefficient for a user.

    The graph vertices are the users, and the edges the foaf:knows relations.
    Returns (lcc, (no_friends, no_followers, k, edges_in_neighbourhood)).
    Raises ValueError when the user is unknown in the given network.
    '''
    userUri = User.fetchUser(username, account) # vertex (vi)
    if not userUri:
        raise ValueError('User %s for network %s not found' % (username, account))
    outv = User.fetchFriends(userUri) # out vertices
    inv = User.fetchFollowers(userUri) # in vertices
    neigh = set(outv + inv) # Neighbourhood - all vertices linked to vi
    k = len(neigh)
    # count directed edges ejk = (vj, vk) between distinct neighbours
    e = 0
    for vj in neigh:
        for vk in neigh:
            if vj != vk and User.is_friend_with(vj, vk):
                e = e + 1
    # BUG FIX: the directed LCC is e / (k * (k - 1)); the previous expression
    # evaluated as (e / k) * (k - 1) due to operator precedence, inflating
    # the coefficient by a factor of (k - 1)^2.
    lcc = float(e) / (k * (k - 1)) if k > 1 else 0
    info = (len(outv), len(inv), k, e)
    return (lcc, info)
def print_LCC_stats_for_user(acc_username, acc_type):
    # Print the local clustering coefficient and neighbourhood stats for one
    # account; unknown users are reported instead of propagating the error.
    print '\nLCC for', acc_username, 'in network', acc_type,':'
    try:
        lcc, info = user_LCC(acc_username, acc_type)
        print 'lcc =', lcc
        print '(friends=%s,followers=%s,neighbourhood size(k)=%s,edges in neigh=%s)' % info
    except ValueError, e:
        print 'ERROR(failed to calculate LCC):', e
def print_LCC_stats():
print 'Local Clustering Coefficients:'
accounts = network.parse_accounts(open('people.in'))
for account in accounts:
pers_accounts = accounts[account]
for acc_type in pers_accounts:
acc_username = pers_accounts[acc_type]
print_LCC_stats_for_user(acc_username, acc_type)
def print_stats():
    # Print repository-wide totals: triples, users, resources and taggings,
    # broken down per social network.
    print 'Total triples =', len(db)
    print 'Total no of users =', get_total_no_users()
    # NOTE(review): each get_total_no_resources call refetches from the
    # repository -- slow, but keeps the helpers independent.
    print 'Total no of resources =', get_total_no_resources()
    print '\tTotal no of Bookmarks(Delicious) =', get_total_no_resources(rtype=DELICIOUS)
    print '\tTotal no of Documents(SlideShare) =', get_total_no_resources(rtype=SLIDESHARE)
    print '\tTotal no of Images(Flickr) =', get_total_no_resources(rtype=FLICKR)
    print '\tTotal no of Tweets(Twitter) =', get_total_no_resources(rtype=TWITTER)
    print '\tTotal no of Videos(YouTube) =', get_total_no_resources(rtype=YOUTUBE)
    print 'Total no of tags =', get_total_no_tags()
    # fetch data to use in all next calls
    u, r, t = User(), Resource(), Tag()
    u.fetchFromDB(), r.fetchFromDB(), t.fetchFromDB()
    urt = UserResourceTag(u, r, t)
    print 'Total no of taggings =', get_total_no_urt(urt=urt)
    print '\tTotal no of taggings(Delicious) =', get_total_no_urt(rtype=DELICIOUS, urt=urt)
    print '\tTotal no of taggings(Slideshare) =', get_total_no_urt(rtype=SLIDESHARE, urt=urt)
    print '\tTotal no of taggings(Flickr) =', get_total_no_urt(rtype=FLICKR, urt=urt)
    print '\tTotal no of taggings(Twitter) =', get_total_no_urt(rtype=TWITTER, urt=urt)
    print '\tTotal no of taggings(YouTube) =', get_total_no_urt(rtype=YOUTUBE, urt=urt)
if __name__ == '__main__':
    print_stats()
    print_LCC_stats()
    # extra single-user check for an account outside people.in
    print_LCC_stats_for_user('gapox', YOUTUBE)
| Python |
import sys, time
from sna.db.dbaccess import Resource
from sna.crawler.deliciouscrawler import DeliciousCrawler
from sna.crawler.flickrcrawler import FlickrCrawler
from sna.crawler.slidesharecrawler import SlideShareCrawler
from sna.crawler.twittercrawler import TwitterCrawler
from sna.crawler.youtubecrawler import YouTubeCrawler
from sna.crawler.crawler import CrawlNetworks
from sna.db.services import DELICIOUS, FLICKR, SLIDESHARE, TWITTER, YOUTUBE
def parse_accounts(fin):
    """
    Parse one semicolon-separated account line per person into a dict.

    username=erikduval; delicious=erikduval; youtube=erikduval;
    slideshare=erik.duval; flickr=erikduval; twitter=erikduval
    =>
    {erikduval: {delicious:erikduval, youtube:erikduval, slideshare:erik.duval,
    flickr:erikduval, twitter:erikduval}, ...}

    Robustness fixes over the original: blank lines and empty segments
    (e.g. a trailing ';') no longer crash the parse, and a line without a
    'username=' entry is skipped instead of reusing the previous person's
    id (or raising NameError on the first line).
    """
    accounts = {}
    for line in fin:
        line = line.strip()
        if not line:
            continue  # skip blank lines
        account = {}
        account_id = None
        for acc in line.split(';'):
            acc = acc.strip()
            if not acc:
                continue  # tolerate trailing / doubled semicolons
            acc_type, acc_username = acc.split('=')
            if acc_type == 'username':
                account_id = acc_username
            else:
                account[acc_type] = acc_username
        if account_id is None:
            continue  # no username= entry: the line cannot be keyed
        accounts[account_id] = account
    return accounts
def update_network(accounts, depth):
for account_id in accounts.keys():
print '\tupdating user:', account_id
# start time of update
sdate = time.time()
acclist = accounts.get(account_id)
crawlers, personUris = [], []
for acc_type in acclist:
acc_username = acclist[acc_type]
print '\t\tupdating', acc_type, 'account:', acc_username
if acc_username == '': continue
if acc_type == DELICIOUS:
crawl = DeliciousCrawler.factory(acc_username, depth=depth)
elif acc_type == FLICKR:
crawl = FlickrCrawler.factory(acc_username, depth=depth)
elif acc_type == SLIDESHARE:
crawl = SlideShareCrawler.factory(acc_username, depth=depth)
elif acc_type == TWITTER:
crawl = TwitterCrawler.factory(acc_username, depth=depth)
elif acc_type == YOUTUBE:
crawl = YouTubeCrawler.factory(acc_username, depth=depth)
personUris.append(crawl.getUserUri(acc_username))
crawlers.append(crawl)
# create crawlers using user network accounts data
t1 = time.clock()
CrawlNetworks(crawlers).crawl(start_time=sdate)
t2 = time.clock()
# link resources to each other
Resource.unify_all(personUris)
print 'Finished in %d seconds' % (t2 - t1)
if __name__ == '__main__':
depth = int(sys.argv[1])
fname = 'people.in'
# parse accounts from file
print 'Parsing accounts from input file...'
accounts = parse_accounts(open(fname))
# update network for accounts
print 'Updating network...'
update_network(accounts, depth)
print 'Done updating!'
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are skipped when scanning for comment blocks
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
# maps license_key (str) -> License
KNOWN_LICENSES = {}
class License:
    '''One distinct license text together with the files it applies to.'''
    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []
    def add_file(self, filename):
        '''Record *filename* as covered by this license, without duplicates.'''
        if filename in self.filenames:
            return
        self.filenames.append(filename)
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
    '''Return the canonical License object for *license_text*.

    The lookup key is the text lowercased with every non-word character
    removed, so trivially-reformatted copies of one license collapse.
    TODO(alice): a lot these licenses are almost identical Apache licenses.
    Most of them differ in origin/modifications. Consider combining similar
    licenses.
    '''
    key = LICENSE_KEY.sub("", license_text).lower()
    return KNOWN_LICENSES.setdefault(key, License(license_text))
def discover_license(exact_path, filename):
    """Scan one file and register any license text in KNOWN_LICENSES.

    Two discovery modes:
      * files named ``*LICENSE`` hold a standalone license applying to the
        filename prefix (``foo.LICENSE`` -> ``foo``);
      * any other non-excluded file is searched for /* ... */ comment blocks
        mentioning both "license" and "copyright".
    """
    # when filename ends with LICENSE, assume applies to filename prefixed
    if filename.endswith("LICENSE"):
        with open(exact_path) as file:
            license_text = file.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."): target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # try searching for license blocks in raw file
    # FIX: guess_type() returns a (type, encoding) tuple; compare the type
    # string itself, otherwise the EXCLUDE_TYPES check never matched anything
    mimetype = mimetypes.guess_type(filename)[0]
    if mimetype in EXCLUDE_TYPES: return None
    with open(exact_path) as file:
        raw_file = file.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
        comment = comment.group(1)
        if COMMENT_LICENSE.search(comment) is None: continue
        if COMMENT_COPYRIGHT.search(comment) is None: continue
        find_license(comment).add_file(filename)
# walk every requested source tree and collect license info per file
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# emit a single HTML report to stdout: one section per distinct license text,
# listing the files that carry it followed by the license itself
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
# script to perform the py2exe setup and then get innosetup to build the installer
# optionally can increment the version number of the output setup
from distutils.core import setup
import py2exe
import os
import sys
import subprocess
def copy_plugins(pluginsdir, distdir, plugintype):
    """Copy all Qt plugin files of *plugintype* from *pluginsdir* to *distdir*.

    Creates ``distdir/plugintype`` if missing. Exits the process on failure
    (codes 2-4), preserving the original fail-fast behaviour.
    """
    dst = os.path.join(distdir, plugintype)
    print("copy_plugins: dst: %s" % (dst))
    if not os.path.isdir(dst):
        try:
            os.mkdir(dst)
        except Exception as e:
            print("Unable to make plugins destination dir %s, bailing out" % (dst))
            print("(%s)" % (str(e)))
            sys.exit(2)
    src = os.path.join(pluginsdir, plugintype)
    if not os.path.isdir(src):
        print("Unable to find source plugins at %s, bailing out" % (src))
        sys.exit(3)
    for f in os.listdir(src):
        sys.stdout.write(" copy plugin %s :: %s" % (plugintype, f))
        sys.stdout.flush()
        try:
            # FIX: the original leaked both file handles; context managers
            # guarantee they are closed even when the copy fails
            with open(os.path.join(src, f), "rb") as infile:
                data = infile.read()
            with open(os.path.join(dst, f), "wb") as outfile:
                outfile.write(data)
            print("(ok)")
        except Exception as e:
            print("(fail)")
            print("(%s)" % (str(e)))
            sys.exit(4)
if __name__ == "__main__":
    # step 1: run the py2exe build via the companion setup script
    print(">>> Generating distributable contents")
    cmd = "%s %s %s" % (sys.executable, os.path.join(os.path.dirname(sys.argv[0]), "setup_foobrowser.py"), "py2exe")
    os.system(cmd)
    # step 2: copy the Qt codec/imageformat plugins next to the frozen app
    # (py2exe does not collect them automatically)
    print(">>> Ensuring Qt plugins are in place")
    pydir = os.path.dirname(sys.executable)
    pluginsdir = os.path.join(pydir, "Lib", "site-packages", "PyQt4", "plugins")
    if not os.path.isdir(pluginsdir):
        print("Unable to find Qt plugins at %s, bailing out" % (pluginsdir))
        sys.exit(1)
    for ptype in ["codecs", "imageformats"]:
        copy_plugins(pluginsdir, os.path.join(os.path.dirname(sys.argv[0]), "dist"), ptype)
    iss_file = os.path.join(os.path.dirname(sys.argv[0]), "foobrowser.iss")
    # step 3 (optional, -incver flag): bump MyAppVersion in the InnoSetup
    # script by 0.1, rewriting the .iss file in place
    if "-incver" in sys.argv[1:]:
        new_iss = []
        for line in open(iss_file, "r"):
            line = line.strip()
            parts = line.split(" ")
            if len(parts) == 3 and parts[0] == "#define" and parts[1] == "MyAppVersion":
                # NOTE(review): float arithmetic; "%.1f" keeps one decimal, so
                # versions like "1.10" cannot be represented -- confirm intended
                ver = float(parts[2].strip("\""))
                ver += 0.1
                parts[2] = "\"%.1f\"" % (ver)
                line = " ".join(parts)
            new_iss.append(line)
        open(iss_file, "w").write("\n".join(new_iss))
    # step 4: compile the installer with Inno Setup (hard-coded install path)
    cmd = "\"C:\\Program Files (x86)\\Inno Setup 5\\Compil32.exe\" /cc \"%s\"" % (iss_file)
    print(">>> building setup")
    print(cmd)
    subprocess.call(cmd)
| Python |
#!/usr/bin/python
# minimalistic browser levering off of Python, PyQt and Webkit
from PyQt4 import QtGui, QtCore, QtWebKit, QtNetwork
import sqlite3
import os
import sys
import time
import base64
import socket
import sip # put here to make py2exe work better
import subprocess
def registerShortcuts(actions, defaultOwner):
    """Create a QShortcut for each entry in *actions*.

    Each value is [callback, keyspec, ...]; keyspec may contain several key
    sequences separated by '|', or the literal "none" to disable the action.
    The shortcut's owner widget is the entry's third element when it is not a
    string, the fourth element for 4-item entries, else *defaultOwner*.
    """
    for name in actions:
        entry = actions[name]
        keyspec = entry[1]
        if keyspec.lower() == "none":
            continue
        handler = entry[0]
        # resolve the owning widget once; the original did this per key,
        # but the result is identical every iteration
        if len(entry) == 2:
            owner = defaultOwner
        elif type(entry[2]) != str:
            owner = entry[2]
        elif len(entry) == 4:
            owner = entry[3]
        else:
            owner = defaultOwner
        # '|' separates alternative key sequences; blanks are skipped
        for key in keyspec.split("|"):
            key = key.strip()
            if key == "":
                continue
            QtGui.QShortcut(key, owner, handler)
class Icons:
    """Container class to hold icons for Bonsai in base64-encoded format"""
    def __init__(self):
        # Windows: register an explicit AppUserModelID so the taskbar shows
        # this app's icon rather than the generic python.exe one
        if os.name == "nt":
            import ctypes
            try:
                ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(os.path.basename(sys.argv[0]))
            except:
                # not a win7 client
                pass
        # maps icon name -> base64-encoded PNG data
        self.icons = dict()
        self.icons["foobrowser"] = "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A\
/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9sIFw0xIyVxhzgAAAAZdEVYdENv\
bW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAABJUlEQVR42u3WMUoDURSF4V8JKAoqQlyJ2Fll\
B+JKRF2ChcsSQU0hSlAXYJoEUihok0KbOxBkeMZ40Vf8HzyGyTtnArd4MyBJkiRJkiRJkiTN4+Ob\
NasLnAED4C3WIH7rFv5j0V5VA9gDxoXcODJfLdr78wGUbALDyF0BPWA9Vg+4ib1hZH/bq24AJ5G5\
BVZa9leB+8gcJ/SqG8BlZA4KmcPIXCT0qjkDGpO4Lx1YO5GZJPSqG8A07juF53QiM03o/chy0iCW\
WlbjNa5bhf52XF8Sev8ygJKnuO4XMs3eQ0KvukPwdI7T/C4yRwm9qr8DruMdvharB/Rj7xnYSOhV\
NwCAXWBUODBHkcnqVTeAtm/6d+AROI/XWXZPkiRJkiRJkiRJsz4BmPGrGG5RdTEAAAAASUVORK5C\
YII="
    def QIcon(self, name):
        # Return a QIcon for *name*, or None when the name is unknown or the
        # pixmap data cannot be decoded/loaded.
        if list(self.icons.keys()).count(name) == 0:
            return None
        pixmap = QtGui.QPixmap()
        # prefer base64.decodebytes (py3 name); fall back to b64decode
        if dir(base64).count("decodebytes"):
            if not pixmap.loadFromData(base64.decodebytes(bytes(self.icons[name], encoding="UTF-8"))):
                return None
        elif dir(base64).count("b64decode"):
            if not pixmap.loadFromData(base64.b64decode(self.icons[name])):
                return None
        icon = QtGui.QIcon(pixmap)
        return icon
class DiskCookies(QtNetwork.QNetworkCookieJar):
    # Cookie jar persisted to an sqlite db at <storage_location>/cookies.db.
    # Cookies are loaded once at construction and written back via Persist().
    def __init__(self, storage_location, parent=None):
        self.db = None
        QtNetwork.QNetworkCookieJar.__init__(self, parent)
        self.LoadFromDisk(storage_location)
    def LoadFromDisk(self, path):
        # Open (creating and initialising if absent) the cookie database under
        # *path* and populate the in-memory jar from every stored row.
        cookiefile = os.path.join(path, "cookies.db")
        init = False
        if not os.path.isfile(cookiefile):
            init = True
        self.db = sqlite3.connect(cookiefile)
        if init:
            self.initDB()
        cur = self.db.execute("select domain, expires, http_only, secure, name, path, value from cookies;")
        cookies = []
        for row in cur.fetchall():
            cookie = QtNetwork.QNetworkCookie()
            cookie.setDomain(row[0])
            try:
                # an empty expires column means a session cookie: leave unset
                if len(row[1]):
                    e = time.strptime(row[1], "%Y-%m-%d %H:%M:%S")
                    cookie.setExpirationDate(QtCore.QDateTime(e.tm_year, e.tm_mon, e.tm_mday, e.tm_hour, e.tm_min, e.tm_sec))
            except:
                pass
            if row[2]:
                cookie.setHttpOnly(True)
            else:
                cookie.setHttpOnly(False)
            if row[3]:
                cookie.setSecure(True)
            else:
                cookie.setSecure(False)
            cookie.setName(row[4])
            cookie.setPath(row[5])
            try:
                # Python 2 path: value already byte-compatible
                cookie.setValue(bytes(row[6]))
            except:
                try:
                    # Python 3 path: bytes() requires an explicit encoding
                    cookie.setValue(bytes(row[6], encoding="UTF-8"))
                except:
                    pass
            cookies.append(cookie)
        self.setAllCookies(cookies)
    def clear(self):
        # Drop every cookie, both in memory and in the on-disk table.
        self.setAllCookies([])
        if self.db:
            self.db.execute("delete from cookies;")
    def quote(self, s):
        # minimal SQL string-literal quoting: doubles embedded single quotes
        s = str(s)
        return "'%s'" % (s.replace("'", "''"))
    def boolToInt(self, b):
        if b:
            return 1
        else:
            return 0
    def Persist(self):
        # Rewrite the on-disk table from the in-memory jar, then commit and
        # CLOSE the database (the jar is unusable for further persistence).
        if self.db == None:
            return
        self.db.execute("delete from cookies;")
        sqlstr = "insert into cookies (domain, expires, http_only, secure, name, path, value) values (%s, %s, %i, %i, %s, %s, %s);";
        fmt = "yyyy-MM-dd hh:mm:ss"
        for cookie in self.allCookies():
            cval = cookie.value().data()
            if type(cval) != str:
                cval = cval.decode("UTF-8")
            #print("cookie:\n\tsession: %s\n\tname: %s\n\tpath: %s\n\tvalue: %s\n\t" % (str(cookie.isSessionCookie()),cookie.name(), cookie.path(), cval))
            esql = (sqlstr % (self.quote(cookie.domain()),
                self.quote(cookie.expirationDate().toString(fmt)),
                self.boolToInt(cookie.isHttpOnly()),
                self.boolToInt(cookie.isSecure()),
                self.quote(cookie.name()),
                self.quote(cookie.path()),
                self.quote(cookie.value().data().decode("UTF-8"))))
            self.db.execute(esql)
        self.db.commit()
        self.db.close()
    def initDB(self):
        # NOTE(review): the schema declares a "session" column that neither
        # the select nor the insert uses -- confirm whether it can be dropped
        cur = self.db.execute("create table cookies(domain text, expires text, http_only int, secure int, session int, name text, path text, value text);")
        cur.close()
class FooWebView(QtWebKit.QWebView):
    """QWebView that opens requested new windows as tabs in the owning browser."""
    def __init__(self, parent = None):
        # keep a handle on the owning WebTab so createWindow can reach the browser
        self.parent = parent
        QtWebKit.QWebView.__init__(self, parent)
    def createWindow(self, type):
        # instead of a real new window, ask the browser for a fresh tab and
        # hand WebKit that tab's view
        fresh_tab = self.parent.browser.addTab()
        return fresh_tab.webkit
class WebTab(QtGui.QWidget):
    """One browser tab: address combo box, progress bar, WebKit view,
    find-in-page bar and an optional per-tab status bar.

    *browser* is the owning MainWin (or None for the throwaway template tab);
    *actions* optionally overrides default shortcut keys per action name.
    """
    def __init__(self, browser, actions=None, parent=None, showStatusBar=False):
        QtGui.QWidget.__init__(self, parent)
        self.actions = dict()
        self.grid = QtGui.QGridLayout(self)
        self.grid.setSpacing(1)
        # editable combo doubles as address bar and history drop-down
        self.cmb = QtGui.QComboBox()
        self.cmb.setEditable(True)
        self.browser = browser
        if browser is not None:
            browser.LoadHistoryToCmb(self.cmb)
        self.webkit = FooWebView(self)
        self.webkit.page().setLinkDelegationPolicy(QtWebKit.QWebPage.DelegateAllLinks)
        self.webkit.linkClicked.connect(self.onLinkClick)
        self.webkit.settings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled,True)
        self.pbar = QtGui.QProgressBar()
        self.pbar.setRange(0, 100)
        self.pbar.setTextVisible(False)
        self.grid.addWidget(self.cmb, 0, 0)
        self.grid.addWidget(self.pbar, 1, 0, 1, self.grid.columnCount())
        self.grid.addWidget(self.webkit, 2, 0, 1, self.grid.columnCount())
        self.pbar.setVisible(False)
        self.pbar.setMaximumHeight(10)
        # find-in-page bar, hidden until the user invokes search
        self.fraSearch = QtGui.QFrame()
        self.searchGrid = QtGui.QGridLayout(self.fraSearch)
        self.searchGrid.setSpacing(1)
        self.lblSearch = QtGui.QLabel("Find text in page:")
        self.txtSearch = QtGui.QLineEdit()
        self.btnClearSearch = QtGui.QPushButton("[X]")
        self.searchGrid.addWidget(self.lblSearch, 0, 0)
        self.searchGrid.addWidget(self.txtSearch, 0, 1)
        self.searchGrid.addWidget(self.btnClearSearch, 0, 2)
        self.statusbar = QtGui.QStatusBar()
        self.statusbar.setVisible(showStatusBar)
        self.statusbar.setMaximumHeight(25)
        self.grid.addWidget(self.statusbar, self.grid.rowCount(), 0, 1, self.grid.columnCount())
        for i in range(2):
            self.searchGrid.setColumnStretch(i, i % 2)
        self.fraSearch.setVisible(False)
        self.grid.addWidget(self.fraSearch, self.grid.rowCount() + 1, 0, 1, self.grid.columnCount())
        # only the webkit row/column stretches; everything else stays compact
        for c in range(self.grid.columnCount() + 1):
            self.grid.setColumnStretch(c, 0)
        for r in range(self.grid.rowCount() + 1):
            self.grid.setRowStretch(r, 0)
        self.grid.setRowStretch(2, 1)
        self.grid.setColumnStretch(0, 1)
        self.connect(self.cmb, QtCore.SIGNAL("currentIndexChanged(int)"), self.navigate)
        if browser:
            self.browser.setupWebkit(self.webkit)
        self.connect(self.webkit, QtCore.SIGNAL("iconChanged()"), self.setIcon)
        self.connect(self.webkit, QtCore.SIGNAL("loadStarted()"), self.loadStarted)
        self.connect(self.webkit, QtCore.SIGNAL("loadFinished(bool)"), self.loadFinished)
        self.connect(self.webkit, QtCore.SIGNAL("titleChanged(QString)"), self.setTitle)
        self.connect(self.webkit, QtCore.SIGNAL("loadProgress(int)"), self.loadProgress)
        self.connect(self.webkit, QtCore.SIGNAL("urlChanged(QUrl)"), self.setURL)
        self.connect(self.webkit.page(), QtCore.SIGNAL("linkHovered(QString, QString, QString)"), self.onLinkHovered)
        page = self.webkit.page()
        # route downloads and unsupported content to the browser's downloader
        page.downloadRequested.connect(self.onDownloadRequested)
        page.setForwardUnsupportedContent(True)
        page.unsupportedContent.connect(self.onUnsupportedContent)
        self.connect(self.btnClearSearch, QtCore.SIGNAL("clicked()"), self.stopOrHideSearch)
        self.connect(self.txtSearch, QtCore.SIGNAL("textChanged(QString)"), self.doSearch)
        self.registerActions(actions)
        registerShortcuts(self.actions, self)
        self.cmb.setFocus()
        self.showHideMessage()
    def onLinkClick(self, qurl):
        self.navigate(qurl.toString())
    def registerActions(self, template):
        """Populate the default per-tab action table, then overlay any
        shortcut-key overrides supplied in *template* (same action names)."""
        self.actions["addressnav"] = [self.navigate, "Enter", self.cmb, "Navigate to the url in the address bar"]
        self.actions["reload"] = [self.reload, "F5|Ctrl+R", "Reload the current page"]
        self.actions["back"] = [self.back, "Alt+Left", "Go back in history"]
        self.actions["fwd"] = [self.fwd, "Alt+Right", "Go forward in history"]
        # FIX: Qt key sequences join modifiers with '+'; "Ctrl-F" did not parse
        self.actions["search"] = [self.showSearch, "/|Ctrl+F", "Search in page"]
        self.actions["smartsearch"] = [self.smartSearch, "F3", "Smart search (find next or start search)"]
        self.actions["stopsearch"] = [self.stopOrHideSearch, "Escape", self.fraSearch, "Stop current load or searching"]
        self.actions["findnext"] = [self.doSearch, "Return", self.txtSearch, "Next match for current search"]
        self.actions["togglestatus"]= [self.toggleStatus, "Ctrl+Space", "Toggle visibility of status bar"]
        if template:
            actionnames = list(self.actions.keys())
            for action in template:
                if actionnames.count(action):
                    self.actions[action][1] = template[action][1]
    def toggleStatus(self):
        if self.browser:
            self.browser.toggleStatusVisiblity()
        else:
            # FIX: was "self.statusBar" (a Qt method name, not this widget's
            # attribute), which raised AttributeError; use self.statusbar
            self.statusbar.setVisible(not self.statusbar.isVisible())
    def setStatusVisibility(self, visible):
        self.statusbar.setVisible(visible)
    def loadContent(self, html, baseUrl = None):
        """Display literal *html*, optionally resolving links against baseUrl."""
        # FIX: QUrl lives in QtCore (not QtWebKit) and the QWebView method is
        # setHtml (not setHTML); the original raised AttributeError here
        if baseUrl:
            baseUrl = QtCore.QUrl(baseUrl)
        else:
            baseUrl = QtCore.QUrl()
        self.webkit.setHtml(html, baseUrl)
    def onUnsupportedContent(self, reply):
        self.log("Unsupported content %s" % (reply.url().toString()))
        if self.browser:
            self.browser.addDownload(reply.url().toString())
    def onDownloadRequested(self, request):
        if self.browser:
            self.browser.addDownload(request.url().toString())
    def log(self, s):
        # FIX: onUnsupportedContent called self.log(), but WebTab never
        # defined it (AttributeError); delegate to the owning browser's logger
        if self.browser:
            self.browser.log(s)
    def doSearch(self, s = None):
        if s is None: s = self.txtSearch.text()
        self.webkit.findText(s, QtWebKit.QWebPage.FindWrapsAroundDocument)
    def stopOrHideSearch(self):
        # Escape: hide the find bar when it is open, otherwise stop the load
        if self.fraSearch.isVisible():
            self.fraSearch.setVisible(False)
            self.webkit.setFocus()
        else:
            self.webkit.stop()
    def showSearch(self):
        self.txtSearch.setText("")
        self.fraSearch.setVisible(True)
        self.txtSearch.setFocus()
    def zoom(self, lvl):
        # each step changes the zoom factor by 0.25
        self.webkit.setZoomFactor(self.webkit.zoomFactor() + (lvl * 0.25))
    def stop(self):
        self.webkit.stop()
    def URL(self):
        return self.cmb.currentText()
    def loadProgress(self, val):
        if self.pbar.isVisible():
            self.pbar.setValue(val)
    def setTitle(self, title):
        if self.browser:
            self.browser.setTabTitle(self, title)
    def setURL(self, url):
        self.cmb.setEditText(url.toString())
    def refresh(self):
        self.navigate(self.URL())
        self.webkit.reload()
    def loadStarted(self):
        self.showProgressBar()
    def loadFinished(self, success):
        self.hideProgressBar()
        self.setIcon()
        # hand focus to the page unless the user is typing a new address
        if self.cmb.hasFocus():
            self.webkit.setFocus()
    def showProgressBar(self):
        self.pbar.setValue(0)
        self.pbar.setVisible(True)
    def hideProgressBar(self, success = False):
        self.pbar.setVisible(False)
    def setIcon(self):
        if self.browser:
            self.browser.setTabIcon(self, self.webkit.icon())
    def reload(self):
        self.webkit.reload()
    def smartSearch(self):
        # F3: find-next when the search bar is open, otherwise start a search
        if self.fraSearch.isVisible():
            self.doSearch()
        else:
            self.showSearch()
    def mkShortcuts(self):
        # FIX: the original body was the truncated statement "self.bro", which
        # raised AttributeError if ever called; the method appears unused, so
        # keep it as an explicit no-op for interface compatibility
        pass
    def fwd(self):
        self.webkit.history().forward()
    def back(self):
        self.webkit.history().back()
    def navigate(self, url = None):
        """Load *url* (or the address-bar text): serves about: pages, applies
        the browser's url fix-up / smart search and records history."""
        if url and type(url) == str:
            u = url
        else:
            u = str(self.cmb.currentText())
        parts = u.split(":")
        if len(parts) == 2 and parts[0] == "about":
            self.navabout(parts[1].strip().lower())
            return
        if u.strip() == "":
            return
        if self.browser is not None:
            u = self.browser.fixUrl(u)
        self.cmb.setEditText(u)
        if self.browser is not None:
            self.browser.addHistory(u)
        url = QtCore.QUrl(u)
        self.setTitle("Loading...")
        self.webkit.load(url)
    def onStatusBarMessage(self, s):
        if s:
            self.statusbar.showMessage(s)
        else:
            self.showHideMessage()
    def showHideMessage(self):
        # idle status text reminding the user of the toggle shortcut
        self.statusbar.showMessage("(press %s to hide this)" % (self.actions["togglestatus"][1]))
    def onLinkHovered(self, link, title, content):
        if link or title:
            if title and not link:
                self.statusbar.showMessage(title)
            elif link and not title:
                self.statusbar.showMessage(link)
            elif link and title:
                self.statusbar.showMessage("%s (%s)" % (title, link))
        else:
            self.showHideMessage()
    def navabout(self, dst):
        """Serve the built-in about: pages (help, foo, nothing; else a 404)."""
        if self.browser is None:
            return
        if dst == "help":
            self.webkit.setHtml(self.browser.genHelp())
            self.cmb.setEditText("about:help")
            return
        elif dst == "foo":
            self.webkit.setHtml(self.browser.genAboutFoo())
            self.cmb.setEditText("about:foo")
            return
        elif dst == "nothing":
            self.webkit.setHtml("")
            self.cmb.setEditText("about:nothing")
            return
        self.webkit.setHtml("<p>Sorry, Jim, that resource cannot be found</p>")
        self.cmb.setEditText("about:lost")
class PrivacyDialog(QtGui.QDialog):
    # Modal dialog asking which private data (cookies/history/cache) to clear.
    # The caller inspects the three checkbox widgets after exec_() accepts.
    def __init__(self, parent=None, icon=None):
        QtGui.QDialog.__init__(self, parent)
        if icon:
            self.setWindowIcon(icon)
        self.setWindowTitle("Clear private data")
        self.chkClearCookies = QtGui.QCheckBox("Clear cookies")
        self.chkClearHistory = QtGui.QCheckBox("Clear history")
        self.chkClearCache = QtGui.QCheckBox("Clear cache")
        self.btnOk = QtGui.QPushButton("OK")
        self.btnCancel = QtGui.QPushButton("Cancel")
        self.grid = QtGui.QGridLayout(self)
        row = 0
        # all options default to checked
        for chk in [self.chkClearCookies, self.chkClearHistory, self.chkClearCache]:
            self.grid.addWidget(chk, row, 0, 1, 3)
            chk.setChecked(True)
            row += 1
        # reserve one stretchable spacer row between checkboxes and buttons
        growrow = row
        row += 1
        self.grid.addWidget(self.btnOk, row, 1)
        self.grid.addWidget(self.btnCancel, row, 2)
        for i in range(self.grid.rowCount()):
            self.grid.setRowStretch(i, 0)
        self.grid.setRowStretch(growrow, 1)
        self.grid.setColumnStretch(0, 1)
        for i in range(self.grid.columnCount()):
            if i:
                self.grid.setColumnStretch(i, 1)
        self.connect(self.btnOk, QtCore.SIGNAL("clicked()"), self.accept)
        self.connect(self.btnCancel, QtCore.SIGNAL("clicked()"), self.reject)
class AuthDialog(QtGui.QDialog):
    # Modal username/password prompt for HTTP authentication; use prompt().
    def __init__(self, parent=None, icon=None):
        QtGui.QDialog.__init__(self, parent)
        if icon:
            self.setWindowIcon(icon)
        self.setWindowTitle("Authentication required")
        self.lblAuth = QtGui.QLabel("Authentication required")
        self.lblUserName = QtGui.QLabel("Username:")
        self.txtUserName = QtGui.QLineEdit()
        self.lblPassword = QtGui.QLabel("Password:")
        self.txtPassword = QtGui.QLineEdit()
        # mask password input
        self.txtPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.btnCancel = QtGui.QPushButton("Cancel")
        self.btnOK = QtGui.QPushButton("OK")
        self.grid = QtGui.QGridLayout(self)
        self.grid.addWidget(self.lblAuth, 0, 0, 1, 3)
        self.grid.addWidget(self.lblUserName, 1, 0)
        self.grid.addWidget(self.txtUserName, 1, 1, 1, 3)
        self.grid.addWidget(self.lblPassword, 2, 0)
        self.grid.addWidget(self.txtPassword, 2, 1, 1, 3)
        self.grid.addWidget(self.btnOK, 3, 2)
        self.grid.addWidget(self.btnCancel, 3, 3)
        for i in range(self.grid.columnCount()):
            self.grid.setColumnStretch(i, 0)
        self.grid.setColumnStretch(1, 1)
        for i in range(self.grid.rowCount()):
            self.grid.setRowStretch(i, 0)
        # set by onOK/onCancel so prompt() knows how the dialog was dismissed
        self.cancelled = False
        self.connect(self.btnCancel, QtCore.SIGNAL("clicked()"), self.onCancel)
        self.connect(self.btnOK, QtCore.SIGNAL("clicked()"), self.onOK)
    def onOK(self):
        self.cancelled = False
        self.close()
    def onCancel(self):
        self.cancelled = True
        self.close()
    def prompt(self, url=None):
        # Show the dialog (optionally naming the requesting *url*) and return
        # (username, password), or (None, None) when the user cancelled.
        self.cancelled = False
        if url:
            self.lblAuth.setText("The page at:\n\n%s\n\nrequires authentication to continue" % (url))
        self.exec_()
        if self.cancelled:
            return None, None
        else:
            return self.txtUserName.text(), self.txtPassword.text()
class MainWin(QtGui.QMainWindow):
def __init__(self, debug=False):
QtGui.QMainWindow.__init__(self, None)
self.downloader = None
self.debug = debug
self.actions = dict()
self.tabactions = dict()
self.tabactions = dict()
tmp = WebTab(None, None)
self.tabactions = tmp.actions
self.configdir = os.path.join(os.path.expanduser("~"), ".foobrowser")
self.registerActions()
self.showStatusBar = False
self.loadConfig()
self.icons = Icons()
self.setWindowIcon(self.icons.QIcon("foobrowser"))
self.appname = "Foo browser!"
self.cache_mb = 512
self.maxHistory = 4096
self.tabs = []
self.historyDateFormat = "%Y-%m-%d %H:%M:%S"
self.maxTitleLen = 40
if not os.path.isdir(self.configdir):
try:
os.mkdir(self.configdir)
except Exception as e:
self.configdir = None
if self.configdir is not None:
self.loadHistory()
self.disk_cache = None
self.cookie_jar = None
if self.configdir:
cachedir = os.path.join(self.configdir, "cache")
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
self.disk_cache = QtNetwork.QNetworkDiskCache()
self.disk_cache.setCacheDirectory(cachedir)
self.disk_cache.setMaximumCacheSize(self.cache_mb * (1024 * 1024))
self.cookie_jar = DiskCookies(self.configdir)
self.auth_cache = dict()
tmp.deleteLater()
self.mkGui()
registerShortcuts(self.actions, self)
def loadConfig(self):
if self.configdir:
if not os.path.isdir(self.configdir):
try:
os.mkdir(self.configdir)
except:
return
conffile = os.path.join(self.configdir, "config.ini")
if not os.path.isfile(conffile):
return
try:
fp = open(conffile, "r")
except:
return
section = ""
for line in fp:
line = line.strip()
if len(line) == 0:
continue
if line[0] == "[" and line[-1] == "]":
section = line.strip("[]")
continue
parts = line.split("=")
if len(parts) < 2:
continue
setting = parts[0].strip()
value = "=".join(parts[1:]).strip()
self.log("config: %s/%s/%s" % (section, setting, value))
if section == "shortcuts":
setting = setting.lower()
if list(self.actions.keys()).count(setting):
self.actions[setting][1] = value
continue
if section == "tabshortcuts":
setting = setting.lower()
if list(self.tabactions.keys()).count(setting):
self.tabactions[setting][1] = value
continue
if section == "general":
setting = setting.lower()
if setting == "downloader":
if value.lower() != "none":
self.log("setting downloader to %s" % (value))
self.downloader = value
elif setting == "showstatus":
if value.lower() in ["yes", "true", "1"]:
self.showStatusBar = True
else:
self.showStatusBar = False
fp.close()
def toggleStatusVisiblity(self):
self.showStatusBar = not self.showStatusBar
for t in self.tabs:
t.setStatusVisibility(self.showStatusBar)
def persistConfig(self):
if self.configdir:
if not os.path.isdir(self.configdir):
try:
os.mkdir(self.configdir)
except:
return
conffile = os.path.join(self.configdir, "config.ini")
try:
fp = open(conffile, "w")
except:
return
# write out shortcuts
fp.write("[general]\n")
fp.write("; general settings\n")
if self.downloader:
fp.write("downloader = %s\n" % (str(self.downloader)))
else:
fp.write("downloader = None\n")
if self.showStatusBar:
fp.write("showstatus = True\n")
else:
fp.write("showstatus = False\n")
fp.write("[shortcuts]\n")
fp.write("; shortcuts applied to the application as a whole\n")
actionnames = list(self.actions.keys())
actionnames.sort()
for action in actionnames:
fp.write("%s = %s\n" % (action, self.actions[action][1]))
fp.write("[tabshortcuts]\n")
fp.write("; shortcuts applied to individual tabs\n")
actionnames = list(self.tabactions.keys())
actionnames.sort()
for action in actionnames:
fp.write("%s = %s\n" % (action, self.tabactions[action][1]))
fp.close()
def registerActions(self):
self.actions["newwin"] = [self.addWin, "Ctrl+N", "Open new window"]
self.actions["newtab"] = [self.addTab, "Ctrl+T", "Open new tab"]
self.actions["closetab"] = [self.delTab, "Ctrl+W", "Close current tab"]
self.actions["tabprev"] = [self.decTab, "Ctrl+PgUp", "Switch to previous tab"]
self.actions["tabnext"] = [self.incTab, "Ctrl+PgDown", "Switch to next tab"]
self.actions["go"] = [self.currentTabGo, "Ctrl+G", "Focus address bar"]
self.actions["close"] = [self.close, "Ctrl+Q", "Close application"]
self.actions["zoomin"] = [self.zoomIn, "Ctrl+Up", "Zoom into page"]
self.actions["zoomout"] = [self.zoomOut, "Ctrl+Down", "Zoom out of page"]
self.actions["help"] = [self.showHelp, "F1", "Show this help page"]
self.actions["cleardata"] = [self.clearData, "Ctrl+Shift+Delete", "Clear cache and private data"]
def clearData(self):
dlg = PrivacyDialog(parent=self, icon=self.icons.QIcon("foobrowser"))
if dlg.exec_() == QtGui.QDialog.Accepted:
if dlg.chkClearCookies.isChecked() and self.cookie_jar:
self.cookie_jar.clear()
if dlg.chkClearCache.isChecked() and self.disk_cache:
self.disk_cache.clear()
if dlg.chkClearHistory.isChecked():
self.history = {}
def showHelp(self):
self.addTab().navigate("about:help")
def addWin(self):
MainWin().show()
def currentTabGo(self):
self.tabs[self.tabWidget.currentIndex()].cmb.setFocus()
def zoomIn(self):
self.zoom(1)
def zoomOut(self):
self.zoom(-1)
def zoom(self, lvl):
self.tabs[self.tabWidget.currentIndex()].zoom(lvl)
def decTab(self):
self.incTab(-1)
def incTab(self, incby = 1):
if self.tabWidget.count() < 2:
return
idx = self.tabWidget.currentIndex()
idx += incby
if idx < 0:
idx = self.tabWidget.count()-1;
elif idx >= self.tabWidget.count():
idx = 0
self.tabWidget.setCurrentIndex(idx)
def setTabIcon(self, tab, icon):
idx = self.getTabIndex(tab)
if idx > -1:
self.tabWidget.setTabIcon(idx, icon)
def setTabTitle(self, tab, title):
idx = self.getTabIndex(tab)
if idx > -1:
if len(title) > self.maxTitleLen:
title = title[:self.maxTitleLen-3] + "..."
self.tabWidget.setTabText(idx, title)
def getTabIndex(self, tab):
for i in range(len(self.tabs)):
if tab == self.tabs[i]:
return i
return -1
def setupWebkit(self, webkit):
nam = webkit.page().networkAccessManager()
nam.authenticationRequired.connect(self.onAuthRequest)
nam.setCache(self.disk_cache)
nam.setCookieJar(self.cookie_jar)
self.cookie_jar.setParent(None)
self.disk_cache.setParent(None)
g = webkit.settings()
g.enablePersistentStorage(self.configdir)
def onAuthRequest(self, networkreply, authenticator):
cached = list(self.auth_cache.keys())
r = authenticator.realm()
if cached.count(r):
authenticator.setUser(self.auth_cache[r]["user"])
authenticator.setPassword(self.auth_cache[r]["password"])
else:
authdlg = AuthDialog(parent=self, icon=self.icons.QIcon("foobrowser"))
username, password = authdlg.prompt(networkreply.url().toString())
if username and password:
authenticator.setUser(username)
authenticator.setPassword(password)
self.auth_cache[r] = {"user":username, "password":password}
def closeEvent(self, e):
if len(self.tabs) > 1:
if QtGui.QMessageBox.question(self, "Confirm exit", "You have more than one tab open. Are you sure you want to exit?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
e.ignore()
return
self.persistHistory()
self.persistConfig()
if self.disk_cache:
self.disk_cache.expire()
if self.cookie_jar:
self.cookie_jar.Persist()
e.accept()
self.close()
def log(self, s):
if self.debug:
print(s)
def persistHistory(self):
if self.configdir is None:
return
hfile = os.path.join(self.configdir, "history")
try:
fp = open(hfile, "w")
except Exception as e:
return
keys = list(self.history.keys())
keys.sort()
for k in keys[:self.maxHistory]: # only store up to the last maxHistory history points
fp.write("%s :: %s\n" % (time.strftime(self.historyDateFormat, k), self.history[k]))
fp.close()
def loadHistory(self):
self.history = {}
if self.configdir is None:
return
hfile = os.path.join(self.configdir, "history")
if os.path.isfile(hfile):
for line in open(hfile, "r"):
line = line.strip()
parts = line.split("::")
try:
k = time.strptime(parts[0].strip(), self.historyDateFormat)
hurl = "::".join(parts[1:]).strip()
self.history[k] = hurl
except Exception as e:
self.log(str(e))
pass
def LoadHistoryToCmb(self, cmb):
if self.configdir is None:
return
keys = list(self.history.keys())
keys.sort(reverse=True)
if keys:
cmb.addItem("")
items = []
for k in keys:
if self.history[k] in items:
continue
cmb.addItem(self.history[k])
items.append(self.history[k])
if keys:
cmb.setCurrentIndex(0)
def addHistory(self, url, when = None):
if when is None:
when = time.localtime()
self.history[when] = url
def mkGui(self):
self.layout().setSpacing(1)
self.setWindowTitle(self.appname)
self.tabWidget = QtGui.QTabWidget(self)
self.tabWidget.tabBar().setMovable(True)
self.tabWidget.setStyleSheet("padding: 2px; margin: 2px;")
self.setCentralWidget(self.tabWidget)
self.tabWidget.setTabsClosable(True)
self.connect(self.tabWidget, QtCore.SIGNAL("tabCloseRequested(int)"), self.delTab)
self.connect(self, QtCore.SIGNAL("refreshAll()"), self.refreshAll)
self.addTab()
def addTab(self, url = None):
tab = WebTab(browser=self, actions=self.tabactions, showStatusBar = self.showStatusBar)
self.tabWidget.addTab(tab, "New tab")
self.tabs.append(tab)
self.tabWidget.setCurrentWidget(tab)
if url:
tab.navigate(url)
else:
self.currentTabGo()
return self.tabs[self.tabWidget.currentIndex()]
def addDownload(self, url):
if type(self.downloader) == str:
# commandline
cmd = self.downloader.replace("%url%", "\"%s\"" % url)
retcode = subprocess.call(cmd)
if retcode:
if (QtGui.QMessageBox.question(self, "External downloader failure", "An attempt to invoke your external downloader with the command line:\n\n%s\n\nappears to have failed. Would you like to change the commandline to your external downloader?" % (cmd)) == QtGui.QMessageBox.Ok):
self.downloader = None
self.addDownload(url)
elif self.downloader is None:
# prompt the user
dlg = QtGui.QInputDialog()
lbltxt = "%s does not implement an internal download manager but will talk to external download managers which can be command-line driven.\n\nPlease enter a commandline for an external downloader. %%url%% in your command will be replaced with the url to download" % (self.appname)
commandline, ok = dlg.getText(self, "External downloader configuration", lbltxt)
commandline = commandline.strip()
if commandline == "" or ok == False:
if QtGui.QMessageBox.question(self, "External downloader problem", "You haven't specified an external downloader command line. This means the request to download %s can't be processed. Are you sure?" % (url), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
return
self.addDownload(url)
self.downloader = commandline
self.addDownload(url)
def fixUrl(self, url):
# look for "smart" google search
search = False
parts = url.split("://")
if len(parts) != 2 or (len(parts) == 2 and parts[0] not in ["http", "https", "ftp"]):
parts = url.split(" ") # multipl words == search
if len(parts) > 1:
search = True
hostname = url.split("/")[0]
parts = hostname.split(".") # hostname without periods == perhaps search
if len(parts) == 1:
try:
socket.gethostbyname(hostname) # if we can look up the host name, go for it
except:
search = True
if search:
url = "http://www.google.com/search?q=%s" % (url.replace(" ", "+"))
else:
try:
if url.index("about:") == 0:
return url
except:
if url.count("://") == 0:
url = "%s%s" % ("http://", url)
return url
def delTab(self, idx = -1):
if idx >= len(self.tabs):
return
if idx == -1:
idx = self.tabWidget.currentIndex()
t = self.tabs.pop(idx)
t.stop()
self.tabWidget.removeTab(idx)
t.deleteLater()
if len(self.tabs) == 0:
self.close()
def load(self, url):
if self.tabs[-1].URL() == "":
self.tabs[-1].navigate(url)
else:
self.addTab(url)
def refreshAll(self):
for t in self.tabs:
t.refresh()
def defaultCSS(self):
return " html {background-color: Window, color: WindowText}\ntable {border-collapse: collapse; margin: auto;}\ntd,th {border: 1px solid ThreeDDarkShadow; padding-left: 5px; padding-right: 5px}\n h1,h2,h3,h4,h5 {text-align: center;}"
def genAboutFoo(self):
return "<html><head><title>About %s</title><style>%s</style></head><body><h4>About %s</h4><p>%s is a dead-simple, lightweight tabbed web browser with support for:</p><ul><li>Disk cache</li><li>Persistent cookies</li><li>Plugin support (eg flash), where WebKit supports it</li><li>Re-orderable tabs</li><li>Browsing history (max %i items)</li><li>External download manager</li><li>Basic authentication for websites that require authentication</li></ul><p>%s would have been completely impossible without the giants upon whose shoulders it stands:</p><ul><li>Python</li><li>Qt (and PyQt4 in particular)</li><li>And, of course, Webkit</li></ul><p>%s was started as a fun project just to see what would be involved in creating a light browser out of the available powerful components. I hope that you find it useful!</p><p>Author: Davyd McColl (<a href=\"mailto:davydm@gmail.com\">davydm@gmail.com</a>)</body></html>" % (self.appname, self.appname, self.appname, self.appname, self.maxHistory, self.appname, self.appname)
def genHelp(self):
ret = ["<html><head><title>Help for: %s</title><style>%s</style></head><body><h4>Help for: <a href=\"about:foo\">%s</a></h4>" % (self.appname, self.defaultCSS(), self.appname)]
ret.extend(self.genActionTable(self.actions, "Application shortcuts"))
ret.append("<br/>")
ret.extend(self.genActionTable(self.tabactions, "Tab shortcuts"))
ret.append("</body></html>")
return "".join(ret)
def genActionTable(self, actions, title):
    """Render an HTML table of (description, shortcut) rows, sorted by description.

    Each value in *actions* is either
    [bound method, shortcut, description] or
    [bound method, shortcut, bound object, description];
    entries lacking a shortcut or a description are omitted.
    Returns the table as a list of HTML fragments.
    """
    rows = ["<h5>%s</h5><table>" % (title),
            "<tr><th>Action</th><th>Shortcut</th></tr>"]
    described = {}
    for entry in actions.values():
        key = entry[1]
        desc = None
        if len(entry) == 3:
            desc = entry[2]
        elif len(entry) == 4:
            desc = entry[3]
        if key and desc:
            described[desc] = key
    for desc in sorted(described):
        rows.append("<tr><td>%s</td><td>%s</td></tr>" % (desc, described[desc]))
    rows.append("</table>")
    return rows
if __name__ == "__main__":
    # Entry point: "-debug" anywhere on the command line enables console
    # logging; every other argument is opened as a URL in the main window.
    app = QtGui.QApplication([])
    debug = False
    if sys.argv[1:].count("-debug"):
        debug = True
    mainwin = MainWin(debug=debug)
    mainwin.show()
    for arg in sys.argv[1:]:
        if arg not in ["-debug"]:
            mainwin.load(arg)
    app.exec_()
| Python |
# py2exe build script: running "python setup.py py2exe" bundles the browser
# into a standalone Windows executable.
from distutils.core import setup
import py2exe  # imported for its side effect: registers the "py2exe" command
setup(windows=[
    # "windows" (not "console") builds a GUI app with no console window;
    # icon resource id 1 becomes the executable's icon.
    { "script":"foobrowser.pyw",
      "icon_resources":[(1, "foobrowser.ico")]
    }
])
| Python |
#!/usr/bin/python
# minimalistic browser levering off of Python, PyQt and Webkit
from PyQt4 import QtGui, QtCore, QtWebKit, QtNetwork
import sqlite3
import os
import sys
import time
import base64
import socket
import sip # put here to make py2exe work better
import subprocess
def registerShortcuts(actions, defaultOwner):
    """Create a QShortcut for every enabled action in *actions*.

    Each value in *actions* is a list of one of these shapes:
      [callback, shortcut]
      [callback, shortcut, description]
      [callback, shortcut, owner-widget, description]
    The shortcut string may hold several keys delimited by "|"; the literal
    value "none" (case-insensitive) disables the action entirely.
    Shortcuts are bound to the owner widget when one is given, otherwise to
    *defaultOwner*.
    """
    for name in actions:
        entry = actions[name]
        keyspec = entry[1]
        if keyspec.lower() == "none":
            continue
        callback = entry[0]
        # The owner widget, when present, is always element 2; a string in
        # that position is a description, not an owner.
        # BUG FIX: the original's `elif len(entry) == 4: owner = entry[3]`
        # would have bound the shortcut to the *description string* for a
        # 4-element entry whose third element is a str.
        owner = defaultOwner
        if len(entry) > 2 and type(entry[2]) != str:
            owner = entry[2]
        # Allow multiple shortcuts per action, delimited by "|".
        for key in keyspec.split("|"):
            key = key.strip()
            if key == "":
                continue
            QtGui.QShortcut(key, owner, callback)
class Icons:
    """Container class to hold icons for Bonsai in base64-encoded format"""
    def __init__(self):
        # On Windows, give the process an explicit AppUserModelID so the
        # taskbar groups/pins the app under its own icon (Windows 7+ API;
        # silently skipped on older versions).
        if os.name == "nt":
            import ctypes
            try:
                ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(os.path.basename(sys.argv[0]))
            except:
                # not a win7 client
                pass
        # name -> base64-encoded PNG data
        self.icons = dict()
        self.icons["foobrowser"] = "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A\
/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9sIFw0xIyVxhzgAAAAZdEVYdENv\
bW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAABJUlEQVR42u3WMUoDURSF4V8JKAoqQlyJ2Fll\
B+JKRF2ChcsSQU0hSlAXYJoEUihok0KbOxBkeMZ40Vf8HzyGyTtnArd4MyBJkiRJkiRJkiTN4+Ob\
NasLnAED4C3WIH7rFv5j0V5VA9gDxoXcODJfLdr78wGUbALDyF0BPWA9Vg+4ib1hZH/bq24AJ5G5\
BVZa9leB+8gcJ/SqG8BlZA4KmcPIXCT0qjkDGpO4Lx1YO5GZJPSqG8A07juF53QiM03o/chy0iCW\
WlbjNa5bhf52XF8Sev8ygJKnuO4XMs3eQ0KvukPwdI7T/C4yRwm9qr8DruMdvharB/Rj7xnYSOhV\
NwCAXWBUODBHkcnqVTeAtm/6d+AROI/XWXZPkiRJkiRJkiRJsz4BmPGrGG5RdTEAAAAASUVORK5C\
YII="
    def QIcon(self, name):
        """Return a QtGui.QIcon for *name*, or None if unknown/undecodable."""
        if list(self.icons.keys()).count(name) == 0:
            return None
        pixmap = QtGui.QPixmap()
        # base64.decodebytes exists on Python 3; fall back to b64decode so the
        # same file also runs on interpreters exposing only the older name.
        if dir(base64).count("decodebytes"):
            if not pixmap.loadFromData(base64.decodebytes(bytes(self.icons[name], encoding="UTF-8"))):
                return None
        elif dir(base64).count("b64decode"):
            if not pixmap.loadFromData(base64.b64decode(self.icons[name])):
                return None
        icon = QtGui.QIcon(pixmap)
        return icon
class DiskCookies(QtNetwork.QNetworkCookieJar):
    """Cookie jar persisted to an sqlite database (cookies.db) in *storage_location*."""
    def __init__(self, storage_location, parent=None):
        self.db = None  # sqlite3 connection; None until LoadFromDisk runs
        QtNetwork.QNetworkCookieJar.__init__(self, parent)
        self.LoadFromDisk(storage_location)
    def LoadFromDisk(self, path):
        """Open (creating if needed) cookies.db under *path* and load all rows."""
        cookiefile = os.path.join(path, "cookies.db")
        init = False
        if not os.path.isfile(cookiefile):
            init = True
        self.db = sqlite3.connect(cookiefile)
        if init:
            self.initDB()
        # NOTE(review): the schema also has a "session" column (see initDB)
        # that is neither read here nor written by Persist.
        cur = self.db.execute("select domain, expires, http_only, secure, name, path, value from cookies;")
        cookies = []
        for row in cur.fetchall():
            cookie = QtNetwork.QNetworkCookie()
            cookie.setDomain(row[0])
            try:
                # Empty expires string == session cookie: leave expiry unset.
                if len(row[1]):
                    e = time.strptime(row[1], "%Y-%m-%d %H:%M:%S")
                    cookie.setExpirationDate(QtCore.QDateTime(e.tm_year, e.tm_mon, e.tm_mday, e.tm_hour, e.tm_min, e.tm_sec))
            except:
                pass
            if row[2]:
                cookie.setHttpOnly(True)
            else:
                cookie.setHttpOnly(False)
            if row[3]:
                cookie.setSecure(True)
            else:
                cookie.setSecure(False)
            cookie.setName(row[4])
            cookie.setPath(row[5])
            # Python 2/3 difference: bytes(str) works on 2, needs an encoding on 3.
            try:
                cookie.setValue(bytes(row[6]))
            except:
                try:
                    cookie.setValue(bytes(row[6], encoding="UTF-8"))
                except:
                    pass
            cookies.append(cookie)
        self.setAllCookies(cookies)
    def clear(self):
        """Drop all cookies from memory and from the on-disk table."""
        self.setAllCookies([])
        if self.db:
            self.db.execute("delete from cookies;")
    def quote(self, s):
        """Return *s* as a single-quoted SQL string literal (doubles embedded quotes)."""
        s = str(s)
        return "'%s'" % (s.replace("'", "''"))
    def boolToInt(self, b):
        """Map a truthy value to 1, falsy to 0, for the int columns."""
        if b:
            return 1
        else:
            return 0
    def Persist(self):
        """Rewrite the cookies table from the in-memory jar, then close the db.

        Call exactly once at shutdown: the connection is closed afterwards
        (NOTE(review): self.db is not reset to None, so a second call would
        operate on a closed connection — confirm callers invoke this once).
        """
        if self.db == None:
            return
        self.db.execute("delete from cookies;")
        # NOTE(review): SQL is built by string interpolation via quote();
        # cookie values originate from websites — parameterized queries
        # would be safer.
        sqlstr = "insert into cookies (domain, expires, http_only, secure, name, path, value) values (%s, %s, %i, %i, %s, %s, %s);";
        fmt = "yyyy-MM-dd hh:mm:ss"  # Qt date format, matches strptime format in LoadFromDisk
        for cookie in self.allCookies():
            cval = cookie.value().data()
            if type(cval) != str:
                cval = cval.decode("UTF-8")
            #print("cookie:\n\tsession: %s\n\tname: %s\n\tpath: %s\n\tvalue: %s\n\t" % (str(cookie.isSessionCookie()),cookie.name(), cookie.path(), cval))
            esql = (sqlstr % (self.quote(cookie.domain()),
                self.quote(cookie.expirationDate().toString(fmt)),
                self.boolToInt(cookie.isHttpOnly()),
                self.boolToInt(cookie.isSecure()),
                self.quote(cookie.name()),
                self.quote(cookie.path()),
                self.quote(cookie.value().data().decode("UTF-8"))))
            self.db.execute(esql)
        self.db.commit()
        self.db.close()
    def initDB(self):
        """Create the cookies table in a fresh database."""
        cur = self.db.execute("create table cookies(domain text, expires text, http_only int, secure int, session int, name text, path text, value text);")
        cur.close()
class FooWebView(QtWebKit.QWebView):
    """QWebView that routes new-window requests into new tabs of the owning browser."""
    def __init__(self, parent = None):
        # NOTE(review): this shadows QWidget.parent() with a plain attribute;
        # createWindow below depends on the attribute, so it is kept.
        self.parent = parent
        QtWebKit.QWebView.__init__(self, parent)
    def createWindow(self, type):
        # Called by WebKit for window.open()/target="_blank": open a new tab
        # on the owning WebTab's browser and hand back its web view.
        return self.parent.browser.addTab().webkit
class WebTab(QtGui.QWidget):
    """One browser tab: address combo, progress bar, web view, find bar, status bar.

    *browser* is the owning MainWin (or None for the throwaway instance MainWin
    creates just to read the default tab actions); *actions* is an optional
    template dict whose shortcut keys override the defaults.
    """
    def __init__(self, browser, actions=None, parent=None, showStatusBar=False):
        QtGui.QWidget.__init__(self, parent)
        self.actions = dict()
        self.grid = QtGui.QGridLayout(self)
        self.grid.setSpacing(1)
        # Editable combo doubles as address bar + history dropdown.
        self.cmb = QtGui.QComboBox()
        self.cmb.setEditable(True)
        self.browser = browser
        if browser is not None:
            browser.LoadHistoryToCmb(self.cmb)
        self.webkit = FooWebView(self)
        # Delegate all links so onLinkClick runs (and history gets recorded).
        self.webkit.page().setLinkDelegationPolicy(QtWebKit.QWebPage.DelegateAllLinks)
        self.webkit.linkClicked.connect(self.onLinkClick)
        self.webkit.settings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled,True)
        self.pbar = QtGui.QProgressBar()
        self.pbar.setRange(0, 100)
        self.pbar.setTextVisible(False)
        self.grid.addWidget(self.cmb, 0, 0)
        self.grid.addWidget(self.pbar, 1, 0, 1, self.grid.columnCount())
        self.grid.addWidget(self.webkit, 2, 0, 1, self.grid.columnCount())
        self.pbar.setVisible(False)
        self.pbar.setMaximumHeight(10)
        # In-page search bar (hidden until showSearch).
        self.fraSearch = QtGui.QFrame()
        self.searchGrid = QtGui.QGridLayout(self.fraSearch)
        self.searchGrid.setSpacing(1)
        self.lblSearch = QtGui.QLabel("Find text in page:")
        self.txtSearch = QtGui.QLineEdit()
        self.btnClearSearch = QtGui.QPushButton("[X]")
        self.searchGrid.addWidget(self.lblSearch, 0, 0)
        self.searchGrid.addWidget(self.txtSearch, 0, 1)
        self.searchGrid.addWidget(self.btnClearSearch, 0, 2)
        self.statusbar = QtGui.QStatusBar()
        self.statusbar.setVisible(showStatusBar)
        self.statusbar.setMaximumHeight(25)
        self.grid.addWidget(self.statusbar, self.grid.rowCount(), 0, 1, self.grid.columnCount())
        for i in range(2):
            self.searchGrid.setColumnStretch(i, i % 2)
        self.fraSearch.setVisible(False)
        self.grid.addWidget(self.fraSearch, self.grid.rowCount() + 1, 0, 1, self.grid.columnCount())
        # Only the web view row/column stretches.
        for c in range(self.grid.columnCount() + 1):
            self.grid.setColumnStretch(c, 0)
        for r in range(self.grid.rowCount() + 1):
            self.grid.setRowStretch(r, 0)
        self.grid.setRowStretch(2, 1)
        self.grid.setColumnStretch(0, 1)
        self.connect(self.cmb, QtCore.SIGNAL("currentIndexChanged(int)"), self.navigate)
        if browser:
            self.browser.setupWebkit(self.webkit)
        self.connect(self.webkit, QtCore.SIGNAL("iconChanged()"), self.setIcon)
        self.connect(self.webkit, QtCore.SIGNAL("loadStarted()"), self.loadStarted)
        self.connect(self.webkit, QtCore.SIGNAL("loadFinished(bool)"), self.loadFinished)
        self.connect(self.webkit, QtCore.SIGNAL("titleChanged(QString)"), self.setTitle)
        self.connect(self.webkit, QtCore.SIGNAL("loadProgress(int)"), self.loadProgress)
        self.connect(self.webkit, QtCore.SIGNAL("urlChanged(QUrl)"), self.setURL)
        self.connect(self.webkit.page(), QtCore.SIGNAL("linkHovered(QString, QString, QString)"), self.onLinkHovered)
        page = self.webkit.page()
        # Route downloads/unsupported content to the external download manager.
        page.downloadRequested.connect(self.onDownloadRequested)
        page.setForwardUnsupportedContent(True)
        page.unsupportedContent.connect(self.onUnsupportedContent)
        self.connect(self.btnClearSearch, QtCore.SIGNAL("clicked()"), self.stopOrHideSearch)
        self.connect(self.txtSearch, QtCore.SIGNAL("textChanged(QString)"), self.doSearch)
        self.registerActions(actions)
        registerShortcuts(self.actions, self)
        self.cmb.setFocus()
        self.showHideMessage()
    def onLinkClick(self, qurl):
        """Delegated-link handler: navigate via our own pipeline (records history)."""
        self.navigate(qurl.toString())
    def registerActions(self, template):
        """Register default tab shortcuts, then overlay shortcut keys from *template*."""
        self.actions["addressnav"] = [self.navigate, "Enter", self.cmb, "Navigate to the url in the address bar"]
        self.actions["reload"] = [self.reload, "F5|Ctrl+R", "Reload the current page"]
        self.actions["back"] = [self.back, "Alt+Left", "Go back in history"]
        self.actions["fwd"] = [self.fwd, "Alt+Right", "Go forward in history"]
        # NOTE(review): "Ctrl-F" (hyphen) may not parse as a QKeySequence;
        # Qt's canonical form is "Ctrl+F" — confirm before changing.
        self.actions["search"] = [self.showSearch, "/|Ctrl-F", "Search in page"]
        self.actions["smartsearch"] = [self.smartSearch, "F3", "Smart search (find next or start search)"]
        self.actions["stopsearch"] = [self.stopOrHideSearch, "Escape", self.fraSearch, "Stop current load or searching"]
        self.actions["findnext"] = [self.doSearch, "Return", self.txtSearch, "Next match for current search"]
        self.actions["togglestatus"]= [self.toggleStatus, "Ctrl+Space", "Toggle visibility of status bar"]
        if template:
            actionnames = list(self.actions.keys())
            for action in template:
                if actionnames.count(action):
                    self.actions[action][1] = template[action][1]
    def toggleStatus(self):
        """Toggle the status bar, app-wide via the browser when owned by one."""
        if self.browser:
            self.browser.toggleStatusVisiblity()
        else:
            # BUG FIX: original read self.statusBar (undefined attribute,
            # AttributeError); the widget is self.statusbar.
            self.statusbar.setVisible(not self.statusbar.isVisible())
    def setStatusVisibility(self, visible):
        """Show or hide this tab's status bar."""
        self.statusbar.setVisible(visible)
    def loadContent(self, html, baseUrl = None):
        """Display *html* directly, resolving relative links against *baseUrl*."""
        # BUG FIX: QUrl lives in QtCore (not QtWebKit), and the QWebView
        # method is setHtml (not setHTML); the original raised AttributeError.
        if baseUrl:
            baseUrl = QtCore.QUrl(baseUrl)
        else:
            baseUrl = QtCore.QUrl()
        self.webkit.setHtml(html, baseUrl)
    def onUnsupportedContent(self, reply):
        """Content WebKit can't render is handed to the download manager."""
        self.log("Unsupported content %s" % (reply.url().toString()))
        if self.browser:
            self.browser.addDownload(reply.url().toString())
    def onDownloadRequested(self, request):
        """Explicit download requests go to the download manager."""
        if self.browser:
            self.browser.addDownload(request.url().toString())
    def doSearch(self, s = None):
        """Find *s* (default: search box text) in the page, wrapping around."""
        if s is None: s = self.txtSearch.text()
        self.webkit.findText(s, QtWebKit.QWebPage.FindWrapsAroundDocument)
    def stopOrHideSearch(self):
        """Escape behaviour: hide the search bar if open, else stop the page load."""
        if self.fraSearch.isVisible():
            self.fraSearch.setVisible(False)
            self.webkit.setFocus()
        else:
            self.webkit.stop()
    def showSearch(self):
        """Open the in-page search bar with an empty query and focus it."""
        self.txtSearch.setText("")
        self.fraSearch.setVisible(True)
        self.txtSearch.setFocus()
    def zoom(self, lvl):
        """Adjust zoom by *lvl* quarter-steps (positive in, negative out)."""
        self.webkit.setZoomFactor(self.webkit.zoomFactor() + (lvl * 0.25))
    def stop(self):
        """Abort the current page load."""
        self.webkit.stop()
    def URL(self):
        """Return the address bar's current text."""
        return self.cmb.currentText()
    def loadProgress(self, val):
        """Mirror page load progress (0-100) onto the progress bar."""
        if self.pbar.isVisible():
            self.pbar.setValue(val)
    def setTitle(self, title):
        """Push the page title up to the tab widget's label."""
        if self.browser:
            self.browser.setTabTitle(self, title)
    def setURL(self, url):
        """Keep the address bar in sync with redirects/navigation."""
        self.cmb.setEditText(url.toString())
    def refresh(self):
        """Re-navigate to the current URL (re-runs fixUrl/history), then reload."""
        self.navigate(self.URL())
        self.webkit.reload()
    def loadStarted(self):
        self.showProgressBar()
    def loadFinished(self, success):
        """Hide progress, refresh the tab icon, and move focus into the page."""
        self.hideProgressBar()
        self.setIcon()
        if self.cmb.hasFocus():
            self.webkit.setFocus()
    def showProgressBar(self):
        self.pbar.setValue(0)
        self.pbar.setVisible(True)
    def hideProgressBar(self, success = False):
        self.pbar.setVisible(False)
    def setIcon(self):
        """Push the page's favicon up to the tab widget."""
        if self.browser:
            self.browser.setTabIcon(self, self.webkit.icon())
    def reload(self):
        self.webkit.reload()
    def smartSearch(self):
        """F3: find next if the search bar is open, otherwise open it."""
        if self.fraSearch.isVisible():
            self.doSearch()
        else:
            self.showSearch()
    def mkShortcuts(self):
        # BUG FIX: the original body was the unfinished statement `self.bro`,
        # which raised AttributeError if ever called. Kept as a no-op for
        # interface compatibility; never invoked anywhere in this file.
        pass
    def fwd(self):
        self.webkit.history().forward()
    def back(self):
        self.webkit.history().back()
    def navigate(self, url = None):
        """Load *url* (or the address bar text), handling about: pages and searches.

        Also invoked by the combo's currentIndexChanged(int) signal, in which
        case *url* is an int index and the combo text is used instead.
        """
        if url and type(url) == str:
            u = url
        else:
            u = str(self.cmb.currentText())
        parts = u.split(":")
        if len(parts) == 2 and parts[0] == "about":
            self.navabout(parts[1].strip().lower())
            return
        if u.strip() == "":
            return
        if self.browser is not None:
            u = self.browser.fixUrl(u)
        self.cmb.setEditText(u)
        if self.browser is not None:
            self.browser.addHistory(u)
        url = QtCore.QUrl(u)
        self.setTitle("Loading...")
        self.webkit.load(url)
    def onStatusBarMessage(self, s):
        if s:
            self.statusbar.showMessage(s)
        else:
            self.showHideMessage()
    def showHideMessage(self):
        """Default status text: how to hide the status bar."""
        self.statusbar.showMessage("(press %s to hide this)" % (self.actions["togglestatus"][1]))
    def onLinkHovered(self, link, title, content):
        """Show hovered link/title in the status bar; revert to the hint otherwise."""
        if link or title:
            if title and not link:
                self.statusbar.showMessage(title)
            elif link and not title:
                self.statusbar.showMessage(link)
            elif link and title:
                self.statusbar.showMessage("%s (%s)" % (title, link))
        else:
            self.showHideMessage()
    def navabout(self, dst):
        """Serve the internal about: pages (help, foo, nothing)."""
        if self.browser is None:
            return
        if dst == "help":
            self.webkit.setHtml(self.browser.genHelp())
            self.cmb.setEditText("about:help")
            return
        elif dst == "foo":
            self.webkit.setHtml(self.browser.genAboutFoo())
            self.cmb.setEditText("about:foo")
            return
        elif dst == "nothing":
            self.webkit.setHtml("")
            self.cmb.setEditText("about:nothing")
            return
        self.webkit.setHtml("<p>Sorry, Jim, that resource cannot be found</p>")
        self.cmb.setEditText("about:lost")
class PrivacyDialog(QtGui.QDialog):
    """Modal dialog asking which private data (cookies, history, cache) to clear.

    All three checkboxes default to checked; callers inspect them after
    exec_() returns Accepted.
    """
    def __init__(self, parent=None, icon=None):
        QtGui.QDialog.__init__(self, parent)
        if icon:
            self.setWindowIcon(icon)
        self.setWindowTitle("Clear private data")
        self.chkClearCookies = QtGui.QCheckBox("Clear cookies")
        self.chkClearHistory = QtGui.QCheckBox("Clear history")
        self.chkClearCache = QtGui.QCheckBox("Clear cache")
        self.btnOk = QtGui.QPushButton("OK")
        self.btnCancel = QtGui.QPushButton("Cancel")
        self.grid = QtGui.QGridLayout(self)
        checkboxes = (self.chkClearCookies, self.chkClearHistory, self.chkClearCache)
        for rownum, checkbox in enumerate(checkboxes):
            self.grid.addWidget(checkbox, rownum, 0, 1, 3)
            checkbox.setChecked(True)
        # Empty stretching row between the checkboxes and the buttons.
        spacer_row = len(checkboxes)
        button_row = spacer_row + 1
        self.grid.addWidget(self.btnOk, button_row, 1)
        self.grid.addWidget(self.btnCancel, button_row, 2)
        for rownum in range(self.grid.rowCount()):
            self.grid.setRowStretch(rownum, 0)
        self.grid.setRowStretch(spacer_row, 1)
        self.grid.setColumnStretch(0, 1)
        for colnum in range(self.grid.columnCount()):
            if colnum:
                self.grid.setColumnStretch(colnum, 1)
        self.connect(self.btnOk, QtCore.SIGNAL("clicked()"), self.accept)
        self.connect(self.btnCancel, QtCore.SIGNAL("clicked()"), self.reject)
class AuthDialog(QtGui.QDialog):
    """Modal username/password prompt for sites requiring HTTP authentication."""
    def __init__(self, parent=None, icon=None):
        QtGui.QDialog.__init__(self, parent)
        if icon:
            self.setWindowIcon(icon)
        self.setWindowTitle("Authentication required")
        self.lblAuth = QtGui.QLabel("Authentication required")
        self.lblUserName = QtGui.QLabel("Username:")
        self.txtUserName = QtGui.QLineEdit()
        self.lblPassword = QtGui.QLabel("Password:")
        self.txtPassword = QtGui.QLineEdit()
        self.txtPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.btnCancel = QtGui.QPushButton("Cancel")
        self.btnOK = QtGui.QPushButton("OK")
        self.grid = QtGui.QGridLayout(self)
        self.grid.addWidget(self.lblAuth, 0, 0, 1, 3)
        self.grid.addWidget(self.lblUserName, 1, 0)
        self.grid.addWidget(self.txtUserName, 1, 1, 1, 3)
        self.grid.addWidget(self.lblPassword, 2, 0)
        self.grid.addWidget(self.txtPassword, 2, 1, 1, 3)
        self.grid.addWidget(self.btnOK, 3, 2)
        self.grid.addWidget(self.btnCancel, 3, 3)
        # Only the text-entry column stretches; rows stay compact.
        for colnum in range(self.grid.columnCount()):
            self.grid.setColumnStretch(colnum, 0)
        self.grid.setColumnStretch(1, 1)
        for rownum in range(self.grid.rowCount()):
            self.grid.setRowStretch(rownum, 0)
        self.cancelled = False
        self.connect(self.btnCancel, QtCore.SIGNAL("clicked()"), self.onCancel)
        self.connect(self.btnOK, QtCore.SIGNAL("clicked()"), self.onOK)
    def onOK(self):
        """OK pressed: mark success and dismiss the dialog."""
        self.cancelled = False
        self.close()
    def onCancel(self):
        """Cancel pressed: mark cancellation and dismiss the dialog."""
        self.cancelled = True
        self.close()
    def prompt(self, url=None):
        """Show modally; return (username, password), or (None, None) on cancel."""
        self.cancelled = False
        if url:
            self.lblAuth.setText("The page at:\n\n%s\n\nrequires authentication to continue" % (url))
        self.exec_()
        if self.cancelled:
            return None, None
        return self.txtUserName.text(), self.txtPassword.text()
class MainWin(QtGui.QMainWindow):
    """Main browser window: tab widget, config, history, cache, cookies, downloads."""
    def __init__(self, debug=False):
        QtGui.QMainWindow.__init__(self, None)
        self.downloader = None      # external downloader command line, or None
        self.debug = debug          # when True, log() prints to stdout
        self.actions = dict()       # app-level shortcut actions
        # BUG FIX: self.tabactions was initialised twice; once is enough.
        self.tabactions = dict()    # per-tab shortcut actions (template for new tabs)
        # Throwaway tab, created only to harvest the default tab actions so
        # loadConfig can overlay user-configured shortcut keys onto them.
        tmp = WebTab(None, None)
        self.tabactions = tmp.actions
        self.configdir = os.path.join(os.path.expanduser("~"), ".foobrowser")
        self.registerActions()
        self.showStatusBar = False
        self.loadConfig()
        self.icons = Icons()
        self.setWindowIcon(self.icons.QIcon("foobrowser"))
        self.appname = "Foo browser!"
        self.cache_mb = 512         # disk cache cap in megabytes
        self.maxHistory = 4096      # max persisted history entries
        self.tabs = []              # WebTab instances, parallel to tabWidget order
        self.historyDateFormat = "%Y-%m-%d %H:%M:%S"
        self.maxTitleLen = 40       # tab label truncation length
        if not os.path.isdir(self.configdir):
            try:
                os.mkdir(self.configdir)
            except Exception as e:
                self.configdir = None   # no config dir: run without persistence
        if self.configdir is not None:
            self.loadHistory()
        self.disk_cache = None
        self.cookie_jar = None
        if self.configdir:
            cachedir = os.path.join(self.configdir, "cache")
            if not os.path.isdir(cachedir):
                os.mkdir(cachedir)
            self.disk_cache = QtNetwork.QNetworkDiskCache()
            self.disk_cache.setCacheDirectory(cachedir)
            self.disk_cache.setMaximumCacheSize(self.cache_mb * (1024 * 1024))
            self.cookie_jar = DiskCookies(self.configdir)
        self.auth_cache = dict()    # realm -> {"user":..., "password":...}
        tmp.deleteLater()
        self.mkGui()
        registerShortcuts(self.actions, self)
    def loadConfig(self):
        """Parse ~/.foobrowser/config.ini: [general], [shortcuts], [tabshortcuts]."""
        if self.configdir:
            if not os.path.isdir(self.configdir):
                try:
                    os.mkdir(self.configdir)
                except:
                    return
            conffile = os.path.join(self.configdir, "config.ini")
            if not os.path.isfile(conffile):
                return
            try:
                fp = open(conffile, "r")
            except:
                return
            section = ""
            for line in fp:
                line = line.strip()
                if len(line) == 0:
                    continue
                if line[0] == "[" and line[-1] == "]":
                    section = line.strip("[]")
                    continue
                parts = line.split("=")
                if len(parts) < 2:
                    continue
                setting = parts[0].strip()
                # Rejoin so values containing "=" survive the split.
                value = "=".join(parts[1:]).strip()
                self.log("config: %s/%s/%s" % (section, setting, value))
                if section == "shortcuts":
                    setting = setting.lower()
                    if list(self.actions.keys()).count(setting):
                        self.actions[setting][1] = value
                    continue
                if section == "tabshortcuts":
                    setting = setting.lower()
                    if list(self.tabactions.keys()).count(setting):
                        self.tabactions[setting][1] = value
                    continue
                if section == "general":
                    setting = setting.lower()
                    if setting == "downloader":
                        if value.lower() != "none":
                            self.log("setting downloader to %s" % (value))
                            self.downloader = value
                    elif setting == "showstatus":
                        if value.lower() in ["yes", "true", "1"]:
                            self.showStatusBar = True
                        else:
                            self.showStatusBar = False
            fp.close()
    def toggleStatusVisiblity(self):
        """Toggle status-bar visibility across every tab (name typo is load-bearing)."""
        self.showStatusBar = not self.showStatusBar
        for t in self.tabs:
            t.setStatusVisibility(self.showStatusBar)
    def persistConfig(self):
        """Write config.ini back out: general settings plus both shortcut maps."""
        if self.configdir:
            if not os.path.isdir(self.configdir):
                try:
                    os.mkdir(self.configdir)
                except:
                    return
            conffile = os.path.join(self.configdir, "config.ini")
            try:
                fp = open(conffile, "w")
            except:
                return
            fp.write("[general]\n")
            fp.write("; general settings\n")
            if self.downloader:
                fp.write("downloader = %s\n" % (str(self.downloader)))
            else:
                fp.write("downloader = None\n")
            if self.showStatusBar:
                fp.write("showstatus = True\n")
            else:
                fp.write("showstatus = False\n")
            fp.write("[shortcuts]\n")
            fp.write("; shortcuts applied to the application as a whole\n")
            actionnames = list(self.actions.keys())
            actionnames.sort()
            for action in actionnames:
                fp.write("%s = %s\n" % (action, self.actions[action][1]))
            fp.write("[tabshortcuts]\n")
            fp.write("; shortcuts applied to individual tabs\n")
            actionnames = list(self.tabactions.keys())
            actionnames.sort()
            for action in actionnames:
                fp.write("%s = %s\n" % (action, self.tabactions[action][1]))
            fp.close()
    def registerActions(self):
        """Register the application-level shortcut actions."""
        self.actions["newwin"] = [self.addWin, "Ctrl+N", "Open new window"]
        self.actions["newtab"] = [self.addTab, "Ctrl+T", "Open new tab"]
        self.actions["closetab"] = [self.delTab, "Ctrl+W", "Close current tab"]
        self.actions["tabprev"] = [self.decTab, "Ctrl+PgUp", "Switch to previous tab"]
        self.actions["tabnext"] = [self.incTab, "Ctrl+PgDown", "Switch to next tab"]
        self.actions["go"] = [self.currentTabGo, "Ctrl+G", "Focus address bar"]
        self.actions["close"] = [self.close, "Ctrl+Q", "Close application"]
        self.actions["zoomin"] = [self.zoomIn, "Ctrl+Up", "Zoom into page"]
        self.actions["zoomout"] = [self.zoomOut, "Ctrl+Down", "Zoom out of page"]
        self.actions["help"] = [self.showHelp, "F1", "Show this help page"]
        self.actions["cleardata"] = [self.clearData, "Ctrl+Shift+Delete", "Clear cache and private data"]
    def clearData(self):
        """Show the privacy dialog and clear whatever the user ticked."""
        dlg = PrivacyDialog(parent=self, icon=self.icons.QIcon("foobrowser"))
        if dlg.exec_() == QtGui.QDialog.Accepted:
            if dlg.chkClearCookies.isChecked() and self.cookie_jar:
                self.cookie_jar.clear()
            if dlg.chkClearCache.isChecked() and self.disk_cache:
                self.disk_cache.clear()
            if dlg.chkClearHistory.isChecked():
                self.history = {}
    def showHelp(self):
        self.addTab().navigate("about:help")
    def addWin(self):
        MainWin().show()
    def currentTabGo(self):
        """Focus the current tab's address bar."""
        self.tabs[self.tabWidget.currentIndex()].cmb.setFocus()
    def zoomIn(self):
        self.zoom(1)
    def zoomOut(self):
        self.zoom(-1)
    def zoom(self, lvl):
        self.tabs[self.tabWidget.currentIndex()].zoom(lvl)
    def decTab(self):
        self.incTab(-1)
    def incTab(self, incby = 1):
        """Cycle the current tab by *incby*, wrapping at either end."""
        if self.tabWidget.count() < 2:
            return
        idx = self.tabWidget.currentIndex()
        idx += incby
        if idx < 0:
            idx = self.tabWidget.count()-1;
        elif idx >= self.tabWidget.count():
            idx = 0
        self.tabWidget.setCurrentIndex(idx)
    def setTabIcon(self, tab, icon):
        idx = self.getTabIndex(tab)
        if idx > -1:
            self.tabWidget.setTabIcon(idx, icon)
    def setTabTitle(self, tab, title):
        """Set a tab's label, truncated to maxTitleLen with an ellipsis."""
        idx = self.getTabIndex(tab)
        if idx > -1:
            if len(title) > self.maxTitleLen:
                title = title[:self.maxTitleLen-3] + "..."
            self.tabWidget.setTabText(idx, title)
    def getTabIndex(self, tab):
        """Return *tab*'s index in self.tabs, or -1 if absent."""
        for i in range(len(self.tabs)):
            if tab == self.tabs[i]:
                return i
        return -1
    def setupWebkit(self, webkit):
        """Wire a new web view to shared auth, cache, cookies and storage."""
        nam = webkit.page().networkAccessManager()
        nam.authenticationRequired.connect(self.onAuthRequest)
        nam.setCache(self.disk_cache)
        nam.setCookieJar(self.cookie_jar)
        # setCookieJar/setCache take ownership; detach so the shared objects
        # survive the tab that happened to register them.
        self.cookie_jar.setParent(None)
        self.disk_cache.setParent(None)
        g = webkit.settings()
        g.enablePersistentStorage(self.configdir)
    def onAuthRequest(self, networkreply, authenticator):
        """Answer HTTP auth challenges, caching credentials per realm."""
        cached = list(self.auth_cache.keys())
        r = authenticator.realm()
        if cached.count(r):
            authenticator.setUser(self.auth_cache[r]["user"])
            authenticator.setPassword(self.auth_cache[r]["password"])
        else:
            authdlg = AuthDialog(parent=self, icon=self.icons.QIcon("foobrowser"))
            username, password = authdlg.prompt(networkreply.url().toString())
            if username and password:
                authenticator.setUser(username)
                authenticator.setPassword(password)
                self.auth_cache[r] = {"user":username, "password":password}
    def closeEvent(self, e):
        """Confirm multi-tab exit, then persist history, config, cache and cookies."""
        if len(self.tabs) > 1:
            if QtGui.QMessageBox.question(self, "Confirm exit", "You have more than one tab open. Are you sure you want to exit?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
                e.ignore()
                return
        self.persistHistory()
        self.persistConfig()
        if self.disk_cache:
            self.disk_cache.expire()
        if self.cookie_jar:
            self.cookie_jar.Persist()
        e.accept()
        # BUG FIX: the original also called self.close() here, which re-enters
        # closeEvent and persists everything twice (and calls Persist on an
        # already-closed cookie db). Accepting the event closes the window.
    def log(self, s):
        """Print *s* when running with -debug."""
        if self.debug:
            print(s)
    def persistHistory(self):
        """Write the newest maxHistory entries to the history file."""
        if self.configdir is None:
            return
        hfile = os.path.join(self.configdir, "history")
        try:
            fp = open(hfile, "w")
        except Exception as e:
            return
        keys = list(self.history.keys())
        keys.sort()
        # BUG FIX: keys[:maxHistory] kept the *oldest* entries; the stated
        # intent ("last maxHistory history points") wants the newest.
        for k in keys[-self.maxHistory:]: # only store up to the last maxHistory history points
            fp.write("%s :: %s\n" % (time.strftime(self.historyDateFormat, k), self.history[k]))
        fp.close()
    def loadHistory(self):
        """Load the history file into self.history ({struct_time: url})."""
        self.history = {}
        if self.configdir is None:
            return
        hfile = os.path.join(self.configdir, "history")
        if os.path.isfile(hfile):
            for line in open(hfile, "r"):
                line = line.strip()
                parts = line.split("::")
                try:
                    k = time.strptime(parts[0].strip(), self.historyDateFormat)
                    # Rejoin so urls containing "::" survive the split.
                    hurl = "::".join(parts[1:]).strip()
                    self.history[k] = hurl
                except Exception as e:
                    self.log(str(e))
                    pass
    def LoadHistoryToCmb(self, cmb):
        """Fill an address combo with history urls, newest first, deduplicated."""
        if self.configdir is None:
            return
        keys = list(self.history.keys())
        keys.sort(reverse=True)
        if keys:
            cmb.addItem("")
        items = []
        for k in keys:
            if self.history[k] in items:
                continue
            cmb.addItem(self.history[k])
            items.append(self.history[k])
        if keys:
            cmb.setCurrentIndex(0)
    def addHistory(self, url, when = None):
        """Record a visit to *url* (keyed by time, default: now)."""
        if when is None:
            when = time.localtime()
        self.history[when] = url
    def mkGui(self):
        """Build the central tab widget and open the first tab."""
        self.layout().setSpacing(1)
        self.setWindowTitle(self.appname)
        self.tabWidget = QtGui.QTabWidget(self)
        self.tabWidget.tabBar().setMovable(True)
        self.tabWidget.setStyleSheet("padding: 2px; margin: 2px;")
        self.setCentralWidget(self.tabWidget)
        self.tabWidget.setTabsClosable(True)
        self.connect(self.tabWidget, QtCore.SIGNAL("tabCloseRequested(int)"), self.delTab)
        self.connect(self, QtCore.SIGNAL("refreshAll()"), self.refreshAll)
        self.addTab()
    def addTab(self, url = None):
        """Open a new tab, optionally navigating to *url*; return the new tab."""
        tab = WebTab(browser=self, actions=self.tabactions, showStatusBar = self.showStatusBar)
        self.tabWidget.addTab(tab, "New tab")
        self.tabs.append(tab)
        self.tabWidget.setCurrentWidget(tab)
        if url:
            tab.navigate(url)
        else:
            self.currentTabGo()
        return self.tabs[self.tabWidget.currentIndex()]
    def addDownload(self, url):
        """Hand *url* to the configured external downloader, prompting for one if unset."""
        if type(self.downloader) == str:
            # commandline: %url% placeholder replaced with the quoted url
            cmd = self.downloader.replace("%url%", "\"%s\"" % url)
            retcode = subprocess.call(cmd)
            if retcode:
                if (QtGui.QMessageBox.question(self, "External downloader failure", "An attempt to invoke your external downloader with the command line:\n\n%s\n\nappears to have failed. Would you like to change the commandline to your external downloader?" % (cmd)) == QtGui.QMessageBox.Ok):
                    self.downloader = None
                    self.addDownload(url)
        elif self.downloader is None:
            # prompt the user for a downloader command line
            dlg = QtGui.QInputDialog()
            lbltxt = "%s does not implement an internal download manager but will talk to external download managers which can be command-line driven.\n\nPlease enter a commandline for an external downloader. %%url%% in your command will be replaced with the url to download" % (self.appname)
            commandline, ok = dlg.getText(self, "External downloader configuration", lbltxt)
            commandline = commandline.strip()
            if commandline == "" or ok == False:
                if QtGui.QMessageBox.question(self, "External downloader problem", "You haven't specified an external downloader command line. This means the request to download %s can't be processed. Are you sure?" % (url), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
                    return
                self.addDownload(url)
                # BUG FIX: without this return, control fell through and set
                # self.downloader to the empty commandline, then re-queued
                # the download against it.
                return
            self.downloader = commandline
            self.addDownload(url)
    def fixUrl(self, url):
        """Normalise *url*: add a scheme, or turn plain words into a Google search."""
        # look for "smart" google search
        search = False
        parts = url.split("://")
        if len(parts) != 2 or (len(parts) == 2 and parts[0] not in ["http", "https", "ftp"]):
            parts = url.split(" ") # multiple words == search
            if len(parts) > 1:
                search = True
            hostname = url.split("/")[0]
            parts = hostname.split(".") # hostname without periods == perhaps search
            if len(parts) == 1:
                try:
                    socket.gethostbyname(hostname) # if we can look up the host name, go for it
                except:
                    search = True
        if search:
            url = "http://www.google.com/search?q=%s" % (url.replace(" ", "+"))
        else:
            try:
                if url.index("about:") == 0:
                    return url
            except:
                if url.count("://") == 0:
                    url = "%s%s" % ("http://", url)
        return url
    def delTab(self, idx = -1):
        """Close tab *idx* (default: current); close the window with the last tab."""
        if idx >= len(self.tabs):
            return
        if idx == -1:
            idx = self.tabWidget.currentIndex()
        t = self.tabs.pop(idx)
        t.stop()
        self.tabWidget.removeTab(idx)
        t.deleteLater()
        if len(self.tabs) == 0:
            self.close()
    def load(self, url):
        """Open *url*: reuse the newest tab when blank, else add a new tab."""
        if self.tabs[-1].URL() == "":
            self.tabs[-1].navigate(url)
        else:
            self.addTab(url)
    def refreshAll(self):
        """Reload every open tab."""
        for t in self.tabs:
            t.refresh()
    def defaultCSS(self):
        """Return the stylesheet applied to internally generated pages."""
        return " html {background-color: Window, color: WindowText}\ntable {border-collapse: collapse; margin: auto;}\ntd,th {border: 1px solid ThreeDDarkShadow; padding-left: 5px; padding-right: 5px}\n h1,h2,h3,h4,h5 {text-align: center;}"
    def genAboutFoo(self):
        """Build the HTML for the about:foo page (application description/credits)."""
        return "<html><head><title>About %s</title><style>%s</style></head><body><h4>About %s</h4><p>%s is a dead-simple, lightweight tabbed web browser with support for:</p><ul><li>Disk cache</li><li>Persistent cookies</li><li>Plugin support (eg flash), where WebKit supports it</li><li>Re-orderable tabs</li><li>Browsing history (max %i items)</li><li>External download manager</li><li>Basic authentication for websites that require authentication</li></ul><p>%s would have been completely impossible without the giants upon whose shoulders it stands:</p><ul><li>Python</li><li>Qt (and PyQt4 in particular)</li><li>And, of course, Webkit</li></ul><p>%s was started as a fun project just to see what would be involved in creating a light browser out of the available powerful components. I hope that you find it useful!</p><p>Author: Davyd McColl (<a href=\"mailto:davydm@gmail.com\">davydm@gmail.com</a>)</body></html>" % (self.appname, self.appname, self.appname, self.appname, self.maxHistory, self.appname, self.appname)
    def genHelp(self):
        """Build the about:help page: shortcut tables for app- and tab-level actions."""
        ret = ["<html><head><title>Help for: %s</title><style>%s</style></head><body><h4>Help for: <a href=\"about:foo\">%s</a></h4>" % (self.appname, self.defaultCSS(), self.appname)]
        ret.extend(self.genActionTable(self.actions, "Application shortcuts"))
        ret.append("<br/>")
        ret.extend(self.genActionTable(self.tabactions, "Tab shortcuts"))
        ret.append("</body></html>")
        return "".join(ret)
    def genActionTable(self, actions, title):
        """Render an HTML table of (description, shortcut), sorted by description."""
        ret = []
        ret.append("<h5>%s</h5><table>" % (title))
        ret.append("<tr><th>Action</th><th>Shortcut</th></tr>")
        data = {}
        for action in actions:
            shortcut = None
            description = None
            # each item is either a list of 3 elements:
            # bound method, shortcut key, description
            # or:
            # bound method, shortcut key, bound object, description
            shortcut = actions[action][1]
            if len(actions[action]) == 3:
                description = actions[action][2]
            elif len(actions[action]) == 4:
                description = actions[action][3]
            if shortcut and description:
                data[description] = shortcut
        d = list(data.keys())
        d.sort()
        for desc in d:
            ret.append("<tr><td>%s</td><td>%s</td></tr>" % (desc, data[desc]))
        ret.append("</table>")
        return ret
if __name__ == "__main__":
    # Entry point: "-debug" anywhere on the command line enables console
    # logging; every other argument is opened as a URL in the main window.
    app = QtGui.QApplication([])
    debug = False
    if sys.argv[1:].count("-debug"):
        debug = True
    mainwin = MainWin(debug=debug)
    mainwin.show()
    for arg in sys.argv[1:]:
        if arg not in ["-debug"]:
            mainwin.load(arg)
    app.exec_()
| Python |
# This web app runs a contest for college football bowl games.
#
# Each bowl game has a name and two teams. We associate unique
# 2-letter id's to each game, and 2-4 letter id's to each team. For
# example:
#
# New Mexico Bowl (NM) -- Nevada (NEV) vs Arizona (ARIZ)
# Russell Athletic Bowl (RA) -- Rutgers (RUTG) vs Virginia Tech (VT)
#
# Each user can vote for which team will win each bowl. These choices
# are stored hierarchically like so:
#
# Player('alice')
# Choice(bowl='NM', team='NEV')
# Choice(bowl='RA', team='RUTG')
# Player('bob')
# Choice(bowl='NM', team='NEV')
# Choice(bowl='RA', team='VT')
#
# The actual game outcomes can be set by the admin user, and these are
# stored under a singleton object:
#
# Winners('singleton')
# Choice(bowl='NM', team='ARIZ')
# Choice(bowl='RA', team='VT')
#
# The users make their selections at /player/choose, which then posts
# to /player/save when the user clicks a team.
#
# Similarly, game outcomes go through /admin/choose and /admin/save.
#
# In addition, there is an overall summary at /public/scoreboard,
# viewable by anyone (including non-logged-in visitors).
import cgi
import datetime
import os
import time
import urllib
import webapp2
import jinja2 # configured in app.yaml
from google.appengine.api import users
from google.appengine.ext import db
# Templates live next to this module; loaded once at import time.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
# NB: All times Eastern.
# Each entry: (kickoff time, Eastern; bowl id; team1 id; team2 id;
#              bowl name; team1 name; team2 name).
BOWLS = [
    ('2012 Dec 15 1:00 pm', 'NM', 'NEV', 'ARIZ', 'New Mexico', 'Nevada', 'Arizona'),
    ('2012 Dec 15 4:30 pm', 'IP', 'TOL', 'USU', 'Idaho Potato', 'Toledo', 'Utah St'),
    ('2012 Dec 20 8:00 pm', 'PA', 'BYU', 'SDSU', 'Poinsettia', 'BYU', 'San Diego St'),
    ('2012 Dec 21 7:30 pm', 'BB', 'BALL', 'UCF', "Beef 'O' Brady's", 'Ball St', 'UCF'),
    ('2012 Dec 22 12:00 pm', 'NO', 'ECU', 'ULL', 'New Orleans', 'East Carolina', 'LA-Lafayette'),
    ('2012 Dec 22 3:30 pm', 'MA', 'WASH', 'BSU', 'Maaco', 'Washington', 'Boise St'),
    ('2012 Dec 24 8:00 pm', 'HI', 'FRES', 'SMU', "Hawai'i", 'Fresno St', 'SMU'),
    ('2012 Dec 26 7:30 pm', 'LC', 'WKU', 'CMU', 'Little Caesars Pizza', 'W Kentucky', 'Cent Michigan'),
    ('2012 Dec 27 3:00 pm', 'MI', 'SJSU', 'BGSU', 'Military', 'San Jose St', 'Bowling Green'),
    ('2012 Dec 27 6:30 pm', 'BK', 'CIN', 'DUKE', 'Belk', 'Cincinnati', 'Duke'),
    ('2012 Dec 27 9:45 pm', 'HO', 'BAY', 'UCLA', 'Holiday', 'Baylor', 'UCLA'),
    ('2012 Dec 28 2:00 pm', 'IN', 'OHIO', 'ULM', 'Independence', 'Ohio', 'LA-Monroe'),
    ('2012 Dec 28 5:30 pm', 'RA', 'RUTG', 'VT', 'Russell Athletic', 'Rutgers', 'Virginia Tech'),
    ('2012 Dec 28 9:00 pm', 'ME', 'MINN', 'TTU', 'Meineke Car Care', 'Minnesota', 'Texas Tech'),
    ('2012 Dec 29 11:45 am', 'AF', 'RICE', 'AFA', 'Armed Forces', 'Rice', 'Air Force'),
    ('2012 Dec 29 3:15 pm', 'PS', 'WVU', 'SYR', 'Pinstripe', 'West Virginia', 'Syracuse'),
    ('2012 Dec 29 4:00 pm', 'FH', 'NAVY', 'ASU', 'Fight Hunger', 'Navy', 'Arizona St'),
    ('2012 Dec 29 6:45 pm', 'AL', 'TEX', 'ORST', 'Alamo', 'Texas', 'Oregon St'),
    ('2012 Dec 29 10:15 pm', 'BW', 'TCU', 'MSU', 'Buffalo Wild Wings', 'TCU', 'Michigan St'),
    ('2012 Dec 31 12:00 pm', 'MU', 'NCST', 'VAN', 'Music City', 'NC State', 'Vanderbilt'),
    ('2012 Dec 31 2:00 pm', 'SN', 'USC', 'GT', 'Sun', 'USC', 'Georgia Tech'),
    ('2012 Dec 31 3:30 pm', 'LY', 'ISU', 'TLSA', 'Liberty', 'Iowa St', 'Tulsa'),
    ('2012 Dec 31 7:30 pm', 'CK', 'LSU', 'CLEM', 'Chick-fil-A', 'LSU', 'Clemson'),
    ('2013 Jan 1 12:00 pm', 'GA', 'MSST', 'NW', 'Gator', 'Miss. St', 'Northwestern'),
    ('2013 Jan 1 12:00 pm', 'HD', 'PUR', 'OKST', 'Heart of Dallas', 'Purdue', 'Oklahoma St'),
    ('2013 Jan 1 1:00 pm', 'OU', 'SCAR', 'MICH', 'Outback', 'South Carolina', 'Michigan'),
    ('2013 Jan 1 1:00 pm', 'C1', 'UGA', 'NEB', 'Capital One', 'Georgia', 'Nebraska'),
    ('2013 Jan 1 5:00 pm', 'RO', 'WIS', 'STAN', 'Rose', 'Wisconsin', 'Stanford'),
    ('2013 Jan 1 8:30 pm', 'OR', 'NIU', 'FSU', 'Orange', 'N Illinois', 'Florida St'),
    ('2013 Jan 2 8:30 pm', 'SG', 'LOU', 'FLA', 'Sugar', 'Louisville', 'Florida'),
    ('2013 Jan 3 8:30 pm', 'FA', 'ORE', 'KSU', 'Fiesta', 'Oregon', 'Kansas St'),
    ('2013 Jan 4 8:00 pm', 'CN', 'TA&M', 'OKLA', 'Cotton', 'Texas A&M', 'Oklahoma'),
    ('2013 Jan 5 1:00 pm', 'CS', 'PITT', 'MISS', 'Compass', 'Pittsburgh', 'Ole Miss'),
    ('2013 Jan 6 9:00 pm', 'GO', 'KENT', 'ARST', 'GoDaddy.com', 'Kent St', 'Arkansas St'),
    ('2013 Jan 7 8:30 pm', 'NC', 'ND', 'ALA', 'BCS National Championship', 'Notre Dame', 'Alabama'),
    ]
class Winners(db.Model):
    """Parent entity for Choice entities representing actual bowl outcomes.

    NB: We only create one such entity, and its key name is 'singleton'.
    It carries no properties of its own; it exists purely as the ancestor
    under which the real winners' Choice rows are stored.
    """
class Player(db.Model):
    """Represents a user who is guessing bowl results.

    The entity key name is the user's user_id(); the player's picks are
    stored as child Choice entities.
    """
    user = db.UserProperty(required=True)
    # Fraction of this player's picks for decided bowls that were correct;
    # recomputed by AdminUpdate.
    pct_correct = db.FloatProperty()
class Choice(db.Model):
    """The team chosen to win a given bowl.

    Each Choice has a Player parent, or has the Winners singleton as parent.
    The entity key name is the bowl id (see Save.save).
    """
    bowl = db.StringProperty(required=True)  # 2-letter bowl id, e.g. 'NM'
    team = db.StringProperty(required=True)  # team id, e.g. 'NEV'
class MainPage(webapp2.RequestHandler):
    """Landing page; shows admin links and a logout URL when appropriate."""

    def get(self):
        user = users.get_current_user()
        if user:
            logout_url = users.create_logout_url('/')
            admin = users.is_current_user_admin()
        else:
            logout_url = None
            admin = False
        tmpl = jinja_environment.get_template('index.html')
        self.response.out.write(tmpl.render(is_admin=admin, logout=logout_url))
class Choose(webapp2.RequestHandler):
    """Shared rendering logic for the player and admin selection pages."""

    def choose(self, parent, greeting, is_admin):
        """Render choose.html pre-populated with picks stored under parent."""
        query = Choice.all()
        query.ancestor(parent)
        picks = {c.bowl: c.team for c in query.run()}
        page = jinja_environment.get_template('choose.html')
        self.response.out.write(
            page.render(greeting=greeting, bowls=BOWLS,
                        choices=picks, is_admin=is_admin))
class PlayerChoose(Choose):
    """Selection page for a logged-in player."""

    def get(self):
        user = users.get_current_user()
        if user is None:
            self.response.out.write('<html><body>Login required</body></html>')
            return
        player = Player.get_or_insert(user.user_id(), user=user)
        self.choose(parent=player,
                    greeting='Welcome %s !' % user.nickname(),
                    is_admin=False)
class AdminChoose(Choose):
    """Selection page for the admin to record actual game winners."""

    def get(self):
        winners = Winners.get_or_insert('singleton')
        self.choose(parent=winners,
                    greeting='ADMIN PAGE: Select winners',
                    is_admin=True)
class Save(webapp2.RequestHandler):
    """Shared POST logic for persisting a single bowl pick."""

    def save(self, user):
        """Saves user's choice to datastore; user=None means admin mode.

        Reads 'bowl' and 'team' from the request. An empty 'team' deletes
        the existing pick. Non-admin saves are rejected once the game has
        kicked off. Writes a plain-text status message to the response.
        """
        bowl = self.request.get('bowl')
        team = self.request.get('team')
        # Verify inputs
        for d, b, t1, t2, _, _, _ in BOWLS:
            if bowl == b:
                if team and team not in [t1, t2]:
                    self.response.out.write(
                        'Invalid team for bowl %s: %s' %
                        (cgi.escape(bowl), cgi.escape(team)))
                    return
                # Only check time in non-admin case.
                if user is not None:
                    # UTC is 5 hours ahead of EST
                    utc_kickoff = (
                        datetime.datetime.strptime(d, '%Y %b %d %I:%M %p') +
                        datetime.timedelta(hours=5))
                    if datetime.datetime.utcnow() > utc_kickoff:
                        self.response.out.write('Game already started!')
                        return
                break
        else:
            # for/else: no break above means the bowl id was not found.
            self.response.out.write('Invalid bowl: %s' % cgi.escape(bowl))
            return
        # Store or delete a choice (remember: user is None => admin).
        if team:
            if user is None:
                parent = Winners.get_or_insert('singleton')
            else:
                parent = Player.get_or_insert(user.user_id(), user=user)
            # key_name=bowl makes a repeated save overwrite the same entity.
            Choice(parent=parent, key_name=bowl, bowl=bowl, team=team).put()
        else:
            if user is None:
                key = db.Key.from_path('Winners', 'singleton', 'Choice', bowl)
            else:
                key = db.Key.from_path('Player', user.user_id(), 'Choice', bowl)
            db.delete(key)
        self.response.out.write('Saved')
class PlayerSave(Save):
    """POST endpoint that saves a pick for the logged-in player."""

    def post(self):
        self.response.headers['Content-Type'] = 'text/plain'
        user = users.get_current_user()
        if user is None:
            self.response.out.write('Login required')
        else:
            self.save(user)
class AdminSave(Save):
    """POST endpoint that records an actual game outcome (admin only)."""

    def post(self):
        self.response.headers['Content-Type'] = 'text/plain'
        # user=None switches Save.save into admin mode.
        self.save(None)
def query_winners():
    """Returns a dict mapping each bowl id to the winning team's id."""
    singleton = Winners.get_or_insert('singleton')
    query = Choice.all()
    query.ancestor(singleton)
    return {c.bowl: c.team for c in query.run()}
class AdminUpdate(webapp2.RequestHandler):
    """POST endpoint that recomputes every player's pct_correct score."""

    def post(self):
        """Rescores all players against the recorded winners.

        pct_correct becomes the fraction of the player's picks, among bowls
        with a recorded winner, that match that winner (0.0 when no picked
        bowl has been decided). Writes 'OK' as text/plain when done.
        """
        self.response.headers['Content-Type'] = 'text/plain'
        winners = query_winners()
        # Update all the players. (Removed a dead 'players = []' local that
        # was never appended to or read.)
        player_query = db.GqlQuery('SELECT * FROM Player')
        for player in player_query.run():
            total = 0
            correct = 0
            choice_query = Choice.all()
            choice_query.ancestor(player)
            for c in choice_query.run():
                if c.bowl in winners:
                    total += 1
                    if winners[c.bowl] == c.team:
                        correct += 1
            # Guard against division by zero when nothing is decided yet.
            if total == 0:
                player.pct_correct = 0.0
            else:
                player.pct_correct = float(correct) / total
            player.put()
        self.response.out.write('OK')
def started_bowls():
    """Return the set of bowl ids whose kickoff time is already past."""
    # TODO: Represent bowls using a dict/class instead of a tuple.
    now = datetime.datetime.utcnow()
    started = set()
    for kickoff_et, bowl_id, _, _, _, _, _ in BOWLS:
        # UTC is 5 hours ahead of EST
        kickoff_utc = (datetime.datetime.strptime(kickoff_et, '%Y %b %d %I:%M %p')
                       + datetime.timedelta(hours=5))
        if kickoff_utc < now:
            started.add(bowl_id)
    return started
class Scoreboard(webapp2.RequestHandler):
    """Public page showing all players' picks, hiding un-started games."""

    def get(self):
        winners = query_winners()
        started = started_bowls()
        # Look up all the players, best score first.
        player_query = db.GqlQuery('SELECT * FROM Player '
                                   'ORDER BY pct_correct DESC')
        rows = []
        for player in player_query.run():
            choice_query = Choice.all()
            choice_query.ancestor(player)
            # Only reveal picks for games that have already kicked off.
            visible = {c.bowl: c.team
                       for c in choice_query.run() if c.bowl in started}
            rows.append((player, visible))
        tmpl = jinja_environment.get_template('scoreboard.html')
        self.response.out.write(tmpl.render(
            bowls=BOWLS, players=rows, winners=winners))
# URL routing table; debug=True surfaces stack traces in HTTP responses.
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/admin/choose', AdminChoose),
                               ('/admin/save', AdminSave),
                               ('/admin/update', AdminUpdate),
                               ('/player/choose', PlayerChoose),
                               ('/player/save', PlayerSave),
                               ('/public/scoreboard', Scoreboard)],
                              debug=True)
| Python |
# Python 2 quine: printing the template with its own repr substituted in
# reproduces this statement exactly.
x = 'x = %s; print x %% repr(x)'; print x % repr(x)
| Python |
#!/usr/bin/python
#
# pills.py exact 100 > pills.html
# pills.py rand 200 1000 > pills.html
#
# Common variables below:
# n - Number of pills to start with
# t - Total pills remaining (wholes + halves)
# w - Whole pills remaining
#
# We track w & t when simulating or computing probabilities.
# From state (w, t):
# choose a whole with prob w/t and move to state (w-1, t)
# choose a half with prob 1-w/t and move to state (w, t-1)
import random
import sys
def gen_rand(t):
w = t
while t:
yield w, t
if random.randrange(t) < w:
w -= 1
else:
t -= 1
def run_rand(n, trials):
    """Monte-Carlo estimate of the mean (wholes, totals) at every step.

    Runs gen_rand(n) 'trials' times and averages the per-step counts;
    returns a list of (mean_wholes, mean_totals) pairs of length 2*n.
    """
    whole_sums = [0] * 2*n
    total_sums = [0] * 2*n
    for _ in range(trials):
        for step, (w, t) in enumerate(gen_rand(n)):
            whole_sums[step] += w
            total_sums[step] += t
    denom = float(trials)
    for step in xrange(2*n):
        whole_sums[step] /= denom
        total_sums[step] /= denom
    return zip(whole_sums, total_sums)
def sum_cache(cache):
    """Return (E[w], E[t]): expected wholes and totals under 'cache'.

    cache maps (w, t) states to probabilities. Uses .items() instead of
    the Python-2-only .iteritems() (identical behavior here, and portable),
    and folds both expectations in a single pass over the dict.
    """
    expected_w = 0.0
    expected_t = 0.0
    for (w, t), p in cache.items():
        expected_w += w * p
        expected_t += t * p
    return (expected_w, expected_t)
def next_cache(cache, w, t, n):
    """One backward DP step: distribution at step i from the one at i+1.

    cache maps (wholes, totals) -> probability. Starting at state (w, t),
    walks the anti-diagonal (w-1, t+1), (w-2, t+2), ... while w >= 0 and
    t <= n, summing the two ways of arriving at each state. The result
    local was renamed from 'next', which shadowed the builtin of the same
    name.
    """
    result = {}
    while w >= 0 and t <= n:
        p = 0.0
        if (w+1, t) in cache:
            # Arrived by taking a whole pill from (w+1, t): prob (w+1)/t.
            p += cache[(w+1, t)] * float(w+1)/t
        if (w, t+1) in cache:
            # Arrived by taking a half pill from (w, t+1): prob 1 - w/(t+1).
            p += cache[(w, t+1)] * (1 - w/float(t+1))
        result[(w, t)] = p
        w -= 1
        t += 1
    return result
def gen_exact(n):
    """Yield the exact expected (wholes, totals) at each step for n pills."""
    # dist is the exact probability distribution over (w, t) states.
    dist = {(n, n): 1.0}
    yield sum_cache(dist)
    for diag in xrange(2*n-1, 0, -1):
        # Step to the anti-diagonal w + t == diag, entering near its middle.
        w = diag // 2
        t = diag - w
        dist = next_cache(dist, w, t, n)
        yield sum_cache(dist)
def run_exact(n):
    """Return the full exact expectation sequence for n pills as a list."""
    return list(gen_exact(n))
def usage():
    # Print CLI help and exit with a non-zero status (Python 2 print).
    print """usage:
  %s exact <n>
  %s rand <n> <trials>
""" % (sys.argv[0], sys.argv[0])
    sys.exit(1)
def main():
    # Parse the command line, compute the data series (exact DP or random
    # simulation), then emit a complete Google-Charts HTML page to stdout.
    if len(sys.argv) < 2:
        usage()
    if sys.argv[1] == 'exact':
        if len(sys.argv) != 3:
            usage()
        try:
            data = run_exact(int(sys.argv[2]))
        except ValueError as e:
            print e
            usage()
    elif sys.argv[1] == 'rand':
        if len(sys.argv) != 4:
            usage()
        try:
            data = run_rand(int(sys.argv[2]), int(sys.argv[3]))
        except ValueError as e:
            print e
            usage()
    else:
        usage()
    # Stacked area chart (halves vs wholes) plus a line chart of %-wholes.
    print """<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
var areadata = google.visualization.arrayToDataTable([
['Time', 'Halves', 'Wholes'],"""
    for i, (w, t) in enumerate(data):
        print " ['%d', %f, %f]," % (i, t - w, w)
    print """ ]);
var linedata = google.visualization.arrayToDataTable([
['Time', '% Wholes'],"""
    for i, (w, t) in enumerate(data):
        print " ['%d', %f]," % (i, float(w) / t)
    print """ ]);
new google.visualization.AreaChart(document.getElementById('areachart')).draw(areadata, {isStacked: true})
new google.visualization.LineChart(document.getElementById('linechart')).draw(linedata, {})
}
</script>
</head>
<body>
<div id="areachart" style="width: 900px; height: 500px;"></div>
<div id="linechart" style="width: 900px; height: 500px;"></div>
</body>
</html>"""
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import random
# Random terrain water-fill visualization ("trapping rain water").
# x[i] is the terrain height of column i; w[i] becomes the depth of water
# resting on top of column i.
h = 10
n = 20
x = [random.randrange(h) for _ in xrange(n)]
w = [0] * n
# Two-pointer sweep: always advance the lower side; the water level at a
# column is the running maximum seen from that side minus the terrain.
a, b = 0, n-1
aa, bb = x[0], x[-1]
while a < b:
    if x[a] < x[b]:
        a += 1
        aa = max(aa, x[a])
        w[a] = aa - x[a]
    else:
        b -= 1
        bb = max(bb, x[b])
        w[b] = bb - x[b]
# Render top-down, one row per height level: '#' terrain, '~' water,
# ' ' air (Python 2 comma-prints keep each row on one line).
for r in xrange(h-1, 0, -1):
    for i in xrange(n):
        if x[i] + w[i] >= r:
            if x[i] >= r:
                print '#',
            else:
                print '~',
        else:
            print ' ',
    print
| Python |
#!/usr/bin/python
import random
class Sample(object):
    """Uniform reservoir sample of up to k items from a stream."""

    def __init__(self, k):
        self._k = k        # reservoir capacity
        self._n = 0        # stream items seen so far
        self._values = []  # current reservoir contents

    def Add(self, x):
        """Offer one stream item; it is kept with probability k/n."""
        self._n += 1
        if self._n <= self._k:
            # Reservoir not full yet: always keep.
            self._values.append(x)
            return
        slot = random.randrange(self._n)
        if slot < self._k:
            self._values[slot] = x

    def Values(self):
        """Return the current reservoir (the live list, not a copy)."""
        return self._values
def main():
    # Empirical uniformity check: repeatedly sample 3 of 10 items and tally
    # how often each value is kept; each count should be near 3000.
    domain = 10
    size = 3
    dist = [0] * domain
    for _ in range(10000):
        sample = Sample(size)
        for i in range(domain):
            sample.Add(i)
        for v in sample.Values():
            dist[v] += 1
    print dist
if __name__ == '__main__':
    main()
| Python |
import re
from django import forms
from django.utils.translation import gettext_lazy as _
from fontrender.settings import MIN_FONT_SIZE,MAX_FONT_SIZE,MIN_LEADING,MAX_LEADING
# Letter-spacing (tracking) options: the stored value is a fraction of the
# font's space width; the label is the percentage shown to the user.
TRACKING_CHOICE = (
    (-1,'-100%'),
    (-0.75,'-75%'),
    (-0.5,'-50%'),
    (-0.25,'-25%'),
    (-0.1,'-10%'),
    (-0.05,'-5%'),
    (0,'0'),
    (0.05,'+5%'),
    (0.1,'+10%'),
    (0.25,'+25%'),
    (0.5,'+50%'),
    (0.75,'+75%'),
    (1,'+100%'),
)
# Paragraph alignment options: (stored value, translated label).
ALIGNMENT_CHOICE = (
    ('left',_('Flush left')),
    ('right',_('Flush right')),
    ('justified',_('Justified')),
    ('center',_('Centered')),
)
class ConverterForm(forms.Form):
    """Text-rendering options submitted from the converter page."""
    size_choices = range(MIN_FONT_SIZE,MAX_FONT_SIZE+1)
    leading_choices = ['auto']+range(MIN_LEADING,MAX_LEADING+1)
    size = forms.ChoiceField(choices=zip(size_choices,size_choices),required=True)
    color = forms.CharField()
    alignment = forms.ChoiceField(choices=ALIGNMENT_CHOICE,required=True)
    letter_spacing = forms.ChoiceField(choices=TRACKING_CHOICE,required=True)
    leading = forms.ChoiceField(choices=zip(leading_choices,leading_choices),required=True)
    vertical_scale = forms.IntegerField(min_value=0,max_value=1000,required=True)
    horizontal_scale = forms.IntegerField(min_value=0,max_value=1000,required=True)
    bold = forms.BooleanField(required=False)
    italic = forms.BooleanField(required=False)
    underline = forms.BooleanField(required=False)
    message = forms.CharField(widget=forms.Textarea,required=True)
    def clean_color(self):
        """Validate that color is a six-digit lowercase hex string."""
        color = self.cleaned_data['color'].strip()
        # NOTE(review): the pattern accepts only lowercase hex with no
        # leading '#', while the error message says '#RRGGBB' -- confirm
        # the intended input format before widening the regex.
        match = re.match(r'^([a-f0-9]{6})$', color)
        # Fix: compare to None with 'is', not '=='.
        if match is None:
            raise forms.ValidationError(_("Input color %(col)s is not in #RRGGBB format") % {'col': color})
        return color
| Python |
from fontrender.converter.models import Font
import cairo
import pango
import pangocairo
class PyGTKRenderer():
    """Renders self.text onto a cairo context via a pango layout."""

    def __init__(self,font,data, *args, **kwargs):
        """font: converter.models.Font; data: cleaned ConverterForm fields."""
        self.font = font
        self.size = data['size']
        # self.distance = float(data['distance'])
        self.text = data['message']
        # self.parent = super(FontRenderer,self)

    def image(self, surface, context, font="sans 14", position=None,
              color=None, box_width=None, alignment=pango.ALIGN_CENTER,
              line_spacing=None, letter_spacing=None, extra_kerning=None):
        """Draw the text onto context; centered on surface if position is None.

        color is an (r, g, b) float triple (defaults to black),
        letter_spacing applies over the whole text, and extra_kerning is a
        {char_index: amount} mapping of per-character spacing tweaks.
        """
        if color is None:
            color = (0.0, 0.0, 0.0)
        context.set_source_rgb(*color)
        pc = pangocairo.CairoContext(context)
        layout = pc.create_layout()
        layout.set_text(self.text)
        # TODO(review): the font family is hard-coded and both the 'font'
        # argument and self.font are ignored here -- confirm intent.
        layout.set_font_description(pango.FontDescription("%s %s" % ('Schluss\-Vignetten',self.size)))
        # Fix: removed a leftover debug dump that wrote the resolved font
        # description to 'nicka.txt' in the CWD on every render.
        if box_width: layout.set_width(box_width)
        layout.set_alignment(alignment)
        # Fix: was layout.set_spacing(spacing) -- 'spacing' is undefined and
        # raised NameError whenever a line_spacing value was supplied.
        if line_spacing: layout.set_spacing(line_spacing)
        alist = pango.AttrList()
        if letter_spacing:
            alist.insert(pango.AttrLetterSpacing(letter_spacing, 0, len(self.text)))
        if extra_kerning:
            for pos, kern in extra_kerning.iteritems():
                alist.insert(pango.AttrLetterSpacing(kern, pos, pos+1))
        layout.set_attributes(alist)
        if position is None:
            # Center the rendered block on the target surface.
            width, height = surface.get_width(), surface.get_height()
            w, h = layout.get_pixel_size()
            position = (width/2.0 - w/2.0, height/2.0 - h/2.0)
        context.move_to(*position)
        pc.show_layout(layout)
| Python |
from django.db import models
import fontfield
import datetime
class Tag(models.Model):
    """Free-form label attached to fonts (many-to-many from Font)."""
    name = models.CharField(max_length=30,blank=False)
    def __unicode__(self):
        return self.name
class Category(models.Model):
    """Grouping of fonts; rating counts renders of fonts in the category."""
    name = models.CharField(max_length=30,blank=False)
    slug = models.SlugField()
    description = models.TextField()
    # The callable is passed (not called) so the timestamp is per-save.
    created = models.DateTimeField(default=datetime.datetime.now)
    rating = models.PositiveIntegerField(default=0)
    class Meta:
        ordering = ['created']
        verbose_name_plural = "categories"
    def __unicode__(self):
        return self.name
    @models.permalink
    def get_absolute_url(self):
        return ('fontrender.converter.views.converter', (), {'slug_category': self.slug})
class Font(models.Model):
    """An uploadable font; rating counts how often it has been rendered."""
    name = models.CharField(max_length=30,blank=False)
    # Custom field that also generates a PNG preview (see fontfield.py).
    font_file = fontfield.FontField(upload_to='fonts',blank=False)
    slug = models.SlugField()
    tags = models.ManyToManyField(Tag,blank=False)
    category = models.ForeignKey(Category,blank=False)
    created = models.DateTimeField(default=datetime.datetime.now)
    rating = models.PositiveIntegerField(default=0)
    def __unicode__(self):
        return self.name
    @models.permalink
    def get_absolute_url(self):
        return ('fontrender.converter.views.converter', (),
                {'slug_category': self.category.slug, 'slug_font': self.slug})
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
# Doctest suite picked up by the Django test runner via the __test__ map.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.db.models.fields.files import FileField, FieldFile
from PIL import Image, ImageFont, ImageDraw
import os
def _add_preview(s):
parts = s.split(".")
parts[-1] = 'png'
return ".".join(parts)
class FontFieldFile(FieldFile):
    """FieldFile that keeps a rendered PNG preview alongside the font file."""
    def _get_thumb_path(self):
        # Filesystem path of the preview image (same name, .png extension).
        return _add_preview(self.path)
    thumb_path = property(_get_thumb_path)
    def _get_thumb_url(self):
        # Public URL of the preview image.
        return _add_preview(self.url)
    thumb_url = property(_get_thumb_url)
    def save(self, name, content, save=True):
        # After storing the font, render a pangram preview next to it.
        super(FontFieldFile, self).save(name, content, save)
        text = "The quick brown fox jumps over the lazy dog"
        font = ImageFont.truetype(self.path, 18)
        img = Image.new("RGBA", font.getsize(text), (0, 0, 0, 0))
        draw = ImageDraw.Draw(img)
        draw.text((0, 0), text, font=font, fill="black")
        img.save(self.thumb_path, 'PNG')
    def delete(self, save=True):
        # Remove the preview first, then the font file itself.
        if os.path.exists(self.thumb_path):
            os.remove(self.thumb_path)
        super(FontFieldFile, self).delete(save)
class FontField(FileField):
    """FileField for fonts; file access goes through FontFieldFile."""
    attr_class = FontFieldFile
    def __init__(self, thumb_width=400, thumb_height=64, *args, **kwargs):
        # NOTE(review): thumb_width/thumb_height are stored but never read
        # by the preview renderer above -- confirm whether they are needed.
        self.thumb_width = thumb_width
        self.thumb_height = thumb_height
        super(FontField, self).__init__(*args, **kwargs)
| Python |
from django.conf.urls.defaults import *
# Category and font are selected by slug; all three routes share one view.
urlpatterns = patterns('fontrender.converter.views',
    url(r'^$', 'converter', name='converter'),
    url(r'^(?P<slug_category>[-\w]+)/$', 'converter'),
    url(r'^(?P<slug_category>[-\w]+)/(?P<slug_font>[-\w]+)/$', 'converter'),
)
from BeautifulSoup import BeautifulSoup, NavigableString
from soupselect import select
from urllib2 import urlopen
from fontrender.converter.models import Font
from PIL import Image,ImageFont, ImageDraw
import pygame
import sys
def p_properties(pTag):
    """Extract (align, hscale, vscale) from a <p> tag's attribute pairs.

    'align' values (case-insensitive) may be spelled out or abbreviated;
    unknown align values leave the previous value in place. 'width' and
    'height' are passed through untouched as hscale/vscale.
    """
    aliases = {
        'left': 'left', 'l': 'left',
        'right': 'right', 'r': 'right',
        'center': 'center', 'c': 'center',
        'justify': 'justify', 'j': 'justify',
        'ljustify': 'ljustify', 'lj': 'ljustify',
        'rjustify': 'rjustify', 'rj': 'rjustify',
        'cjustify': 'cjustify', 'cj': 'cjustify',
    }
    align = hscale = vscale = None
    for attr, value in pTag.attrs:
        if attr == 'align':
            align = aliases.get(value.lower(), align)
        elif attr == 'width':
            hscale = value
        elif attr == 'height':
            vscale = value
    return (align,hscale,vscale)
def p_content(pTag):
    # For each child of the <p>, descend through nested inline tags noting
    # bold/italic/underline until the first text node, then print it
    # (Python 2 print; the style flags are computed but not yet used).
    for p in pTag.contents:
        bold = italic = underline = False
        q = p
        while type(q) is not NavigableString:
            if q.name == u'i': italic = True
            elif q.name == u'b': bold = True
            elif q.name == u'u': underline = True
            q = q.next
        text = q.string
        print text
def htmlparse(data):
    """Parse HTML text and return its <p> elements (CSS select 'p').

    Also prints top-level text nodes and each paragraph's text as a side
    effect; the per-paragraph properties are computed but discarded.
    """
    doc = BeautifulSoup(data)
    for c in doc.contents:
        if type(c) is NavigableString:
            print c
        elif c.name == u'p':
            prop = p_properties(c)
            p_content(c)
    return select(doc, 'p')
class FontRenderer(pygame.font.Font):
    """Renders the submitted message to a PIL image using a pygame font.

    data is the cleaned ConverterForm dict; font is a converter.models.Font
    whose file backs the pygame font.
    """
    def __init__(self,font,data, *args, **kwargs):
        self.letter_spacing = float(data['letter_spacing'])
        self.color = self.color_to_rgb(data['color'])
        self.data = data
        # NOTE(review): result unused, and htmlparse prints to stdout --
        # this looks like leftover debugging; confirm before relying on it.
        xxx = htmlparse(data['message'])
        pygame.font.init()
        super(FontRenderer,self).__init__(font.font_file.path, int(data['size']),*args,**kwargs)
        # 'auto' leading means one font height per line.
        if data['leading'] == 'auto':
            self.leading = self.get_height()
        else:
            self.leading = int(data['leading'])
        self.set_bold(data['bold'])
        self.set_italic(data['italic'])
        self.set_underline(data['underline'])
    def color_to_rgb(self,colorstring):
        """Convert 'rrggbb' hex (no '#') to an (r, g, b) int tuple."""
        r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
        r, g, b = [int(n, 16) for n in (r, g, b)]
        return (r, g, b)
    def get_letter_spacing(self, text):
        # Extra advance between text[0] and text[1]: the user's tracking
        # (a fraction of the space width) plus the pair's kerning delta.
        delta = self.size(text)[0] - (self.size(text[0])[0] + self.size(text[1])[0])
        return int(self.letter_spacing * self.size(" ")[0] + delta)
    def get_leading(self):
        # NOTE(review): self.leading is already in pixels, so multiplying by
        # get_height() again looks wrong; no caller is visible -- confirm.
        return int(self.get_height() * self.leading)
    def line_width(self,text):
        # Width of one line: first glyph, then letter-spacing + glyph width
        # for every following character pair.
        length = self.size(text[0])[0]
        return length + reduce(lambda res, x: res + self.get_letter_spacing(text[x:x+2]) + self.size(text[x+1])[0],
                               range(0,len(text)-1), 0)
    def line_size(self,text):
        return (self.line_width(text), self.get_height())
    def get_size(self):
        """Pixel size (w, h) of the full multi-line message before scaling."""
        parts = self.data['message'].split("\n")
        height = (len(parts)-1) * self.leading + self.get_height()
        width = reduce(lambda res, x: max([res,self.line_width(x)]), parts, self.line_width(parts[0]))
        return (width,height)
    def draw_line(self, text):
        """Render one line onto a transparent surface honoring alignment."""
        if len(text) == 0:
            return pygame.Surface((1,1),pygame.SRCALPHA,32)
        offset = 0
        surf = pygame.Surface((self.img_size[0],self.get_height()),pygame.SRCALPHA,32)
        if self.data['alignment'] == 'left':
            h = 0
        elif self.data['alignment'] == 'right':
            h = self.img_size[0] - self.line_size(text)[0]
        elif self.data['alignment'] == 'center':
            h = (self.img_size[0] - self.line_size(text)[0])/2
        elif self.data['alignment'] == 'justified':
            h = 0
            # Spread the slack evenly between adjacent characters.
            offset = int((self.img_size[0] - self.line_size(text)[0])/(len(text)-1))
        char = self.render(text[0], True, self.color)
        surf.blit(char,(h, 0))
        h += self.size(text[0])[0]
        for i in range(0,len(text)-1):
            h += self.get_letter_spacing(text[i:i+2]) + offset
            char = self.render(text[i+1], True, self.color)
            surf.blit(char,(h, 0))
            h += self.size(text[i+1])[0]
        return surf
    def image(self):
        """Render the whole message and return it as a (scaled) PIL image."""
        self.img_size = self.get_size()
        surf = pygame.Surface(self.img_size,pygame.SRCALPHA,32)
        h = 0
        for item in self.data['message'].split("\n"):
            surf.blit(self.draw_line(item.replace('\r','')), (0, h))
            h += self.leading
        # Scale percentages -> multipliers.
        hscale = float(self.data['horizontal_scale'])/100
        vscale = float(self.data['vertical_scale'])/100
        pil_image = Image.fromstring("RGBA",self.img_size,pygame.image.tostring(surf,'RGBA'))
        if hscale!=1 or vscale!=1:
            tu = (0,0, self.img_size[0]-1, self.img_size[1] - 1)
            pil_image = pil_image.transform((self.img_size[0]*hscale, self.img_size[1]*vscale), Image.EXTENT, tu, Image.BICUBIC)
        return pil_image
| Python |
from django.contrib import admin
from fontrender.converter.models import Tag, Category, Font
class CategoryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug': ('name',)}
class FontAdmin(admin.ModelAdmin):
    # Two-pane selector widget for the many-to-many tags field.
    filter_horizontal = ('tags',)
    search_fields = ('name',)
    list_filter = ('created',)
    prepopulated_fields = {'slug': ('name',)}
# Expose the converter models in the Django admin site.
admin.site.register(Tag)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Font, FontAdmin)
| Python |
from django.core.context_processors import csrf
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import render_to_response, get_object_or_404
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from fontrender.converter.models import Category, Font
from fontrender.settings import LANGUAGES
from fontrender.converter.font_renderer import FontRenderer
from fontrender.converter.font_pygtk import PyGTKRenderer
from forms import ConverterForm
#import cairo
#import pango
#import pangocairo
import StringIO
def converter(request, slug_category='', slug_font=''):
    """Three-level view: category list -> font list -> render form/result.

    With no slugs, lists all categories. With a category slug, lists its
    fonts (paginated, 10 per page). With both slugs, shows the render form;
    a valid POST returns the rendered text as a downloadable PNG.
    """
    categories = Category.objects.all().order_by("-rating")
    if slug_category == '':
        return render_to_response('mainpage.html', {'categories': categories,'LANGUAGES':LANGUAGES})
    cr = get_object_or_404(Category, slug=slug_category)
    if slug_font != '':
        font = get_object_or_404(Font, slug=slug_font)
        if request.method == 'POST':
            form = ConverterForm(request.POST)
            if form.is_valid():
                cd = form.cleaned_data
                # Count each successful render as a popularity vote.
                font.rating+=1
                cr.rating+=1
                font.save()
                cr.save()
                rendered_img = FontRenderer(font,cd)
                response = HttpResponse(content_type='image/png')
                response['Content-Disposition'] = 'attachment; filename=rendered_image.png'
                rendered_img.image().save(response, format='PNG')
                return response
            # Invalid POST falls through and re-renders the bound form.
        else:
            form = ConverterForm(
                initial={'message': 'Test', 'letter_spacing': 0, 'leading': 'auto',
                         'vertical_scale':100,'horizontal_scale':100}
            )
        return render_to_response('render_form.html',
                                  {'form': form, 'categories': categories,
                                   'cur_category':cr,'cur_font': font,'LANGUAGES':LANGUAGES})
    fonts_list_full = Font.objects.filter(category=cr).order_by("-rating")
    paginator = Paginator(fonts_list_full, 10)
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        fonts_list = paginator.page(page)
    except (EmptyPage, InvalidPage):
        # Out-of-range page numbers fall back to the last page.
        fonts_list = paginator.page(paginator.num_pages)
    return render_to_response('font_choose.html',
                              {'categories': categories, 'fonts': fonts_list,
                               'cur_category':cr,'LANGUAGES':LANGUAGES})
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
# Standard management entry point for Django <= 1.3 projects.
if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
# Standard management entry point for Django <= 1.3 projects.
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Register each installed app's admin.py with the admin site.
admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    (r'^fontrender/', include('fontrender.converter.urls')),
    # Language-switch endpoint used together with LocaleMiddleware.
    (r'^i18n/', include('django.conf.urls.i18n')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
from django.utils.translation import gettext_lazy as _
# Django settings for dtic project.
# NOTE(review): absolute Windows paths and SECRET_KEY are committed below;
# machine-specific paths and secrets should come from the environment.
DEBUG = True  # NOTE(review): must be disabled in production.
TEMPLATE_DEBUG = DEBUG
# Bounds used by ConverterForm for leading and font-size choice lists.
MIN_LEADING = 0
MAX_LEADING = 210
MIN_FONT_SIZE = 6
MAX_FONT_SIZE = 210
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'd:/work/projects/db/fontrender.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
LANGUAGES = (
    ('en', _('English')),
    ('ru', _(u'Russian')),
    ('uk', _(u'Ukrainian')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
#ROOT_URL = '/fontrender/'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = 'd:/work/projects/fontrender/tmp/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
STATIC_ROOT = 'd:/work/projects/fontrender/tmp'
STATIC_URL = '/static/'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/static/"
STATICFILES_ROOT = 'd:/work/projects/fontrender/tmp/'
# URL that handles the static files served from STATICFILES_ROOT.
# Example: "http://static.lawrence.com/", "http://example.com/static/"
STATICFILES_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# A list of locations of additional static files
STATICFILES_DIRS = ('d:/work/projects/fontrender/tmp/',)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): committed secret key -- rotate and load from environment.
SECRET_KEY = '()0ok6ft4v+4^!-y*+bp%(3ya4ew@husq9lexg8)9rqxsv#e5b'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'fontrender.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    'd:/work/projects/fontrender/templates',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'fontrender.converter',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request':{
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
'''
Created on May 28, 2010
@author: ivan
'''
import cgi
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import os
from google.appengine.ext.webapp import template
import pylast
from configuration import FConfiguration
from vk import Vkontakte
from pylast import WSError
# Last.fm API credentials and account details, read from app configuration.
API_KEY = FConfiguration().API_KEY
API_SECRET = FConfiguration().API_SECRET
username = FConfiguration().lfm_login
# pylast expects the MD5 hash of the password, never the plain text.
password_hash = pylast.md5(FConfiguration().lfm_password)
# Module-level singletons shared by all request handlers below.
network = pylast.get_lastfm_network(api_key=API_KEY, api_secret=API_SECRET, username=username, password_hash=password_hash)
vkontakte = Vkontakte(FConfiguration().vk_login, FConfiguration().vk_password)
def search_top_tracks(query):
    """Look up *query* as a Last.fm artist and return its top tracks as
    a list of Song objects, or None when the lookup fails."""
    found = network.get_artist(query)
    if not found:
        return None
    try:
        top = found.get_top_tracks()
    except WSError:
        # The webservice rejected the artist (unknown name, API error...).
        return None
    result = []
    for entry in top:
        # pylast has returned either TopItem objects (.item) or plain
        # dicts ({'item': ...}) depending on version; accept both shapes.
        try:
            payload = entry.item
        except AttributeError:
            payload = entry['item']
        result.append(Song(name=str(payload)))
    return result
class Song():
    """Plain value object describing a playable song."""
    def __init__(self, name="", path="", type=""):
        # Human-readable title, source URL, and an optional type tag.
        self.name = name
        self.path = path
        self.type = type
class SourcePath(webapp.RequestHandler):
    """Redirects the client to the direct mp3 URL for a search query."""
    def get(self):
        query = self.request.get('query')
        song = vkontakte.find_most_relative_song(query)
        # BUG FIX: find_most_relative_song returns None when nothing matched;
        # redirecting to None.path raised AttributeError (an HTTP 500 page).
        if song is None:
            self.error(404)
            return
        self.redirect(song.path)
class SearchPage(webapp.RequestHandler):
    """Renders search.html with the top tracks for the requested song."""
    def get(self):
        query = self.request.get('song')
        results = search_top_tracks(query) if query else []
        context = {
            'song': query.capitalize(),
            'songs': results,
        }
        template_path = os.path.join(os.path.dirname(__file__), 'search.html')
        self.response.out.write(template.render(template_path, context))
# URL routing: root and /search both render the search page; /source
# issues the redirect to the actual mp3 file.
application = webapp.WSGIApplication(
    [('/', SearchPage),
     ('/search', SearchPage),
     ('/source', SourcePath)],
    debug=True)


def main():
    # CGI entry point used by the App Engine runtime.
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
import sys
def debug(*args):
    # Print a debug-level message to stdout; args are rendered as a tuple.
    print "DEBUG:", args
def info(*args):
    # Print an info-level message to stdout; args are rendered as a tuple.
    print "INFO:", args
def error(*args):
    # Print an error-level message to stderr.
    # Prefixed with "ERROR:" for consistency with debug() and info() above,
    # which both label their output (the original printed args unlabelled).
    print >> sys.stderr, "ERROR:", args
| Python |
'''
Created on May 28, 2010
@author: ivan
'''
class Demo():
    """Minimal named object used by the demo request handler."""
    def __init__(self, name):
        # The only attribute this type carries.
        self.name = name
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 17, 2010
@author: ivan
'''
import urllib2
import urllib
import re
import time
from string import replace
from configuration import FConfiguration
class Vkontakte:
def __init__(self, email, password):
self.email = email
self.password = password
self.cookie = None
self.execute_time = time.time()
def isLive(self):
return self.get_s_value()
def get_s_value(self):
host = 'http://login.vk.com/?act=login'
post = urllib.urlencode({'email' : self.email,
'expire' : '',
'pass' : self.password,
'vk' : ''})
headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13 (.NET CLR 3.5.30729)',
'Host' : 'login.vk.com',
'Referer' : 'http://vkontakte.ru/index.php',
'Connection' : 'close',
'Pragma' : 'no-cache',
'Cache-Control' : 'no-cache',
}
conn = urllib2.Request(host, post, headers)
data = urllib2.urlopen(conn)
result = data.read()
value = re.findall(r"name='s' id='s' value='(.*?)'", result)
if value:
return value[0]
return None
def get_cookie(self):
if FConfiguration().cookie:
return FConfiguration().cookie
if self.cookie: return self.cookie
host = 'http://vkontakte.ru/login.php?op=slogin'
post = urllib.urlencode({'s' : self.get_s_value()})
headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
'Host' : 'vkontakte.ru',
'Referer' : 'http://login.vk.com/?act=login',
'Connection' : 'close',
'Cookie' : 'remixchk=5; remixsid=nonenone',
'Pragma' : 'no-cache',
'Cache-Control' : 'no-cache'
}
conn = urllib2.Request(host, post, headers)
data = urllib2.urlopen(conn)
cookie_src = data.info().get('Set-Cookie')
self.cookie = re.sub(r'(expires=.*?;\s|path=\/;\s|domain=\.vkontakte\.ru(?:,\s)?)', '', cookie_src)
FConfiguration().cookie = self.cookie
return self.cookie
def get_page(self, query):
if not query:
return None
host = 'http://vkontakte.ru/gsearch.php?section=audio&q=vasya#c[q]=some%20id&c[section]=audio'
post = urllib.urlencode({
"c[q]" : query,
"c[section]":"audio"
})
headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
'Host' : 'vkontakte.ru',
'Referer' : 'http://vkontakte.ru/index.php',
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With' : 'XMLHttpRequest',
'Connection' : 'close',
'Cookie' : 'remixlang=0; remixchk=5; audio_vol=100; %s' % self.get_cookie(),
'Pragma' : 'no-cache',
'Cache-Control' : ' no-cache'
}
conn = urllib2.Request(host, post, headers)
#Do not run to offten
cur_time = time.time()
if cur_time - self.execute_time < 0.5:
time.sleep(0.8)
self.execute_time = time.time()
data = urllib2.urlopen(conn);
result = data.read()
return result
def get_page_by_url(self, host_url):
if not host_url:
return host_url
host_url.replace("#","&")
post = host_url[host_url.find("?")+1:]
headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
'Host' : 'vkontakte.ru',
'Referer' : 'http://vkontakte.ru/index.php',
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With' : 'XMLHttpRequest',
'Connection' : 'close',
'Cookie' : 'remixlang=0; remixchk=5; audio_vol=100; %s' % self.get_cookie(),
'Pragma' : 'no-cache',
'Cache-Control' : ' no-cache'
}
conn = urllib2.Request(host_url, post, headers)
#Do not run to offten
cur_time = time.time()
if cur_time - self.execute_time < 0.5:
time.sleep(0.8)
self.execute_time = time.time()
data = urllib2.urlopen(conn);
result = data.read()
return result
def get_name_by(self, id, result_album):
for album in result_album:
id_album = album[0]
name = album[1]
if id_album == id:
return name
return None
def find_most_relative_song(self, song_title):
vkSongs = self.find_song_urls(song_title)
if not vkSongs:
return None
times_count = {}
for song in vkSongs:
time = song.time
if time in times_count:
times_count[time] = times_count[time] + 1
else:
times_count[time] = 1
#get most relatives times time
r_count = max(times_count.values())
r_time = self.find_time_value(times_count, r_count)
for song in vkSongs:
if song.time == r_time:
return song
return vkSongs[0]
def find_time_value(self, times_count, r_count):
for i in times_count:
if times_count[i] == r_count:
return i
return None
def convert_vk_songs_to_beans(self, vk_songs):
beans = []
for vk_song in vk_songs:
bean = VKSong(name=vk_song.album + " - " + vk_song.track, path=vk_song.path, type="");
beans.append(bean)
return beans
def find_song_urls(self, song_title):
page = self.get_page(song_title)
page = page.decode('cp1251')
#page = page.decode("cp1251")
#unicode(page, "cp1251")
#print page
reg_all = "([^<>]*)"
resultall = re.findall("return operate\(([\w() ,']*)\);", page, re.IGNORECASE)
result_album = re.findall(u"<b id=\\\\\"performer([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
result_track = re.findall(u"<span id=\\\\\"title([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
result_time = re.findall("<div class=\\\\\"duration\\\\\">" + reg_all + "<", page, re.IGNORECASE)
urls = []
ids = []
vkSongs = []
for result in resultall:
result = replace(result, "'", " ")
result = replace(result, ",", " ")
result = result.split()
if len(result) > 4:
id_id = result[0]
id_server = result[1]
id_folder = result[2]
id_file = result[3]
url = "http://cs" + id_server + ".vkontakte.ru/u" + id_folder + "/audio/" + id_file + ".mp3"
urls.append(url)
ids.append(id_id)
#print len(resultall), resultall
#print len(urls), urls
#print len(result_album), result_album
#print len(result_track), result_track
#print len(result_time), result_time
for i in xrange(len(result_time)):
id = ids[i]
path = urls[i]
album = self.get_name_by(id, result_album)
track = self.get_name_by(id, result_track)
time = result_time[i]
vkSong = VKSong(path, album, track, time)
vkSongs.append(vkSong)
return self.convert_vk_songs_to_beans(vkSongs)
def get_songs_by_url(self, url):
result = self.get_page_by_url(url)
try:
result=unicode(result)
except:
result=result
reg_all = "([^{</}]*)"
result_url = re.findall(ur"http:([\\/.0-9A-Z]*)", result, re.IGNORECASE)
result_artist = re.findall(u"q]="+reg_all+"'", result, re.IGNORECASE | re.UNICODE)
result_title = re.findall(u"\"title([0-9]*)\\\\\">"+ reg_all+"", result, re.IGNORECASE | re.UNICODE)
result_time = re.findall("duration\\\\\">" + reg_all, result, re.IGNORECASE | re.UNICODE)
result_lyr = re.findall(ur"showLyrics"+reg_all, result, re.IGNORECASE | re.UNICODE)
songs = []
j = 0
for i, artist in enumerate(result_artist):
path = "http:" +result_url[i].replace("\\/", "/")
title = self.to_good_chars(result_title[i][1])
if not title:
if len(result_lyr) > j:
title = result_lyr[j]
title = title[title.find(";'>")+3:]
j +=1
artist = self.to_good_chars(artist)
song = VKSong(path, artist, title, result_time[i]);
songs.append(song)
return self.convert_vk_songs_to_beans(songs)
def to_good_chars(self, line):
return line
class VKSong():
    """Value object for a single audio search hit.

    path/album/track/time come from the scraper; name/type are filled in
    when the song is converted to a generic display bean.
    """
    def __init__(self, path=None, album=None, track=None, time=None, name=None, type=None):
        self.path = path
        self.album = album
        self.track = track
        self.time = time
        self.name = name
        self.type = type

    def getTime(self):
        """Return the track duration, or a placeholder when unknown."""
        # BUG FIX: previously returned the global `time` module instead of
        # the instance attribute.
        if self.time:
            return self.time
        else:
            return "no time"

    def getFullDescription(self):
        # "[ album ] track duration", with None fields rendered as "".
        return "[ " + self.s(self.album) + " ] " + self.s(self.track) + " " + self.s(self.time)

    def __str__(self):
        return "" + self.s(self.album) + " " + self.s(self.track) + " " + self.s(self.time) + " " + self.s(self.path)

    def s(self, value):
        """None-safe string helper: empty string for falsy values."""
        if value:
            return value
        else:
            return ""
def get_group_id(str):
    """Extract the value following 'gid=' from a vkontakte group URL.

    Note: mirrors the original behavior when the marker is absent
    (find() == -1 makes the slice start at index 3).
    NOTE(review): the parameter shadows the builtin `str`; rename it once
    callers are audited for keyword use.
    """
    search = "gid="
    # Use the marker variable consistently (the literal was duplicated before).
    index = str.find(search)
    return str[index + len(search):]
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python |
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from demo import Demo
class MainPage(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, webapp World!')
demo = Demo("name");
print demo.path
# Route everything to the smoke-test page.
application = webapp.WSGIApplication([('/', MainPage)], debug=True)


def main():
    # CGI entry point used by the App Engine runtime.
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| Python |
# -*- coding: utf-8 -*-
#
# pylast - A Python interface to Last.fm (and other API compatible social networks)
# Copyright (C) 2008-2009 Amr Hassan
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# http://code.google.com/p/pylast/
import datetime
__version__ = '0.4'
__author__ = 'Amr Hassan'
__copyright__ = "Copyright (C) 2008-2009 Amr Hassan"
__license__ = "gpl"
__email__ = 'amr.hassan@gmail.com'
import hashlib
import httplib
import urllib
from xml.dom import minidom
import xml.dom
import time
import shelve
import tempfile
import sys
import htmlentitydefs
try:
import collections
except ImportError:
pass
# Webservice error codes returned in the <error code="..."> element
# (raised as WSError by _Request._check_response_for_errors).
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15

# Event attendance statuses (event.attend).
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'

# Chart periods accepted by the *.getTop* webservice methods.
PERIOD_OVERALL = 'overall'
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'

# Keys into Network.domain_names selecting a localized site domain.
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11

# Cover-art size indices.
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4

# Sort orders for artist.getImages.
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"

# Gender values used in user profiles.
USER_MALE = 'Male'
USER_FEMALE = 'Female'

# Audioscrobbler submission "source" codes.
SCROBBLE_SOURCE_USER = "P"
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST = "R"
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST = "E"
SCROBBLE_SOURCE_LASTFM = "L"
SCROBBLE_SOURCE_UNKNOWN = "U"

# Audioscrobbler submission "rating" codes.
SCROBBLE_MODE_PLAYED = ""
SCROBBLE_MODE_LOVED = "L"
SCROBBLE_MODE_BANNED = "B"
SCROBBLE_MODE_SKIPPED = "S"
"""
A list of the implemented webservices (from http://www.last.fm/api/intro)
=====================================
# Album
* album.addTags DONE
* album.getInfo DONE
* album.getTags DONE
* album.removeTag DONE
* album.search DONE
# Artist
* artist.addTags DONE
* artist.getEvents DONE
* artist.getImages DONE
* artist.getInfo DONE
* artist.getPodcast TODO
* artist.getShouts DONE
* artist.getSimilar DONE
* artist.getTags DONE
* artist.getTopAlbums DONE
* artist.getTopFans DONE
* artist.getTopTags DONE
* artist.getTopTracks DONE
* artist.removeTag DONE
* artist.search DONE
* artist.share DONE
* artist.shout DONE
# Auth
* auth.getMobileSession DONE
* auth.getSession DONE
* auth.getToken DONE
# Event
* event.attend DONE
* event.getAttendees DONE
* event.getInfo DONE
* event.getShouts DONE
* event.share DONE
* event.shout DONE
# Geo
* geo.getEvents
* geo.getTopArtists
* geo.getTopTracks
# Group
* group.getMembers DONE
* group.getWeeklyAlbumChart DONE
* group.getWeeklyArtistChart DONE
* group.getWeeklyChartList DONE
* group.getWeeklyTrackChart DONE
# Library
* library.addAlbum DONE
* library.addArtist DONE
* library.addTrack DONE
* library.getAlbums DONE
* library.getArtists DONE
* library.getTracks DONE
# Playlist
* playlist.addTrack DONE
* playlist.create DONE
* playlist.fetch DONE
# Radio
* radio.getPlaylist
* radio.tune
# Tag
* tag.getSimilar DONE
* tag.getTopAlbums DONE
* tag.getTopArtists DONE
* tag.getTopTags DONE
* tag.getTopTracks DONE
* tag.getWeeklyArtistChart DONE
* tag.getWeeklyChartList DONE
* tag.search DONE
# Tasteometer
* tasteometer.compare DONE
# Track
* track.addTags DONE
* track.ban DONE
* track.getInfo DONE
* track.getSimilar DONE
* track.getTags DONE
* track.getTopFans DONE
* track.getTopTags DONE
* track.love DONE
* track.removeTag DONE
* track.search DONE
* track.share DONE
# User
* user.getEvents DONE
* user.getFriends DONE
* user.getInfo DONE
* user.getLovedTracks DONE
* user.getNeighbours DONE
* user.getPastEvents DONE
* user.getPlaylists DONE
* user.getRecentStations TODO
* user.getRecentTracks DONE
* user.getRecommendedArtists DONE
* user.getRecommendedEvents DONE
* user.getShouts DONE
* user.getTopAlbums DONE
* user.getTopArtists DONE
* user.getTopTags DONE
* user.getTopTracks DONE
* user.getWeeklyAlbumChart DONE
* user.getWeeklyArtistChart DONE
* user.getWeeklyChartList DONE
* user.getWeeklyTrackChart DONE
* user.shout DONE
# Venue
* venue.getEvents DONE
* venue.getPastEvents DONE
* venue.search DONE
"""
class Network(object):
    """
    A music social network website that is Last.fm or one exposing a Last.fm compatible API
    """

    def __init__(self, name, homepage, ws_server, api_key, api_secret, session_key, submission_server, username, password_hash,
                 domain_names, urls):
        """
        name: the name of the network
        homepage: the homepage url
        ws_server: a (host, path) tuple locating the webservices server
        api_key: a provided API_KEY
        api_secret: a provided API_SECRET
        session_key: a generated session_key or None
        submission_server: the url of the server to which tracks are submitted (scrobbled)
        username: a username of a valid user
        password_hash: the output of pylast.md5(password) where password is the user's password
        domain_names: a dict mapping each DOMAIN_* value to a string domain name
        urls: a dict mapping types to urls

        if username and password_hash were provided and not session_key, session_key will be
        generated automatically when needed.
        Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
        You should use a preconfigured network object through a get_*_network(...) method instead of creating an object
        of this class, unless you know what you're doing.
        """
        self.ws_server = ws_server
        self.submission_server = submission_server
        self.name = name
        self.homepage = homepage
        self.api_key = api_key
        self.api_secret = api_secret
        self.session_key = session_key
        self.username = username
        self.password_hash = password_hash
        self.domain_names = domain_names
        self.urls = urls
        self.cache_backend = None
        self.proxy_enabled = False
        self.proxy = None
        # Timestamp of the last webservice call, used by _delay_call().
        self.last_call_time = 0
        # Generate a session_key up front when full credentials are available.
        if (self.api_key and self.api_secret) and not self.session_key and (self.username and self.password_hash):
            sk_gen = SessionKeyGenerator(self)
            self.session_key = sk_gen.get_session_key(self.username, self.password_hash)

    def get_artist(self, artist_name):
        """
        Return an Artist object
        """
        return Artist(artist_name, self)

    def get_track(self, artist, title):
        """
        Return a Track object
        """
        return Track(artist, title, self)

    def get_album(self, artist, title):
        """
        Return an Album object
        """
        return Album(artist, title, self)

    def get_authenticated_user(self):
        """
        Returns the authenticated user
        """
        return AuthenticatedUser(self)

    def get_country(self, country_name):
        """
        Returns a country object
        """
        return Country(country_name, self)

    def get_group(self, name):
        """
        Returns a Group object
        """
        return Group(name, self)

    def get_user(self, username):
        """
        Returns a user object
        """
        return User(username, self)

    def get_tag(self, name):
        """
        Returns a tag object
        """
        return Tag(name, self)

    def get_scrobbler(self, client_id, client_version):
        """
        Returns a Scrobbler object used for submitting tracks to the server

        Quote from http://www.last.fm/api/submissions:
        ========
        Client identifiers are used to provide a centrally managed database of
        the client versions, allowing clients to be banned if they are found to
        be behaving undesirably. The client ID is associated with a version
        number on the server, however these are only incremented if a client is
        banned and do not have to reflect the version of the actual client application.

        During development, clients which have not been allocated an identifier should
        use the identifier tst, with a version number of 1.0. Do not distribute code or
        client implementations which use this test identifier. Do not use the identifiers
        used by other clients.
        =========

        To obtain a new client identifier please contact:
        * Last.fm: submissions@last.fm
        * # TODO: list others

        ...and provide us with the name of your client and its homepage address.
        """
        return Scrobbler(self, client_id, client_version)

    def _get_language_domain(self, domain_language):
        """
        Returns the mapped domain name of the network to a DOMAIN_* value
        (None when the language is not mapped).
        """
        if domain_language in self.domain_names:
            return self.domain_names[domain_language]

    def _get_url(self, domain, type):
        # Build an absolute page url for the given language domain and url type.
        return "http://%s/%s" %(self._get_language_domain(domain), self.urls[type])

    def _get_ws_auth(self):
        """
        Returns a (API_KEY, API_SECRET, SESSION_KEY) tuple.
        """
        return (self.api_key, self.api_secret, self.session_key)

    def _delay_call(self):
        """
        Makes sure that web service calls are at least a second apart
        """
        # delay time in seconds
        DELAY_TIME = 1.0
        now = time.time()
        # NOTE(review): sleeps a fixed second rather than the remaining delta.
        if (now - self.last_call_time) < DELAY_TIME:
            time.sleep(1)
        self.last_call_time = now

    def create_new_playlist(self, title, description):
        """
        Creates a playlist for the authenticated user and returns it
        title: The title of the new playlist.
        description: The description of the new playlist.
        """
        params = {}
        params['title'] = _unicode(title)
        params['description'] = _unicode(description)
        doc = _Request(self, 'playlist.create', params).execute(False)
        e_id = doc.getElementsByTagName("id")[0].firstChild.data
        user = doc.getElementsByTagName('playlists')[0].getAttribute('user')
        return Playlist(user, e_id, self)

    def get_top_tags(self, limit=None):
        """Returns a sequence of the most used tags as a sequence of TopItem objects.

        limit: maximum number of items to return; None (the default) returns all.
        """
        doc = _Request(self, "tag.getTopTags").execute(True)
        seq = []
        for node in doc.getElementsByTagName("tag"):
            # BUG FIX: with the default limit=None the old test
            # `len(seq) < limit` was always False in Python 2 (int < None),
            # so the method always returned an empty list.
            if limit and len(seq) >= limit:
                break
            tag = Tag(_extract(node, "name"), self)
            weight = _number(_extract(node, "count"))
            seq.append(TopItem(tag, weight))
        return seq

    def enable_proxy(self, host, port):
        """Enable a default web proxy"""
        self.proxy = [host, _number(port)]
        self.proxy_enabled = True

    def disable_proxy(self):
        """Disable using the web proxy"""
        self.proxy_enabled = False

    def is_proxy_enabled(self):
        """Returns True if a web proxy is enabled."""
        return self.proxy_enabled

    def _get_proxy(self):
        """Returns proxy details."""
        return self.proxy

    def enable_caching(self, file_path = None):
        """Enables caching request-wide for all cachable calls.
        In choosing the backend used for caching, it will try _SqliteCacheBackend first if
        the module sqlite3 is present. If not, it will fallback to _ShelfCacheBackend which uses shelve.Shelf objects.

        * file_path: A file path for the backend storage file. If
        None set, a temp file would probably be created, according the backend.
        """
        if not file_path:
            file_path = tempfile.mktemp(prefix="pylast_tmp_")
        self.cache_backend = _ShelfCacheBackend(file_path)

    def disable_caching(self):
        """Disables all caching features."""
        self.cache_backend = None

    def is_caching_enabled(self):
        """Returns True if caching is enabled."""
        # Identity comparison with None is the correct idiom here.
        return self.cache_backend is not None

    def _get_cache_backend(self):
        return self.cache_backend

    def search_for_album(self, album_name):
        """Searches for an album by its name. Returns a AlbumSearch object.
        Use get_next_page() to retreive sequences of results."""
        return AlbumSearch(album_name, self)

    def search_for_artist(self, artist_name):
        """Searches of an artist by its name. Returns a ArtistSearch object.
        Use get_next_page() to retreive sequences of results."""
        return ArtistSearch(artist_name, self)

    def search_for_tag(self, tag_name):
        """Searches of a tag by its name. Returns a TagSearch object.
        Use get_next_page() to retreive sequences of results."""
        return TagSearch(tag_name, self)

    def search_for_track(self, artist_name, track_name):
        """Searches of a track by its name and its artist. Set artist to an empty string if not available.
        Returns a TrackSearch object.
        Use get_next_page() to retreive sequences of results."""
        return TrackSearch(artist_name, track_name, self)

    def search_for_venue(self, venue_name, country_name):
        """Searches of a venue by its name and its country. Set country_name to an empty string if not available.
        Returns a VenueSearch object.
        Use get_next_page() to retreive sequences of results."""
        return VenueSearch(venue_name, country_name, self)

    def get_track_by_mbid(self, mbid):
        """Looks up a track by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "track.getInfo", params).execute(True)
        return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)

    def get_artist_by_mbid(self, mbid):
        """Loooks up an artist by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "artist.getInfo", params).execute(True)
        return Artist(_extract(doc, "name"), self)

    def get_album_by_mbid(self, mbid):
        """Looks up an album by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "album.getInfo", params).execute(True)
        return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def get_lastfm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
    """
    Returns a preconfigured Network object for Last.fm

    api_key: a provided API_KEY
    api_secret: a provided API_SECRET
    session_key: a generated session_key or None
    username: a username of a valid user
    password_hash: the output of pylast.md5(password) where password is the user's password

    if username and password_hash were provided and not session_key, session_key will be
    generated automatically when needed.

    Either a valid session_key or a combination of username and password_hash must be present for scrobbling.

    Most read-only webservices only require an api_key and an api_secret, see about obtaining them from:
    http://www.last.fm/api/account
    """
    return Network (
        name = "Last.fm",
        homepage = "http://last.fm",
        # (host, path) tuple consumed by _Request._download_response.
        ws_server = ("ws.audioscrobbler.com", "/2.0/"),
        api_key = api_key,
        api_secret = api_secret,
        session_key = session_key,
        submission_server = "http://post.audioscrobbler.com:80/",
        username = username,
        password_hash = password_hash,
        # Localized site domains, keyed by the DOMAIN_* constants.
        domain_names = {
            DOMAIN_ENGLISH: 'www.last.fm',
            DOMAIN_GERMAN: 'www.lastfm.de',
            DOMAIN_SPANISH: 'www.lastfm.es',
            DOMAIN_FRENCH: 'www.lastfm.fr',
            DOMAIN_ITALIAN: 'www.lastfm.it',
            DOMAIN_POLISH: 'www.lastfm.pl',
            DOMAIN_PORTUGUESE: 'www.lastfm.com.br',
            DOMAIN_SWEDISH: 'www.lastfm.se',
            DOMAIN_TURKISH: 'www.lastfm.com.tr',
            DOMAIN_RUSSIAN: 'www.lastfm.ru',
            DOMAIN_JAPANESE: 'www.lastfm.jp',
            DOMAIN_CHINESE: 'cn.last.fm',
        },
        # Page url templates, filled in by Network._get_url callers.
        urls = {
            "album": "music/%(artist)s/%(album)s",
            "artist": "music/%(artist)s",
            "event": "event/%(id)s",
            "country": "place/%(country_name)s",
            "playlist": "user/%(user)s/library/playlists/%(appendix)s",
            "tag": "tag/%(name)s",
            "track": "music/%(artist)s/_/%(title)s",
            "group": "group/%(name)s",
            "user": "user/%(name)s",
        }
    )
def get_librefm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
    """
    Returns a preconfigured Network object for Libre.fm

    api_key: a provided API_KEY
    api_secret: a provided API_SECRET
    session_key: a generated session_key or None
    username: a username of a valid user
    password_hash: the output of pylast.md5(password) where password is the user's password

    if username and password_hash were provided and not session_key, session_key will be
    generated automatically when needed.
    """
    return Network (
        name = "Libre.fm",
        homepage = "http://alpha.dev.libre.fm",
        # (host, path) tuple consumed by _Request._download_response.
        ws_server = ("alpha.dev.libre.fm", "/2.0/"),
        api_key = api_key,
        api_secret = api_secret,
        session_key = session_key,
        submission_server = "http://turtle.libre.fm:80/",
        username = username,
        password_hash = password_hash,
        # Libre.fm serves every language from the same host.
        domain_names = {
            DOMAIN_ENGLISH: "alpha.dev.libre.fm",
            DOMAIN_GERMAN: "alpha.dev.libre.fm",
            DOMAIN_SPANISH: "alpha.dev.libre.fm",
            DOMAIN_FRENCH: "alpha.dev.libre.fm",
            DOMAIN_ITALIAN: "alpha.dev.libre.fm",
            DOMAIN_POLISH: "alpha.dev.libre.fm",
            DOMAIN_PORTUGUESE: "alpha.dev.libre.fm",
            DOMAIN_SWEDISH: "alpha.dev.libre.fm",
            DOMAIN_TURKISH: "alpha.dev.libre.fm",
            DOMAIN_RUSSIAN: "alpha.dev.libre.fm",
            DOMAIN_JAPANESE: "alpha.dev.libre.fm",
            DOMAIN_CHINESE: "alpha.dev.libre.fm",
        },
        # Page url templates, filled in by Network._get_url callers.
        urls = {
            "album": "artist/%(artist)s/album/%(album)s",
            "artist": "artist/%(artist)s",
            "event": "event/%(id)s",
            "country": "place/%(country_name)s",
            "playlist": "user/%(user)s/library/playlists/%(appendix)s",
            "tag": "tag/%(name)s",
            "track": "music/%(artist)s/_/%(title)s",
            "group": "group/%(name)s",
            "user": "user/%(name)s",
        }
    )
class _ShelfCacheBackend(object):
"""Used as a backend for caching cacheable requests."""
def __init__(self, file_path = None):
self.shelf = shelve.open(file_path)
def get_xml(self, key):
return self.shelf[key]
def set_xml(self, key, xml_string):
self.shelf[key] = xml_string
def has_key(self, key):
return key in self.shelf.keys()
class _Request(object):
"""Representing an abstract web service operation."""
def __init__(self, network, method_name, params = {}):
self.params = params
self.network = network
(self.api_key, self.api_secret, self.session_key) = network._get_ws_auth()
self.params["api_key"] = self.api_key
self.params["method"] = method_name
if network.is_caching_enabled():
self.cache = network._get_cache_backend()
if self.session_key:
self.params["sk"] = self.session_key
self.sign_it()
def sign_it(self):
"""Sign this request."""
if not "api_sig" in self.params.keys():
self.params['api_sig'] = self._get_signature()
def _get_signature(self):
"""Returns a 32-character hexadecimal md5 hash of the signature string."""
keys = self.params.keys()[:]
keys.sort()
string = ""
for name in keys:
string += name
string += self.params[name]
string += self.api_secret
return md5(string)
def _get_cache_key(self):
"""The cache key is a string of concatenated sorted names and values."""
keys = self.params.keys()
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + _string(self.params[key])
return hashlib.sha1(cache_key).hexdigest()
def _get_cached_response(self):
"""Returns a file object of the cached response."""
if not self._is_cached():
response = self._download_response()
self.cache.set_xml(self._get_cache_key(), response)
return self.cache.get_xml(self._get_cache_key())
def _is_cached(self):
"""Returns True if the request is already in cache."""
return self.cache.has_key(self._get_cache_key())
def _download_response(self):
"""Returns a response body string from the server."""
# Delay the call if necessary
#self.network._delay_call() # enable it if you want.
data = []
for name in self.params.keys():
data.append('='.join((name, urllib.quote_plus(_string(self.params[name])))))
data = '&'.join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
'Accept-Charset': 'utf-8',
'User-Agent': "pylast" + '/' + __version__
}
(HOST_NAME, HOST_SUBDIR) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = httplib.HTTPConnection(host = self._get_proxy()[0], port = self._get_proxy()[1])
conn.request(method='POST', url="http://" + HOST_NAME + HOST_SUBDIR,
body=data, headers=headers)
else:
conn = httplib.HTTPConnection(host=HOST_NAME)
conn.request(method='POST', url=HOST_SUBDIR, body=data, headers=headers)
response = conn.getresponse()
response_text = _unicode(response.read())
self._check_response_for_errors(response_text)
return response_text
def execute(self, cacheable = False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response))
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
doc = minidom.parseString(_string(response))
e = doc.getElementsByTagName('lfm')[0]
if e.getAttribute('status') != "ok":
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
class SessionKeyGenerator(object):
    """Methods of generating a session key:
    1) Web Authentication:
        a. network = get_*_network(API_KEY, API_SECRET)
        b. sg = SessionKeyGenerator(network)
        c. url = sg.get_web_auth_url()
        d. Ask the user to open the url and authorize you, and wait for it.
        e. session_key = sg.get_web_auth_session_key(url)
    2) Username and Password Authentication:
        a. network = get_*_network(API_KEY, API_SECRET)
        b. username = raw_input("Please enter your username: ")
        c. password_hash = pylast.md5(raw_input("Please enter your password: "))
        d. session_key = SessionKeyGenerator(network).get_session_key(username, password_hash)
    A session key's lifetime is infinite, unless the user revokes the rights of the given API Key.
    If you create a Network object with just an API_KEY and API_SECRET and a username and a password_hash, a
    SESSION_KEY will be automatically generated for that network and stored in it so you don't have to do this
    manually, unless you want to.
    """
    def __init__(self, network):
        self.network = network
        # Maps the auth urls handed to the user -> the token embedded in
        # them, so get_web_auth_session_key(url) can recover the token.
        self.web_auth_tokens = {}
    def _get_web_auth_token(self):
        """Retrieves a token from the network for web authentication.
        The token then has to be authorized via the returned auth url
        before a session can be created from it.
        """
        request = _Request(self.network, 'auth.getToken')
        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()
        doc = request.execute()
        e = doc.getElementsByTagName('token')[0]
        return e.firstChild.data
    def get_web_auth_url(self):
        """Returns the authorization url. The user must open it and authorize
        you first; then call get_web_auth_session_key(url)."""
        token = self._get_web_auth_token()
        url = '%(homepage)s/api/auth/?api_key=%(api)s&token=%(token)s' % \
            {"homepage": self.network.homepage, "api": self.network.api_key, "token": token}
        self.web_auth_tokens[url] = token
        return url
    def get_web_auth_session_key(self, url):
        """Retrieves the session key of a web authorization process by its url."""
        if url in self.web_auth_tokens.keys():
            token = self.web_auth_tokens[url]
        else:
            token = "" #that's gonna raise a WSError of an unauthorized token when the request is executed.
        request = _Request(self.network, 'auth.getSession', {'token': token})
        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()
        doc = request.execute()
        return doc.getElementsByTagName('key')[0].firstChild.data
    def get_session_key(self, username, password_hash):
        """Retrieve a session key with a username and a md5 hash of the user's password."""
        params = {"username": username, "authToken": md5(username + password_hash)}
        request = _Request(self.network, "auth.getMobileSession", params)
        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()
        doc = request.execute()
        return _extract(doc, "key")
def _namedtuple(name, children):
"""
collections.namedtuple is available in (python >= 2.6)
"""
v = sys.version_info
if v[1] >= 6 and v[0] < 3:
return collections.namedtuple(name, children)
else:
def fancydict(*args):
d = {}
i = 0
for child in children:
d[child.strip()] = args[i]
i += 1
return d
return fancydict
# Lightweight record types returned by the web-service wrappers below.
# On Python >= 2.6 these are real namedtuples; otherwise _namedtuple
# degrades to a factory producing plain dicts keyed by these field names.
TopItem = _namedtuple("TopItem", ["item", "weight"])
SimilarItem = _namedtuple("SimilarItem", ["item", "match"])
LibraryItem = _namedtuple("LibraryItem", ["item", "playcount", "tagcount"])
PlayedTrack = _namedtuple("PlayedTrack", ["track", "playback_date", "timestamp"])
LovedTrack = _namedtuple("LovedTrack", ["track", "date", "timestamp"])
ImageSizes = _namedtuple("ImageSizes", ["original", "large", "largesquare", "medium", "small", "extralarge"])
Image = _namedtuple("Image", ["title", "url", "dateadded", "format", "owner", "sizes", "votes"])
Shout = _namedtuple("Shout", ["body", "author", "date"])
def _string_output(funct):
    """Decorator: coerce *funct*'s return value to a string via _string().

    Generalized to forward keyword arguments as well as positional ones,
    so decorated callables keep their full calling convention.
    """
    def r(*args, **kwargs):
        return _string(funct(*args, **kwargs))
    return r
class _BaseObject(object):
"""An abstract webservices object."""
network = None
def __init__(self, network):
self.network = network
def _request(self, method_name, cacheable = False, params = None):
if not params:
params = self._get_params()
return _Request(self.network, method_name, params).execute(cacheable)
def _get_params(self):
"""Returns the most common set of parameters between all objects."""
return {}
def __hash__(self):
return hash(self.network) + \
hash(str(type(self)) + "".join(self._get_params().keys() + self._get_params().values()).lower())
class _Taggable(object):
    """Common functions for classes with tags (mixin for Album, Artist, Track)."""
    def __init__(self, ws_prefix):
        # ws_prefix is the web-service namespace, e.g. "album" or "track",
        # used to build method names like "album.addTags".
        self.ws_prefix = ws_prefix
    def add_tags(self, *tags):
        """Adds one or several tags.
        * *tags: Any number of tag names or Tag objects.
        """
        for tag in tags:
            self._add_tag(tag)
    def _add_tag(self, tag):
        """Adds one tag.
        * tag: one tag name or a Tag object.
        """
        if isinstance(tag, Tag):
            tag = tag.get_name()
        params = self._get_params()
        params['tags'] = _unicode(tag)
        self._request(self.ws_prefix + '.addTags', False, params)
    def _remove_tag(self, single_tag):
        """Remove a user's tag from this object.
        * single_tag: one tag name or a Tag object.
        """
        if isinstance(single_tag, Tag):
            single_tag = single_tag.get_name()
        params = self._get_params()
        params['tag'] = _unicode(single_tag)
        self._request(self.ws_prefix + '.removeTag', False, params)
    def get_tags(self):
        """Returns a list of the tags set by the user to this object."""
        # Uncacheable because it can be dynamically changed by the user.
        params = self._get_params()
        doc = self._request(self.ws_prefix + '.getTags', False, params)
        tag_names = _extract_all(doc, 'name')
        tags = []
        for tag in tag_names:
            tags.append(Tag(tag, self.network))
        return tags
    def remove_tags(self, *tags):
        """Removes one or several tags from this object.
        * *tags: Any number of tag names or Tag objects.
        """
        for tag in tags:
            self._remove_tag(tag)
    def clear_tags(self):
        """Clears all the user-set tags. """
        self.remove_tags(*(self.get_tags()))
    def set_tags(self, *tags):
        """Sets this object's tags to only those tags.
        * *tags: any number of tag names.
        """
        # Tags are compared case-insensitively (the c_* lists) but the
        # original spellings are kept for the actual add/remove calls.
        c_old_tags = []
        old_tags = []
        c_new_tags = []
        new_tags = []
        to_remove = []
        to_add = []
        tags_on_server = self.get_tags()
        for tag in tags_on_server:
            c_old_tags.append(tag.get_name().lower())
            old_tags.append(tag.get_name())
        for tag in tags:
            c_new_tags.append(tag.lower())
            new_tags.append(tag)
        for i in range(0, len(old_tags)):
            if not c_old_tags[i] in c_new_tags:
                to_remove.append(old_tags[i])
        for i in range(0, len(new_tags)):
            if not c_new_tags[i] in c_old_tags:
                to_add.append(new_tags[i])
        # Remove first, then add; tags present in both sets are untouched.
        self.remove_tags(*to_remove)
        self.add_tags(*to_add)
    def get_top_tags(self, limit = None):
        """Returns a list of the most frequently used Tags on this object,
        as TopItem(Tag, count) tuples.
        * limit: maximum number of items; None means all.
        """
        doc = self._request(self.ws_prefix + '.getTopTags', True)
        elements = doc.getElementsByTagName('tag')
        seq = []
        for element in elements:
            if limit and len(seq) >= limit:
                break
            tag_name = _extract(element, 'name')
            tagcount = _extract(element, 'count')
            seq.append(TopItem(Tag(tag_name, self.network), tagcount))
        return seq
class WSError(Exception):
    """Raised when the Network web service answers with a non-ok status.

    Carries the originating network, the numeric status code, and the
    human-readable details reported by the service.
    """
    def __init__(self, network, status, details):
        self.network = network
        self.status = status
        self.details = details
    @_string_output
    def __str__(self):
        # The service-provided message is the most useful representation.
        return self.details
    def get_id(self):
        """Returns the exception ID, from one of the following:
            STATUS_INVALID_SERVICE = 2
            STATUS_INVALID_METHOD = 3
            STATUS_AUTH_FAILED = 4
            STATUS_INVALID_FORMAT = 5
            STATUS_INVALID_PARAMS = 6
            STATUS_INVALID_RESOURCE = 7
            STATUS_TOKEN_ERROR = 8
            STATUS_INVALID_SK = 9
            STATUS_INVALID_API_KEY = 10
            STATUS_OFFLINE = 11
            STATUS_SUBSCRIBERS_ONLY = 12
            STATUS_TOKEN_UNAUTHORIZED = 14
            STATUS_TOKEN_EXPIRED = 15
        """
        return self.status
class Album(_BaseObject, _Taggable):
    """An album."""
    title = None
    artist = None
    def __init__(self, artist, title, network):
        """
        Create an album instance.
        # Parameters:
            * artist: An artist name or an Artist object.
            * title: The album title.
        """
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'album')
        if isinstance(artist, Artist):
            self.artist = artist
        else:
            self.artist = Artist(artist, self.network)
        self.title = title
    @_string_output
    def __repr__(self):
        return u"%s - %s" %(self.get_artist().get_name(), self.get_title())
    def __eq__(self, other):
        # Case-insensitive comparison on both title and artist name.
        return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
    def __ne__(self, other):
        return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
    def _get_params(self):
        # Identifying parameters sent with every album.* web-service call.
        return {'artist': self.get_artist().get_name(), 'album': self.get_title(), }
    def get_artist(self):
        """Returns the associated Artist object."""
        return self.artist
    def get_title(self):
        """Returns the album title."""
        return self.title
    def get_name(self):
        """Returns the album title (alias to Album.get_title)."""
        return self.get_title()
    def get_release_date(self):
        """Returns the release date of the album."""
        return _extract(self._request("album.getInfo", cacheable = True), "releasedate")
    def get_release_year(self):
        """Returns the release year of the album as a string."""
        # The service formats the date like "05 Apr 2009, 00:00".
        dt = datetime.datetime.strptime(self.get_release_date(), "%d %b %Y, %H:%M")
        return str(dt.year)
    def get_cover_image(self, size = COVER_EXTRA_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        return _extract_all(self._request("album.getInfo", cacheable = True), 'image')[size]
    def get_id(self):
        """Returns the ID"""
        return _extract(self._request("album.getInfo", cacheable = True), "id")
    def get_playcount(self):
        """Returns the number of plays on the network"""
        return _number(_extract(self._request("album.getInfo", cacheable = True), "playcount"))
    def get_listener_count(self):
        """Returns the number of listeners on the network"""
        return _number(_extract(self._request("album.getInfo", cacheable = True), "listeners"))
    def get_top_tags(self, limit=None):
        """Returns a list of the most-applied tags to this album.
        * limit: maximum number of tags to return; None means all.
        """
        doc = self._request("album.getInfo", True)
        e = doc.getElementsByTagName("toptags")[0]
        seq = []
        for name in _extract_all(e, "name"):
            # BUG FIX: the old test "len(seq) < limit" was never true when
            # limit was None (int/None comparison), so the default call
            # always returned an empty list.
            if limit is None or len(seq) < limit:
                seq.append(Tag(name, self.network))
        return seq
    def get_tracks(self):
        """Returns the list of Tracks on this album."""
        uri = 'lastfm://playlist/album/%s' %self.get_id()
        return XSPF(uri, self.network).get_tracks()
    def get_mbid(self):
        """Returns the MusicBrainz id of the album."""
        return _extract(self._request("album.getInfo", cacheable = True), "mbid")
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the album page on the network.
        # Parameters:
            * domain_name str: The network's language domain, one of the
              DOMAIN_* constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, ...).
        """
        artist = _url_safe(self.get_artist().get_name())
        album = _url_safe(self.get_title())
        return self.network._get_url(domain_name, "album") %{'artist': artist, 'album': album}
    def _get_wiki(self, section):
        """Fetches one sub-element of the album's wiki node, or None when
        the album has no wiki. Shared by the get_wiki_* methods."""
        doc = self._request("album.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, section)
    def get_wiki_published_date(self):
        """Returns the date of publishing this version of the wiki."""
        return self._get_wiki("published")
    def get_wiki_summary(self):
        """Returns the summary of the wiki."""
        return self._get_wiki("summary")
    def get_wiki_content(self):
        """Returns the content of the wiki."""
        return self._get_wiki("content")
class Artist(_BaseObject, _Taggable):
    """An artist."""
    name = None
    def __init__(self, name, network):
        """Create an artist object.
        # Parameters:
            * name str: The artist's name.
        """
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'artist')
        self.name = name
    @_string_output
    def __repr__(self):
        return self.get_name()
    def __eq__(self, other):
        # Artists compare equal by case-insensitive name.
        return self.get_name().lower() == other.get_name().lower()
    def __ne__(self, other):
        return self.get_name().lower() != other.get_name().lower()
    def _get_params(self):
        # Identifying parameter sent with every artist.* web-service call.
        return {'artist': self.get_name()}
    def get_name(self):
        """Returns the name of the artist."""
        return self.name
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        return _extract_all(self._request("artist.getInfo", True), "image")[size]
    def get_playcount(self):
        """Returns the number of plays on the network."""
        return _number(_extract(self._request("artist.getInfo", True), "playcount"))
    def get_mbid(self):
        """Returns the MusicBrainz ID of this artist."""
        doc = self._request("artist.getInfo", True)
        return _extract(doc, "mbid")
    def get_listener_count(self):
        """Returns the number of listeners on the network."""
        return _number(_extract(self._request("artist.getInfo", True), "listeners"))
    def is_streamable(self):
        """Returns True if the artist is streamable."""
        return bool(_number(_extract(self._request("artist.getInfo", True), "streamable")))
    def get_bio_published_date(self):
        """Returns the date on which the artist's biography was published."""
        return _extract(self._request("artist.getInfo", True), "published")
    def get_bio_summary(self):
        """Returns the summary of the artist's biography."""
        return _extract(self._request("artist.getInfo", True), "summary")
    def get_bio_content(self):
        """Returns the content of the artist's biography."""
        return _extract(self._request("artist.getInfo", True), "content")
    def get_upcoming_events(self):
        """Returns a list of the upcoming Events for this artist."""
        doc = self._request('artist.getEvents', True)
        ids = _extract_all(doc, 'id')
        events = []
        for e_id in ids:
            events.append(Event(e_id, self.network))
        return events
    def get_similar(self, limit = None):
        """Returns the similar artists on the network as
        SimilarItem(Artist, match) tuples.
        * limit: maximum number of artists to ask the service for.
        """
        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)
        doc = self._request('artist.getSimilar', True, params)
        # names and matches are parallel lists extracted from the response.
        names = _extract_all(doc, "name")
        matches = _extract_all(doc, "match")
        artists = []
        for i in range(0, len(names)):
            artists.append(SimilarItem(Artist(names[i], self.network), _number(matches[i])))
        return artists
    def get_top_albums(self):
        """Returns a list of the top albums as TopItem(Album, playcount) tuples."""
        doc = self._request('artist.getTopAlbums', True)
        seq = []
        for node in doc.getElementsByTagName("album"):
            name = _extract(node, "name")
            # The second <name> inside each <album> node is the artist name.
            artist = _extract(node, "name", 1)
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Album(artist, name, self.network), playcount))
        return seq
    def get_top_tracks(self):
        """Returns a list of the most played Tracks by this artist."""
        doc = self._request("artist.getTopTracks", True)
        seq = []
        for track in doc.getElementsByTagName('track'):
            title = _extract(track, "name")
            artist = _extract(track, "name", 1)
            playcount = _number(_extract(track, "playcount"))
            seq.append( TopItem(Track(artist, title, self.network), playcount) )
        return seq
    def get_top_fans(self, limit = None):
        """Returns a list of the Users who played this artist the most.
        # Parameters:
            * limit int: Max elements.
        """
        doc = self._request('artist.getTopFans', True)
        seq = []
        elements = doc.getElementsByTagName('user')
        for element in elements:
            if limit and len(seq) >= limit:
                break
            name = _extract(element, 'name')
            weight = _number(_extract(element, 'weight'))
            seq.append(TopItem(User(name, self.network), weight))
        return seq
    def share(self, users, message = None):
        """Shares this artist (sends out recommendations).
        # Parameters:
            * users [User|str,]: A list that can contain usernames, emails, User objects, or all of them.
            * message str: A message to include in the recommendation message.
        """
        #last.fm currently accepts a max of 10 recipient at a time
        # NOTE(review): users[0:9] batches nine recipients at a time, not
        # ten — no recipient is lost, but this looks like it was meant to
        # be users[0:10] / users[10:]. Confirm intent before changing.
        while(len(users) > 10):
            section = users[0:9]
            users = users[9:]
            self.share(section, message)
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)
        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)
        self._request('artist.share', False, params)
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the artist page on the network.
        # Parameters:
            * domain_name: The network's language domain, one of the
              DOMAIN_* constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, ...).
        """
        artist = _url_safe(self.get_name())
        return self.network._get_url(domain_name, "artist") %{'artist': artist}
    def get_images(self, order=IMAGES_ORDER_POPULARITY, limit=None):
        """
        Returns a sequence of Image objects
        if limit is None it will return all
        order can be IMAGES_ORDER_POPULARITY or IMAGES_ORDER_DATE
        """
        images = []
        params = self._get_params()
        params["order"] = order
        nodes = _collect_nodes(limit, self, "artist.getImages", True, params)
        for e in nodes:
            if _extract(e, "name"):
                user = User(_extract(e, "name"), self.network)
            else:
                # Uploader is anonymous / not reported.
                user = None
            images.append(Image(
                _extract(e, "title"),
                _extract(e, "url"),
                _extract(e, "dateadded"),
                _extract(e, "format"),
                user,
                ImageSizes(*_extract_all(e, "size")),
                (_extract(e, "thumbsup"), _extract(e, "thumbsdown"))
                )
            )
        return images
    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "artist.getShouts", False):
            shouts.append(Shout(
                _extract(node, "body"),
                User(_extract(node, "author"), self.network),
                _extract(node, "date")
                )
            )
        return shouts
    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message
        # NOTE(review): the mixed-case method name "artist.Shout" stands
        # out next to the other all-lowercase names — confirm against the
        # web-service API ("artist.shout").
        self._request("artist.Shout", False, params)
class Event(_BaseObject):
    """An event."""
    # Network-side numeric event id, stored as a unicode string.
    id = None
    def __init__(self, event_id, network):
        _BaseObject.__init__(self, network)
        self.id = _unicode(event_id)
    @_string_output
    def __repr__(self):
        return "Event #" + self.get_id()
    def __eq__(self, other):
        # Events are identified purely by their network id.
        return self.get_id() == other.get_id()
    def __ne__(self, other):
        return self.get_id() != other.get_id()
    def _get_params(self):
        # Identifying parameter sent with every event.* web-service call.
        return {'event': self.get_id()}
    def attend(self, attending_status):
        """Sets the attending status.
        * attending_status: The attending status. Possible values:
            o EVENT_ATTENDING
            o EVENT_MAYBE_ATTENDING
            o EVENT_NOT_ATTENDING
        """
        params = self._get_params()
        params['status'] = _unicode(attending_status)
        self._request('event.attend', False, params)
    def get_attendees(self):
        """
        Get a list of attendees (User objects) for an event
        """
        doc = self._request("event.getAttendees", False)
        users = []
        for name in _extract_all(doc, "name"):
            users.append(User(name, self.network))
        return users
    def get_id(self):
        """Returns the id of the event on the network. """
        return self.id
    def get_title(self):
        """Returns the title of the event. """
        doc = self._request("event.getInfo", True)
        return _extract(doc, "title")
    def get_headliner(self):
        """Returns the headliner of the event as an Artist. """
        doc = self._request("event.getInfo", True)
        return Artist(_extract(doc, "headliner"), self.network)
    def get_artists(self):
        """Returns a list of the participating Artists. """
        doc = self._request("event.getInfo", True)
        names = _extract_all(doc, "artist")
        artists = []
        for name in names:
            artists.append(Artist(name, self.network))
        return artists
    def get_venue(self):
        """Returns the venue where the event is held."""
        doc = self._request("event.getInfo", True)
        v = doc.getElementsByTagName("venue")[0]
        venue_id = _number(_extract(v, "id"))
        return Venue(venue_id, self.network)
    def get_start_date(self):
        """Returns the date when the event starts."""
        doc = self._request("event.getInfo", True)
        return _extract(doc, "startDate")
    def get_description(self):
        """Returns the description of the event. """
        doc = self._request("event.getInfo", True)
        return _extract(doc, "description")
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        doc = self._request("event.getInfo", True)
        return _extract_all(doc, "image")[size]
    def get_attendance_count(self):
        """Returns the number of attending people. """
        doc = self._request("event.getInfo", True)
        return _number(_extract(doc, "attendance"))
    def get_review_count(self):
        """Returns the number of available reviews for this event. """
        doc = self._request("event.getInfo", True)
        return _number(_extract(doc, "reviews"))
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the event page on the network.
        * domain_name: The network's language domain, one of the
          DOMAIN_* constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, ...).
        """
        return self.network._get_url(domain_name, "event") %{'id': self.get_id()}
    def share(self, users, message = None):
        """Shares this event (sends out recommendations).
        * users: A list that can contain usernames, emails, User objects, or all of them.
        * message: A message to include in the recommendation message.
        """
        #last.fm currently accepts a max of 10 recipient at a time
        # NOTE(review): users[0:9] batches nine recipients, not ten — no
        # recipient is lost, but this mirrors Artist.share and was likely
        # meant to be users[0:10] / users[10:]. Confirm before changing.
        while(len(users) > 10):
            section = users[0:9]
            users = users[9:]
            self.share(section, message)
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)
        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)
        self._request('event.share', False, params)
    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "event.getShouts", False):
            shouts.append(Shout(
                _extract(node, "body"),
                User(_extract(node, "author"), self.network),
                _extract(node, "date")
                )
            )
        return shouts
    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message
        # NOTE(review): "event.Shout" casing — confirm against the
        # web-service API ("event.shout").
        self._request("event.Shout", False, params)
class Country(_BaseObject):
    """A country at Last.fm."""
    name = None
    def __init__(self, name, network):
        _BaseObject.__init__(self, network)
        self.name = name
    @_string_output
    def __repr__(self):
        return self.get_name()
    def __eq__(self, other):
        # Countries compare equal by case-insensitive name.
        return self.get_name().lower() == other.get_name().lower()
    def __ne__(self, other):
        # BUG FIX: this used a case-sensitive comparison while __eq__ is
        # case-insensitive, so e.g. "France" and "france" were both ==
        # and != each other. Defined as the negation of __eq__ instead.
        return not self.__eq__(other)
    def _get_params(self):
        # Identifying parameter sent with every geo.* web-service call.
        return {'country': self.get_name()}
    def _get_name_from_code(self, alpha2code):
        # TODO: Have this function lookup the alpha-2 code and return the country name.
        return alpha2code
    def get_name(self):
        """Returns the country name. """
        return self.name
    def get_top_artists(self):
        """Returns a sequence of the most played artists as
        TopItem(Artist, playcount) tuples."""
        doc = self._request('geo.getTopArtists', True)
        seq = []
        for node in doc.getElementsByTagName("artist"):
            name = _extract(node, 'name')
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Artist(name, self.network), playcount))
        return seq
    def get_top_tracks(self):
        """Returns a sequence of the most played tracks as
        TopItem(Track, playcount) tuples."""
        doc = self._request("geo.getTopTracks", True)
        seq = []
        for n in doc.getElementsByTagName('track'):
            title = _extract(n, 'name')
            # The second <name> inside each <track> node is the artist name.
            artist = _extract(n, 'name', 1)
            playcount = _number(_extract(n, "playcount"))
            seq.append( TopItem(Track(artist, title, self.network), playcount))
        return seq
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the country page on the network.
        * domain_name: The network's language domain, one of the
          DOMAIN_* constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, ...).
        """
        country_name = _url_safe(self.get_name())
        return self.network._get_url(domain_name, "country") %{'country_name': country_name}
class Library(_BaseObject):
    """A user's Last.fm library."""
    user = None
    def __init__(self, user, network):
        _BaseObject.__init__(self, network)
        if isinstance(user, User):
            self.user = user
        else:
            # Accept a plain username and wrap it.
            self.user = User(user, self.network)
        # Pagination cursors (not used by the getters below).
        self._albums_index = 0
        self._artists_index = 0
        self._tracks_index = 0
    @_string_output
    def __repr__(self):
        return repr(self.get_user()) + "'s Library"
    def _get_params(self):
        # Identifying parameter sent with every library.* web-service call.
        return {'user': self.user.get_name()}
    def get_user(self):
        """Returns the user who owns this library."""
        return self.user
    def add_album(self, album):
        """Add an album to this library.
        * album: an Album object.
        """
        params = self._get_params()
        # BUG FIX: get_artist is a method and was referenced without being
        # called ("album.get_artist.get_name()"), which raised
        # AttributeError on every call.
        params["artist"] = album.get_artist().get_name()
        params["album"] = album.get_name()
        self._request("library.addAlbum", False, params)
    def add_artist(self, artist):
        """Add an artist to this library.
        * artist: an Artist object.
        """
        params = self._get_params()
        params["artist"] = artist.get_name()
        self._request("library.addArtist", False, params)
    def add_track(self, track):
        """Add a track to this library.
        * track: a Track object.
        """
        params = self._get_params()
        params["track"] = track.get_title()
        self._request("library.addTrack", False, params)
    def get_albums(self, limit=50):
        """
        Returns a sequence of LibraryItem(Album, playcount, tagcount) tuples.
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getAlbums", True):
            name = _extract(node, "name")
            # The second <name> inside each node is the artist name.
            artist = _extract(node, "name", 1)
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Album(artist, name, self.network), playcount, tagcount))
        return seq
    def get_artists(self, limit=50):
        """
        Returns a sequence of LibraryItem(Artist, playcount, tagcount) tuples.
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getArtists", True):
            name = _extract(node, "name")
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Artist(name, self.network), playcount, tagcount))
        return seq
    def get_tracks(self, limit=50):
        """
        Returns a sequence of LibraryItem(Track, playcount, tagcount) tuples.
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getTracks", True):
            name = _extract(node, "name")
            artist = _extract(node, "name", 1)
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Track(artist, name, self.network), playcount, tagcount))
        return seq
class Playlist(_BaseObject):
    """A Last.fm user playlist."""
    id = None
    user = None
    def __init__(self, user, id, network):
        _BaseObject.__init__(self, network)
        if isinstance(user, User):
            self.user = user
        else:
            self.user = User(user, self.network)
        self.id = _unicode(id)
    @_string_output
    def __repr__(self):
        return repr(self.user) + "'s playlist # " + repr(self.id)
    def _get_info_node(self):
        """Returns the node from user.getPlaylists where this playlist's info is."""
        doc = self._request("user.getPlaylists", True)
        for node in doc.getElementsByTagName("playlist"):
            if _extract(node, "id") == str(self.get_id()):
                return node
    def _get_params(self):
        # Identifying parameters sent with every playlist.* web-service call.
        return {'user': self.user.get_name(), 'playlistID': self.get_id()}
    def get_id(self):
        """Returns the playlist id."""
        return self.id
    def get_user(self):
        """Returns the owner user of this playlist."""
        return self.user
    def get_tracks(self):
        """Returns a list of the tracks on this user playlist."""
        uri = u'lastfm://playlist/%s' %self.get_id()
        return XSPF(uri, self.network).get_tracks()
    def add_track(self, track):
        """Adds a Track to this Playlist."""
        params = self._get_params()
        params['artist'] = track.get_artist().get_name()
        params['track'] = track.get_title()
        self._request('playlist.addTrack', False, params)
    def get_title(self):
        """Returns the title of this playlist."""
        return _extract(self._get_info_node(), "title")
    def get_creation_date(self):
        """Returns the creation date of this playlist."""
        return _extract(self._get_info_node(), "date")
    def get_size(self):
        """Returns the number of tracks in this playlist."""
        return _number(_extract(self._get_info_node(), "size"))
    def get_description(self):
        """Returns the description of this playlist."""
        return _extract(self._get_info_node(), "description")
    def get_duration(self):
        """Returns the duration of this playlist in milliseconds."""
        return _number(_extract(self._get_info_node(), "duration"))
    def is_streamable(self):
        """Returns True if the playlist is streamable.
        For a playlist to be streamable, it needs at least 45 tracks by 15 different artists."""
        if _extract(self._get_info_node(), "streamable") == '1':
            return True
        else:
            return False
    def has_track(self, track):
        """Checks to see if track is already in the playlist.
        * track: Any Track object.
        """
        return track in self.get_tracks()
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        # BUG FIX: _extract returns a single string, so indexing it with
        # [size] yielded one character of the url. _extract_all returns
        # the list of <image> urls, matching Album/Artist/Event's
        # get_cover_image implementations.
        return _extract_all(self._get_info_node(), "image")[size]
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the playlist on the network.
        * domain_name: The network's language domain, one of the
          DOMAIN_* constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, ...).
        """
        english_url = _extract(self._get_info_node(), "url")
        appendix = english_url[english_url.rfind("/") + 1:]
        return self.network._get_url(domain_name, "playlist") %{'appendix': appendix, "user": self.get_user().get_name()}
class Tag(_BaseObject):
"""A Last.fm object tag."""
# TODO: getWeeklyArtistChart (too lazy, i'll wait for when someone requests it)
name = None
def __init__(self, name, network):
_BaseObject.__init__(self, network)
self.name = name
def _get_params(self):
return {'tag': self.get_name()}
@_string_output
def __repr__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def get_name(self):
"""Returns the name of the tag. """
return self.name
def get_similar(self):
"""Returns the tags similar to this one, ordered by similarity. """
doc = self._request('tag.getSimilar', True)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(Tag(name, self.network))
return seq
def get_top_albums(self):
"""Retuns a list of the top albums."""
doc = self._request('tag.getTopAlbums', True)
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a list of the most played Tracks by this artist."""
doc = self._request("tag.getTopTracks", True)
seq = []
for track in doc.getElementsByTagName('track'):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
playcount = _number(_extract(track, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount) )
return seq
def get_top_artists(self):
"""Returns a sequence of the most played artists."""
doc = self._request('tag.getTopArtists', True)
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("tag.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("tag.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "weight"))
seq.append(TopItem(item, weight))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
    """Returns the url of the tag page on the network.

    * domain_name: The network's language domain. Possible values are the
      DOMAIN_* constants: DOMAIN_ENGLISH, DOMAIN_GERMAN, DOMAIN_SPANISH,
      DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH, DOMAIN_PORTUGUESE,
      DOMAIN_SWEDISH, DOMAIN_TURKISH, DOMAIN_RUSSIAN, DOMAIN_JAPANESE,
      DOMAIN_CHINESE.
    """

    name = _url_safe(self.get_name())

    # The tag url template only interpolates the name; the domain is
    # handled inside network._get_url().
    return self.network._get_url(domain_name, "tag") %{'name': name}
class Track(_BaseObject, _Taggable):
    """A Last.fm track."""

    # Cached identity; set in __init__.
    artist = None
    title = None

    def __init__(self, artist, title, network):
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'track')

        # Accept either an Artist object or a plain artist name.
        if isinstance(artist, Artist):
            self.artist = artist
        else:
            self.artist = Artist(artist, self.network)

        self.title = title

    @_string_output
    def __repr__(self):
        return self.get_artist().get_name() + ' - ' + self.get_title()

    def __eq__(self, other):
        # Tracks are equal when both title and artist name match,
        # case-insensitively.
        return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())

    def __ne__(self, other):
        return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())

    def _get_params(self):
        return {'artist': self.get_artist().get_name(), 'track': self.get_title()}

    def get_artist(self):
        """Returns the associated Artist object."""
        return self.artist

    def get_title(self):
        """Returns the track title."""
        return self.title

    def get_name(self):
        """Returns the track title (alias to Track.get_title)."""
        return self.get_title()

    def get_id(self):
        """Returns the track id on the network."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "id")

    def get_duration(self):
        """Returns the track duration."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "duration"))

    def get_mbid(self):
        """Returns the MusicBrainz ID of this track."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "mbid")

    def get_listener_count(self):
        """Returns the listener count."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "listeners"))

    def get_playcount(self):
        """Returns the play count."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "playcount"))

    def is_streamable(self):
        """Returns True if the track is available at Last.fm."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "streamable") == "1"

    def is_fulltrack_available(self):
        """Returns True if the full track is available for streaming."""
        doc = self._request("track.getInfo", True)
        return doc.getElementsByTagName("streamable")[0].getAttribute("fulltrack") == "1"

    def get_album(self):
        """Returns the Album object of this track, or None if the track is
        not associated with an album."""
        doc = self._request("track.getInfo", True)

        albums = doc.getElementsByTagName("album")
        if len(albums) == 0:
            return

        node = albums[0]
        return Album(_extract(node, "artist"), _extract(node, "title"), self.network)

    def get_wiki_published_date(self):
        """Returns the date of publishing this version of the wiki,
        or None when the track has no wiki."""
        doc = self._request("track.getInfo", True)

        if len(doc.getElementsByTagName("wiki")) == 0:
            return

        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "published")

    def get_wiki_summary(self):
        """Returns the summary of the wiki, or None when the track has no wiki."""
        doc = self._request("track.getInfo", True)

        if len(doc.getElementsByTagName("wiki")) == 0:
            return

        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "summary")

    def get_wiki_content(self):
        """Returns the content of the wiki, or None when the track has no wiki."""
        doc = self._request("track.getInfo", True)

        if len(doc.getElementsByTagName("wiki")) == 0:
            return

        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "content")

    def love(self):
        """Adds the track to the user's loved tracks. """
        self._request('track.love')

    def ban(self):
        """Ban this track from ever playing on the radio. """
        self._request('track.ban')

    def get_similar(self):
        """Returns similar tracks for this track on the network, based on
        listening data, as SimilarItem objects."""
        doc = self._request('track.getSimilar', True)

        seq = []
        for node in doc.getElementsByTagName("track"):
            title = _extract(node, 'name')
            artist = _extract(node, 'name', 1)  # second <name> is the artist's
            match = _number(_extract(node, "match"))

            seq.append(SimilarItem(Track(artist, title, self.network), match))

        return seq

    def get_top_fans(self, limit = None):
        """Returns a list of the Users who played this track, as TopItem
        objects weighted by the reported "weight" value.

        * limit: maximum number of fans to return (None for all provided).
        """
        doc = self._request('track.getTopFans', True)

        seq = []
        elements = doc.getElementsByTagName('user')
        for element in elements:
            if limit and len(seq) >= limit:
                break

            name = _extract(element, 'name')
            weight = _number(_extract(element, 'weight'))

            seq.append(TopItem(User(name, self.network), weight))

        return seq

    def share(self, users, message = None):
        """Shares this track (sends out recommendations).

        * users: A list that can contain usernames, emails, User objects, or all of them.
        * message: A message to include in the recommendation message.
        """
        # Last.fm currently accepts a max of 10 recipients at a time, so
        # larger lists are sent recursively in batches.
        # FIX: the original sliced users[0:9]/users[9:], which batches only
        # 9 recipients per request despite the 10-recipient limit.
        while(len(users) > 10):
            section = users[0:10]
            users = users[10:]
            self.share(section, message)

        # Normalize User objects to plain names.
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)

        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)

        self._request('track.share', False, params)

    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the track page on the network.

        * domain_name: The network's language domain. Possible values are
          the DOMAIN_* constants: DOMAIN_ENGLISH, DOMAIN_GERMAN,
          DOMAIN_SPANISH, DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH,
          DOMAIN_PORTUGUESE, DOMAIN_SWEDISH, DOMAIN_TURKISH,
          DOMAIN_RUSSIAN, DOMAIN_JAPANESE, DOMAIN_CHINESE.
        """
        artist = _url_safe(self.get_artist().get_name())
        title = _url_safe(self.get_title())

        return self.network._get_url(domain_name, "track") %{'domain': self.network._get_language_domain(domain_name), 'artist': artist, 'title': title}

    def get_shouts(self, limit=50):
        """Returns a sequence of Shout objects posted on this track's page."""
        shouts = []
        for node in _collect_nodes(limit, self, "track.getShouts", False):
            shouts.append(Shout(
                                _extract(node, "body"),
                                User(_extract(node, "author"), self.network),
                                _extract(node, "date")
                                )
                            )
        return shouts

    def shout(self, message):
        """Post a shout on this track's page."""
        params = self._get_params()
        params["message"] = message

        # NOTE(review): method name casing ("track.Shout") kept as-is from
        # the original — confirm against the web-service before changing.
        self._request("track.Shout", False, params)
class Group(_BaseObject):
    """A Last.fm group."""

    # Group name; set in __init__.
    name = None

    def __init__(self, group_name, network):
        _BaseObject.__init__(self, network)

        self.name = group_name

    @_string_output
    def __repr__(self):
        return self.get_name()

    def __eq__(self, other):
        # Group names compare case-insensitively.
        return self.get_name().lower() == other.get_name().lower()

    def __ne__(self, other):
        # FIX: compare case-insensitively for consistency with __eq__;
        # the original compared case-sensitively, so two groups differing
        # only in case were simultaneously == and !=.
        return self.get_name().lower() != other.get_name().lower()

    def _get_params(self):
        return {'group': self.get_name()}

    def get_name(self):
        """Returns the group name. """
        return self.name

    def get_weekly_chart_dates(self):
        """Returns a list of (from, to) tuples for the available charts."""
        doc = self._request("group.getWeeklyChartList", True)

        seq = []
        for node in doc.getElementsByTagName("chart"):
            seq.append( (node.getAttribute("from"), node.getAttribute("to")) )

        return seq

    def get_weekly_artist_charts(self, from_date = None, to_date = None):
        """Returns the weekly artist charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyArtistChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("artist"):
            item = Artist(_extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_album_charts(self, from_date = None, to_date = None):
        """Returns the weekly album charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyAlbumChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("album"):
            item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_track_charts(self, from_date = None, to_date = None):
        """Returns the weekly track charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyTrackChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("track"):
            item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the group page on the network.

        * domain_name: The network's language domain. Possible values are
          the DOMAIN_* constants: DOMAIN_ENGLISH, DOMAIN_GERMAN,
          DOMAIN_SPANISH, DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH,
          DOMAIN_PORTUGUESE, DOMAIN_SWEDISH, DOMAIN_TURKISH,
          DOMAIN_RUSSIAN, DOMAIN_JAPANESE, DOMAIN_CHINESE.
        """
        name = _url_safe(self.get_name())

        return self.network._get_url(domain_name, "group") %{'name': name}

    def get_members(self, limit=50):
        """Returns a sequence of User objects; all members when limit is None."""
        nodes = _collect_nodes(limit, self, "group.getMembers", False)

        users = []
        for node in nodes:
            users.append(User(_extract(node, "name"), self.network))

        return users
class XSPF(_BaseObject):
    """A Last.fm XSPF playlist."""

    # Playlist URI; set in __init__.
    uri = None

    def __init__(self, uri, network):
        _BaseObject.__init__(self, network)
        self.uri = uri

    @_string_output
    def __repr__(self):
        return self.get_uri()

    def __eq__(self, other):
        return self.get_uri() == other.get_uri()

    def __ne__(self, other):
        return self.get_uri() != other.get_uri()

    def _get_params(self):
        return {'playlistURL': self.get_uri()}

    def get_uri(self):
        """Returns the Last.fm playlist URI. """
        return self.uri

    def get_tracks(self):
        """Returns the tracks on this playlist."""
        doc = self._request('playlist.fetch', True)

        return [Track(_extract(entry, 'creator'), _extract(entry, 'title'), self.network)
                for entry in doc.getElementsByTagName('track')]
class User(_BaseObject):
    """A Last.fm user."""

    # User name; set in __init__.
    name = None

    def __init__(self, user_name, network):
        _BaseObject.__init__(self, network)

        self.name = user_name

        # Page indices used by paginated getters.
        self._past_events_index = 0
        self._recommended_events_index = 0
        self._recommended_artists_index = 0

    @_string_output
    def __repr__(self):
        return self.get_name()

    def __eq__(self, another):
        return self.get_name() == another.get_name()

    def __ne__(self, another):
        return self.get_name() != another.get_name()

    def _get_params(self):
        return {"user": self.get_name()}

    def get_name(self):
        """Returns the user name."""
        return self.name

    def get_upcoming_events(self):
        """Returns all the upcoming events for this user. """
        doc = self._request('user.getEvents', True)

        ids = _extract_all(doc, 'id')
        events = []

        for e_id in ids:
            events.append(Event(e_id, self.network))

        return events

    def get_friends(self, limit = 50):
        """Returns a list of the user's friends. """
        seq = []
        for node in _collect_nodes(limit, self, "user.getFriends", False):
            seq.append(User(_extract(node, "name"), self.network))

        return seq

    def get_loved_tracks(self, limit=50):
        """Returns this user's loved tracks as a sequence of LovedTrack objects
        in reverse order of their timestamp, all the way back to the first track.

        If limit==None, it will try to pull all the available data.

        This method uses caching. Enable caching only if you're pulling a
        large amount of data.

        Use extract_items() with the return of this function to
        get only a sequence of Track objects with no playback dates. """

        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        seq = []
        for track in _collect_nodes(limit, self, "user.getLovedTracks", True, params):

            title = _extract(track, "name")
            artist = _extract(track, "name", 1)  # second <name> is the artist's
            date = _extract(track, "date")
            timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")

            seq.append(LovedTrack(Track(artist, title, self.network), date, timestamp))

        return seq

    def get_neighbours(self, limit = 50):
        """Returns a list of the user's neighbours (users with similar taste)."""
        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        doc = self._request('user.getNeighbours', True, params)

        seq = []
        names = _extract_all(doc, 'name')

        for name in names:
            seq.append(User(name, self.network))

        return seq

    def get_past_events(self, limit=50):
        """Returns a sequence of Event objects; all of them when limit is None."""
        seq = []
        for n in _collect_nodes(limit, self, "user.getPastEvents", False):
            seq.append(Event(_extract(n, "id"), self.network))

        return seq

    def get_playlists(self):
        """Returns a list of Playlists that this user owns."""
        doc = self._request("user.getPlaylists", True)

        playlists = []
        for playlist_id in _extract_all(doc, "id"):
            playlists.append(Playlist(self.get_name(), playlist_id, self.network))

        return playlists

    def get_now_playing(self):
        """Returns the currently playing track, or None if nothing is playing. """

        params = self._get_params()
        params['limit'] = '1'

        doc = self._request('user.getRecentTracks', False, params)

        tracks = doc.getElementsByTagName('track')
        # FIX: guard against an empty listening history; the original
        # indexed [0] unconditionally and raised IndexError for users
        # with no scrobbled tracks.
        if not tracks:
            return None

        e = tracks[0]
        if not e.hasAttribute('nowplaying'):
            return None

        artist = _extract(e, 'artist')
        title = _extract(e, 'name')

        return Track(artist, title, self.network)

    def get_recent_tracks(self, limit = 10):
        """Returns this user's played tracks as a sequence of PlayedTrack objects
        in reverse order of their playtime, all the way back to the first track.

        If limit==None, it will try to pull all the available data.

        This method uses caching. Enable caching only if you're pulling a
        large amount of data.

        Use extract_items() with the return of this function to
        get only a sequence of Track objects with no playback dates. """

        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        seq = []
        for track in _collect_nodes(limit, self, "user.getRecentTracks", True, params):

            if track.hasAttribute('nowplaying'):
                continue    # prevent the now-playing track from sneaking in

            title = _extract(track, "name")
            artist = _extract(track, "artist")
            date = _extract(track, "date")
            timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")

            seq.append(PlayedTrack(Track(artist, title, self.network), date, timestamp))

        return seq

    def get_top_albums(self, period = PERIOD_OVERALL):
        """Returns the top albums played by a user.

        * period: The period of time. Possible values:
          PERIOD_OVERALL, PERIOD_3MONTHS, PERIOD_6MONTHS, PERIOD_12MONTHS.
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopAlbums', True, params)

        seq = []
        for album in doc.getElementsByTagName('album'):
            name = _extract(album, 'name')
            artist = _extract(album, 'name', 1)  # second <name> is the artist's
            playcount = _extract(album, "playcount")

            seq.append(TopItem(Album(artist, name, self.network), playcount))

        return seq

    def get_top_artists(self, period = PERIOD_OVERALL):
        """Returns the top artists played by a user.

        * period: The period of time. Possible values:
          PERIOD_OVERALL, PERIOD_3MONTHS, PERIOD_6MONTHS, PERIOD_12MONTHS.
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopArtists', True, params)

        seq = []
        for node in doc.getElementsByTagName('artist'):
            name = _extract(node, 'name')
            playcount = _extract(node, "playcount")

            seq.append(TopItem(Artist(name, self.network), playcount))

        return seq

    def get_top_tags(self, limit = None):
        """Returns a sequence of the top tags used by this user with their
        counts as TopItem objects.

        * limit: The limit of how many tags to return (None for all).
        """

        doc = self._request("user.getTopTags", True)

        seq = []
        for node in doc.getElementsByTagName("tag"):
            # FIX: the original tested "len(seq) < limit", which is never
            # true in Python 2 when limit is None (int < None is False),
            # so the default call always returned an empty list.
            if limit and len(seq) >= limit:
                break
            seq.append(TopItem(Tag(_extract(node, "name"), self.network), _extract(node, "count")))

        return seq

    def get_top_tracks(self, period = PERIOD_OVERALL):
        """Returns the top tracks played by a user.

        * period: The period of time. Possible values:
          PERIOD_OVERALL, PERIOD_3MONTHS, PERIOD_6MONTHS, PERIOD_12MONTHS.
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopTracks', True, params)

        seq = []
        for track in doc.getElementsByTagName('track'):
            name = _extract(track, 'name')
            artist = _extract(track, 'name', 1)  # second <name> is the artist's
            playcount = _extract(track, "playcount")

            seq.append(TopItem(Track(artist, name, self.network), playcount))

        return seq

    def get_weekly_chart_dates(self):
        """Returns a list of (from, to) tuples for the available charts."""

        doc = self._request("user.getWeeklyChartList", True)

        seq = []
        for node in doc.getElementsByTagName("chart"):
            seq.append( (node.getAttribute("from"), node.getAttribute("to")) )

        return seq

    def get_weekly_artist_charts(self, from_date = None, to_date = None):
        """Returns the weekly artist charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""

        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyArtistChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("artist"):
            item = Artist(_extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_album_charts(self, from_date = None, to_date = None):
        """Returns the weekly album charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""

        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyAlbumChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("album"):
            item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_track_charts(self, from_date = None, to_date = None):
        """Returns the weekly track charts for the week starting from the
        from_date value to the to_date value (latest chart when omitted)."""

        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyTrackChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("track"):
            item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def compare_with_user(self, user, shared_artists_limit = None):
        """Compare this user with another Last.fm user.

        Returns a sequence (tasteometer_score, (shared_artist1, shared_artist2, ...))

        * user: A User object or a username string/unicode object.
        * shared_artists_limit: max number of shared artists to request.
        """

        if isinstance(user, User):
            user = user.get_name()

        params = self._get_params()
        if shared_artists_limit:
            params['limit'] = _unicode(shared_artists_limit)
        params['type1'] = 'user'
        params['type2'] = 'user'
        params['value1'] = self.get_name()
        params['value2'] = user

        doc = self._request('tasteometer.compare', False, params)

        score = _extract(doc, 'score')

        artists = doc.getElementsByTagName('artists')[0]
        shared_artists_names = _extract_all(artists, 'name')

        shared_artists_seq = []
        for name in shared_artists_names:
            shared_artists_seq.append(Artist(name, self.network))

        return (score, shared_artists_seq)

    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the user page on the network.

        * domain_name: The network's language domain. Possible values are
          the DOMAIN_* constants: DOMAIN_ENGLISH, DOMAIN_GERMAN,
          DOMAIN_SPANISH, DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH,
          DOMAIN_PORTUGUESE, DOMAIN_SWEDISH, DOMAIN_TURKISH,
          DOMAIN_RUSSIAN, DOMAIN_JAPANESE, DOMAIN_CHINESE.
        """

        name = _url_safe(self.get_name())

        return self.network._get_url(domain_name, "user") %{'name': name}

    def get_library(self):
        """Returns the associated Library object. """
        return Library(self, self.network)

    def get_shouts(self, limit=50):
        """Returns a sequence of Shout objects posted on this user's page."""
        shouts = []
        for node in _collect_nodes(limit, self, "user.getShouts", False):
            shouts.append(Shout(
                                _extract(node, "body"),
                                User(_extract(node, "author"), self.network),
                                _extract(node, "date")
                                )
                            )
        return shouts

    def shout(self, message):
        """Post a shout on this user's page."""
        params = self._get_params()
        params["message"] = message

        # NOTE(review): method name casing ("user.Shout") kept as-is from
        # the original — confirm against the web-service before changing.
        self._request("user.Shout", False, params)
class AuthenticatedUser(User):
    """The user that the current session is authenticated as."""

    def __init__(self, network):
        # The actual name is resolved lazily by get_name().
        User.__init__(self, "", network)

    def _get_params(self):
        return {"user": self.get_name()}

    def get_name(self):
        """Returns the name of the authenticated user."""
        # Passing an empty user makes the server answer for the
        # authenticated session (hack kept from the original).
        doc = self._request("user.getInfo", True, {"user": ""})
        self.name = _extract(doc, "name")
        return self.name

    def get_id(self):
        """Returns the user id."""
        return _extract(self._request("user.getInfo", True), "id")

    def get_cover_image(self):
        """Returns the user's avatar."""
        return _extract(self._request("user.getInfo", True), "image")

    def get_language(self):
        """Returns the language code of the language used by the user."""
        return _extract(self._request("user.getInfo", True), "lang")

    def get_country(self):
        """Returns the name of the country of the user."""
        return Country(_extract(self._request("user.getInfo", True), "country"), self.network)

    def get_age(self):
        """Returns the user's age."""
        return _number(_extract(self._request("user.getInfo", True), "age"))

    def get_gender(self):
        """Returns the user's gender. Either USER_MALE or USER_FEMALE."""
        value = _extract(self._request("user.getInfo", True), "gender")
        if value == 'm':
            return USER_MALE
        if value == 'f':
            return USER_FEMALE
        return None

    def is_subscriber(self):
        """Returns whether the user is a subscriber or not. True or False."""
        return _extract(self._request("user.getInfo", True), "subscriber") == "1"

    def get_playcount(self):
        """Returns the user's playcount so far."""
        return _number(_extract(self._request("user.getInfo", True), "playcount"))

    def get_recommended_events(self, limit=50):
        """Returns a sequence of recommended Event objects; all when limit is None."""
        return [Event(_extract(node, "id"), self.network)
                for node in _collect_nodes(limit, self, "user.getRecommendedEvents", False)]

    def get_recommended_artists(self, limit=50):
        """Returns a sequence of recommended Artist objects; all when limit is None."""
        return [Artist(_extract(node, "name"), self.network)
                for node in _collect_nodes(limit, self, "user.getRecommendedArtists", False)]
class _Search(_BaseObject):
    """An abstract class. Use one of its derivatives."""

    def __init__(self, ws_prefix, search_terms, network):
        _BaseObject.__init__(self, network)

        self._ws_prefix = ws_prefix        # e.g. "album", "track"
        self.search_terms = search_terms   # dict of query parameters
        self._last_page_index = 0          # pagination cursor

    def _get_params(self):
        # Shallow copy so callers can add paging keys without mutating
        # the stored search terms.
        return dict(self.search_terms)

    def get_total_result_count(self):
        """Returns the total count of all the results."""
        doc = self._request(self._ws_prefix + ".search", True)
        return _extract(doc, "opensearch:totalResults")

    def _retreive_page(self, page_index):
        """Returns the node of matches to be processed."""
        params = self._get_params()
        params["page"] = str(page_index)
        doc = self._request(self._ws_prefix + ".search", True, params)
        return doc.getElementsByTagName(self._ws_prefix + "matches")[0]

    def _retrieve_next_page(self):
        self._last_page_index += 1
        return self._retreive_page(self._last_page_index)
class AlbumSearch(_Search):
    """Search for an album by name."""

    def __init__(self, album_name, network):
        _Search.__init__(self, "album", {"album": album_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Album objects."""
        matches = self._retrieve_next_page()

        return [Album(_extract(node, "artist"), _extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("album")]
class ArtistSearch(_Search):
    """Search for an artist by artist name."""

    def __init__(self, artist_name, network):
        _Search.__init__(self, "artist", {"artist": artist_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Artist objects."""
        matches = self._retrieve_next_page()

        return [Artist(_extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("artist")]
class TagSearch(_Search):
    """Search for a tag by tag name."""

    def __init__(self, tag_name, network):
        _Search.__init__(self, "tag", {"tag": tag_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Tag objects."""
        matches = self._retrieve_next_page()

        return [Tag(_extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("tag")]
class TrackSearch(_Search):
    """Search for a track by track title. To avoid narrowing the results
    down by artist name, pass an empty string as the artist."""

    def __init__(self, artist_name, track_title, network):
        _Search.__init__(self, "track", {"track": track_title, "artist": artist_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Track objects."""
        matches = self._retrieve_next_page()

        return [Track(_extract(node, "artist"), _extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("track")]
class VenueSearch(_Search):
    """Search for a venue by its name. To avoid narrowing the results
    down by country, pass an empty string as the country."""

    def __init__(self, venue_name, country_name, network):
        _Search.__init__(self, "venue", {"venue": venue_name, "country": country_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Venue objects."""
        matches = self._retrieve_next_page()

        return [Venue(_extract(node, "id"), self.network)
                for node in matches.getElementsByTagName("venue")]
class Venue(_BaseObject):
    """A venue where events are held."""

    # TODO: waiting for a venue.getInfo web service to use.

    # Numeric venue id; set in __init__.
    id = None

    def __init__(self, id, network):
        _BaseObject.__init__(self, network)

        self.id = _number(id)

    @_string_output
    def __repr__(self):
        return "Venue #" + str(self.id)

    def __eq__(self, other):
        return self.get_id() == other.get_id()

    def __ne__(self, other):
        # Added for consistency: every other comparable class in this
        # module defines __ne__ alongside __eq__ (required in Python 2).
        return self.get_id() != other.get_id()

    def _get_params(self):
        return {"venue": self.get_id()}

    def get_id(self):
        """Returns the id of the venue."""
        return self.id

    def get_upcoming_events(self):
        """Returns the upcoming events in this venue."""
        doc = self._request("venue.getEvents", True)

        seq = []
        for node in doc.getElementsByTagName("event"):
            seq.append(Event(_extract(node, "id"), self.network))

        return seq

    def get_past_events(self):
        """Returns the past events held in this venue."""
        # FIX: the original requested venue.getEvents here, which returns
        # *upcoming* events; past events come from venue.getPastEvents.
        doc = self._request("venue.getPastEvents", True)

        seq = []
        for node in doc.getElementsByTagName("event"):
            seq.append(Event(_extract(node, "id"), self.network))

        return seq
def md5(text):
    """Returns the md5 hash of a string.

    The input is coerced to a byte string via _string() first, so unicode
    input is hashed as its UTF-8 encoding.
    """

    h = hashlib.md5()
    h.update(_string(text))

    return h.hexdigest()
def _unicode(text):
    # Coerce text to a unicode object (Python 2): unicode passes through
    # unchanged, ints are converted, and byte strings are decoded as UTF-8.
    if type(text) == unicode:
        return text

    if type(text) == int:
        return unicode(text)

    return unicode(text, "utf-8")
def _string(text):
if type(text) == str:
return text
if type(text) == int:
return str(text)
return text.encode("utf-8")
def _collect_nodes(limit, sender, method_name, cacheable, params=None):
    """Collects dom.Node objects from a paginated web-service method,
    returning about as close to *limit* nodes as possible.

    * limit: maximum number of nodes to return; falsy means "all pages".
    * sender: object providing _request() and _get_params().
    * method_name: the ws method to call, e.g. "user.getFriends".
    * cacheable: forwarded to sender._request().
    * params: optional request parameters; defaults to sender's own.
    """

    if not limit: limit = sys.maxint    # effectively unbounded (Python 2)
    if not params: params = sender._get_params()

    nodes = []
    page = 1
    end_of_pages = False

    while len(nodes) < limit and not end_of_pages:
        params["page"] = str(page)
        doc = sender._request(method_name, cacheable, params)

        # The payload element is taken as the root's second child
        # (index 0 is assumed to be a whitespace text node).
        main = doc.documentElement.childNodes[1]

        # The services disagree on the attribute's capitalization.
        if main.hasAttribute("totalPages"):
            total_pages = _number(main.getAttribute("totalPages"))
        elif main.hasAttribute("totalpages"):
            total_pages = _number(main.getAttribute("totalpages"))
        else:
            raise Exception("No total pages attribute")

        # Skip whitespace-only text nodes and stop adding once over limit.
        for node in main.childNodes:
            if not node.nodeType == xml.dom.Node.TEXT_NODE and len(nodes) < limit:
                nodes.append(node)

        if page >= total_pages:
            end_of_pages = True

        page += 1

    return nodes
def _extract(node, name, index = 0):
    """Returns the unescaped, stripped text of the index-th <name> element
    under node, or None when the element is absent or empty."""

    matches = node.getElementsByTagName(name)
    if not len(matches):
        return None

    child = matches[index].firstChild
    if not child:
        return None

    return _unescape_htmlentity(child.data.strip())
def _extract_all(node, name, limit_count = None):
    """Extracts the text of every <name> element under node into a list,
    optionally truncated to limit_count entries."""

    total = len(node.getElementsByTagName(name))
    if limit_count is not None and 0 <= limit_count < total:
        total = limit_count

    return [_extract(node, name, i) for i in range(total)]
def _url_safe(text):
    """Does all kinds of tricks on a text to make it safe to use in a url."""

    if type(text) == unicode:
        text = text.encode('utf-8')    # quote_plus needs a byte string (Python 2)

    # Quoted twice, then lowercased — presumably to match how the site
    # encodes names in page urls; verify before changing.
    return urllib.quote_plus(urllib.quote_plus(text)).lower()
def _number(string):
"""
Extracts an int from a string. Returns a 0 if None or an empty string was passed
"""
if not string:
return 0
elif string == "":
return 0
else:
try:
return int(string)
except ValueError:
return float(string)
def _unescape_htmlentity(string):
    # Replaces every named HTML entity (&amp;, &eacute;, ...) in string
    # with its unicode character equivalent (Python 2: unichr).
    string = _unicode(string)

    mapping = htmlentitydefs.name2codepoint
    for key in mapping:
        string = string.replace("&%s;" %key, unichr(mapping[key]))

    return string
def extract_items(topitems_or_libraryitems):
    """Extracts a sequence of items from a sequence of TopItem or
    LibraryItem objects."""

    return [wrapper.item for wrapper in topitems_or_libraryitems]
class ScrobblingError(Exception):
    """Base class for errors reported by the scrobbling submission server."""

    def __init__(self, message):
        Exception.__init__(self)
        self.message = message

    @_string_output
    def __str__(self):
        return self.message
class BannedClientError(ScrobblingError):
    """Raised when the server reports the client version as banned."""

    def __init__(self):
        ScrobblingError.__init__(self, "This version of the client has been banned")
class BadAuthenticationError(ScrobblingError):
    """Raised when the server rejects the authentication token."""

    def __init__(self):
        ScrobblingError.__init__(self, "Bad authentication token")
class BadTimeError(ScrobblingError):
    """Raised when the submitted timestamp is too far from server time."""

    def __init__(self):
        ScrobblingError.__init__(self, "Time provided is not close enough to current time")
class BadSessionError(ScrobblingError):
    """Raised when the scrobbling session id is no longer valid."""

    def __init__(self):
        ScrobblingError.__init__(self, "Bad session id, consider re-handshaking")
class _ScrobblerRequest(object):
    """A single HTTP request to the scrobbling submission server."""

    def __init__(self, url, params, network, type="POST"):
        self.params = params
        self.type = type
        # Strip the "http:" scheme and split into host + path.
        (self.hostname, self.subdir) = urllib.splithost(url[len("http:"):])
        self.network = network

    def execute(self):
        """Returns a string response of this request."""

        connection = httplib.HTTPConnection(self.hostname)

        data = []
        for name in self.params.keys():
            value = urllib.quote_plus(self.params[name])
            data.append('='.join((name, value)))
        data = "&".join(data)

        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept-Charset": "utf-8",
            "User-Agent": "pylast" + "/" + __version__,
            "HOST": self.hostname
            }

        if self.type == "GET":
            connection.request("GET", self.subdir + "?" + data, headers = headers)
        else:
            connection.request("POST", self.subdir, data, headers)
        response = connection.getresponse().read()

        self._check_response_for_errors(response)

        return response

    def _check_response_for_errors(self, response):
        """When passed a string response it checks for errors, raising
        any exceptions as necessary."""

        lines = response.split("\n")
        status_line = lines[0]

        if status_line == "OK":
            return
        elif status_line == "BANNED":
            raise BannedClientError()
        elif status_line == "BADAUTH":
            raise BadAuthenticationError()
        # FIX: the submissions protocol sends the literal tokens "BADTIME"
        # and "BADSESSION"; the original compared the status line against
        # the exception *class names*, so these errors were never raised.
        elif status_line == "BADTIME":
            raise BadTimeError()
        elif status_line == "BADSESSION":
            raise BadSessionError()
        elif status_line.startswith("FAILED "):
            reason = status_line[status_line.find("FAILED ")+len("FAILED "):]
            raise ScrobblingError(reason)
class Scrobbler(object):
    """A class for scrobbling tracks to Last.fm.

    Implements the client side of the AudioScrobbler 1.2.1 submission
    protocol: an authenticating handshake followed by now-playing and
    scrobble submissions to the URLs the handshake returns.
    """

    # Filled in lazily by _do_handshake(); None until the first handshake.
    session_id = None
    nowplaying_url = None
    submissions_url = None

    def __init__(self, network, client_id, client_version):
        self.client_id = client_id
        self.client_version = client_version
        self.username = network.username
        self.password = network.password_hash
        self.network = network

    def _do_handshake(self):
        """Handshakes with the server and caches the session id plus the
        now-playing and submission URLs from its response."""
        timestamp = str(int(time.time()))
        # Two authentication modes: password-hash, or api-key/session-key.
        # NOTE(review): if neither branch matches, `token` is unbound and
        # the params dict below raises NameError -- confirm callers always
        # supply one complete set of credentials.
        if self.password and self.username:
            # md5 is presumably a module-level helper returning a hex
            # digest (defined elsewhere in this file) -- verify.
            token = md5(self.password + timestamp)
        elif self.network.api_key and self.network.api_secret and self.network.session_key:
            if not self.username:
                self.username = self.network.get_authenticated_user().get_name()
            token = md5(self.network.api_secret + timestamp)
        # Handshake query parameters per protocol 1.2.1 ("hs" = handshake).
        params = {"hs": "true", "p": "1.2.1", "c": self.client_id,
            "v": self.client_version, "u": self.username, "t": timestamp,
            "a": token}
        if self.network.session_key and self.network.api_key:
            params["sk"] = self.network.session_key
            params["api_key"] = self.network.api_key
        server = self.network.submission_server
        response = _ScrobblerRequest(server, params, self.network, "GET").execute().split("\n")
        # Line 0 is the status (already checked by _ScrobblerRequest);
        # lines 1-3 carry the session id and the two submission URLs.
        self.session_id = response[1]
        self.nowplaying_url = response[2]
        self.submissions_url = response[3]

    def _get_session_id(self, new = False):
        """Returns a handshake. If new is true, then it will be requested from the server
        even if one was cached."""
        if not self.session_id or new:
            self._do_handshake()
        return self.session_id

    def report_now_playing(self, artist, title, album = "", duration = "", track_number = "", mbid = ""):
        """Send a "now playing" notification for the given track.

        On a stale session the method re-handshakes and retries itself.
        """
        params = {"s": self._get_session_id(), "a": artist, "t": title,
            "b": album, "l": duration, "n": track_number, "m": mbid}
        try:
            _ScrobblerRequest(self.nowplaying_url, params, self.network).execute()
        except BadSessionError:
            # Session expired: handshake again and retry once more.
            # NOTE(review): recursion is unbounded if the server keeps
            # answering BADSESSION.
            self._do_handshake()
            self.report_now_playing(artist, title, album, duration, track_number, mbid)

    def scrobble(self, artist, title, time_started, source, mode, duration, album="", track_number="", mbid=""):
        """Scrobble a track. parameters:
            artist: Artist name.
            title: Track title.
            time_started: UTC timestamp of when the track started playing.
            source: The source of the track
                SCROBBLE_SOURCE_USER: Chosen by the user (the most common value, unless you have a reason for choosing otherwise, use this).
                SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST: Non-personalised broadcast (e.g. Shoutcast, BBC Radio 1).
                SCROBBLE_SOURCE_PERSONALIZED_BROADCAST: Personalised recommendation except Last.fm (e.g. Pandora, Launchcast).
                SCROBBLE_SOURCE_LASTFM: Last.fm (any mode). In this case, the 5-digit recommendation_key value must be set.
                SCROBBLE_SOURCE_UNKNOWN: Source unknown.
            mode: The submission mode
                SCROBBLE_MODE_PLAYED: The track was played.
                SCROBBLE_MODE_LOVED: The user manually loved the track (implies a listen)
                SCROBBLE_MODE_SKIPPED: The track was skipped (Only if source was Last.fm)
                SCROBBLE_MODE_BANNED: The track was banned (Only if source was Last.fm)
            duration: Track duration in seconds.
            album: The album name.
            track_number: The track number on the album.
            mbid: MusicBrainz ID.
        """
        # The "[0]" suffixes index the first (and only) track in this
        # batch; the protocol allows several tracks per submission.
        # NOTE(review): _string is presumably a text-coercion helper
        # defined elsewhere in this module -- verify.
        params = {"s": self._get_session_id(), "a[0]": _string(artist), "t[0]": _string(title),
            "i[0]": str(time_started), "o[0]": source, "r[0]": mode, "l[0]": str(duration),
            "b[0]": _string(album), "n[0]": track_number, "m[0]": mbid}
        _ScrobblerRequest(self.submissions_url, params, self.network).execute()
| Python |
# -*- coding: utf-8 -*-
'''
Created on Feb 27, 2010
@author: ivan
'''
class FConfiguration:
    """Bundle of hard-coded account and Last.fm API settings.

    NOTE(review): live credentials and the API secret are committed to
    source control here; they belong in an untracked local config.
    """

    def __init__(self):
        # Last.fm web-service key pair and account.
        self.API_KEY = "bca6866edc9bdcec8d5e8c32f709bea1"
        self.API_SECRET = "800adaf46e237805a4ec2a81404b3ff2"
        self.lfm_login = "foobnix"
        self.lfm_password = "foobnix"
        # vkontakte account.
        self.vk_login = "qax@bigmir.net"
        self.vk_password = "foobnix"
        # Session cookie; set after the first successful login.
        self.cookie = None
| Python |
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
import os
from google.appengine.ext.webapp import template
class Greeting(db.Model):
    """Datastore entity for one guestbook entry."""
    # Signed-in poster; left unset for anonymous posts.
    author = db.UserProperty()
    # Free-form message text; multiline=True permits embedded newlines.
    content = db.StringProperty(multiline=True)
    # Creation timestamp, auto-populated; used for newest-first ordering.
    date = db.DateTimeProperty(auto_now_add=True)
class MainPage(webapp.RequestHandler):
    """Render the ten most recent greetings plus a login/logout link."""

    def get(self):
        # Newest entries first; show at most ten.
        recent = Greeting.all().order('-date').fetch(10)

        # Offer logout to signed-in users, login to everyone else.
        if users.get_current_user():
            link_url = users.create_logout_url(self.request.uri)
            link_text = 'Logout'
        else:
            link_url = users.create_login_url(self.request.uri)
            link_text = 'Login'

        context = {
            'greetings': recent,
            'url': link_url,
            'url_linktext': link_text,
        }
        page = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(page, context))
class Guestbook(webapp.RequestHandler):
    """Accept a posted greeting, store it, and bounce back to the index."""

    def post(self):
        entry = Greeting()
        current_user = users.get_current_user()
        # Anonymous posts simply leave the author property unset.
        if current_user:
            entry.author = current_user
        entry.content = self.request.get('content')
        entry.put()
        self.redirect('/')
# WSGI routing table: "/" shows the guestbook, "/sign" accepts new posts.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/sign', Guestbook)],
    debug=True)

def main():
    # Hand the application off to App Engine's CGI/WSGI bridge.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
#!/usr/bin/env python
import os, glob, shutil
from distutils.core import setup
from foobnix.util.configuration import VERSION, FOOBNIX_TMP, FOOBNIX_TMP_RADIO
# Ensure both scratch directories exist.  The original only created
# FOOBNIX_TMP_RADIO when FOOBNIX_TMP itself was missing, so an existing
# tmp dir with a deleted radio subdirectory was never repaired.
for _tmp_dir in (FOOBNIX_TMP, FOOBNIX_TMP_RADIO):
    if not os.path.exists(_tmp_dir):
        os.mkdir(_tmp_dir)
def capture(cmd):
    """Run *cmd* in a shell and return its stdout with surrounding
    whitespace stripped."""
    pipe = os.popen(cmd)
    output = pipe.read()
    return output.strip()
def removeall(path):
    """Recursively delete the contents of *path* (files first, then
    emptied subdirectories).  Non-directories are ignored."""
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isfile(child):
            rmgeneric(child, os.remove)
        elif os.path.isdir(child):
            # Empty the subtree first, then drop the directory itself.
            removeall(child)
            rmgeneric(child, os.rmdir)
def rmgeneric(path, __func__):
    """Apply *__func__* (e.g. os.remove or os.rmdir) to *path*,
    swallowing OSError.

    Deletion here is best-effort cleanup during packaging, so failures
    are deliberately ignored.
    """
    try:
        __func__(path)
    except OSError:
        # The original used Python-2-only tuple unpacking
        # (`except OSError, (errno, strerror):`) but never used the
        # bound names; a plain except clause is equivalent and also
        # valid on Python 3.
        pass
# Create mo files:
# Compile each translation catalog (po/<lang>.po) into a binary
# gettext catalog (mo/<lang>/foobnix.mo) via the external msgfmt tool.
if not os.path.exists("mo/"):
    os.mkdir("mo/")
for lang in ('ru', 'uk', 'he'):
    pofile = "po/" + lang + ".po"
    mofile = "mo/" + lang + "/foobnix.mo"
    if not os.path.exists("mo/" + lang + "/"):
        os.mkdir("mo/" + lang + "/")
    print "generating", mofile
    # NOTE(review): msgfmt failures are not checked; a missing msgfmt
    # leaves stale/absent .mo files silently.
    os.system("msgfmt %s -o %s" % (pofile, mofile))
# Copy script "foobnix" file to foobnix dir:
# (installed later via the scripts= argument of setup()).
shutil.copyfile("foobnix.py", "foobnix/foobnix")
# Generate foobnix/version.py so the installed package knows its version.
versionfile = file("foobnix/version.py", "wt")
versionfile.write("""
# generated by setup.py
VERSION = %r
""" % VERSION)
versionfile.close()
# Standard distutils metadata and file manifest.
# NOTE(review): the description mentions MPD, which looks like a
# leftover from the project this setup.py was adapted from -- confirm.
setup(name='foobnix',
      version=VERSION,
      description='GTK+ client for the Music Player Daemon (MPD).',
      author='Ivan Ivanenko',
      author_email='ivan.ivanenko@gmail.com',
      url='www.foobnix.com',
      classifiers=[
          'Development Status :: Beta',
          'Environment :: X11 Applications',
          'Intended Audience :: End Users/Desktop',
          'License :: GNU General Public License (GPL)',
          'Operating System :: Linux',
          'Programming Language :: Python',
          'Topic :: Multimedia :: Sound :: Players',
          ],
      packages=[
          "foobnix",
          "foobnix.application",
          "foobnix.base",
          "foobnix.cue",
          "foobnix.directory",
          "foobnix.glade",
          "foobnix.lyric",
          "foobnix.model",
          "foobnix.online",
          "foobnix.online.google",
          "foobnix.online.integration",
          "foobnix.player",
          "foobnix.playlist",
          "foobnix.preferences",
          "foobnix.radio",
          "foobnix.thirdparty",
          "foobnix.trayicon",
          "foobnix.util",
          "foobnix.window"
          ],
      # Ship the glade UI definitions and images inside the package.
      package_data={'foobnix': ['glade/*.glade', 'glade/*.png']},
      #package_dir={"src/foobnix": "foobnix/"},
      # "foobnix/foobnix" is the copy of foobnix.py made above.
      scripts=['foobnix/foobnix'],
      # Desktop integration, pixmaps, radio presets and the compiled
      # translation catalogs built earlier in this script.
      data_files=[('share/foobnix', ['README']),
                  (FOOBNIX_TMP, ['version']),
                  ('/usr/share/applications', ['foobnix.desktop']),
                  ('/usr/share/pixmaps', glob.glob('foobnix/pixmaps/*')),
                  (FOOBNIX_TMP_RADIO, glob.glob('radio/*')),
                  ('share/man/man1', ['foobnix.1']),
                  ('/usr/share/locale/uk/LC_MESSAGES', ['mo/uk/foobnix.mo']),
                  ('/usr/share/locale/he/LC_MESSAGES', ['mo/he/foobnix.mo']),
                  ('/usr/share/locale/ru/LC_MESSAGES', ['mo/ru/foobnix.mo'])
                  ]
      )
# Cleanup (remove /build, /mo, and *.pyc files:
print "Cleaning up..."
try:
removeall("build/")
os.rmdir("build/")
pass
except:
pass
try:
removeall("mo/")
os.rmdir("mo/")
except:
pass
try:
for f in os.listdir("."):
if os.path.isfile(f):
if os.path.splitext(os.path.basename(f))[1] == ".pyc":
os.remove(f)
except:
pass
try:
os.remove("foobnix/foobnix")
except:
pass
try:
os.remove("foobnix/version.py")
except:
pass
try:
os.remove(os.getenv("HOME") + "/foobnix_conf.pkl")
except:
pass
| Python |
#!/usr/bin/env python
'''
Created on Mar 10, 2010
@author: ivan
'''
import pygst
from foobnix.util import LOG
pygst.require('0.10')
import pygtk
pygtk.require20()
import gtk
import gobject
import gettext
from foobnix.application.app_view import AppView
from foobnix.application.app_controller import AppController
import __main__, os
def is_only_instance():
    """Return True when no other python process is running this script.

    The "[f]oo" grep trick keeps the grep process itself from matching
    its own command line in the `ps` output.
    """
    pattern = "[" + __main__.__file__[0] + "]" + __main__.__file__[1:]
    # BUG FIX: the original wrapped the count in a bash-only `(( ... ))`
    # arithmetic command executed through os.system(), which runs
    # /bin/sh -- on systems where /bin/sh is not bash (e.g. dash) the
    # construct fails outright.  Read and compare the count directly.
    output = os.popen(
        "ps -ef | grep python | grep '" + pattern + "' | wc -l").read()
    return int(output.strip() or 0) <= 1
if __name__ == "__main__":
    LOG.print_debug_info()
    if is_only_instance():
        APP_NAME = "foobnix"
        # Install _() and bind all gettext domains to the app name.
        gettext.install(APP_NAME, unicode=True)
        gettext.textdomain(APP_NAME)
        gtk.glade.textdomain(APP_NAME)
        # BUG FIX: gobject.threads_init() must run before any GTK work
        # is done from threads; the original called it only after the
        # whole UI had been constructed.
        gobject.threads_init() #@UndefinedVariable
        AppController(AppView())
        gtk.main()
        LOG.info(_("Success"))
    else:
        LOG.warn("Other instance of player is already running")
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.