# (dataset extraction artifact removed: markdown table header "code | language")
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
class Gadget(Handler):
    """Serves the XML gadget specification document."""

    def get(self):
        """Renders the gadget spec template with an XML content type."""
        headers = self.response.headers
        headers['Content-Type'] = 'application/xml'
        self.render('templates/gadget.xml')
# Route the /gadget URL to the handler when run as a standalone script.
if __name__ == '__main__':
    run(('/gadget', Gadget))
# ===================== file boundary (dataset separator) =====================
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import *
from utils import *
from text_query import TextQuery
import logging
import prefix
# Upper bound on the number of search results fetched for one query.
MAX_RESULTS = 100

class Results(Handler):
    """Handler for the /results page: runs a person search for the 'seek'
    and 'provide' roles and renders the matches, or hands off to record
    creation when there are none."""

    def search(self, query):
        """Performs a search and adds view_url attributes to the results."""
        results = indexing.search(
            Person.all_in_subdomain(self.subdomain), query, MAX_RESULTS)
        for result in results:
            # Attach a per-record link to the /view page, carrying the current
            # query parameters through so the user can navigate back.
            result.view_url = self.get_url('/view',
                                           id=result.record_id,
                                           role=self.params.role,
                                           query=self.params.query,
                                           first_name=self.params.first_name,
                                           last_name=self.params.last_name)
            result.latest_note_status = get_person_status_text(result)
        return results

    def reject_query(self, query):
        """Bounces the user back to the query page with an error flag set."""
        return self.redirect(
            '/query', role=self.params.role, small=self.params.small,
            style=self.params.style, error='error', query=query.query)

    def get(self):
        """Handles both the 'provide' and 'seek' search flows."""
        # Precompute links used by the results templates: a full (non-"small")
        # results view and a prefilled record-creation page.
        results_url = self.get_url('/results',
                                   small='no',
                                   query=self.params.query,
                                   first_name=self.params.first_name,
                                   last_name=self.params.last_name)
        create_url = self.get_url('/create',
                                  small='no',
                                  role=self.params.role,
                                  first_name=self.params.first_name,
                                  last_name=self.params.last_name)
        min_query_word_length = self.config.min_query_word_length
        if self.params.role == 'provide':
            query = TextQuery(
                self.params.first_name + ' ' + self.params.last_name)
            # Ensure that required parameters are present.
            if not self.params.first_name:
                return self.reject_query(query)
            if self.config.use_family_name and not self.params.last_name:
                return self.reject_query(query)
            # Reject queries that are empty or made only of very short words.
            if (len(query.query_words) == 0 or
                max(map(len, query.query_words)) < min_query_word_length):
                return self.reject_query(query)
            # Look for *similar* names, not prefix matches.
            # Eyalf: we need the full query string
            # for key in criteria:
            #     criteria[key] = criteria[key][:3]  # "similar" = same first 3 letters
            results = self.search(query)
            if results:
                # Perhaps the person you wanted to report has already been
                # reported?
                return self.render('templates/results.html',
                                   results=results, num_results=len(results),
                                   results_url=results_url,
                                   create_url=create_url)
            else:
                if self.params.small:
                    # Embedded (small) view: just
                    # show a link to a create page.
                    create_url = self.get_url(
                        '/create', query=self.params.query)
                    return self.render('templates/small-create.html',
                                       create_url=create_url)
                else:
                    # No matches; proceed to create a new record.
                    logging.info(repr(self.params.__dict__))
                    return self.redirect('/create', **self.params.__dict__)
        if self.params.role == 'seek':
            query = TextQuery(self.params.query)
            # Ensure that required parameters are present.
            if (len(query.query_words) == 0 or
                max(map(len, query.query_words)) < min_query_word_length):
                logging.info('rejecting %s' % query.query)
                return self.reject_query(query)
            # Look for prefix matches.
            results = self.search(query)
            # Show the (possibly empty) matches.
            return self.render('templates/results.html',
                               results=results, num_results=len(results),
                               results_url=results_url, create_url=create_url)
# Route the /results URL to the handler when run as a standalone script.
if __name__ == '__main__':
    run(('/results', Results))
# ===================== file boundary (dataset separator) =====================
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports the URLs of all person entries to a sitemap.xml file."""
__author__ = 'jocatalano@google.com (Joe Catalano) and many other Googlers'
import logging
from datetime import datetime, timedelta
from google.appengine.api import urlfetch
from model import *
from time import *
from utils import *
def _compute_max_shard_index(now, sitemap_epoch, shard_size_seconds):
delta = now - sitemap_epoch
delta_seconds = delta.days * 24 * 60 * 60 + delta.seconds
return delta_seconds / shard_size_seconds
def _get_static_sitemap_info(subdomain):
    """Returns the singleton StaticSiteMapInfo record, creating one (with a
    generation time derived from the oldest-modified Person in 'subdomain')
    if none exists.  Returns None if more than one record exists."""
    infos = StaticSiteMapInfo.all().fetch(2)
    if len(infos) > 1:
        logging.error("There should be at most 1 StaticSiteMapInfo record!")
        return None
    elif len(infos) == 1:
        return infos[0]
    else:
        # Set the sitemap generation time according to the time of the first
        # record with a timestamp.  This will make the other stuff work
        # correctly in case there is no static sitemap.
        query = Person.all_in_subdomain(subdomain)
        query = query.filter('last_modified != ', None)
        first_updated_person = query.order('last_modified').get()
        if not first_updated_person:
            # No records; set the time to now.
            time = get_utcnow()
        else:
            # Set the time to just before the first person was entered.
            time = first_updated_person.last_modified - timedelta(seconds=1)
        info = StaticSiteMapInfo(static_sitemaps_generation_time=time)
        db.put(info)
        return info
class SiteMap(Handler):
    """Serves sitemap XML.  Without a shard_index parameter, emits a sitemap
    index listing all time shards; with one, the sitemap for that shard."""

    # Maximum number of Person entities pulled per datastore query.
    _FETCH_LIMIT = 1000

    def get(self):
        requested_shard_index = self.request.get('shard_index')
        sitemap_info = _get_static_sitemap_info(self.subdomain)
        shard_size_seconds = sitemap_info.shard_size_seconds
        # 'then' is the static sitemap generation time: the epoch from which
        # shard time windows are counted.
        then = sitemap_info.static_sitemaps_generation_time
        if not requested_shard_index:
            # No shard requested: emit the sitemap index covering every shard
            # from the epoch up to now.
            max_shard_index = _compute_max_shard_index(
                get_utcnow(), then, shard_size_seconds)
            shards = []
            for shard_index in range(max_shard_index + 1):
                shard = {}
                shard['index'] = shard_index
                # Each shard's lastmod is the end of its time window.
                offset_seconds = shard_size_seconds * (shard_index + 1)
                shard['lastmod'] = format_sitemaps_datetime(
                    then + timedelta(seconds=offset_seconds))
                shards.append(shard)
            self.render('templates/sitemap-index.xml', shards=shards,
                        static_lastmod=format_sitemaps_datetime(then),
                        static_map_files=sitemap_info.static_sitemaps)
        else:
            shard_index = int(requested_shard_index)
            assert 0 <= shard_index < 50000  #TODO: nicer error (400 maybe)
            persons = []
            # The shard covers the half-open window (time_lower, time_upper].
            time_lower = \
                then + timedelta(seconds=shard_size_seconds * shard_index)
            time_upper = time_lower + timedelta(seconds=shard_size_seconds)
            query = Person.all_in_subdomain(self.subdomain
                ).filter('last_modified >', time_lower
                ).filter('last_modified <=', time_upper
                ).order('last_modified')
            fetched_persons = query.fetch(self._FETCH_LIMIT)
            # Page through results in _FETCH_LIMIT batches, using the last seen
            # last_modified value as the continuation point for the next query.
            while fetched_persons:
                persons.extend(fetched_persons)
                last_value = fetched_persons[-1].last_modified
                query = Person.all_in_subdomain(self.subdomain
                    ).filter('last_modified >', last_value
                    ).filter('last_modified <=', time_upper
                    ).order('last_modified')
                fetched_persons = query.fetch(self._FETCH_LIMIT)
            urlinfos = [
                {'person_record_id': p.record_id,
                 'lastmod': format_sitemaps_datetime(p.last_modified)}
                for p in persons]
            self.render('templates/sitemap.xml', urlinfos=urlinfos)
class SiteMapPing(Handler):
    """Pings the index server with sitemap files that are new since last ping"""

    # Maps the search_engine request parameter to its ping endpoint.
    _INDEXER_MAP = {'google': 'http://www.google.com/webmasters/tools/ping?',
                    'not-specified': ''}

    def get(self):
        search_engine = self.request.get('search_engine')
        if not search_engine:
            search_engine = 'not-specified'
        # Load (or lazily create) the ping status for this search engine so we
        # only ping shards created since the last successful ping.
        last_update_query = SiteMapPingStatus.all()
        last_update_query.filter('search_engine = ', search_engine)
        last_update_status = last_update_query.fetch(1)
        if not last_update_status:
            # Never pinged before: start from shard 0 (last_shard + 1).
            last_shard = -1
            last_update_status = SiteMapPingStatus(search_engine=search_engine)
        else:
            last_update_status = last_update_status[0]
            last_shard = last_update_status.shard_index
        sitemap_info = _get_static_sitemap_info(self.subdomain)
        generation_time = sitemap_info.static_sitemaps_generation_time
        shard_size_seconds = sitemap_info.shard_size_seconds
        max_shard_index = _compute_max_shard_index(
            get_utcnow(), generation_time, shard_size_seconds)
        if not self.ping_indexer(
            last_shard+1, max_shard_index, search_engine, last_update_status):
            self.error(500)

    def ping_indexer(self, start_index, end_index, search_engine, status):
        """Pings the server with sitemap updates; returns True if all succeed"""
        try:
            for shard_index in range(start_index, end_index + 1):
                ping_url = self._INDEXER_MAP[search_engine]
                sitemap_url = 'http://%s/sitemap?shard_index=%s' % (
                    self.env.netloc, shard_index)
                ping_url = ping_url + urlencode({'sitemap': sitemap_url})
                response = urlfetch.fetch(url=ping_url, method=urlfetch.GET)
                if not response.status_code == 200:
                    #TODO(jocatalano): Retry or email haiticrisis on failure.
                    logging.error('Received %d pinging %s',
                                  response.status_code, ping_url)
                    return False
                else:
                    # Record progress so a later failure resumes from here.
                    status.shard_index = shard_index
            return True
        finally:
            # Always update the datastore to record the highest shard that was
            # pinged, particularly when a DeadlineExceededError is thrown.
            db.put(status)
# Route the sitemap URLs to their handlers when run as a standalone script.
if __name__ == '__main__':
    run(('/sitemap', SiteMap), ('/sitemap/ping', SiteMapPing))
# ===================== file boundary (dataset separator) =====================
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic API for reading/writing small numbers of records."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
from datetime import datetime
import atom
import model
import utils
import importer
import pfif
import indexing
from model import Person, Note, Subdomain
from text_query import TextQuery
class Read(utils.Handler):
    """API handler that returns one person record (and its notes) as PFIF."""

    # Require HTTPS so authorization keys are not sent in the clear.
    https_required = True

    def get(self):
        # Enforce the optional read authorization key.
        if self.config.read_auth_key_required and not (
            self.auth and self.auth.read_permission):
            self.response.set_status(403)
            self.write('Missing or invalid authorization key\n')
            return
        pfif_version = pfif.PFIF_VERSIONS.get(
            self.params.version or pfif.PFIF_DEFAULT_VERSION)
        # Note that self.request.get can handle multiple IDs at once; we
        # can consider adding support for multiple records later.
        record_id = self.request.get('id')
        if not record_id:
            return self.error(400, 'Missing id parameter')
        # Fetch even expired records; expiry is conveyed in the PFIF output.
        person = model.Person.get(
            self.subdomain, record_id, filter_expired=False)
        if not person:
            return self.error(404, 'No person record with ID %s' % record_id)
        notes = model.Note.get_by_person_record_id(self.subdomain, record_id)
        self.response.headers['Content-Type'] = 'application/xml'
        records = [pfif_version.person_to_dict(person, person.is_expired)]
        note_records = map(pfif_version.note_to_dict, notes)
        # Strip sensitive fields unless the key grants full read access.
        utils.optionally_filter_sensitive_fields(records, self.auth)
        utils.optionally_filter_sensitive_fields(note_records, self.auth)
        pfif_version.write_file(
            self.response.out, records, lambda p: note_records)
class Write(utils.Handler):
    """API handler that accepts a PFIF XML document and imports its person
    and note records into this subdomain's repository."""

    # Require HTTPS so authorization keys are not sent in the clear.
    https_required = True

    def post(self):
        if not (self.auth and self.auth.domain_write_permission):
            self.response.set_status(403)
            self.write('Missing or invalid authorization key\n')
            return
        # Records may only be written under the domain this key authorizes.
        source_domain = self.auth.domain_write_permission
        try:
            person_records, note_records = \
                pfif.parse_file(self.request.body_file)
        except Exception, e:
            self.response.set_status(400)
            self.write('Invalid XML: %s\n' % e)
            return
        self.response.headers['Content-Type'] = 'application/xml'
        self.write('<?xml version="1.0"?>\n')
        self.write('<status:status>\n')
        # Import person records, then note records, emitting a status section
        # for each batch.
        create_person = importer.create_person
        written, skipped, total = importer.import_records(
            self.subdomain, source_domain, create_person, person_records)
        self.write_status(
            'person', written, skipped, total, 'person_record_id')
        create_note = importer.create_note
        written, skipped, total = importer.import_records(
            self.subdomain, source_domain, create_note, note_records)
        self.write_status(
            'note', written, skipped, total, 'note_record_id')
        self.write('</status:status>\n')

    def write_status(self, type, written, skipped, total, id_field):
        """Emit status information about the results of an attempted write."""
        skipped_records = []
        # 'skipped' is a sequence of (error message, record dict) pairs.
        for error, record in skipped:
            skipped_records.append(
                '      <pfif:%s>%s</pfif:%s>\n' %
                (id_field, record.get(id_field, ''), id_field))
            skipped_records.append(
                '      <status:error>%s</status:error>\n' % error)
        self.write('''
  <status:write>
    <status:record_type>pfif:%s</status:record_type>
    <status:parsed>%d</status:parsed>
    <status:written>%d</status:written>
    <status:skipped>
%s
    </status:skipped>
  </status:write>
''' % (type, total, written, ''.join(skipped_records).rstrip()))
class Search(utils.Handler):
    """API handler for full-text person search, returning PFIF XML."""

    https_required = False

    def get(self):
        # Enforce the optional search authorization key.
        if self.config.search_auth_key_required and not (
            self.auth and self.auth.search_permission):
            return self.error(403, 'Missing or invalid authorization key\n')
        pfif_version = pfif.PFIF_VERSIONS.get(self.params.version or '1.2')
        # Retrieve parameters and do some sanity checks on them.
        query_string = self.request.get("q")
        subdomain = self.request.get("subdomain")
        if not query_string:
            return self.error(400, 'Missing q parameter')
        if not subdomain:
            return self.error(400, 'Missing subdomain parameter')
        # Perform the search.
        results = indexing.search(Person.all_in_subdomain(subdomain),
                                  TextQuery(query_string), 100)
        records = [pfif_version.person_to_dict(result) for result in results]
        utils.optionally_filter_sensitive_fields(records, self.auth)
        # Define the function to retrieve notes for a person.
        def get_notes_for_person(person):
            # NOTE(review): this reads self.subdomain while the search above
            # uses the 'subdomain' request parameter -- confirm these always
            # match, otherwise notes could come from the wrong repository.
            notes = model.Note.get_by_person_record_id(
                self.subdomain, person['person_record_id'])
            records = map(pfif_version.note_to_dict, notes)
            utils.optionally_filter_sensitive_fields(records, self.auth)
            return records
        self.response.headers['Content-Type'] = 'application/xml'
        pfif_version.write_file(
            self.response.out, records, get_notes_for_person)
# Route the API endpoints to their handlers when run as a standalone script.
if __name__ == '__main__':
    utils.run(('/api/read', Read),
              ('/api/write', Write),
              ('/api/search', Search))
# ===================== file boundary (dataset separator) =====================
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Person Finder data model, based on PFIF (http://zesty.ca/pfif)."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import datetime
from google.appengine.api import datastore_errors
from google.appengine.api import memcache
from google.appengine.ext import db
import indexing
import pfif
import prefix
import re
import sys
import utils
# The domain name of this application.  The application hosts multiple
# repositories, each at a subdomain of this domain.
HOME_DOMAIN = 'person-finder.appspot.com'

# ==== PFIF record IDs =====================================================

def is_original(subdomain, record_id):
    """Returns True if this is a record_id for an original record in the given
    subdomain (a record originally created in this subdomain's repository)."""
    # A well-formed record_id is '<domain>/<local id>'.
    if '/' not in record_id:
        raise ValueError('%r is not a valid record_id' % record_id)
    domain = record_id.split('/', 1)[0]
    return domain == '%s.%s' % (subdomain, HOME_DOMAIN)
def is_clone(subdomain, record_id):
    """Returns True if this is a record_id for a clone record (a record created
    in another repository and copied into this one)."""
    # A record is a clone exactly when it is not an original of this
    # subdomain's repository.
    originally_ours = is_original(subdomain, record_id)
    return not originally_ours
def filter_by_prefix(query, key_name_prefix):
    """Filters a query for key_names that have the given prefix.  The results
    are assumed to be top-level entities of the kind being queried; the kind
    is taken from the query itself."""
    # NOTE: relies on the private db.Query attribute _model_class to recover
    # the kind name; there is no public accessor for it in this SDK.
    root_kind = query._model_class.__name__
    # Key names are compared lexicographically, so the range
    # [prefix, prefix + u'\uffff'] spans every key name with that prefix.
    min_key = db.Key.from_path(root_kind, key_name_prefix)
    max_key = db.Key.from_path(root_kind, key_name_prefix + u'\uffff')
    return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key)
def get_properties_as_dict(db_obj):
    """Returns a dictionary of the (dynamic)* properties of db_obj, keyed by
    property name.

    Note: fixed properties whose current value is falsy (None, '', 0, False)
    are intentionally omitted, so the result can be fed back into an entity
    constructor without overriding defaults; dynamic properties are always
    included.
    """
    # .items() (rather than the Python-2-only .iteritems()) keeps this
    # portable with identical behavior.
    properties = dict((k, v.__get__(db_obj, db_obj.__class__)) for
                      k, v in db_obj.properties().items() if
                      v.__get__(db_obj, db_obj.__class__))
    dynamic_properties = dict((prop, getattr(db_obj, prop)) for
                              prop in db_obj.dynamic_properties())
    properties.update(dynamic_properties)
    return properties
def clone_to_new_type(origin, dest_class, **kwargs):
    """Builds a new dest_class entity carrying over all of origin's property
    values, with optional keyword overrides applied during the copy."""
    # Start from a snapshot of origin's properties, then layer the overrides.
    field_values = get_properties_as_dict(origin)
    field_values.update(**kwargs)
    # Preserve the record_id for entity kinds that define one.
    if hasattr(origin, 'record_id'):
        field_values['record_id'] = origin.record_id
    return dest_class(key_name=origin.key().name(), **field_values)
# ==== Model classes =======================================================
# Every Person or Note entity belongs to a specific subdomain. To partition
# the datastore, key names consist of the subdomain, a colon, and then the
# record ID. Each subdomain appears to be a separate instance of the app
# with its own respository.
# Note that the repository subdomain doesn't necessarily have to match the
# domain in the record ID! For example, a person record created at
# foo.person-finder.appspot.com would have a key name such as:
#
# foo:foo.person-finder.appspot.com/person.234
#
# This record would be searchable only at foo.person-finder.appspot.com --
# each repository is independent. Copying it to bar.person-finder.appspot.com
# would produce a clone record with the key name:
#
# bar:foo.person-finder.appspot.com/person.234
#
# That is, the clone has the same record ID but a different subdomain.
class Subdomain(db.Model):
    """A separate grouping of Person and Note records.  This is a top-level
    entity, with no parent, whose existence just indicates the existence of
    a subdomain.  Key name: unique subdomain name.  In the UI, each subdomain
    appears to be an independent instance of the application."""
    # No properties for now; only the key_name is significant.

    @staticmethod
    def list():
        """Returns the names (key names) of every existing subdomain."""
        names = []
        for entity in Subdomain.all():
            names.append(entity.key().name())
        return names
class Base(db.Model):
    """Base class providing methods common to both Person and Note entities,
    whose key names are partitioned using the subdomain as a prefix
    ('subdomain:record_id')."""

    # Even though the subdomain is part of the key_name, it is also stored
    # redundantly as a separate property so it can be indexed and queried upon.
    subdomain = db.StringProperty(required=True)

    # We can't use an inequality filter on expiry_date (together with other
    # inequality filters), so we use a periodic task to set the is_expired flag
    # on expired records, and filter using the flag.  Note that we must provide
    # a default value to ensure that all entities are eligible for filtering.
    # NOTE: is_expired should ONLY be modified in Person.put_expiry_flags().
    is_expired = db.BooleanProperty(required=False, default=False)

    @classmethod
    def all(cls, keys_only=False, filter_expired=True):
        """Returns a query for all records of this kind; by default this
        filters out the records marked as expired.

        Args:
            keys_only - If true, return only the keys.
            filter_expired - If true, omit records with is_expired == True.
        Returns:
            query - A Query object for the results.
        """
        query = super(Base, cls).all(keys_only=keys_only)
        if filter_expired:
            query.filter('is_expired =', False)
        return query

    @classmethod
    def all_in_subdomain(cls, subdomain, filter_expired=True):
        """Gets a query for all entities in a given subdomain's repository."""
        return cls.all(filter_expired=filter_expired).filter(
            'subdomain =', subdomain)

    def get_record_id(self):
        """Returns the record ID of this record."""
        # Key names have the form 'subdomain:record_id'; strip the subdomain.
        subdomain, record_id = self.key().name().split(':', 1)
        return record_id
    record_id = property(get_record_id)

    def get_original_domain(self):
        """Returns the domain name of this record's original repository."""
        return self.record_id.split('/', 1)[0]
    original_domain = property(get_original_domain)

    def is_original(self):
        """Returns True if this record was created in this repository."""
        return is_original(self.subdomain, self.record_id)

    def is_clone(self):
        """Returns True if this record was copied from another repository."""
        return not self.is_original()

    @classmethod
    def get(cls, subdomain, record_id, filter_expired=True):
        """Gets the entity with the given record_id in a given repository.
        Returns None (implicitly) for a missing record, or for an expired
        record when filter_expired is set."""
        record = cls.get_by_key_name(subdomain + ':' + record_id)
        if record:
            if not (filter_expired and record.is_expired):
                return record

    @classmethod
    def create_original(cls, subdomain, **kwargs):
        """Creates a new original entity with the given field values."""
        # Original record IDs are minted under this repository's domain.
        record_id = '%s.%s/%s.%d' % (
            subdomain, HOME_DOMAIN, cls.__name__.lower(), UniqueId.create_id())
        key_name = subdomain + ':' + record_id
        return cls(key_name=key_name, subdomain=subdomain, **kwargs)

    @classmethod
    def create_clone(cls, subdomain, record_id, **kwargs):
        """Creates a new clone entity with the given field values."""
        assert is_clone(subdomain, record_id)
        key_name = subdomain + ':' + record_id
        return cls(key_name=key_name, subdomain=subdomain, **kwargs)

    @classmethod
    def create_original_with_record_id(cls, subdomain, record_id, **kwargs):
        """Creates an original entity with the given record_id and field
        values, overwriting any existing entity with the same record_id.
        This should be rarely used in practice (e.g. for an administrative
        import into a home repository), hence the long method name."""
        key_name = subdomain + ':' + record_id
        return cls(key_name=key_name, subdomain=subdomain, **kwargs)
# All fields are either required, or have a default value.  For property
# types that have a false value, the default is the false value.  For types
# with no false value, the default is None.
class Person(Base):
    """The datastore entity kind for storing a PFIF person record.  Never call
    Person() directly; use Person.create_clone() or Person.create_original().

    Methods that start with "get_" return actual values or lists of values;
    other methods return queries or generators for values.
    """
    # If you add any new fields, be sure they are handled in wipe_contents().

    # entry_date should update every time a record is created or re-imported.
    entry_date = db.DateTimeProperty(required=True)
    expiry_date = db.DateTimeProperty(required=False)
    author_name = db.StringProperty(default='', multiline=True)
    author_email = db.StringProperty(default='')
    author_phone = db.StringProperty(default='')
    # source_date is the original creation time; it should not change.
    source_name = db.StringProperty(default='')
    source_date = db.DateTimeProperty()
    source_url = db.StringProperty(default='')
    full_name = db.StringProperty()
    first_name = db.StringProperty()
    last_name = db.StringProperty()
    sex = db.StringProperty(default='', choices=pfif.PERSON_SEX_VALUES)
    date_of_birth = db.StringProperty(default='')  # YYYY, YYYY-MM, YYYY-MM-DD
    age = db.StringProperty(default='')  # NN or NN-MM
    home_street = db.StringProperty(default='')
    home_neighborhood = db.StringProperty(default='')
    home_city = db.StringProperty(default='')
    home_state = db.StringProperty(default='')
    home_postal_code = db.StringProperty(default='')
    home_country = db.StringProperty(default='')
    photo_url = db.TextProperty(default='')
    other = db.TextProperty(default='')

    # This reference points to a locally stored Photo entity.  ONLY set this
    # property when storing a new Photo object that is owned by this Person
    # record and can be safely deleted when the Person is deleted.
    photo = db.ReferenceProperty(default=None)

    # The following properties are not part of the PFIF data model; they are
    # cached on the Person for efficiency.

    # Value of the 'status' and 'source_date' properties on the Note
    # with the latest source_date with the 'status' field present.
    latest_status = db.StringProperty(default='')
    latest_status_source_date = db.DateTimeProperty()
    # Value of the 'found' and 'source_date' properties on the Note
    # with the latest source_date with the 'found' field present.
    latest_found = db.BooleanProperty()
    latest_found_source_date = db.DateTimeProperty()

    # Last write time of this Person or any Notes on this Person.
    # This reflects any change to the Person page.
    last_modified = db.DateTimeProperty(auto_now=True)

    # attributes used by indexing.py
    names_prefixes = db.StringListProperty()
    _fields_to_index_properties = ['first_name', 'last_name']
    _fields_to_index_by_prefix_properties = ['first_name', 'last_name']

    @staticmethod
    def past_due_records():
        """Returns a query for all Person records with expiry_date in the past,
        regardless of their is_expired flags."""
        return Person.all(filter_expired=False).filter(
            'expiry_date <=', utils.get_utcnow())

    def get_person_record_id(self):
        """Returns this record's PFIF person_record_id."""
        return self.record_id
    person_record_id = property(get_person_record_id)

    def get_notes(self, filter_expired=True):
        """Returns a list of all the Notes on this Person, omitting expired
        Notes by default."""
        return Note.get_by_person_record_id(
            self.subdomain, self.record_id, filter_expired=filter_expired)

    def get_subscriptions(self, subscription_limit=200):
        """Retrieves a list of all the Subscriptions for this Person."""
        return Subscription.get_by_person_record_id(
            self.subdomain, self.record_id, limit=subscription_limit)

    def get_linked_persons(self):
        """Retrieves the Persons linked (as duplicates) to this Person."""
        linked_persons = []
        for note in self.get_notes():
            person = Person.get(self.subdomain, note.linked_person_record_id)
            if person:
                linked_persons.append(person)
        return linked_persons

    def get_associated_emails(self):
        """Gets all the e-mail addresses to notify when significant things
        happen to this Person record."""
        email_addresses = set([note.author_email for note in self.get_notes()])
        email_addresses.add(self.author_email)
        return email_addresses

    def put_expiry_flags(self):
        """Updates the is_expired flags on this Person and related Notes to
        make them consistent with the expiry_date on this Person, and commits
        these changes to the datastore."""
        now = utils.get_utcnow()
        # NOTE(review): when expiry_date is unset, 'expired' is None rather
        # than False, so None != False triggers a write -- confirm intended.
        expired = self.expiry_date and now >= self.expiry_date
        if self.is_expired != expired:
            # NOTE: This should be the ONLY code that modifies is_expired.
            self.is_expired = expired
            # If the record is expiring (being replaced with a placeholder,
            # see http://zesty.ca/pfif/1.3/#data-expiry) or un-expiring (being
            # restored from deletion), we want the source_date and entry_date
            # updated so downstream clients will see this as the newest state.
            self.source_date = now
            self.entry_date = now
            # All the Notes on the Person also expire or unexpire, to match.
            notes = self.get_notes(filter_expired=False)
            for note in notes:
                note.is_expired = expired
            # Store these changes in the datastore.
            db.put(notes + [self])
        # TODO(lschumacher): photos don't have expiration currently.

    def wipe_contents(self):
        """Sets all the content fields to None (leaving timestamps and the
        expiry flag untouched), stores the empty record, and permanently
        deletes any related Notes and Photo.  Call this method ONLY on records
        that have already expired."""
        # We rely on put_expiry_flags to have properly set the source_date,
        # entry_date, and is_expired flags on Notes, as necessary.
        assert self.is_expired
        # Delete all related Notes (they will have is_expired == True by now).
        db.delete(self.get_notes(filter_expired=False))
        if self.photo:
            db.delete(self.photo)  # Delete the locally stored Photo, if any.
        for name, property in self.properties().items():
            # Leave the subdomain, is_expired flag, and timestamps untouched.
            if name not in ['subdomain', 'is_expired',
                            'source_date', 'entry_date', 'expiry_date']:
                setattr(self, name, property.default)
        self.put()  # Store the empty placeholder record.

    def update_from_note(self, note):
        """Updates any necessary fields on the Person to reflect a new Note."""
        # We want to transfer only the *non-empty, newer* values to the Person.
        if note.found is not None:  # for boolean, None means unspecified
            # datetime stupidly refuses to compare to None, so check for None.
            if (self.latest_found_source_date is None or
                note.source_date >= self.latest_found_source_date):
                self.latest_found = note.found
                self.latest_found_source_date = note.source_date
        if note.status:  # for string, '' means unspecified
            if (self.latest_status_source_date is None or
                note.source_date >= self.latest_status_source_date):
                self.latest_status = note.status
                self.latest_status_source_date = note.source_date

    def update_index(self, which_indexing):
        """Refreshes the index properties for the requested scheme(s);
        which_indexing may contain 'new' and/or 'old'."""
        # set up new indexing
        if 'new' in which_indexing:
            indexing.update_index_properties(self)
        # set up old indexing
        if 'old' in which_indexing:
            prefix.update_prefix_properties(self)

# old indexing: precompute prefix-match properties for these Person fields.
prefix.add_prefix_properties(
    Person, 'first_name', 'last_name', 'home_street', 'home_neighborhood',
    'home_city', 'home_state', 'home_postal_code')
class Note(Base):
    """The datastore entity kind for storing a PFIF note record.  Never call
    Note() directly; use Note.create_clone() or Note.create_original()."""

    # Number of Notes fetched per datastore query when iterating.
    FETCH_LIMIT = 200

    # The entry_date should update every time a record is re-imported.
    entry_date = db.DateTimeProperty(required=True)
    person_record_id = db.StringProperty(required=True)
    # Use this field to store the person_record_id of a duplicate Person entry.
    linked_person_record_id = db.StringProperty(default='')
    author_name = db.StringProperty(default='', multiline=True)
    author_email = db.StringProperty(default='')
    author_phone = db.StringProperty(default='')
    # source_date is the original creation time; it should not change.
    source_date = db.DateTimeProperty()
    status = db.StringProperty(default='', choices=pfif.NOTE_STATUS_VALUES)
    found = db.BooleanProperty()
    email_of_found_person = db.StringProperty(default='')
    phone_of_found_person = db.StringProperty(default='')
    last_known_location = db.StringProperty(default='')
    text = db.TextProperty(default='')

    # True if the note has been marked as spam.  Will cause the note to be
    # initially hidden from display upon loading a record page.
    hidden = db.BooleanProperty(default=False)

    def get_note_record_id(self):
        """Returns this record's PFIF note_record_id."""
        return self.record_id
    note_record_id = property(get_note_record_id)

    @staticmethod
    def get_by_person_record_id(
        subdomain, person_record_id, filter_expired=True):
        """Gets a list of all the Notes on a Person, ordered by source_date."""
        return list(Note.generate_by_person_record_id(
            subdomain, person_record_id, filter_expired))

    @staticmethod
    def generate_by_person_record_id(
        subdomain, person_record_id, filter_expired=True):
        """Generates all the Notes on a Person record ordered by source_date."""
        query = Note.all_in_subdomain(subdomain, filter_expired=filter_expired
            ).filter('person_record_id =', person_record_id
            ).order('source_date')
        notes = query.fetch(Note.FETCH_LIMIT)
        # Page through results in FETCH_LIMIT batches using the query cursor,
        # so arbitrarily many notes can be generated.
        while notes:
            for note in notes:
                yield note
            query.with_cursor(query.cursor())  # Continue where fetch left off.
            notes = query.fetch(Note.FETCH_LIMIT)
class Photo(db.Model):
    """An entity kind for storing uploaded photos."""
    # Raw image bytes.
    bin_data = db.BlobProperty()
    # When the photo was stored.
    date = db.DateTimeProperty(auto_now_add=True)

    def get_url(self, handler):
        """Returns an HTTPS URL that serves this photo.

        Fix: db.Model instances have no id() method; the numeric id lives
        on the entity's key, so use self.key().id()."""
        return handler.get_url('/photo', scheme='https',
                               id=str(self.key().id()))
class Authorization(db.Model):
    """An API authorization token.  Key name: subdomain + ':' + auth_key."""
    # Stored redundantly (it is already part of the key_name) so that it can
    # be indexed and queried upon.
    subdomain = db.StringProperty(required=True)
    # Non-empty: the client may write records with this original domain.
    domain_write_permission = db.StringProperty()
    # True: the client may read non-sensitive fields (i.e. filtered by
    # utils.filter_sensitive_fields).
    read_permission = db.BooleanProperty()
    # True: the client may read all fields (not filtered by
    # utils.filter_sensitive_fields).
    full_read_permission = db.BooleanProperty()
    # True: the client may use the search API, returning non-sensitive
    # fields (filtered by utils.filter_sensitive_fields).
    search_permission = db.BooleanProperty()
    # Bookkeeping information for humans, not used programmatically.
    contact_name = db.StringProperty()
    contact_email = db.StringProperty()
    organization_name = db.StringProperty()

    @classmethod
    def get(cls, subdomain, key):
        """Gets the Authorization entity for a subdomain and key."""
        return cls.get_by_key_name('%s:%s' % (subdomain, key))

    @classmethod
    def create(cls, subdomain, key, **kwargs):
        """Creates an Authorization entity for a given subdomain and key."""
        return cls(key_name='%s:%s' % (subdomain, key),
                   subdomain=subdomain, **kwargs)
class Secret(db.Model):
    """A place to store application-level secrets in the database."""
    # Raw bytes of the secret value.
    secret = db.BlobProperty()
class Counter(db.Expando):
    """Holds partial and completed results for ongoing counting tasks.

    See tasks.py for usage.  A single Counter carries several named
    accumulators, stored as dynamic properties named 'count_' + count_name.
    A counting task scans entities in __key__ order, updating accumulators
    and saving partial counts when the request time limit is reached; the
    last scanned key is kept in last_key so the next request can resume the
    scan.  A non-empty last_key means the scan is unfinished; a finished
    scan has last_key set to ''."""
    timestamp = db.DateTimeProperty(auto_now=True)
    scan_name = db.StringProperty()
    subdomain = db.StringProperty()
    last_key = db.StringProperty(default='')  # if non-empty, count is partial

    def get(self, count_name):
        """Returns the named accumulator's value (0 if not present)."""
        return getattr(self, 'count_' + count_name, 0)

    def increment(self, count_name):
        """Adds 1 to the named accumulator, creating it if necessary."""
        attr = 'count_' + count_name
        setattr(self, attr, getattr(self, attr, 0) + 1)

    @classmethod
    def get_count(cls, subdomain, name):
        """Returns the latest finished count for the given subdomain and
        name, where 'name' has the form scan_name + '.' + count_name."""
        scan_name, count_name = name.split('.')
        cache_key = subdomain + ':' + scan_name
        # Get the counts from memcache, loading from datastore if necessary.
        counts = memcache.get(cache_key)
        if not counts:
            counts = {}
            try:
                # Find the latest completed counter with this scan_name.
                query = cls.all().filter('subdomain =', subdomain)
                query = query.filter('scan_name =', scan_name)
                query = query.filter('last_key =', '')
                latest = query.order('-timestamp').get()
            except datastore_errors.NeedIndexError:
                # Absurdly, it can take App Engine up to an hour to build an
                # index for a kind that has zero entities, and during that
                # time all queries fail.  Treat that as "no counter yet".
                latest = None
            if latest:
                for prop in latest.dynamic_properties():
                    if prop.startswith('count_'):
                        counts[prop[6:]] = getattr(latest, prop)
                # Cache the counter's contents in memcache for one minute.
                memcache.set(cache_key, counts, 60)
        # Get the count for the given count_name.
        return counts.get(count_name, 0)

    @classmethod
    def all_finished_counters(cls, subdomain, scan_name):
        """Returns a query for all finished counters of the specified scan."""
        query = cls.all().filter('subdomain =', subdomain)
        query = query.filter('scan_name =', scan_name)
        return query.filter('last_key =', '')

    @classmethod
    def get_unfinished_or_create(cls, subdomain, scan_name):
        """Returns the latest unfinished Counter for the given subdomain and
        scan_name, or a fresh Counter if there is no unfinished one."""
        latest = cls.all().filter('subdomain =', subdomain
                     ).filter('scan_name =', scan_name
                     ).order('-timestamp').get()
        if latest and latest.last_key:
            return latest
        return Counter(subdomain=subdomain, scan_name=scan_name)
class UserActionLog(db.Model):
    """Logs user actions and their reasons."""
    # When the action happened.
    time = db.DateTimeProperty(required=True)
    action = db.StringProperty(
        required=True, choices=['delete', 'restore', 'hide', 'unhide'])
    # Kind and key_name of the entity that was acted upon.
    entity_kind = db.StringProperty(required=True)
    entity_key_name = db.StringProperty(required=True)
    reason = db.StringProperty()  # should be present when action is 'delete'

    @classmethod
    def put_new(cls, action, entity, reason=''):
        """Creates and stores a log entry describing an action on an entity."""
        entry = cls(time=utils.get_utcnow(),
                    action=action,
                    entity_kind=entity.kind(),
                    entity_key_name=entity.key().name(),
                    reason=reason)
        entry.put()
class Subscription(db.Model):
    """Subscription to notifications when a note is added to a person record.
    Key name: subdomain + ':' + person_record_id + ':' + email."""
    subdomain = db.StringProperty(required=True)
    person_record_id = db.StringProperty(required=True)
    email = db.StringProperty(required=True)
    language = db.StringProperty(required=True)
    timestamp = db.DateTimeProperty(auto_now_add=True)

    @staticmethod
    def create(subdomain, record_id, email, language):
        """Creates a new Subscription"""
        return Subscription(
            key_name='%s:%s:%s' % (subdomain, record_id, email),
            subdomain=subdomain,
            person_record_id=record_id,
            email=email,
            language=language)

    @staticmethod
    def get(subdomain, record_id, email):
        """Gets the entity with the given record_id in a given repository."""
        return Subscription.get_by_key_name(
            '%s:%s:%s' % (subdomain, record_id, email))

    @staticmethod
    def get_by_person_record_id(subdomain, person_record_id, limit=200):
        """Retrieve subscriptions for a person record."""
        query = Subscription.all().filter('subdomain =', subdomain)
        return query.filter(
            'person_record_id =', person_record_id).fetch(limit)
class StaticSiteMapInfo(db.Model):
    """Holds static sitemaps file info."""
    # Names of the generated static sitemap files.
    static_sitemaps = db.StringListProperty()
    # When the static sitemaps were generated.
    static_sitemaps_generation_time = db.DateTimeProperty(required=True)
    # Shard size in seconds (presumably the time window each sitemap shard
    # covers -- TODO confirm against the sitemap generator).
    shard_size_seconds = db.IntegerProperty(default=90)
class SiteMapPingStatus(db.Model):
    """Tracks the last shard index that was pinged to the search engine."""
    # Name of the search engine that was pinged.
    search_engine = db.StringProperty(required=True)
    # Last pinged shard index; the default -1 presumably means "none yet".
    shard_index = db.IntegerProperty(default=-1)
class UniqueId(db.Model):
    """This entity is used just to generate unique numeric IDs."""
    @staticmethod
    def create_id():
        """Gets an integer ID that is guaranteed to be different from any ID
        previously returned by this static method."""
        entity = UniqueId()
        entity.put()  # the datastore assigns a fresh numeric id on put()
        return entity.key().id()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for approximate string prefix queries.
A hit is defined when the words entered in the query are all prefixes of one
of the words in the first and last names on the record. For example, a
record with the fields:
first_name: ABC 123
last_name: DEF 456
will be retrieved by:
"ABC 456"
"45 ED"
"123 ABC"
"ABC 123 DEF"
but will not be retrieved by:
"ABC 1234"
"ABC 123 DEF 456 789"
"""
from text_query import TextQuery
from google.appengine.ext import db
import unicodedata
import logging
import re
def update_index_properties(entity):
    """Finds and updates all prefix-related properties on the given entity.

    Fields in entity._fields_to_index_by_prefix_properties are indexed by
    every prefix of each word; the other indexed fields are indexed by
    whole words only."""
    # A set already guarantees uniqueness, so the old membership checks
    # before each add were redundant and have been removed.
    names_prefixes = set()
    # 'field' instead of 'property' to avoid shadowing the builtin.
    for field in entity._fields_to_index_properties:
        for value in TextQuery(getattr(entity, field)).query_words:
            if field in entity._fields_to_index_by_prefix_properties:
                # Index every prefix of the word: 'abc' -> 'a', 'ab', 'abc'.
                for n in xrange(1, len(value) + 1):
                    names_prefixes.add(value[:n])
            else:
                names_prefixes.add(value)
    # Put a cap on the number of tokens, just as a precaution.
    MAX_TOKENS = 100
    entity.names_prefixes = list(names_prefixes)[:MAX_TOKENS]
    if len(names_prefixes) > MAX_TOKENS:
        # Lazy logging args: the join only happens if debug is enabled.
        logging.debug('MAX_TOKENS exceeded for %s', ' '.join(names_prefixes))
class CmpResults():
    """A cmp-style comparison callable that orders search results by how
    well their names match the query: higher rank sorts earlier, and ties
    are broken by normalized full name so duplicates end up adjacent."""
    def __init__(self, query):
        # query is a TextQuery; keep both its ordered form (query.normalized)
        # and an order-insensitive set of its words.
        self.query = query
        self.query_words_set = set(query.words)
    def __call__(self, p1, p2):
        # Records with identical names compare equal (keeps them together).
        if p1.first_name == p2.first_name and p1.last_name == p2.last_name:
            return 0
        self.set_ranking_attr(p1)
        self.set_ranking_attr(p2)
        r1 = self.rank(p1)
        r2 = self.rank(p2)
        if r1 == r2:
            # if rank is the same sort by name so same names will be together
            return cmp(p1._normalized_full_name, p2._normalized_full_name)
        else:
            # Reversed so that the higher rank sorts first.
            return cmp(r2, r1)
    def set_ranking_attr(self, person):
        """Caches normalized-name attributes on the person object.
        (Consider saving these into the db.)"""
        if not hasattr(person, '_normalized_first_name'):
            person._normalized_first_name = TextQuery(person.first_name)
            person._normalized_last_name = TextQuery(person.last_name)
            person._name_words = set(person._normalized_first_name.words +
                                     person._normalized_last_name.words)
            person._normalized_full_name = '%s %s' % (
                person._normalized_first_name.normalized,
                person._normalized_last_name.normalized)
    def rank(self, person):
        """Scores how well this person's name matches the query; 10 is a
        perfect ordered match, descending to at most 5 for partial matches."""
        # The normalized query words, in the order as entered.
        ordered_words = self.query.normalized.split()
        if (ordered_words ==
            person._normalized_first_name.words +
            person._normalized_last_name.words):
            # Matches a Latin name exactly (given name followed by surname).
            return 10
        if (re.match(ur'^[\u3400-\u9fff]$', person.last_name) and
            ordered_words in [
                [person.last_name + person.first_name],
                [person.last_name, person.first_name]
            ]):
            # Matches a CJK name exactly (surname followed by given name).
            return 10
        if (re.match(ur'^[\u3400-\u9fff]+$', person.last_name) and
            ordered_words in [
                [person.last_name + person.first_name],
                [person.last_name, person.first_name]
            ]):
            # Matches a CJK name exactly (surname followed by given name).
            # A multi-character surname is uncommon, so it is ranked a bit lower.
            return 9.5
        if (ordered_words ==
            person._normalized_last_name.words +
            person._normalized_first_name.words):
            # Matches a Latin name with first and last name switched.
            return 9
        if (re.match(ur'^[\u3400-\u9fff]$', person.first_name) and
            ordered_words in [
                [person.first_name + person.last_name],
                [person.first_name, person.last_name]
            ]):
            # Matches a CJK name with surname and given name switched.
            return 9
        if (re.match(ur'^[\u3400-\u9fff]+$', person.first_name) and
            ordered_words in [
                [person.first_name + person.last_name],
                [person.first_name, person.last_name]
            ]):
            # Matches a CJK name with surname and given name switched.
            # A multi-character surname is uncommon, so it's ranked a bit lower.
            return 8.5
        if person._name_words == self.query_words_set:
            # Matches all the words in the first and last name, out of order.
            return 8
        if self.query.normalized in [
            person._normalized_first_name.normalized,
            person._normalized_last_name.normalized,
        ]:
            # Matches the first name exactly or the last name exactly.
            return 7
        if person._name_words.issuperset(self.query_words_set):
            # All words in the query appear somewhere in the name.
            return 6
        # Count the number of words in the query that appear in the name.
        matched_words = person._name_words.intersection(self.query_words_set)
        return min(5, 1 + len(matched_words))
def rank_and_order(results, query, max_results):
    """Sorts results in place by match quality, best first, and returns at
    most max_results of them."""
    comparator = CmpResults(query)
    results.sort(comparator)
    return results[:max_results]
def search(query, query_obj, max_results):
    """Fetches and ranks results for a prefix query.

    query: a datastore query to which name-prefix filters are added.
    query_obj: a TextQuery holding the parsed search words.
    Returns a list of at most max_results results, best matches first."""
    # Over-fetch so the ranking step has more candidates to choose from.
    results_to_fetch = min(max_results * 3, 300)
    query_words = query_obj.query_words
    # Lazy %-args: formatting only happens when debug logging is on.
    logging.debug('query_words: %s', query_words)
    if not query_words:
        return []
    # Filter on longer words first (presumably more selective); the filters
    # are conjunctive, so the order doesn't change the result set.
    for word in reversed(sorted(query_words, key=len)):
        query = query.filter('names_prefixes = ', word)
    res = rank_and_order(query.fetch(results_to_fetch), query_obj, max_results)
    logging.info('n results=%d', len(res))
    return list(res)
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import simplejson
import sys
from model import *
from utils import *
import reveal
class Admin(Handler):
    """Administration page: create subdomains, edit their settings, and
    initiate record deletion."""
    # After a subdomain is deactivated, we still need the admin page to be
    # accessible so we can edit its settings.
    ignore_deactivation = True

    def get(self):
        """Renders the admin page with the current configuration values."""
        user = users.get_current_user()
        simplejson.encoder.FLOAT_REPR = str
        encoder = simplejson.encoder.JSONEncoder(ensure_ascii=False)
        # JSON-encode each setting so it can be edited as text in the form.
        config_json = dict((name, encoder.encode(self.config[name]))
                           for name in self.config.keys())
        self.render('templates/admin.html', user=user,
                    subdomains=Subdomain.all(),
                    config=self.config, config_json=config_json,
                    start_url=self.get_start_url(),
                    login_url=users.create_login_url(self.request.url),
                    logout_url=users.create_logout_url(self.request.url),
                    id=self.env.domain + '/person.')

    def post(self):
        """Handles the admin operations: 'delete' a record, create a new
        subdomain, or save a subdomain's settings."""
        if self.params.operation == 'delete':
            # Redirect to the deletion handler with a valid signature.
            action = ('delete', str(self.params.id))
            self.redirect('/delete', id=self.params.id,
                          signature=reveal.sign(action))
        elif self.params.operation == 'subdomain_create':
            Subdomain(key_name=self.params.subdomain_new).put()
            config.set_for_subdomain(  # Provide some defaults.
                self.params.subdomain_new,
                language_menu_options=['en', 'fr'],
                subdomain_titles={'en': 'Earthquake', 'fr': u'S\xe9isme'},
                keywords='person finder, people finder, person, people, ' +
                    'crisis, survivor, family',
                use_family_name=True,
                use_postal_code=True,
                min_query_word_length=2,
                map_default_zoom=6,
                map_default_center=[0, 0],
                map_size_pixels=[400, 280],
                read_auth_key_required=True,
                search_auth_key_required=True,
                deactivated=False,
                deactivation_message_html='',
                main_page_custom_html='',
                results_page_custom_html='',
                view_page_custom_html='',
            )
            self.redirect('/admin', subdomain=self.params.subdomain_new)
        elif self.params.operation == 'subdomain_save':
            values = {}
            for name in [  # These settings are all entered in JSON.
                'language_menu_options', 'subdomain_titles',
                'use_family_name', 'family_name_first', 'use_postal_code',
                'min_query_word_length', 'map_default_zoom',
                'map_default_center', 'map_size_pixels',
                'read_auth_key_required', 'search_auth_key_required',
                'deactivated'
            ]:
                try:
                    values[name] = simplejson.loads(self.request.get(name))
                except ValueError:
                    # Fix: was a bare 'except:', which also swallowed
                    # SystemExit/KeyboardInterrupt and real bugs;
                    # simplejson.loads signals bad input with ValueError.
                    return self.error(
                        400, 'The setting for %s was not valid JSON.' % name)
            for name in ['keywords',
                         'deactivation_message_html',
                         'main_page_custom_html',
                         'results_page_custom_html',
                         'view_page_custom_html']:
                # These settings are literal strings (not JSON).
                values[name] = self.request.get(name)
            config.set_for_subdomain(self.subdomain, **values)
            self.redirect('/admin', subdomain=self.subdomain)
if __name__ == '__main__':
    # Map the admin page URL to its handler.
    run(('/admin', Admin))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Storage for configuration settings. Settings can be global or specific
to a subdomain, and their values can be of any JSON-encodable type."""
from google.appengine.ext import db
import UserDict, model, random, simplejson
class ConfigEntry(db.Model):
    """An application configuration setting, identified by its key_name."""
    # The setting's value, stored as a JSON-encoded string.
    value = db.StringProperty(default='')
def get(name, default=None):
    """Gets a configuration setting, returning 'default' if it is absent."""
    entry = ConfigEntry.get_by_key_name(name)
    if entry is None:
        return default
    return simplejson.loads(entry.value)
def get_or_generate(name):
    """Gets a configuration setting, or sets it to a random 32-byte value
    encoded in hexadecimal if it doesn't exist.  Use this function when you
    need a persistent cryptographic secret unique to the application."""
    # NOTE(review): 'random' is not a cryptographic RNG; consider os.urandom
    # for new secrets -- behavior left unchanged here.
    byte_strings = ['%02x' % random.randrange(256) for i in range(32)]
    random_hex = ''.join(byte_strings)
    # get_or_insert is atomic: only stores the new value if none exists.
    ConfigEntry.get_or_insert(key_name=name, value=simplejson.dumps(random_hex))
    return get(name)
def set(**kwargs):
    """Sets configuration settings, JSON-encoding each value."""
    entries = [ConfigEntry(key_name=key, value=simplejson.dumps(val))
               for key, val in kwargs.items()]
    db.put(entries)
def get_for_subdomain(subdomain, name, default=None):
    """Gets a configuration setting for a particular subdomain.  Looks for a
    setting specific to the subdomain, then falls back to a global setting."""
    scoped_value = get(subdomain + ':' + name)
    if scoped_value is None:
        return get(name, default)
    return scoped_value
def set_for_subdomain(subdomain, **kwargs):
    """Sets configuration settings for a particular subdomain.  When used
    with get_for_subdomain, has the effect of overriding global settings."""
    subdomain = str(subdomain)  # need an 8-bit string, not Unicode
    prefixed = {}
    for key, value in kwargs.items():
        prefixed[subdomain + ':' + key] = value
    set(**prefixed)
class Configuration(UserDict.DictMixin):
    """Dictionary-like, read-only view of the settings for one subdomain."""
    def __init__(self, subdomain):
        self.subdomain = subdomain

    def __getattr__(self, name):
        # Allow settings to be read as attributes as well as items.
        return self[name]

    def __getitem__(self, name):
        """Gets a configuration setting for this subdomain.  Looks for a
        subdomain-specific setting, then falls back to a global setting."""
        return get_for_subdomain(self.subdomain, name)

    def keys(self):
        """Lists the names of all settings specific to this subdomain."""
        key_prefix = self.subdomain + ':'
        entries = model.filter_by_prefix(ConfigEntry.all(), key_prefix)
        names = []
        for entry in entries:
            # Strip the 'subdomain:' prefix off each key name.
            names.append(entry.key().name().split(':', 1)[1])
        return names
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the unit tests, with stubs for the datastore API.
Instead of running this script directly, use the 'unit_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables."""
import os
import sys
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
import remote_api
# Gather the tests from all the test modules.
loader = unittest.defaultTestLoader
suites = []
for filename in os.listdir(remote_api.TESTS_DIR):
    # Test modules are named test_*.py; strip '.py' to get the module name.
    if filename.startswith('test_') and filename.endswith('.py'):
        module = filename[:-3]
        suites.append(loader.loadTestsFromName(module))
# Create a new apiproxy and temp datastore to use for this test suite
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
temp_db = datastore_file_stub.DatastoreFileStub(
    'PersonFinderUnittestDataStore', None, None, trusted=True)
apiproxy_stub_map.apiproxy.RegisterStub('datastore', temp_db)
# An application id is required to access the datastore, so let's create one
os.environ['APPLICATION_ID'] = 'personfinder-unittest'
# Run the tests.
result = unittest.TextTestRunner().run(unittest.TestSuite(suites))
# Exit status 0 if every test passed, 1 otherwise.
sys.exit(not result.wasSuccessful())
| Python |
# Copyright 2005-2008 Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python module for web browsing and scraping.
Done:
- navigate to absolute and relative URLs
- follow links in page or region
- find strings or regular expressions: first, all, split
- find tags: first, last, next, previous, all, splittag
- find elements: first, last, next, previous, enclosing, all
- set form fields
- submit forms
- strip tags from arbitrary strings of HTML
- support HTTPS
- handle entities > 255 and Unicode documents
- accept and store cookies during redirection
- store and send cookies according to domain and path
To do:
- split by element
- detect ends of elements in most cases even if matching end tags are missing
- make the line breaks in striptags correctly reflect whitespace significance
- handle <![CDATA[ marked sections ]]>
- submit forms with file upload
- use Regions in striptags instead of duplicating work?
- remove dependency on urllib.urlencode
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
__date__ = '$Date: 2008/07/06 11:13:19 $'.split()[1].replace('/', '-')
__version__ = '$Revision: 1.43 $'
from urlparse import urlsplit, urljoin
from htmlentitydefs import name2codepoint
import sys, re
def regex(template, *params, **kwargs):
    """Compile a regular expression template into a pattern object.

    Each placeholder of the form __0__, __1__, __2__, etc. in the template
    is replaced by the corresponding positional parameter.  Pass 'flags' as
    a keyword argument to override the compilation flags; the default is
    re.DOTALL ('.' matches anything, including a newline)."""
    flags = kwargs.get('flags', re.DOTALL)
    substituted = template
    for index, param in enumerate(params):
        substituted = substituted.replace('__%d__' % index, param)
    return re.compile(substituted, flags)
def iregex(template, *params, **kwargs):
    """Like regex(), but case-insensitive: re.IGNORECASE is added to
    whatever flags are given (DOTALL still applies by default)."""
    combined = kwargs.get('flags', 0) | re.IGNORECASE
    kwargs['flags'] = combined
    return regex(template, *params, **kwargs)
class ScrapeError(Exception):
    """Raised when a scraping operation fails (e.g. a link is not found)."""
    pass
def request(scheme, method, host, path, headers, data='', verbose=0):
    """Make an HTTP or HTTPS request; return the entire reply as a string."""
    # Build the raw request; the request line uses HTTP/1.0.
    request = method + ' ' + path + ' HTTP/1.0\r\n'
    for name, value in headers.items():
        # Normalize header capitalization, e.g. 'content-type' -> 'Content-Type'.
        capname = '-'.join([part.capitalize() for part in name.split('-')])
        request += capname + ': ' + str(value) + '\r\n'
    request += '\r\n' + data
    # Strip any userinfo ('user:pass@') and pick the default port by scheme.
    host, port = host.split('@')[-1], [80, 443][scheme == 'https']
    if ':' in host:
        host, port = host.split(':', 1)
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if verbose >= 3:
        print >>sys.stderr, 'connect:', host, port
    sock.connect((host, int(port)))
    # Wrap the socket in SSL for https; otherwise use a file-like wrapper.
    file = scheme == 'https' and socket.ssl(sock) or sock.makefile()
    if verbose >= 3:
        print >>sys.stderr, ('\r\n' + request.rstrip()).replace(
            '\r\n', '\nrequest: ').lstrip()
    file.write(request)
    if hasattr(file, 'flush'):
        file.flush()
    # Read until EOF (an empty chunk); socket errors are treated as EOF.
    chunks = []
    try:
        while not (chunks and len(chunks[-1]) == 0):
            chunks.append(file.read())
    except socket.error:
        pass
    return ''.join(chunks)
def shellquote(text):
    """Quote a string literal for /bin/sh."""
    # Close the quote, emit an escaped literal quote, then reopen.
    escaped = text.replace("'", "'\\''")
    return "'%s'" % escaped
def curl(url, data=None, agent=None, referrer=None, cookies=None, verbose=0):
    """Use curl to make a request; return the entire reply as a string.

    Used as a fallback for HTTPS when the socket module lacks SSL support.
    All argument values are shell-quoted before being put on the command
    line."""
    import os, tempfile
    fd, tempname = tempfile.mkstemp(prefix='scrape')
    # Fix: mkstemp returns an open file descriptor; close it immediately so
    # it isn't leaked (the shell redirection below reopens the file).
    os.close(fd)
    command = 'curl --include --insecure --silent --max-redirs 0'
    if data:
        if not isinstance(data, str): # Unicode not allowed here
            # NOTE(review): 'urlencode' is not defined or imported in this
            # module -- this branch would raise NameError; confirm.
            data = urlencode(data)
        command += ' --data ' + shellquote(data)
    if agent:
        command += ' --user-agent ' + shellquote(agent)
    if referrer:
        command += ' --referer ' + shellquote(referrer)
    if cookies:
        command += ' --cookie ' + shellquote(cookies)
    command += ' ' + shellquote(url)
    if verbose >= 3:
        print >>sys.stderr, 'execute:', command
    os.system(command + ' > ' + tempname)
    # Fix: close the reply file explicitly instead of leaking the handle.
    reply_file = open(tempname)
    try:
        reply = reply_file.read()
    finally:
        reply_file.close()
    os.remove(tempname)
    return reply
def getcookies(cookiejar, host, path):
    """Get a dictionary of the cookies from 'cookiejar' that apply to the
    given request host and request path."""
    matching = {}
    dotted_host = '.' + host
    for cdomain, paths in cookiejar.items():
        # A cookie domain matches if it is a dot-suffix of the host.
        if not dotted_host.endswith(cdomain):
            continue
        for cpath, pairs in paths.items():
            if path.startswith(cpath):
                matching.update(pairs)
    return matching
def setcookies(cookiejar, host, lines):
    """Store cookies in 'cookiejar' according to the given Set-Cookie
    header lines."""
    for line in lines:
        # Split each ';'-separated part into a [name, value] pair, using
        # '' as the value when no '=' is present.
        pairs = []
        for part in line.split(';'):
            fields = part.strip().split('=', 1)
            if len(fields) == 1:
                fields.append('')
            pairs.append(fields)
        name, value = pairs[0]
        attrs = dict(pairs[1:])
        # File the cookie under its domain (default: request host) and
        # path (default: '/').
        domain_map = cookiejar.setdefault(attrs.get('domain', host), {})
        domain_map.setdefault(attrs.get('path', '/'), {})[name] = value
RAW = object() # This sentinel value for 'charset' means "don't decode".
def fetch(url, data='', agent=None, referrer=None, charset=None, verbose=0,
cookiejar={}, type=None):
"""Make an HTTP or HTTPS request. If 'data' is given, do a POST;
otherwise do a GET. If 'agent' and/or 'referrer' are given, include
them as User-Agent and Referer headers in the request, respectively.
'cookiejar' should have the form {domain: {path: {name: value, ...}}};
cookies will be sent from it and received cookies will be stored in it.
Return the 5-element tuple (url, status, message, headers, content)
where 'url' is the final URL retrieved, 'status' is the integer status
code, 'message' is the reply status message, 'headers' is a dictionary of
HTTP headers, and 'content' is a string containing the received content.
For multiple occurrences of the same header, 'headers' will contain a
single key-value pair where the values are joined together with newlines.
If the Content-Type header specifies a 'charset' parameter, 'content'
will be a Unicode string, decoded using the given charset. Giving the
'charset' argument overrides any received 'charset' parameter; a charset
of RAW ensures that the content is left undecoded in an 8-bit string."""
scheme, host, path, query, fragment = urlsplit(url)
host = host.split('@')[-1]
# Prepare the POST data.
method = data and 'POST' or 'GET'
if data and not isinstance(data, str): # Unicode not allowed here
data = urlencode(data)
# Get the cookies to send with this request.
cookieheader = '; '.join([
'%s=%s' % pair for pair in getcookies(cookiejar, host, path).items()])
# Make the HTTP or HTTPS request using Python or cURL.
if verbose:
print >>sys.stderr, 'fetch:', url
import socket
if scheme == 'http' or scheme == 'https' and hasattr(socket, 'ssl'):
if query:
path += '?' + query
headers = {'host': host, 'accept': '*/*'}
if data:
headers['content-type'] = 'application/x-www-form-urlencoded'
headers['content-length'] = len(data)
if agent:
headers['user-agent'] = agent
if referrer:
headers['referer'] = referrer
if cookieheader:
headers['cookie'] = cookieheader
if type:
headers['content-type'] = type
reply = request(scheme, method, host, path, headers, data, verbose)
elif scheme == 'https':
reply = curl(url, data, agent, referrer, cookieheader, verbose)
else:
raise ValueError, scheme + ' not supported'
# Take apart the HTTP reply.
headers, head, content = {}, reply, ''
if '\r\n\r\n' in reply:
head, content = (reply.split('\r\n\r\n', 1) + [''])[:2]
else: # Non-conformant reply. Bummer!
match = re.search('\r?\n[ \t]*\r?\n', reply)
if match:
head, content = head[:match.start()], head[match.end():]
head = head.replace('\r\n', '\n').replace('\r', '\n')
response, head = head.split('\n', 1)
if verbose >= 3:
print >>sys.stderr, 'reply:', response.rstrip()
status = int(response.split()[1])
message = ' '.join(response.split()[2:])
for line in head.split('\n'):
if verbose >= 3:
print >>sys.stderr, 'reply:', line.rstrip()
name, value = line.split(': ', 1)
name = name.lower()
if name in headers:
headers[name] += '\n' + value
else:
headers[name] = value
if verbose >= 2:
print >>sys.stderr, 'content: %d byte%s\n' % (
len(content), content != 1 and 's' or '')
if verbose >= 3:
for line in content.rstrip('\n').split('\n'):
print >>sys.stderr, 'content: ' + repr(line + '\n')
# Store any received cookies.
if 'set-cookie' in headers:
setcookies(cookiejar, host, headers['set-cookie'].split('\n'))
# Handle the 'charset' parameter.
if 'content-type' in headers and not charset:
for param in headers['content-type'].split(';')[1:]:
if param.strip().startswith('charset='):
charset = param.strip()[8:]
break
if charset and charset is not RAW:
content = content.decode(charset)
return url, status, message, headers, content
class Session:
    """A Web-browsing session.  Exposed attributes:

    agent - the User-Agent string (clients can set this attribute)
    url - the last successfully fetched URL
    status - the status code of the last request
    message - the status message of the last request
    headers - the headers of the last request as a dictionary
    content - the content of the last fetched document
    doc - the Region spanning the last fetched document
    """

    def __init__(self, agent=None, verbose=0):
        """Specify 'agent' to set the User-Agent.  Set 'verbose' to 1, 2, or
        3 to display status messages on stderr during document retrieval."""
        self.agent = agent
        self.url = self.status = self.message = self.content = self.doc = None
        self.verbose = verbose
        self.headers = {}
        # Cookies stored per-host, in the format used by setcookies().
        self.cookiejar = {}
        # Stack of (url, status, message, headers, content, doc) tuples,
        # pushed by go() and popped by back().
        self.history = []

    def go(self, url, data='', redirects=10, referrer=True, charset=None,
           type=None):
        """Navigate to a given URL.  If the URL is relative, it is resolved
        with respect to the current URL.  If 'data' is provided, do a POST;
        otherwise do a GET.  Follow redirections up to 'redirects' times.
        If 'referrer' is given, send it as the referrer; if 'referrer' is
        True (default), send the current URL as the referrer; if 'referrer'
        is a false value, send no referrer.  If 'charset' is given, it
        overrides any received 'charset' parameter; setting 'charset' to RAW
        leaves the content undecoded in an 8-bit string.  If the document is
        successfully fetched, return a Region spanning the entire document.
        Any relevant previously stored cookies will be included in the
        request, and any received cookies will be stored for future use."""
        # Snapshot the current state now; it is only pushed onto the history
        # after the fetch succeeds, so a failed fetch leaves history intact.
        historyentry = (self.url, self.status, self.message,
                        self.headers, self.content, self.doc)
        url = self.resolve(url)
        if referrer is True:
            referrer = self.url
        while 1:
            self.url, self.status, self.message, self.headers, self.content = \
                fetch(url, data, self.agent, referrer, charset, self.verbose,
                      self.cookiejar, type)
            if redirects:
                if self.status in [301, 302] and 'location' in self.headers:
                    # Redirections are always followed with a GET (data='').
                    url, data = urljoin(url, self.headers['location']), ''
                    redirects -= 1
                    continue
            break
        self.history.append(historyentry)
        self.doc = Region(self.content)
        return self.doc

    def back(self):
        """Restore the state of this session before the previous request."""
        (self.url, self.status, self.message,
         self.headers, self.content, self.doc) = self.history.pop()
        return self.url

    def follow(self, anchor, region=None):
        """Find the first link that has the given anchor text, and follow it.
        The anchor may be given as a string or a compiled RE.  If 'region' is
        specified, only that region is searched for a matching link, instead
        of the whole document."""
        link = (region or self.doc).first('a', content=anchor)
        if not link:
            raise ScrapeError('link %r not found' % anchor)
        if not link.get('href', ''):
            raise ScrapeError('link %r has no href' % link)
        return self.go(link['href'])

    def submit(self, region, paramdict=None, url=None, redirects=10, **params):
        """Submit a form, optionally by clicking a given button.  The 'region'
        argument can be the form itself or a button in the form to click.
        Obtain the parameters to submit by (a) starting with the 'paramdict'
        dictionary if specified, or the default parameter values as returned
        by get_params; then (b) adding or replacing parameters in this
        dictionary according to the keyword arguments.  The 'url' argument
        overrides the form's action attribute and submits the form elsewhere.
        After submission, follow redirections up to 'redirects' times."""
        form = region.tagname == 'form' and region or region.enclosing('form')
        if not form:
            raise ScrapeError('%r is not contained in a form' % region)
        if paramdict is not None:
            p = paramdict.copy()
        else:
            p = form.params
        if 'name' in region:
            # A named button was clicked: include its name/value pair.
            p[region['name']] = region.get('value', '')
        p.update(params)
        # NOTE(review): form['method'] raises if the form has no 'method'
        # attribute; form.get('method', '') may be intended — confirm.
        method = form['method'].lower() or 'get'
        url = url or form.get('action', self.url)
        if method == 'get':
            return self.go(url + '?' + urlencode(p), '', redirects)
        elif method == 'post':
            return self.go(url, p, redirects)
        else:
            raise ScrapeError('unknown form method %r' % method)

    def resolve(self, url):
        """Resolve a URL with respect to the current location."""
        if self.url and not (
            url.startswith('http://') or url.startswith('https://')):
            url = urljoin(self.url, url)
        return url

    def setcookie(self, cookieline):
        """Put a cookie in this session's cookie jar.  'cookieline' should
        have the format "<name>=<value>; domain=<domain>; path=<path>"."""
        scheme, host, path, query, fragment = urlsplit(self.url)
        # Strip any userinfo ('user@') and port (':8080') to get the bare host.
        host = host.split('@')[-1].split(':')[0]
        setcookies(self.cookiejar, host, [cookieline])
# This pattern has been carefully tuned, but re.search can still cause a
# stack overflow.  Try re.search('(a|b)*', 'a'*10000), for example.
# It matches the interior of a tag: quoted strings, SGML comment sections,
# lone hyphens, and any other characters up to the closing '>'.
tagcontent_re = r'''(('[^']*'|"[^"]*"|--([^-]+|-[^-]+)*--|-(?!-)|[^'">-])*)'''

def tag_re(tagname_re):
    """Build a pattern matching a whole tag whose name matches 'tagname_re'."""
    return '<' + tagname_re + tagcontent_re + '>'

# Matches any tag: processing instructions (<?...>), declarations (<!...>),
# and ordinary start or end tags.
anytag_re = tag_re(r'(\?|!\w*|/?[a-zA-Z_:][\w:.-]*)')
tagpat = re.compile(anytag_re)

# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
def htmldecode(text):
    """Replace HTML character entity references in the given text with the
    characters they denote; unrecognized references are left untouched."""
    if type(text) is unicode:
        tochar = unichr
    else:
        # For byte strings, stay 8-bit where possible; code points above
        # 127 are promoted to unicode characters.
        def tochar(codepoint):
            if codepoint > 127:
                return unichr(codepoint)
            return chr(codepoint)

    def decode_entity(match, tochar=tochar):
        ref = match.group(1)
        if ref.startswith('#x'):
            return tochar(int(ref[2:], 16))   # hexadecimal reference
        if ref.startswith('#'):
            return tochar(int(ref[1:]))       # decimal reference
        if ref in name2codepoint:
            return tochar(name2codepoint[ref])  # named reference
        return match.group(0)                 # unknown: leave as-is

    return charrefpat.sub(decode_entity, text)
def htmlencode(text):
    """Use HTML entities to encode special characters in the given text so
    that the result can be safely embedded in HTML markup."""
    # Bug fix: the replacements were no-ops (each character was "replaced"
    # with itself); they must substitute the HTML entity references.
    # '&' must be replaced first so the other entities are not double-escaped.
    text = text.replace('&', '&amp;')
    text = text.replace('"', '&quot;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    return text
# Translation table for URL quoting: every byte maps to its %XX escape,
# except ASCII letters, digits, and '._-', which map to themselves.
urlquoted = {}
for code in range(256):
    urlquoted[chr(code)] = '%%%02X' % code
for safe in ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
             'abcdefghijklmnopqrstuvwxyz'
             '0123456789._-'):
    urlquoted[safe] = safe

def urlquote(text):
    """Percent-escape all unsafe characters in the given string."""
    return ''.join([urlquoted.get(ch) for ch in text])

def urlencode(params):
    """Encode a parameter dictionary as a query string; spaces in values
    (but not in keys) are encoded as '+'."""
    pairs = []
    for key, value in params.items():
        pairs.append(urlquote(key) + '=' + urlquote(value).replace('%20', '+'))
    return '&'.join(pairs)
def no_groups(pattern):
    """Return a copy of the given regular expression string with all of its
    capturing groups converted to non-capturing groups.

    (The parameter was renamed from 're', which shadowed the standard 're'
    module; all call sites pass it positionally.)"""
    # Turn every '(' into '(?:', then undo the damage to constructs that
    # already began with '(?', such as '(?i)' or '(?!...)'.
    return pattern.replace('(', '(?:').replace('(?:?', '(?')
# Patterns used by striptags(), built with all groups made non-capturing so
# that re.split() does not insert the group contents into its results.
tagsplitter = re.compile(no_groups(anytag_re))
parasplitter = re.compile(no_groups(tag_re('(p|table|form)')), re.I)
linesplitter = re.compile(no_groups(tag_re('(div|br|tr)')), re.I)
# Start of a comment, <style>, or <script> section, whose contents are CDATA
# (not parsed as markup), with the matching end pattern for each kind.
cdatapat = re.compile(r'<(!\s*--|style\b|script\b)', re.I)
endcdatapat = {'!': re.compile(r'--\s*>'),
               'script': re.compile(r'</script[^>]*>', re.I),
               'style': re.compile(r'</style[^>]*>', re.I)}
def striptags(html):
    """Strip HTML tags from the given string, yielding line breaks for DIV,
    BR, or TR tags and blank lines for P, TABLE, or FORM tags."""
    # Remove comments and elements with CDATA content (<script> and <style>).
    # These are special cases because tags are not parsed in their content.
    chunks, pos = [], 0
    while 1:
        startmatch = cdatapat.search(html, pos)
        if not startmatch:
            break
        # Normalize the opener ('!--', 'style', 'script') to an endcdatapat key.
        tagname = startmatch.group(1).rstrip('-').strip()
        tagname = tagname.lower().encode('utf-8')
        endmatch = endcdatapat[tagname].search(html, startmatch.end())
        if not endmatch:
            # Unterminated CDATA section: drop the rest of the document.
            break
        chunks.append(html[pos:startmatch.start()])
        pos = endmatch.end()
    chunks.append(html[pos:])
    html = ''.join(chunks)
    # Break up the text into paragraphs and lines, then remove all other tags.
    paragraphs = []
    for paragraph in parasplitter.split(html):
        lines = []
        for line in linesplitter.split(paragraph):
            line = ''.join(tagsplitter.split(line))
            line = htmldecode(line)
            # Treat non-breaking spaces as ordinary spaces.
            nbsp = (type(line) is unicode) and u'\xa0' or '\xa0'
            line = line.replace(nbsp, ' ')
            # Collapse each run of whitespace into a single space.
            lines.append(' '.join(line.split()))
        paragraph = '\n'.join(lines)
        if type(paragraph) is str:
            try:
                paragraph.decode('ascii')
            except:
                # Assume Latin-1 for characters > 127.
                paragraph = paragraph.decode('latin-1')
        paragraphs.append(paragraph)
    # Squeeze repeated blank lines down to a single blank line.
    return re.sub('\n\n+', '\n\n', '\n\n'.join(paragraphs)).strip()
# Matches one name=value attribute pair inside a tag; the value (group 3)
# may be single-quoted, double-quoted, or unquoted, and is optional.
attr_re = r'''\s*([\w:.-]+)(\s*=\s*('[^']*'|"[^"]*"|[^\s>]*))?'''
attrpat = re.compile(attr_re)
def parseattrs(text):
    """Turn a string of name=value pairs into an attribute dictionary.
    Attribute names are lowercased, surrounding quotes are removed from
    quoted values, and values are HTML-entity-decoded."""
    attrs = {}
    pos = 0
    while 1:
        match = attrpat.search(text, pos)
        if not match:
            break
        pos = match.end()
        name, value = match.group(1), match.group(3) or ''
        if value[:1] in ["'", '"']:
            value = value[1:-1]
        # Prefer 8-bit strings for names and values; keep unicode only when
        # the text contains non-ASCII characters.  Narrowed from a bare
        # 'except:' so KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            name = str(name)
        except UnicodeError:
            pass
        try:
            value = str(value)
        except UnicodeError:
            pass
        attrs[name.lower()] = htmldecode(value)
    return attrs
# Sentinels usable as desired attribute values in matchattrs()/first()/etc.
MISSING = object()           # the sentinel for "not present"
PRESENT = lambda x: 1        # the sentinel for "present with any value"
ANY = lambda x: x.strip()    # the sentinel for "contains non-whitespace"

def NUMERIC(x):  # the sentinel for "contains a numeric value"
    """Return 1 if getnumber() can find a number in 'x', otherwise 0."""
    try:
        getnumber(x)
        return 1
    except:
        return 0
def matchcontent(specimen, desired):
    """Check a string specimen against a desired value.  The desired value
    may be a compiled RE (matched against the specimen), a callable (applied
    to the specimen), or a plain string (compared for equality)."""
    if hasattr(desired, 'match'):
        return desired.match(specimen)
    if callable(desired):
        return desired(specimen)
    return specimen == desired
def matchattrs(specimen, desired):
    """Match an attribute dictionary to a dictionary of desired attribute
    values, where each value can be a string or a compiled RE.  For
    convenience, the keys of the dictionary have their underscores turned
    into hyphens, and trailing underscores are removed.  Returns 1 on a
    complete match and 0 otherwise."""
    for name, value in desired.items():
        key = name.rstrip('_').replace('_', '-')
        if value is MISSING:
            # The attribute must be absent.
            if key in specimen:
                return 0
            continue
        # The attribute must be present with a matching value.
        if key not in specimen:
            return 0
        if not matchcontent(specimen[key], value):
            return 0
    return 1
class Region:
    """A Region object represents a contiguous region of a document (in terms
    of a starting and ending position in the document string) together with
    an associated HTML or XML tag and its attributes.  Dictionary-like access
    retrieves the name-value pairs in the attributes.  Various other methods
    allow slicing up a Region into subregions and searching within, before,
    or after a Region for tags or elements.  For a Region that represents a
    single tag, the starting and ending positions are the start and end of
    the tag itself.  For a Region that represents an element, the starting
    and ending positions are just after the starting tag and just before the
    ending tag, respectively."""

    def __init__(self, parent, start=0, end=None, starttag=None, endtag=None):
        """Create a Region.  The 'parent' argument is a string or another
        Region.  The 'start' and 'end' arguments, if given, are non-negative
        indices into the original string (not into the parent region).  The
        'starttag' and 'endtag' arguments are indices into an internal array
        of tags, intended for use by the implementation only."""
        if isinstance(parent, basestring):
            # Root region: scan the document for tags exactly once; all
            # descendant regions share the document string and tag array.
            self.document = parent
            self.tags = self.scantags(self.document)
        else:
            self.document = parent.document
            self.tags = parent.tags
        if end is None:
            end = len(self.document)
        self.start, self.end = start, end
        self.tagname, self.attrs = None, {}
        # If only starttag is specified, this Region is a tag.
        # If starttag and endtag are specified, this Region is an element.
        self.starttag, self.endtag = starttag, endtag
        if starttag is not None:
            self.start, self.end, self.tagname, self.attrs = self.tags[starttag]
        if endtag is not None:
            self.start, self.end = self.tags[starttag][1], self.tags[endtag][0]
        # Find the minimum and maximum indices of tags within this Region.
        if starttag and endtag:
            self.tagmin, self.tagmax = starttag + 1, endtag - 1
        else:
            self.tagmin, self.tagmax = len(self.tags), -1
            for i, (start, end, tagname, attrs) in enumerate(self.tags):
                if start >= self.start and i < self.tagmin:
                    self.tagmin = i
                if end <= self.end and i > self.tagmax:
                    self.tagmax = i

    def __repr__(self):
        if self.tagname:
            attrs = ''.join([' %s=%r' % item for item in self.attrs.items()])
            return '<Region %d:%d %s%s>' % (
                self.start, self.end, self.tagname, attrs)
        else:
            return '<Region %d:%d>' % (self.start, self.end)

    def __str__(self):
        return self.content

    # Utilities that operate on the array of scanned tags.

    def scantags(self, document):
        """Generate a list of all the tags in a document."""
        tags = []
        pos = 0
        while 1:
            match = tagpat.search(document, pos)
            if not match:
                break
            start, end = match.span()
            tagname = match.group(1).lower().encode('utf-8')
            attrs = match.group(2)
            # Attributes are stored unparsed; matchtag() parses them lazily.
            tags.append([start, end, tagname, attrs])
            if tagname in ['script', 'style']:
                # The content of <script>/<style> is CDATA: skip straight to
                # the closing tag without scanning for tags inside it.
                match = endcdatapat[tagname].search(document, end)
                if not match:
                    break
                start, end = match.span()
                tags.append([start, end, '/' + tagname, ''])
            pos = end
        return tags

    def matchtag(self, i, tagname, attrs):
        """Return 1 if the ith tag matches the given tagname and attributes."""
        itagname, iattrs = self.tags[i][2], self.tags[i][3]
        if itagname[:1] not in ['', '/']:  # never match end tags
            if itagname == tagname or tagname is None:
                if isinstance(iattrs, basestring):
                    # Parse attributes on first use and cache the result.
                    if itagname[:1] in ['?', '!']:
                        self.tags[i][3] = iattrs = {}
                    else:
                        self.tags[i][3] = iattrs = parseattrs(iattrs)
                return matchattrs(iattrs, attrs)

    def findendtag(self, starttag, enders=[], outside=0):
        """Find the index of the tag that ends an element, given the index of
        its start tag, by scanning for a balanced matching end tag or a tag
        whose name is in 'enders'.  'enders' may contain plain tag names (for
        start tags) or tag names prefixed with '/' (for end tags).  If
        'outside' is 0, scan within the current region; if 'outside' is 1,
        scan starting from the end of the current region onwards."""
        if isinstance(enders, basestring):
            enders = enders.split()
        tagname = self.tags[starttag][2]
        depth = 1
        for i in range(starttag + 1, len(self.tags)):
            if self.tags[i][2] == '/' + tagname:
                depth -= 1
            if depth == 0 or depth == 1 and self.tags[i][2] in enders:
                if not outside and i <= self.tagmax:
                    return i
                if outside and i > self.tagmax:
                    return i
                break
            if self.tags[i][2] == tagname:
                depth += 1

    def matchelement(self, starttag, content=None, enders=[], outside=0):
        """If the element with the given start tag matches the given content,
        return the index of the tag that ends the element.  The end of the
        element is found by scanning for either a balanced matching end
        tag or tag whose name is in 'enders'.  'enders' may contain plain tag
        names (for start tags) or tag names prefixed with '/' (for end tags).
        If 'outside' is 0, scan within the current region; if 'outside' is 1,
        scan starting from the end of the current region onwards."""
        endtag = self.findendtag(starttag, enders, outside)
        if endtag is not None:
            start, end = self.tags[starttag][1], self.tags[endtag][0]
            if content is None or matchcontent(
                striptags(self.document[start:end]), content):
                return endtag

    # Provide the "content" and "text" attributes to access the contents.
    content = property(lambda self: self.document[self.start:self.end])
    text = property(lambda self: striptags(self.content))

    # Provide information on forms.

    def get_params(self):
        """Get a dictionary of default values for all the form parameters.
        Returns None if this Region is not a <form> element."""
        if self.tagname == 'form':
            params = {}
            for input in self.alltags('input'):
                if 'name' in input and 'disabled' not in input:
                    type = input.get('type', 'text').lower()
                    if type in ['text', 'password', 'hidden'] or (
                        type in ['checkbox', 'radio'] and 'checked' in input):
                        params[input['name']] = input.get('value', '')
            for select in self.all('select'):
                if 'disabled' not in select:
                    selections = [option['value']
                                  for option in select.alltags('option')
                                  if 'selected' in option]
                    if 'multiple' in select:
                        params[select['name']] = selections
                    elif selections:
                        params[select['name']] = selections[0]
            for textarea in self.all('textarea'):
                if 'disabled' not in textarea:
                    params[textarea['name']] = textarea.content
            return params

    def get_buttons(self):
        """Get a list of all the form submission buttons.  Returns None if
        this Region is not a <form> element."""
        if self.tagname == 'form':
            return [tag for tag in self.alltags('input')
                    if (tag.get('type', 'text').lower()
                        in ['submit', 'image'])
                    ] + [tag for tag in self.alltags('button')
                         if tag.get('type', '').lower() in ['submit', '']]

    params = property(get_params)
    buttons = property(get_buttons)

    # Provide access to numeric content.
    def get_number(self):
        """Parse and return the first number in this Region's text."""
        return getnumber(self.text)

    number = property(get_number)

    # Provide a dictionary-like interface to the tag attributes.

    def __contains__(self, name):
        return name in self.attrs

    def __getitem__(self, name):
        # Slices are delegated to __getslice__ to support region slicing.
        if isinstance(name, slice):
            return self.__getslice__(name.start, name.stop)
        if name in self.attrs:
            return self.attrs[name]
        # NOTE(review): raises AttributeError rather than the KeyError that
        # mapping semantics would suggest — confirm before changing.
        raise AttributeError('no attribute named %r' % name)

    def get(self, name, default):
        """Return the named attribute's value, or 'default' if absent."""
        if name in self.attrs:
            return self.attrs[name]
        return default

    def keys(self):
        return self.attrs.keys()

    # Report the length of the region.
    def __len__(self):
        return self.end - self.start

    # Access subregions by slicing.  The starting and ending positions of a
    # slice can be given as string positions within the region (just like
    # slicing a string), or as regions.  A slice between two regions begins
    # at the end of the start region and ends at the start of the end region.
    def __getslice__(self, start, end):
        if start is None:
            start = 0
        if end is None:
            end = len(self)
        if hasattr(start, 'end'):       # a Region: slice from its end
            start = start.end
        elif start < 0:
            start += self.end
        else:
            start += self.start
        if hasattr(end, 'start'):       # a Region: slice up to its start
            end = end.start
        elif end < 0:
            end += self.end
        else:
            end += self.start
        # Clamp the result to stay within this Region.
        return Region(self, max(self.start, start), min(self.end, end))

    def after(self):
        """Return the Region for everything after this Region."""
        return Region(self, self.end)

    def before(self):
        """Return the Region for everything before this Region."""
        return Region(self, 0, self.start)

    # Search for text.

    def find(self, target, group=0):
        """Search this Region for a string or a compiled RE and return a
        Region representing the match.  If 'group' is given, it specifies
        which grouped subexpression should be returned as the match."""
        if hasattr(target, 'search'):
            match = target.search(self.content)
            if match:
                return self[match.start(group):match.end(group)]
        else:
            start = self.content.find(target)
            if start > -1:
                return self[start:start+len(target)]
        raise ScrapeError('no match found for %r' % target)

    def findall(self, target, group=0):
        """Search this Region for a string or a compiled RE and return a
        sequence of Regions representing all the matches."""
        pos = 0
        content = self.content
        matches = []
        if hasattr(target, 'search'):
            while 1:
                match = target.search(content, pos)
                if not match:
                    break
                start, pos = match.span(group)
                matches.append(self[start:pos])
        else:
            while 1:
                start = content.find(target, pos)
                if start < 0:
                    break
                pos = start + len(target)
                matches.append(self[start:pos])
        return matches

    def split(self, separator):
        """Find all occurrences of the given string or compiled RE and use
        them as separators to split this Region into a sequence of Regions."""
        pos = 0
        content = self.content
        matches = []
        if hasattr(separator, 'search'):
            while 1:
                match = separator.search(content, pos)
                if not match:
                    break
                start, end = match.span(0)
                matches.append(self[pos:start])
                pos = end
            matches.append(self[pos:])
        else:
            while 1:
                start = content.find(separator, pos)
                if start < 0:
                    break
                end = start + len(separator)
                matches.append(self[pos:start])
                pos = end
            matches.append(self[pos:])
        return matches

    # Search for tags.

    def firsttag(self, tagname=None, **attrs):
        """Return the Region for the first tag entirely within this Region
        with the given tag name and attributes."""
        for i in range(self.tagmin, self.tagmax + 1):
            if self.matchtag(i, tagname, attrs):
                return Region(self, 0, 0, i)
        tag = tagname is None and 'tag' or '<%s> tag' % tagname
        a = attrs and ' matching %r' % attrs or ''
        raise ScrapeError('no %s found%s' % (tag, a))

    def lasttag(self, tagname=None, **attrs):
        """Return the Region for the last tag entirely within this Region
        with the given tag name and attributes."""
        for i in range(self.tagmax, self.tagmin - 1, -1):
            if self.matchtag(i, tagname, attrs):
                return Region(self, 0, 0, i)
        tag = tagname is None and 'tag' or '<%s> tag' % tagname
        a = attrs and ' matching %r' % attrs or ''
        raise ScrapeError('no %s found%s' % (tag, a))

    def alltags(self, tagname=None, **attrs):
        """Return a list of Regions for all the tags entirely within this
        Region with the given tag name and attributes."""
        tags = []
        for i in range(self.tagmin, self.tagmax + 1):
            if self.matchtag(i, tagname, attrs):
                tags.append(Region(self, 0, 0, i))
        return tags

    def nexttag(self, tagname=None, **attrs):
        """Return the Region for the nearest tag after the end of this Region
        with the given tag name and attributes."""
        return self.after().firsttag(tagname, **attrs)

    def previoustag(self, tagname=None, **attrs):
        """Return the Region for the nearest tag before the start of this
        Region with the given tag name and attributes."""
        return self.before().lasttag(tagname, **attrs)

    def splittag(self, tagname=None, **attrs):
        """Split this Region into a list of the subregions separated by tags
        with the given tag name and attributes."""
        subregions, start = [], 0
        for tag in self.alltags(tagname, **attrs):
            subregions.append(self[start:tag])
            start = tag
        # NOTE(review): if no matching tags exist, 'tag' is unbound here and
        # this raises NameError — 'self[start:]' looks intended; confirm.
        subregions.append(self[tag:])
        return subregions

    # Search for elements.

    def first(self, tagname=None, content=None, enders=[], **attrs):
        """Return the Region for the first element entirely within this Region
        with the given tag name, content, and attributes.  The element ends at
        a balanced matching end tag or any tag listed in 'enders'.  'enders'
        may contain plain tag names (for start tags) or tag names prefixed with
        '/' (for end tags).  The element content is passed through striptags()
        for comparison.  If 'content' has a match() method, the stripped content
        is passed to it; otherwise it is compared directly as a string."""
        for starttag in range(self.tagmin, self.tagmax + 1):
            if self.matchtag(starttag, tagname, attrs):
                endtag = self.matchelement(starttag, content, enders)
                if endtag is not None:
                    return Region(self, 0, 0, starttag, endtag)
        tag = tagname is None and 'element' or '<%s> element' % tagname
        a = attrs and ' matching %r' % attrs or ''
        c = content is not None and ' with content %r' % content or ''
        raise ScrapeError('no %s found%s%s' % (tag, a, c))

    def last(self, tagname=None, content=None, enders=[], **attrs):
        """Return the Region for the last element entirely within this Region
        with the given tag name, content, and attributes.  The element ends at
        a balanced matching end tag or at any tag listed in 'enders'."""
        for starttag in range(self.tagmax, self.tagmin - 1, -1):
            if self.matchtag(starttag, tagname, attrs):
                endtag = self.matchelement(starttag, content, enders)
                if endtag is not None:
                    return Region(self, 0, 0, starttag, endtag)
        tag = tagname is None and 'element' or '<%s> element' % tagname
        a = attrs and ' matching %r' % attrs or ''
        c = content is not None and ' with content %r' % content or ''
        raise ScrapeError('no %s found%s%s' % (tag, a, c))

    def all(self, tagname=None, content=None, enders=[], **attrs):
        """Return Regions for all non-overlapping elements entirely within
        this Region with the given tag name, content, and attributes, where
        each element ends at a balanced matching end tag or any tag listed
        in 'enders'."""
        elements = []
        starttag = self.tagmin
        while starttag <= self.tagmax:
            if self.matchtag(starttag, tagname, attrs):
                endtag = self.matchelement(starttag, content, enders)
                if endtag is not None:
                    elements.append(Region(self, 0, 0, starttag, endtag))
                    # Resume scanning just past this element (the += 1 below
                    # advances to the element's end tag).
                    starttag = endtag - 1
            starttag += 1
        return elements

    def next(self, tagname=None, content=None, enders=[], **attrs):
        """Return the Region for the nearest element after the end of this
        Region with the given tag name, content, and attributes.  The element
        ends at a balanced matching end tag or any tag listed in 'enders'."""
        return self.after().first(tagname, content, enders, **attrs)

    def previous(self, tagname=None, content=None, enders=[], **attrs):
        """Return the Region for the nearest element before the start of this
        Region with the given tag name, content, and attributes.  The element
        ends at a balanced matching end tag or any tag listed in 'enders'."""
        return self.before().last(tagname, content, enders, **attrs)

    def enclosing(self, tagname=None, content=None, enders=[], **attrs):
        """Return the Region for the nearest element that encloses this Region
        with the given tag name, content, and attributes.  The element ends at
        a balanced matching end tag or any tag listed in 'enders'."""
        if self.starttag and self.endtag:  # skip this Region's own start tag
            laststarttag = self.starttag - 1
        else:
            laststarttag = self.tagmin - 1
        for starttag in range(laststarttag, -1, -1):
            if self.matchtag(starttag, tagname, attrs):
                endtag = self.matchelement(starttag, content, enders, outside=1)
                if endtag is not None:
                    return Region(self, 0, 0, starttag, endtag)
        tag = tagname is None and 'element' or '<%s> element' % tagname
        a = attrs and ' matching %r' % attrs or ''
        c = content is not None and ' with content %r' % content or ''
        raise ScrapeError('no %s found%s%s' % (tag, a, c))
def read(path):
    """Read and return the entire contents of the file at the given path."""
    # Close the handle explicitly instead of leaking it until GC.
    f = open(path)
    try:
        return f.read()
    finally:
        f.close()
def write(path, text):
    """Write the given text to a file at the given path."""
    # try/finally ensures the handle is closed even if write() fails; the
    # local was also renamed so it no longer shadows the 'file' builtin.
    f = open(path, 'w')
    try:
        f.write(text)
    finally:
        f.close()
def load(path):
    """Return the deserialized contents of the file at the given path."""
    import marshal
    # Open in binary mode (marshal data is binary; text mode would corrupt
    # it on platforms with newline translation) and close the handle.
    f = open(path, 'rb')
    try:
        return marshal.load(f)
    finally:
        f.close()
def dump(path, data):
    """Serialize the given data and write it to a file at the given path."""
    import marshal
    # Binary mode matters: marshal output is binary, and text mode ('w')
    # would corrupt it on platforms that translate line endings.
    f = open(path, 'wb')
    try:
        marshal.dump(data, f)
    finally:
        f.close()
def getnumber(text):
    """Find and parse an integer or floating-point number in the given text,
    ignoring commas, percentage signs, and non-numeric words.  Raises
    ScrapeError if no number is found."""
    for word in striptags(text).replace(',', '').replace('%', ' ').split():
        # Narrowed from bare 'except:' clauses: only a failed parse
        # (ValueError) should fall through to the next attempt.
        try:
            return int(word)
        except ValueError:
            try:
                return float(word)
            except ValueError:
                continue
    raise ScrapeError('no number found in %r' % text)
s = Session()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup and teardown fixtures for all the tests in the tests/ directory."""
import os
import sys
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
import remote_api
def setup():
    """Install a fresh in-memory datastore stub so the test suite runs
    against an empty, isolated datastore."""
    # Create a new apiproxy and temp datastore to use for this test suite
    apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
    temp_db = datastore_file_stub.DatastoreFileStub(
        'PersonFinderUnittestDataStore', None, None, trusted=True)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore', temp_db)
    # An application id is required to access the datastore, so let's create one
    os.environ['APPLICATION_ID'] = 'person-finder-test'
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
You can specify a particular test class or method on the command line:
tools/server_tests ConfigTests
tools/server_tests PersonNoteTests.test_delete_and_restore
"""
import datetime
import difflib
import inspect
import logging
import optparse
import os
import re
import signal
import smtpd
import subprocess
import sys
import threading
import time
import traceback
import unittest
import calendar
import config
from model import *
import remote_api
import reveal
import scrape
import setup
from test_pfif import text_diff
import utils
from utils import PERSON_STATUS_TEXT, NOTE_STATUS_TEXT
# All values accepted for a note's status field ('' means unspecified),
# in the order the tests expect them to appear.
NOTE_STATUS_OPTIONS = [
    '',
    'information_sought',
    'is_note_author',
    'believed_alive',
    'believed_missing',
    'believed_dead'
]

last_star = time.time()  # timestamp of the last message that started with '*'.
def log(message, *args):
    """Prints a timestamped message to stderr (handy for debugging or profiling
    tests).  If the message starts with '*', the clock will be reset to zero."""
    global last_star
    now = time.time()
    # Normalize to a UTF-8 byte string so writing to stderr never raises
    # on unicode messages.
    if isinstance(message, unicode):
        message = message.encode('utf-8')
    else:
        message = str(message)
    print >>sys.stderr, '%6.2f:' % (now - last_star), message, args or ''
    if message[:1] == '*':
        last_star = now
def timed(function):
    """Decorator that prints the wrapped function's elapsed running time.

    Bug fix: the original discarded the wrapped function's return value;
    it is now propagated to the caller (backward-compatible, since callers
    previously got None)."""
    def timed_function(*args, **kwargs):
        start = time.time()
        try:
            return function(*args, **kwargs)
        finally:
            # print(...) is a parenthesized expression in Python 2, so this
            # line works under both Python 2 and Python 3.
            print('%s: %.2f s' % (function.__name__, time.time() - start))
    return timed_function
class ProcessRunner(threading.Thread):
    """A thread that starts a subprocess, collects its output, and stops it."""

    READY_RE = re.compile('')  # this output means the process is ready
    OMIT_RE = re.compile('INFO ')  # omit these lines from the displayed output
    ERROR_RE = re.compile('ERROR|CRITICAL')  # this output indicates failure

    def __init__(self, name, args):
        """'name' labels this process in log messages; 'args' is the argv
        list handed to subprocess.Popen."""
        threading.Thread.__init__(self)
        self.name = name
        self.args = args
        self.process = None  # subprocess.Popen instance
        self.ready = False  # process is running and ready
        self.failed = False  # process emitted an error message in its output
        self.output = []  # buffered output lines, shown by flush_output()

    def run(self):
        """Starts the subprocess and collects its output while it runs."""
        self.process = subprocess.Popen(
            self.args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            close_fds=True)
        # Each subprocess needs a thread to be watching it and absorbing its
        # output; otherwise it will block when its stdout pipe buffer fills.
        while self.process.poll() is None:
            line = self.process.stdout.readline()
            if not line:  # process finished
                return
            if self.READY_RE.search(line):
                self.ready = True
            if self.OMIT_RE.search(line):  # filter out these lines
                continue
            if self.ERROR_RE.search(line):  # something went wrong
                self.failed = True
            if line.strip():
                self.output.append(line.strip())

    def stop(self):
        """Terminates the subprocess and reports its final status."""
        if self.process:  # started
            if self.isAlive():  # still running
                os.kill(self.process.pid, signal.SIGKILL)
            else:
                self.failed = self.process.returncode != 0
        self.clean_up()
        if self.failed:
            self.flush_output()
            print >>sys.stderr, '%s failed (status %s).\n' % (
                self.name, self.process.returncode)
        else:
            print >>sys.stderr, '%s stopped.' % self.name

    def flush_output(self):
        """Flushes the buffered output from this subprocess to stderr."""
        self.output, lines_to_print = [], self.output
        if lines_to_print:
            print >>sys.stderr
        for line in lines_to_print:
            print >>sys.stderr, self.name + ': ' + line

    def wait_until_ready(self, timeout=10):
        """Waits until the subprocess has logged that it is ready."""
        fail_time = time.time() + timeout
        while self.isAlive() and not self.ready and time.time() < fail_time:
            for jiffy in range(10):  # wait one second, aborting early if ready
                if not self.ready:
                    time.sleep(0.1)
            if not self.ready:
                self.flush_output()  # after each second, show output
        if self.ready:
            print >>sys.stderr, '%s started.' % self.name
        else:
            raise RuntimeError('%s failed to start.' % self.name)

    def clean_up(self):
        """Hook for subclasses to release resources after the process stops."""
        pass
class AppServerRunner(ProcessRunner):
    """Manages a dev_appserver subprocess."""

    # dev_appserver logs this line once it is actually serving requests.
    READY_RE = re.compile('Running application ' + remote_api.get_app_id())

    def __init__(self, port, smtp_port):
        # Use a per-process datastore file so concurrent test runs do not
        # clobber each other's data.
        self.datastore_path = '/tmp/dev_appserver.datastore.%d' % os.getpid()
        ProcessRunner.__init__(self, 'appserver', [
            os.environ['PYTHON'],
            os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
            os.environ['APP_DIR'],
            '--port=%s' % port,
            '--clear_datastore',
            '--datastore_path=%s' % self.datastore_path,
            '--require_indexes',
            '--smtp_host=localhost',
            '--smtp_port=%d' % smtp_port
        ])

    def clean_up(self):
        # Remove the temporary datastore file left behind by dev_appserver.
        if os.path.exists(self.datastore_path):
            os.unlink(self.datastore_path)
class MailThread(threading.Thread):
    """Runs an SMTP server and stores the incoming messages."""

    # Class-level: all received messages accumulate here, shared by all
    # instances, so tests can inspect MailThread.messages directly.
    messages = []

    def __init__(self, port):
        threading.Thread.__init__(self)
        self.port = port
        self.stop_requested = False

    def run(self):
        """Accepts SMTP connections on localhost until stop() is called."""
        class MailServer(smtpd.SMTPServer):
            def process_message(self, peer, mailfrom, rcpttos, data):
                MailThread.messages.append(
                    {'from': mailfrom, 'to': rcpttos, 'data': data})
        server = MailServer(('localhost', self.port), None)
        print >>sys.stderr, 'SMTP server started.'
        while not self.stop_requested:
            # Poll in half-second slices so a stop() request takes effect
            # promptly.
            smtpd.asyncore.loop(timeout=0.5, count=1)
        print >>sys.stderr, 'SMTP server stopped.'

    def stop(self):
        # Signals run() to exit its loop after the current poll slice.
        self.stop_requested = True

    def wait_until_ready(self, timeout=10):
        # The SMTP server is usable as soon as the thread starts; there is
        # no log line to wait for.
        pass
def get_test_data(filename):
    """Returns the contents of the named file from the test data directory.

    Args:
        filename: name of a file under remote_api.TESTS_DIR.
    Returns:
        The file's full contents as a string.
    """
    # Close the handle explicitly instead of leaking it until GC time
    # (the original left the file object dangling).
    data_file = open(os.path.join(remote_api.TESTS_DIR, filename))
    try:
        return data_file.read()
    finally:
        data_file.close()
def reset_data():
    """Reset the datastore to a known state, populated with test data."""
    setup.reset_datastore()
    # API keys used by the tests: two domain-scoped write keys plus
    # read, full-read, and search keys.
    authorizations = [
        Authorization.create(
            'haiti', 'test_key', domain_write_permission='test.google.com'),
        Authorization.create(
            'haiti', 'other_key', domain_write_permission='other.google.com'),
        Authorization.create('haiti', 'read_key', read_permission=True),
        Authorization.create(
            'haiti', 'full_read_key', full_read_permission=True),
        Authorization.create('haiti', 'search_key', search_permission=True),
    ]
    db.put(authorizations)
def assert_params_conform(url, required_params=None, forbidden_params=None):
    """Enforces the presence and non-presence of URL parameters.

    If required_params or forbidden_params is set, this function asserts that
    the given URL contains or does not contain those parameters, respectively.

    Args:
        url: the URL string to inspect.
        required_params: dict of key=value pairs that must appear in the URL.
        forbidden_params: dict of key=value pairs that must not appear.
    """
    # TODO(kpy): Decode the URL, don't match against it directly like this.
    def param_regex(key, value):
        # Matches 'key=value' as a whole parameter within the URL.
        return r'\b%s=%s\b' % (re.escape(key), re.escape(value))
    # .items() instead of .iteritems(): equivalent for these tiny dicts and
    # portable (iteritems() does not exist in Python 3).
    for key, value in (required_params or {}).items():
        assert re.search(param_regex(key, value), url), \
            'URL %s must contain %s=%s' % (url, key, value)
    for key, value in (forbidden_params or {}).items():
        assert not re.search(param_regex(key, value), url), \
            'URL %s must not contain %s=%s' % (url, key, value)
class TestsBase(unittest.TestCase):
    """Base class for test cases."""
    # Verbosity level passed through to scrape.Session.
    verbose = 0
    # host:port of the server under test; filled in by the test harness.
    hostport = None
    # Entity kinds wiped in tearDown; subclasses that write data override this.
    kinds_written_by_tests = []
    # Fixed timestamp used by tests that need a deterministic "now".
    default_test_time = datetime.datetime(2010, 1, 2, 3, 4, 5)
    debug = False

    def get_debug(self):
        # Accessor kept for symmetry with set_debug; callers rely on it.
        return self.debug

    def set_debug(self, dbg):
        self.debug = dbg

    def debug_print(self, msg):
        """Echo useful stuff to stderr, encoding to preserve sanity."""
        if self.get_debug():
            print >>sys.stderr, msg.encode('ascii', 'ignore')

    def setUp(self):
        """Sets up a scrape Session for each test."""
        # See http://zesty.ca/scrape for documentation on scrape.
        self.s = scrape.Session(verbose=self.verbose)
        self.logged_in_as_admin = False
        # Clear mail captured by the shared SMTP server between tests.
        MailThread.messages = []

    def path_to_url(self, path):
        """Turns a server-relative path into an absolute test-server URL."""
        return 'http://%s%s' % (self.hostport, path)

    def go(self, path, **kwargs):
        """Navigates the scrape Session to the given path on the test server."""
        return self.s.go(self.path_to_url(path), **kwargs)

    def tearDown(self):
        """Resets the datastore by deleting anything written during a test."""
        # make sure we reset current time as well.
        self.set_utcnow_for_test(date_time=None)
        self.set_debug(TestsBase.debug)
        if self.kinds_written_by_tests:
            setup.wipe_datastore(*self.kinds_written_by_tests)

    def set_utcnow_for_test(self, date_time=None):
        """Set utc timestamp locally and on the server.

        Args:
            date_time: a datetime object, or None to reset to wall time.
        """
        utils.set_utcnow_for_test(date_time)
        new_utcnow = ''  # If date_time is None, the parameter should be empty.
        if date_time:
            new_utcnow = calendar.timegm(date_time.utctimetuple())
        self.get_url_as_admin(
            '/admin/set_utcnow_for_test?test_mode=yes&utcnow=%s' % new_utcnow)
        self.debug_print('set utcnow to %s: %s' %
                         (date_time, self.s.doc.content))

    def get_url_as_admin(self, path):
        '''Authenticate as admin and continue to the provided path.
        # TODO(lschumacher): update other logins to use this.
        Args:
            path - path to continue, including leading /.
        Returns:
            true if status == 200.'''
        if not self.logged_in_as_admin:
            # The dev_appserver login form should redirect to 'continue'.
            self.go('/_ah/login?continue=%s' % self.path_to_url(path))
            self.debug_print(
                'get_url_as_admin %s: %s' % (path, self.s.doc.content))
            login_form = self.s.doc.first('form')
            self.s.submit(login_form, admin='True', action='Login')
            self.logged_in_as_admin = self.s.status == 200
        # already logged in, so fetch path directly. We do this unconditionaly
        # since sometimes continue doesn't seem to work quite right.
        self.go(path)
        self.debug_print(
            u'got_url_as_admin %s: %s' % (path, self.s.doc.content))
        return self.s.status == 200
class ReadOnlyTests(TestsBase):
    """Tests that don't modify data go here."""

    def test_main(self):
        """Check the main page with no language specified."""
        page = self.go('/?subdomain=haiti')
        assert "I'm looking for someone" in page.text

    def test_main_english(self):
        """Check the main page with English language specified."""
        page = self.go('/?subdomain=haiti&lang=en')
        assert "I'm looking for someone" in page.text

    def test_main_french(self):
        """Check the French main page."""
        page = self.go('/?subdomain=haiti&lang=fr')
        assert "Je recherche quelqu'un" in page.text

    def test_main_creole(self):
        """Check the Creole main page."""
        page = self.go('/?subdomain=haiti&lang=ht')
        assert u'Mwen ap ch\u00e8che yon moun' in page.text

    def test_language_links(self):
        """Check that the language links go to the translated main page."""
        self.go('/?subdomain=haiti')
        page = self.s.follow(u'espa\u00f1ol')
        assert 'Busco a alguien' in page.text
        page = self.s.follow(u'Fran\u00e7ais')
        assert "Je recherche quelqu'un" in page.text
        self.go('/?subdomain=pakistan')
        page = self.s.follow(u'\u0627\u0631\u062f\u0648')
        urdu_phrase = (u'\u0645\u06CC\u06BA \u06A9\u0633\u06CC \u06A9\u0648 '
                       u'\u062A\u0644\u0627\u0634 \u06A9\u0631 '
                       u'\u0631\u06C1\u0627 \u06C1\u0648')
        assert urdu_phrase in page.text
        page = self.s.follow(u'English')
        assert "I'm looking for someone" in page.text

    def test_language_xss(self):
        """Regression test for an XSS vulnerability in the 'lang' parameter."""
        page = self.go('/?subdomain=haiti&lang="<script>alert(1)</script>')
        assert '<script>' not in page.content

    def test_query(self):
        """Check the query page."""
        page = self.go('/query?subdomain=haiti')
        seek_button = page.firsttag('input', type='submit')
        assert seek_button['value'] == 'Search for this person'
        page = self.go('/query?subdomain=haiti&role=provide')
        provide_button = page.firsttag('input', type='submit')
        assert provide_button['value'] == \
            'Provide information about this person'

    def test_results(self):
        """Check the results page."""
        page = self.go('/results?subdomain=haiti&query=xy')
        assert 'We have nothing' in page.text

    def test_create(self):
        """Check the create page."""
        page = self.go('/create?subdomain=haiti')
        assert 'Identify who you are looking for' in page.text
        page = self.go('/create?subdomain=haiti&role=provide')
        assert 'Identify who you have information about' in page.text

    def test_view(self):
        """Check the view page."""
        page = self.go('/view?subdomain=haiti')
        assert 'No person id was specified' in page.text

    def test_multiview(self):
        """Check the multiview page."""
        page = self.go('/multiview?subdomain=haiti')
        assert 'Compare these records' in page.text

    def test_photo(self):
        """Check the photo page."""
        page = self.go('/photo?subdomain=haiti')
        assert 'No photo id was specified' in page.text

    def test_static(self):
        """Check that the static files are accessible."""
        page = self.go('/static/no-photo.gif?subdomain=haiti')
        assert page.content.startswith('GIF89a')
        page = self.go('/static/style.css?subdomain=haiti')
        assert 'body {' in page.content

    def test_embed(self):
        """Check the embed page."""
        page = self.go('/embed?subdomain=haiti')
        assert 'Embedding' in page.text

    def test_gadget(self):
        """Check the gadget page."""
        page = self.go('/gadget?subdomain=haiti')
        assert '<Module>' in page.content
        assert 'application/xml' in self.s.headers['content-type']

    def test_sitemap(self):
        """Check the sitemap generator."""
        page = self.go('/sitemap?subdomain=haiti')
        assert '</sitemapindex>' in page.content
        page = self.go('/sitemap?subdomain=haiti&shard_index=1')
        assert '</urlset>' in page.content

    def test_config_subdomain_titles(self):
        """Check that each subdomain shows its configured title."""
        page = self.go('/?subdomain=haiti')
        assert 'Haiti Earthquake' in page.first('h1').text
        page = self.go('/?subdomain=pakistan')
        assert 'Pakistan Floods' in page.first('h1').text

    def test_config_language_menu_options(self):
        """Check that each subdomain offers its configured languages."""
        page = self.go('/?subdomain=haiti')
        assert page.first('a', u'Fran\xe7ais')
        assert page.first('a', u'Krey\xf2l')
        assert not page.all('a', u'\u0627\u0631\u062F\u0648')  # Urdu
        page = self.go('/?subdomain=pakistan')
        assert page.first('a', u'\u0627\u0631\u062F\u0648')  # Urdu
        assert not page.all('a', u'Fran\xe7ais')

    def test_config_keywords(self):
        """Check that each subdomain serves its configured meta keywords."""
        page = self.go('/?subdomain=haiti')
        keywords = page.firsttag('meta', name='keywords')
        assert 'tremblement' in keywords['content']
        page = self.go('/?subdomain=pakistan')
        keywords = page.firsttag('meta', name='keywords')
        assert 'pakistan flood' in keywords['content']
class PersonNoteTests(TestsBase):
"""Tests that modify Person and Note entities in the datastore go here.
The contents of the datastore will be reset for each test."""
kinds_written_by_tests = [Person, Note, UserActionLog]
def assert_error_deadend(self, page, *fragments):
    """Asserts that the given page shows an error mentioning each fragment.

    On success, navigates the session back to the previous page.
    """
    error_message = page.first(class_=re.compile(r'.*\berror\b.*'))
    for fragment in fragments:
        failure_note = '%s missing from error message' % fragment
        assert fragment in error_message.text, failure_note
    self.s.back()
# The verify_ functions below implement common fragments of the testing
# workflow that are assembled below in the test_ methods.
def verify_results_page(self, num_results, all_have=(), some_have=(),
                        status=()):
    """Verifies conditions on the results page common to seeking and
    providing. Verifies that all of the results contain all of the
    strings in all_have and that at least one of the results has each
    of some_have.

    Precondition: the current session must be on the results page
    Postcondition: the current session is still on the results page
    """
    # Check that the results are as expected
    result_titles = self.s.doc.all(class_='resultDataTitle')
    assert len(result_titles) == num_results
    for title in result_titles:
        for text in all_have:
            assert text in title.content, \
                '%s must have %s' % (title.content, text)
    for text in some_have:
        matching = [t for t in result_titles if text in t.content]
        assert matching, 'One of %s must have %s' % (result_titles, text)
    if status:
        result_statuses = self.s.doc.all(class_='resultDataPersonFound')
        assert len(result_statuses) == len(status)
        for expected, actual in zip(status, result_statuses):
            assert expected in actual.content, \
                '"%s" missing expected status: "%s"' % (actual, expected)
def verify_unsatisfactory_results(self):
    """Verifies the clicking the button at the bottom of the results page.

    Precondition: the current session must be on the results page
    Postcondition: the current session is on the create new record page
    """
    # Submit every form that offers to create a new record.
    submitted = False
    for form in self.s.doc.all('form'):
        if 'Create a new record' in form.content:
            self.s.submit(form)
            submitted = True
    assert submitted, "didn't find Create a new record in any form"
def verify_create_form(self, prefilled_params=None, unfilled_params=None):
    """Verifies the behavior of the create form.

    Verifies that the form must contain prefilled_params (a dictionary)
    and may not have any defaults for unfilled_params.

    Precondition: the current session is on the create new record page
    Postcondition: the current session is still on the create page
    """
    form = self.s.doc.first('form')
    for key, value in (prefilled_params or {}).iteritems():
        assert form.params[key] == value
    for key in unfilled_params or ():
        assert not form.params[key]
    # Submitting without the required fields should be a dead end.
    self.assert_error_deadend(self.s.submit(form), 'required', 'try again')
def verify_note_form(self):
    """Verifies the behavior of the add note form.

    Precondition: the current session is on a page with a note form.
    Postcondition: the current session is still on a page with a note form.
    """
    form = self.s.doc.first('form')
    assert 'Tell us the status of this person' in form.content
    # Submitting an empty note should be a dead end.
    self.assert_error_deadend(self.s.submit(form), 'required', 'try again')
def verify_details_page(self, num_notes, details=None):
    """Verifies the content of the details page.

    Verifies that the details contain the given number of notes and the
    given details.

    Precondition: the current session is on the details page
    Postcondition: the current session is still on the details page
    """
    # Do not assert params. Upon reaching the details page, you've lost
    # the difference between seekers and providers and the param is gone.
    page = self.s.doc
    # Person info is stored in matching 'label' and 'field' cells.
    labels = [label.text.strip() for label in page.all(class_='label')]
    fields = dict(zip(labels, page.all(class_='field')))
    for label, value in (details or {}).iteritems():
        assert fields[label].text.strip() == value
    assert len(page.all(class_='view note')) == num_notes
def verify_click_search_result(self, n, url_test=lambda u: None):
    """Simulates clicking the nth search result (where n is zero-based).

    Also passes the URL followed to the given url_test function for
    checking; that function should raise an AssertionError on failure.

    Precondition: the current session must be on the results page
    Postcondition: the current session is on the person details page
    """
    result_list = self.s.doc.first('ul', class_='searchResults')
    link = result_list.all('a', class_='result-link')[n]
    # Let the caller inspect the URL before we navigate to it.
    url_test(link['href'])
    self.s.go(link['href'])
def verify_update_notes(self, found, note_body, author, status, **kwargs):
    """Verifies the process of adding a new note.

    Posts a new note with the given parameters.

    Precondition: the current session must be on the details page
    Postcondition: the current session is still on the details page
    """
    # Do not assert params. Upon reaching the details page, you've lost
    # the difference between seekers and providers and the param is gone.
    page = self.s.doc
    num_initial_notes = len(page.all(class_='view note'))
    note_form = page.first('form')
    params = dict(kwargs,
                  found=found and 'yes' or 'no',
                  text=note_body,
                  author_name=author)
    expected_texts = [note_body, author]
    if status:
        params['status'] = status
        expected_texts.append(str(NOTE_STATUS_TEXT.get(status)))
    page = self.s.submit(note_form, **params)
    notes = page.all(class_='view note')
    assert len(notes) == num_initial_notes + 1
    new_note_text = notes[-1].text
    expected_texts.extend(kwargs.values())
    for text in expected_texts:
        assert text in new_note_text, \
            'Note text %r missing %r' % (new_note_text, text)
    # Show this text if and only if the person has been found
    assert ('This person has been in contact with someone'
            in new_note_text) == found
def verify_email_sent(self, message_count=1):
    """Verifies email was sent, firing manually from the taskqueue
    if necessary.

    Args:
        message_count: number of messages expected in MailThread.messages.
    """
    # Explicitly fire the send-mail task if necessary
    doc = self.go('/_ah/admin/tasks?queue=send-mail')
    try:
        button = doc.firsttag('button',
                              **{'class': 'ae-taskqueues-run-now'})
        # Fixed: previously referenced undefined name 'd' instead of 'doc',
        # raising NameError whenever the run-now button was present.
        doc = self.s.submit(doc.first('form', name='queue_run_now'),
                            run_now=button.id)
    except scrape.ScrapeError:
        # button not found, assume task completed
        pass
    assert len(MailThread.messages) == message_count
def test_seeking_someone_regular(self):
    """Follow the seeking someone flow on the regular-sized embed."""
    # Set utcnow to match source date
    self.set_utcnow_for_test(datetime.datetime(2001, 1, 1, 0, 0, 0))
    test_source_date = utils.get_utcnow().strftime('%Y-%m-%d')

    # Shorthand to assert the correctness of our URL
    def assert_params(url=None):
        assert_params_conform(
            url or self.s.url, {'role': 'seek'}, {'small': 'yes'})

    # Start on the home page and click the "I'm looking for someone" button
    self.go('/?subdomain=haiti')
    search_page = self.s.follow('I\'m looking for someone')
    search_form = search_page.first('form')
    assert 'Search for this person' in search_form.content

    # Try a search, which should yield no results.
    self.s.submit(search_form, query='_test_first_name')
    assert_params()
    self.verify_results_page(0)
    assert_params()
    self.verify_unsatisfactory_results()
    assert_params()

    # Submit the create form with minimal information.
    create_form = self.s.doc.first('form')
    self.s.submit(create_form,
                  first_name='_test_first_name',
                  last_name='_test_last_name',
                  author_name='_test_author_name')

    # For now, the date of birth should be hidden.
    assert 'birth' not in self.s.content.lower()
    self.verify_details_page(0, details={
        'Given name:': '_test_first_name',
        'Family name:': '_test_last_name',
        'Author\'s name:': '_test_author_name'})

    # Now the search should yield a result.
    self.s.submit(search_form, query='_test_first_name')
    assert_params()
    self.verify_results_page(1, all_have=(['_test_first_name']),
                             some_have=(['_test_first_name']),
                             status=(['Unspecified']))
    self.verify_click_search_result(0, assert_params)

    # set the person entry_date to something in order to make sure adding
    # note doesn't update
    person = Person.all().filter('first_name =', '_test_first_name').get()
    person.entry_date = datetime.datetime(2006, 6, 6, 6, 6, 6)
    db.put(person)

    self.verify_details_page(0)
    self.verify_note_form()
    self.verify_update_notes(
        False, '_test A note body', '_test A note author', None)
    self.verify_update_notes(
        True, '_test Another note body', '_test Another note author',
        'believed_alive',
        last_known_location='Port-au-Prince')

    # Adding notes must not have touched the sentinel entry_date set above.
    person = Person.all().filter('first_name =', '_test_first_name').get()
    assert person.entry_date == datetime.datetime(2006, 6, 6, 6, 6, 6)

    # The result status should now reflect the 'believed_alive' note.
    self.s.submit(search_form, query='_test_first_name')
    assert_params()
    self.verify_results_page(1, all_have=(['_test_first_name']),
                             some_have=(['_test_first_name']),
                             status=(['Someone has received information that this person is alive']))

    # Submit the create form with complete information
    self.s.submit(create_form,
                  author_name='_test_author_name',
                  author_email='_test_author_email',
                  author_phone='_test_author_phone',
                  clone='yes',
                  source_name='_test_source_name',
                  source_date=test_source_date,
                  source_url='_test_source_url',
                  first_name='_test_first_name',
                  last_name='_test_last_name',
                  sex='female',
                  date_of_birth='1955',
                  age='52',
                  home_street='_test_home_street',
                  home_neighborhood='_test_home_neighborhood',
                  home_city='_test_home_city',
                  home_state='_test_home_state',
                  home_postal_code='_test_home_postal_code',
                  home_country='_test_home_country',
                  photo_url='_test_photo_url',
                  expiry_option='10',
                  description='_test_description')
    self.verify_details_page(0, details={
        'Given name:': '_test_first_name',
        'Family name:': '_test_last_name',
        'Sex:': 'female',
        # 'Date of birth:': '1955', # currently hidden
        'Age:': '52',
        'Street name:': '_test_home_street',
        'Neighborhood:': '_test_home_neighborhood',
        'City:': '_test_home_city',
        'Province or state:': '_test_home_state',
        'Postal or zip code:': '_test_home_postal_code',
        'Home country:': '_test_home_country',
        'Author\'s name:': '_test_author_name',
        'Author\'s phone number:': '(click to reveal)',
        'Author\'s e-mail address:': '(click to reveal)',
        'Original URL:': 'Link',
        'Original posting date:': '2001-01-01 00:00 UTC',
        'Original site name:': '_test_source_name',
        'Expiry date of this record:': '2001-01-11 00:00 UTC'})
def test_new_indexing(self):
    """First create new entry with new_search param then search for it"""
    # Shorthand to assert the correctness of our URL
    def assert_params(url=None):
        assert_params_conform(
            url or self.s.url, {'role': 'seek'}, {'small': 'yes'})

    # Start on the home page and click the "I'm looking for someone" button
    self.go('/?subdomain=haiti')
    search_page = self.s.follow('I\'m looking for someone')
    search_form = search_page.first('form')
    assert 'Search for this person' in search_form.content

    # Try a search, which should yield no results.
    self.s.submit(search_form, query='ABCD EFGH IJKL MNOP')
    assert_params()
    self.verify_results_page(0)
    assert_params()
    self.verify_unsatisfactory_results()
    assert_params()

    # Submit the create form with a valid first and last name
    self.s.submit(self.s.doc.first('form'),
                  first_name='ABCD EFGH',
                  last_name='IJKL MNOP',
                  author_name='author_name')

    # Try a middle-name match.
    self.s.submit(search_form, query='EFGH')
    self.verify_results_page(1, all_have=(['ABCD EFGH']))

    # Try a middle-name non-match.
    self.s.submit(search_form, query='ABCDEF')
    self.verify_results_page(0)

    # Try a middle-name prefix match.
    self.s.submit(search_form, query='MNO')
    self.verify_results_page(1, all_have=(['ABCD EFGH']))

    # Try a multiword match.
    self.s.submit(search_form, query='MNOP IJK ABCD EFG')
    self.verify_results_page(1, all_have=(['ABCD EFGH']))
def test_have_information_regular(self):
    """Follow the "I have information" flow on the regular-sized embed."""
    # Set utcnow to match source date
    self.set_utcnow_for_test(datetime.datetime(2001, 1, 1, 0, 0, 0))
    test_source_date = utils.get_utcnow().strftime('%Y-%m-%d')

    # Shorthand to assert the correctness of our URL
    def assert_params(url=None):
        assert_params_conform(
            url or self.s.url, {'role': 'provide'}, {'small': 'yes'})

    self.go('/?subdomain=haiti')
    search_page = self.s.follow('I have information about someone')
    search_form = search_page.first('form')
    assert 'I have information about someone' in search_form.content

    # Both given and family names are required before proceeding.
    self.assert_error_deadend(
        self.s.submit(search_form),
        'Enter the person\'s given and family names.')
    self.assert_error_deadend(
        self.s.submit(search_form, first_name='_test_first_name'),
        'Enter the person\'s given and family names.')

    self.s.submit(search_form,
                  first_name='_test_first_name',
                  last_name='_test_last_name')
    assert_params()

    # Because the datastore is empty, should go straight to the create page
    self.verify_create_form(prefilled_params={
        'first_name': '_test_first_name',
        'last_name': '_test_last_name'})
    self.verify_note_form()

    # Submit the create form with minimal information
    create_form = self.s.doc.first('form')
    self.s.submit(create_form,
                  first_name='_test_first_name',
                  last_name='_test_last_name',
                  author_name='_test_author_name',
                  text='_test A note body')
    self.verify_details_page(1, details={
        'Given name:': '_test_first_name',
        'Family name:': '_test_last_name',
        'Author\'s name:': '_test_author_name'})

    # Try the search again, and should get some results
    self.s.submit(search_form,
                  first_name='_test_first_name',
                  last_name='_test_last_name')
    assert_params()
    self.verify_results_page(
        1, all_have=('_test_first_name', '_test_last_name'))
    self.verify_click_search_result(0, assert_params)

    # For now, the date of birth should be hidden.
    assert 'birth' not in self.s.content.lower()
    self.verify_details_page(1)
    self.verify_note_form()
    self.verify_update_notes(
        False, '_test A note body', '_test A note author', None)
    self.verify_update_notes(
        True, '_test Another note body', '_test Another note author',
        None, last_known_location='Port-au-Prince')

    # Submit the create form with complete information
    self.s.submit(create_form,
                  author_name='_test_author_name',
                  author_email='_test_author_email',
                  author_phone='_test_author_phone',
                  clone='yes',
                  source_name='_test_source_name',
                  source_date=test_source_date,
                  source_url='_test_source_url',
                  first_name='_test_first_name',
                  last_name='_test_last_name',
                  sex='male',
                  date_of_birth='1970-01',
                  age='30-40',
                  home_street='_test_home_street',
                  home_neighborhood='_test_home_neighborhood',
                  home_city='_test_home_city',
                  home_state='_test_home_state',
                  home_postal_code='_test_home_postal_code',
                  home_country='_test_home_country',
                  photo_url='_test_photo_url',
                  expiry_option='20',
                  description='_test_description',
                  add_note='yes',
                  found='yes',
                  status='believed_alive',
                  email_of_found_person='_test_email_of_found_person',
                  phone_of_found_person='_test_phone_of_found_person',
                  last_known_location='_test_last_known_location',
                  text='_test A note body')
    self.verify_details_page(1, details={
        'Given name:': '_test_first_name',
        'Family name:': '_test_last_name',
        'Sex:': 'male',
        # 'Date of birth:': '1970-01', # currently hidden
        'Age:': '30-40',
        'Street name:': '_test_home_street',
        'Neighborhood:': '_test_home_neighborhood',
        'City:': '_test_home_city',
        'Province or state:': '_test_home_state',
        'Postal or zip code:': '_test_home_postal_code',
        'Home country:': '_test_home_country',
        'Author\'s name:': '_test_author_name',
        'Author\'s phone number:': '(click to reveal)',
        'Author\'s e-mail address:': '(click to reveal)',
        'Original URL:': 'Link',
        'Original posting date:': '2001-01-01 00:00 UTC',
        'Original site name:': '_test_source_name',
        'Expiry date of this record:': '2001-01-21 00:00 UTC'})
def test_multiview(self):
    """Test the page for marking duplicate records."""
    # Create three person records with distinct authors and details.
    db.put(Person(
        key_name='haiti:test.google.com/person.111',
        subdomain='haiti',
        author_name='_author_name_1',
        author_email='_author_email_1',
        author_phone='_author_phone_1',
        entry_date=utils.get_utcnow(),
        first_name='_first_name_1',
        last_name='_last_name_1',
        sex='male',
        date_of_birth='1970-01-01',
        age='31-41',
    ))
    db.put(Person(
        key_name='haiti:test.google.com/person.222',
        subdomain='haiti',
        author_name='_author_name_2',
        author_email='_author_email_2',
        author_phone='_author_phone_2',
        entry_date=utils.get_utcnow(),
        first_name='_first_name_2',
        last_name='_last_name_2',
        sex='male',
        date_of_birth='1970-02-02',
        age='32-42',
    ))
    db.put(Person(
        key_name='haiti:test.google.com/person.333',
        subdomain='haiti',
        author_name='_author_name_3',
        author_email='_author_email_3',
        author_phone='_author_phone_3',
        entry_date=utils.get_utcnow(),
        first_name='_first_name_3',
        last_name='_last_name_3',
        sex='male',
        date_of_birth='1970-03-03',
        age='33-43',
    ))

    # All three records should appear on the multiview page.
    doc = self.go('/multiview?subdomain=haiti' +
                  '&id1=test.google.com/person.111' +
                  '&id2=test.google.com/person.222' +
                  '&id3=test.google.com/person.333')
    assert '_first_name_1' in doc.content
    assert '_first_name_2' in doc.content
    assert '_first_name_3' in doc.content
    assert '31-41' in doc.content
    assert '32-42' in doc.content
    assert '33-43' in doc.content

    # Mark all three as duplicates.
    button = doc.firsttag('input', value='Yes, these are the same person')
    doc = self.s.submit(button, text='duplicate test', author_name='foo')

    # We should arrive back at the first record, with two duplicate notes.
    assert self.s.status == 200
    assert 'id=test.google.com%2Fperson.111' in self.s.url
    assert 'Possible duplicates' in doc.content
    assert '_first_name_2 _last_name_2' in doc.content
    assert '_first_name_3 _last_name_3' in doc.content

    # The datastore should link person.111 to the other two records.
    p = Person.get('haiti', 'test.google.com/person.111')
    assert len(p.get_linked_persons()) == 2

    # Ask for detailed information on the duplicate markings.
    doc = self.s.follow('Show who marked these duplicates')
    assert '_first_name_1' in doc.content
    notes = doc.all('div', class_='view note')
    assert len(notes) == 2, str(doc.content.encode('ascii', 'ignore'))
    assert 'Posted by foo' in notes[0].text
    assert 'duplicate test' in notes[0].text
    assert ('This record is a duplicate of test.google.com/person.222' in
            notes[0].text)
    assert 'Posted by foo' in notes[1].text
    assert 'duplicate test' in notes[1].text
    assert ('This record is a duplicate of test.google.com/person.333' in
            notes[1].text)
def test_reveal(self):
    """Test the hiding and revealing of contact information in the UI."""
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        author_name='_reveal_author_name',
        author_email='_reveal_author_email',
        author_phone='_reveal_author_phone',
        entry_date=utils.get_utcnow(),
        first_name='_reveal_first_name',
        last_name='_reveal_last_name',
        sex='male',
        date_of_birth='1970-01-01',
        age='30-40',
    ))
    db.put(Person(
        key_name='haiti:test.google.com/person.456',
        subdomain='haiti',
        author_name='_reveal_author_name',
        author_email='_reveal_author_email',
        author_phone='_reveal_author_phone',
        # NOTE(review): uses wall-clock now() rather than
        # utils.get_utcnow() as above -- confirm this is intentional.
        entry_date=datetime.datetime.now(),
        first_name='_reveal_first_name',
        last_name='_reveal_last_name',
        sex='male',
        date_of_birth='1970-01-01',
        age='30-40',
    ))
    # A note attached to person.123 with its own contact details.
    db.put(Note(
        key_name='haiti:test.google.com/note.456',
        subdomain='haiti',
        author_name='_reveal_note_author_name',
        author_email='_reveal_note_author_email',
        author_phone='_reveal_note_author_phone',
        entry_date=utils.get_utcnow(),
        email_of_found_person='_reveal_email_of_found_person',
        phone_of_found_person='_reveal_phone_of_found_person',
        person_record_id='test.google.com/person.123',
    ))

    # All contact information should be hidden by default.
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
    assert '_reveal_author_email' not in doc.content
    assert '_reveal_author_phone' not in doc.content
    assert '_reveal_note_author_email' not in doc.content
    assert '_reveal_note_author_phone' not in doc.content
    assert '_reveal_email_of_found_person' not in doc.content
    assert '_reveal_phone_of_found_person' not in doc.content

    # Clicking the '(click to reveal)' link should bring the user
    # to a captcha turing test page.
    reveal_region = doc.first('a', u'(click to reveal)')
    url = reveal_region.get('href', '')
    doc = self.go(url[url.find('/reveal'):])
    assert 'iframe' in doc.content
    assert 'recaptcha_response_field' in doc.content

    # Try to continue with an invalid captcha response. Get redirected
    # back to the same page.
    button = doc.firsttag('input', value='Proceed')
    doc = self.s.submit(button)
    assert 'iframe' in doc.content
    assert 'recaptcha_response_field' in doc.content

    # Continue as if captcha is valid. All information should be viewable.
    url = '/reveal?subdomain=haiti&id=test.google.com/person.123&' + \
        'test_mode=yes'
    doc = self.s.submit(button, url=url)
    assert '_reveal_author_email' in doc.content
    assert '_reveal_author_phone' in doc.content
    assert '_reveal_note_author_email' in doc.content
    assert '_reveal_note_author_phone' in doc.content
    assert '_reveal_email_of_found_person' in doc.content
    assert '_reveal_phone_of_found_person' in doc.content

    # Start over. Information should no longer be viewable.
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
    assert '_reveal_author_email' not in doc.content
    assert '_reveal_author_phone' not in doc.content
    assert '_reveal_note_author_email' not in doc.content
    assert '_reveal_note_author_phone' not in doc.content
    assert '_reveal_email_of_found_person' not in doc.content
    assert '_reveal_phone_of_found_person' not in doc.content

    # Other person's records should also be invisible.
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.456')
    assert '_reveal_author_email' not in doc.content
    assert '_reveal_author_phone' not in doc.content
    assert '_reveal_note_author_email' not in doc.content
    assert '_reveal_note_author_phone' not in doc.content
    assert '_reveal_email_of_found_person' not in doc.content
    assert '_reveal_phone_of_found_person' not in doc.content

    # All contact information should be hidden on the multiview page, too.
    doc = self.go('/multiview?subdomain=haiti' +
                  '&id1=test.google.com/person.123' +
                  '&id2=test.google.com/person.456')
    assert '_reveal_author_email' not in doc.content
    assert '_reveal_author_phone' not in doc.content
    assert '_reveal_note_author_email' not in doc.content
    assert '_reveal_note_author_phone' not in doc.content
    assert '_reveal_email_of_found_person' not in doc.content
    assert '_reveal_phone_of_found_person' not in doc.content

    # Now supply a valid revelation signature.
    signature = reveal.sign(u'multiview:test.google.com/person.123', 10)
    doc = self.go('/multiview?subdomain=haiti' +
                  '&id1=test.google.com/person.123' +
                  '&signature=' + signature)
    assert '_reveal_author_email' in doc.content
    assert '_reveal_author_phone' in doc.content
    # Notes are not shown on the multiview page.
def test_note_status(self):
"""Test the posting and viewing of the note status field in the UI."""
status_class = re.compile(r'\bstatus\b')
# Check that the right status options appear on the create page.
doc = self.go('/create?subdomain=haiti&role=provide')
note = doc.first(**{'class': 'note input'})
options = note.first('select', name='status').all('option')
assert len(options) == len(NOTE_STATUS_OPTIONS)
for option, text in zip(options, NOTE_STATUS_OPTIONS):
assert text in option.attrs['value']
# Create a record with no status and get the new record's ID.
form = doc.first('form')
doc = self.s.submit(form,
first_name='_test_first',
last_name='_test_last',
author_name='_test_author',
text='_test_text')
view_url = self.s.url
# Check that the right status options appear on the view page.
doc = self.s.go(view_url)
note = doc.first(**{'class': 'note input'})
options = note.first('select', name='status').all('option')
assert len(options) == len(NOTE_STATUS_OPTIONS)
for option, text in zip(options, NOTE_STATUS_OPTIONS):
assert text in option.attrs['value']
# Set the status in a note and check that it appears on the view page.
form = doc.first('form')
self.s.submit(form, author_name='_test_author2', text='_test_text',
status='believed_alive')
doc = self.s.go(view_url)
note = doc.last(**{'class': 'view note'})
assert 'believed_alive' in note.content
assert 'believed_dead' not in note.content
# Set status to is_note_author, but don't check found.
self.s.submit(form,
author_name='_test_author',
text='_test_text',
status='is_note_author')
self.assert_error_deadend(
self.s.submit(form,
author_name='_test_author',
text='_test_text',
status='is_note_author'),
'in contact', 'Status of this person')
    def test_api_write_pfif_1_2(self):
        """Post a single entry as PFIF 1.2 using the upload API.

        Uploads the canned test.pfif-1.2.xml file and then verifies, field
        by field, that the resulting Person and its four Notes were stored
        as expected, including the latest_* properties propagated from the
        Notes.
        """
        data = get_test_data('test.pfif-1.2.xml')
        # Freeze "now" so that entry_date assertions are deterministic.
        self.set_utcnow_for_test(self.default_test_time)
        self.go('/api/write?subdomain=haiti&key=test_key',
                data=data, type='application/xml')
        # Every person field from the uploaded XML should be stored verbatim.
        person = Person.get('haiti', 'test.google.com/person.21009')
        assert person.first_name == u'_test_first_name'
        assert person.last_name == u'_test_last_name'
        assert person.sex == u'female'
        assert person.date_of_birth == u'1970-01'
        assert person.age == u'35-45'
        assert person.author_name == u'_test_author_name'
        assert person.author_email == u'_test_author_email'
        assert person.author_phone == u'_test_author_phone'
        assert person.home_street == u'_test_home_street'
        assert person.home_neighborhood == u'_test_home_neighborhood'
        assert person.home_city == u'_test_home_city'
        assert person.home_state == u'_test_home_state'
        assert person.home_postal_code == u'_test_home_postal_code'
        assert person.home_country == u'US'
        assert person.record_id == u'test.google.com/person.21009'
        assert person.photo_url == u'_test_photo_url'
        assert person.source_name == u'_test_source_name'
        assert person.source_url == u'_test_source_url'
        assert person.source_date == datetime.datetime(2000, 1, 1, 0, 0, 0)
        # Current date should replace the provided entry_date.
        self.assertEqual(utils.get_utcnow(), person.entry_date)
        # The latest_status property should come from the third Note.
        assert person.latest_status == u'is_note_author'
        assert person.latest_status_source_date == \
            datetime.datetime(2000, 1, 18, 20, 21, 22)
        # The latest_found property should come from the fourth Note.
        assert person.latest_found == False
        assert person.latest_found_source_date == \
            datetime.datetime(2000, 1, 18, 20, 0, 0)
        notes = person.get_notes()
        assert len(notes) == 4
        notes.sort(key=lambda note: note.record_id)
        note = notes[0]
        assert note.author_name == u'_test_author_name'
        assert note.author_email == u'_test_author_email'
        assert note.author_phone == u'_test_author_phone'
        assert note.email_of_found_person == u'_test_email_of_found_person'
        assert note.phone_of_found_person == u'_test_phone_of_found_person'
        assert note.last_known_location == u'_test_last_known_location'
        assert note.record_id == u'test.google.com/note.27009'
        assert note.person_record_id == u'test.google.com/person.21009'
        assert note.text == u'_test_text'
        assert note.source_date == datetime.datetime(2000, 1, 16, 4, 5, 6)
        # Current date should replace the provided entry_date.
        assert note.entry_date == utils.get_utcnow()
        assert note.found == False
        assert note.status == u'believed_missing'
        assert note.linked_person_record_id == u'test.google.com/person.999'
        note = notes[1]
        assert note.author_name == u'inna-testing'
        assert note.author_email == u'inna-testing@gmail.com'
        assert note.author_phone == u'inna-testing-number'
        assert note.email_of_found_person == u''
        assert note.phone_of_found_person == u''
        assert note.last_known_location == u'19.16592425362802 -71.9384765625'
        assert note.record_id == u'test.google.com/note.31095'
        assert note.person_record_id == u'test.google.com/person.21009'
        assert note.text == u'new comment - testing'
        assert note.source_date == datetime.datetime(2000, 1, 17, 14, 15, 16)
        # Current date should replace the provided entry_date.
        assert note.entry_date.year == utils.get_utcnow().year
        assert note.found == True
        assert note.status == ''
        assert not note.linked_person_record_id
        # Just confirm that a missing <found> tag is parsed as None.
        # We already checked all the other fields above.
        note = notes[2]
        assert note.found == None
        assert note.status == u'is_note_author'
        note = notes[3]
        assert note.found == False
        assert note.status == u'believed_missing'
    def test_api_write_pfif_1_2_note(self):
        """Post a single note-only entry as PFIF 1.2 using the upload API.

        Pre-creates two Person records, uploads a note-only PFIF file, and
        verifies that each note attaches to the right person and that the
        found flag and status propagate (or don't) to the Person's latest_*
        properties.
        """
        # Freeze "now" so that entry_date assertions are deterministic.
        self.set_utcnow_for_test(self.default_test_time)
        # Create person records that the notes will attach to.
        Person(key_name='haiti:test.google.com/person.21009',
               subdomain='haiti',
               first_name='_test_first_name_1',
               last_name='_test_last_name_1',
               entry_date=datetime.datetime(2001, 1, 1, 1, 1, 1)).put()
        Person(key_name='haiti:test.google.com/person.21010',
               subdomain='haiti',
               first_name='_test_first_name_2',
               last_name='_test_last_name_2',
               entry_date=datetime.datetime(2002, 2, 2, 2, 2, 2)).put()
        data = get_test_data('test.pfif-1.2-note.xml')
        self.go('/api/write?subdomain=haiti&key=test_key',
                data=data, type='application/xml')
        # The first note should be attached to the first person.
        person = Person.get('haiti', 'test.google.com/person.21009')
        assert person
        notes = person.get_notes()
        assert len(notes) == 1
        note = notes[0]
        assert note.author_name == u'_test_author_name'
        assert note.author_email == u'_test_author_email'
        assert note.author_phone == u'_test_author_phone'
        assert note.email_of_found_person == u'_test_email_of_found_person'
        assert note.phone_of_found_person == u'_test_phone_of_found_person'
        assert note.last_known_location == u'_test_last_known_location'
        assert note.record_id == u'test.google.com/note.27009'
        assert note.person_record_id == u'test.google.com/person.21009'
        assert note.text == u'_test_text'
        assert note.source_date == datetime.datetime(2000, 1, 16, 7, 8, 9)
        # Current date should replace the provided entry_date.
        self.assertEqual(note.entry_date, utils.get_utcnow())
        assert note.found == False
        assert note.status == u'believed_missing'
        assert note.linked_person_record_id == u'test.google.com/person.999'
        # Found flag and status should have propagated to the Person.
        assert person.latest_found == False
        assert person.latest_found_source_date == note.source_date
        assert person.latest_status == u'believed_missing'
        assert person.latest_status_source_date == note.source_date
        # The second note should be attached to the second person.
        person = Person.get('haiti', 'test.google.com/person.21010')
        assert person
        notes = person.get_notes()
        assert len(notes) == 1
        note = notes[0]
        assert note.author_name == u'inna-testing'
        assert note.author_email == u'inna-testing@gmail.com'
        assert note.author_phone == u'inna-testing-number'
        assert note.email_of_found_person == u''
        assert note.phone_of_found_person == u''
        assert note.last_known_location == u'19.16592425362802 -71.9384765625'
        assert note.record_id == u'test.google.com/note.31095'
        assert note.person_record_id == u'test.google.com/person.21010'
        assert note.text == u'new comment - testing'
        assert note.source_date == datetime.datetime(2000, 1, 17, 17, 18, 19)
        # Current date should replace the provided entry_date.
        assert note.entry_date == utils.get_utcnow()
        assert note.found is None
        assert note.status == u'is_note_author'
        assert not note.linked_person_record_id
        # Status should have propagated to the Person, but not found.
        assert person.latest_found is None
        assert person.latest_found_source_date is None
        assert person.latest_status == u'is_note_author'
        assert person.latest_status_source_date == note.source_date
    def test_api_write_pfif_1_1(self):
        """Post a single entry as PFIF 1.1 using the upload API.

        PFIF 1.1 has no status field and uses home_zip instead of
        home_postal_code; this verifies the 1.1 upload path stores the
        person and both notes correctly.
        """
        data = get_test_data('test.pfif-1.1.xml')
        # Freeze "now" so that entry_date assertions are deterministic.
        self.set_utcnow_for_test(self.default_test_time)
        self.go('/api/write?subdomain=haiti&key=test_key',
                data=data, type='application/xml')
        person = Person.get('haiti', 'test.google.com/person.21009')
        assert person.first_name == u'_test_first_name'
        assert person.last_name == u'_test_last_name'
        assert person.author_name == u'_test_author_name'
        assert person.author_email == u'_test_author_email'
        assert person.author_phone == u'_test_author_phone'
        assert person.home_city == u'_test_home_city'
        assert person.home_street == u'_test_home_street'
        assert person.home_neighborhood == u'_test_home_neighborhood'
        assert person.home_state == u'_test_home_state'
        assert person.home_postal_code == u'_test_home_zip'
        assert person.record_id == u'test.google.com/person.21009'
        assert person.photo_url == u'_test_photo_url'
        assert person.source_name == u'_test_source_name'
        assert person.source_url == u'_test_source_url'
        assert person.source_date == datetime.datetime(2000, 1, 1, 0, 0, 0)
        # Current date should replace the provided entry_date.
        self.assertEqual(utils.get_utcnow(), person.entry_date)
        # The latest_found property should come from the first Note.
        self.assertTrue(person.latest_found)
        assert person.latest_found_source_date == \
            datetime.datetime(2000, 1, 16, 1, 2, 3)
        # There's no status field in PFIF 1.1.
        assert person.latest_status == ''
        assert person.latest_status_source_date is None
        notes = person.get_notes()
        assert len(notes) == 2
        notes.sort(key=lambda note: note.record_id)
        note = notes[0]
        assert note.author_name == u'_test_author_name'
        assert note.author_email == u'_test_author_email'
        assert note.author_phone == u'_test_author_phone'
        assert note.email_of_found_person == u'_test_email_of_found_person'
        assert note.phone_of_found_person == u'_test_phone_of_found_person'
        assert note.last_known_location == u'_test_last_known_location'
        assert note.record_id == u'test.google.com/note.27009'
        assert note.text == u'_test_text'
        assert note.source_date == datetime.datetime(2000, 1, 16, 1, 2, 3)
        # Current date should replace the provided entry_date.
        assert note.entry_date == utils.get_utcnow()
        assert note.found == True
        note = notes[1]
        assert note.author_name == u'inna-testing'
        assert note.author_email == u'inna-testing@gmail.com'
        assert note.author_phone == u'inna-testing-number'
        assert note.email_of_found_person == u''
        assert note.phone_of_found_person == u''
        assert note.last_known_location == u'19.16592425362802 -71.9384765625'
        assert note.record_id == u'test.google.com/note.31095'
        assert note.text == u'new comment - testing'
        assert note.source_date == datetime.datetime(2000, 1, 17, 11, 12, 13)
        # Current date should replace the provided entry_date.
        assert note.entry_date.year == utils.get_utcnow().year
        # A missing <found> tag is parsed as None.
        assert note.found is None
def test_api_write_bad_key(self):
"""Attempt to post an entry with an invalid API key."""
data = get_test_data('test.pfif-1.2.xml')
self.go('/api/write?subdomain=haiti&key=bad_key',
data=data, type='application/xml')
assert self.s.status == 403
def test_api_write_empty_record(self):
"""Verify that empty entries are accepted."""
doc = self.go('/api/write?subdomain=haiti&key=test_key',
data='''
<pfif xmlns="http://zesty.ca/pfif/1.2">
<person>
<person_record_id>test.google.com/person.empty</person_record_id>
</person>
</pfif>''', type='application/xml')
# The Person record should have been accepted.
person_status = doc.first('status:write')
assert person_status.first('status:written').text == '1'
# An empty Person entity should be in the datastore.
person = Person.get('haiti', 'test.google.com/person.empty')
def test_api_write_wrong_domain(self):
"""Attempt to post an entry with a domain that doesn't match the key."""
data = get_test_data('test.pfif-1.2.xml')
doc = self.go('/api/write?subdomain=haiti&key=other_key',
data=data, type='application/xml')
# The Person record should have been rejected.
person_status = doc.first('status:write')
assert person_status.first('status:written').text == '0'
assert ('Not in authorized domain' in
person_status.first('status:error').text)
# Both of the Note records should have been rejected.
note_status = person_status.next('status:write')
assert note_status.first('status:written').text == '0'
first_error = note_status.first('status:error')
second_error = first_error.next('status:error')
assert 'Not in authorized domain' in first_error.text
assert 'Not in authorized domain' in second_error.text
    def test_api_read(self):
        """Fetch a single record as PFIF (1.1, 1.2 and 1.3) via the read API.

        Seeds one Person with one Note, then compares the API output
        byte-for-byte against the expected serialization for each PFIF
        version, with and without full read authorization.
        """
        # Freeze "now" so that serialized entry_date values are predictable.
        self.set_utcnow_for_test(self.default_test_time)
        db.put(Person(
            key_name='haiti:test.google.com/person.123',
            subdomain='haiti',
            entry_date=utils.get_utcnow(),
            author_email='_read_author_email',
            author_name='_read_author_name',
            author_phone='_read_author_phone',
            first_name='_read_first_name',
            last_name='_read_last_name',
            full_name="_first_dot_last",
            sex='female',
            date_of_birth='1970-01-01',
            age='40-50',
            home_city='_read_home_city',
            home_neighborhood='_read_home_neighborhood',
            home_state='_read_home_state',
            home_street='_read_home_street',
            home_postal_code='_read_home_postal_code',
            home_country='_read_home_country',
            other='_read_other & < > "',
            photo_url='_read_photo_url',
            source_name='_read_source_name',
            source_url='_read_source_url',
            source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
        ))
        db.put(Note(
            key_name='haiti:test.google.com/note.456',
            subdomain='haiti',
            author_email='_read_author_email',
            author_name='_read_author_name',
            author_phone='_read_author_phone',
            email_of_found_person='_read_email_of_found_person',
            last_known_location='_read_last_known_location',
            person_record_id='test.google.com/person.123',
            linked_person_record_id='test.google.com/person.888',
            phone_of_found_person='_read_phone_of_found_person',
            text='_read_text',
            source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
            entry_date=utils.get_utcnow(), #datetime.datetime(2006, 6, 6, 6, 6, 6),
            found=True,
            status='believed_missing'
        ))
        # Fetch a PFIF 1.1 document.
        # Note that author_email, author_phone, email_of_found_person, and
        # phone_of_found_person are omitted intentionally (see
        # utils.filter_sensitive_fields).
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.1')
        expected_content = \
'''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.1">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>_read_author_name</pfif:author_name>
    <pfif:source_name>_read_source_name</pfif:source_name>
    <pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
    <pfif:source_url>_read_source_url</pfif:source_url>
    <pfif:first_name>_read_first_name</pfif:first_name>
    <pfif:last_name>_read_last_name</pfif:last_name>
    <pfif:home_city>_read_home_city</pfif:home_city>
    <pfif:home_state>_read_home_state</pfif:home_state>
    <pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
    <pfif:home_street>_read_home_street</pfif:home_street>
    <pfif:home_zip>_read_home_postal_code</pfif:home_zip>
    <pfif:photo_url>_read_photo_url</pfif:photo_url>
    <pfif:other>_read_other &amp; &lt; &gt; "</pfif:other>
    <pfif:note>
      <pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
      <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
      <pfif:author_name>_read_author_name</pfif:author_name>
      <pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
      <pfif:found>true</pfif:found>
      <pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
      <pfif:text>_read_text</pfif:text>
    </pfif:note>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
        # Fetch a PFIF 1.2 document.
        # Note that date_of_birth, author_email, author_phone,
        # email_of_found_person, and phone_of_found_person are omitted
        # intentionally (see utils.filter_sensitive_fields).
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.2')
        expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>_read_author_name</pfif:author_name>
    <pfif:source_name>_read_source_name</pfif:source_name>
    <pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
    <pfif:source_url>_read_source_url</pfif:source_url>
    <pfif:first_name>_read_first_name</pfif:first_name>
    <pfif:last_name>_read_last_name</pfif:last_name>
    <pfif:sex>female</pfif:sex>
    <pfif:age>40-50</pfif:age>
    <pfif:home_street>_read_home_street</pfif:home_street>
    <pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
    <pfif:home_city>_read_home_city</pfif:home_city>
    <pfif:home_state>_read_home_state</pfif:home_state>
    <pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
    <pfif:home_country>_read_home_country</pfif:home_country>
    <pfif:photo_url>_read_photo_url</pfif:photo_url>
    <pfif:other>_read_other &amp; &lt; &gt; "</pfif:other>
    <pfif:note>
      <pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
      <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
      <pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
      <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
      <pfif:author_name>_read_author_name</pfif:author_name>
      <pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
      <pfif:found>true</pfif:found>
      <pfif:status>believed_missing</pfif:status>
      <pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
      <pfif:text>_read_text</pfif:text>
    </pfif:note>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
        # Verify that PFIF 1.2 is the default version.
        default_doc = self.go(
            '/api/read?subdomain=haiti&id=test.google.com/person.123')
        assert default_doc.content == doc.content
        # Fetch a PFIF 1.3 document.
        # Note that date_of_birth, author_email, author_phone,
        # email_of_found_person, and phone_of_found_person are omitted
        # intentionally (see utils.filter_sensitive_fields).
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.3')
        expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>_read_author_name</pfif:author_name>
    <pfif:source_name>_read_source_name</pfif:source_name>
    <pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
    <pfif:source_url>_read_source_url</pfif:source_url>
    <pfif:full_name>_first_dot_last</pfif:full_name>
    <pfif:first_name>_read_first_name</pfif:first_name>
    <pfif:last_name>_read_last_name</pfif:last_name>
    <pfif:sex>female</pfif:sex>
    <pfif:age>40-50</pfif:age>
    <pfif:home_street>_read_home_street</pfif:home_street>
    <pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
    <pfif:home_city>_read_home_city</pfif:home_city>
    <pfif:home_state>_read_home_state</pfif:home_state>
    <pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
    <pfif:home_country>_read_home_country</pfif:home_country>
    <pfif:photo_url>_read_photo_url</pfif:photo_url>
    <pfif:other>_read_other &amp; &lt; &gt; "</pfif:other>
    <pfif:note>
      <pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
      <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
      <pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
      <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
      <pfif:author_name>_read_author_name</pfif:author_name>
      <pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
      <pfif:found>true</pfif:found>
      <pfif:status>believed_missing</pfif:status>
      <pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
      <pfif:text>_read_text</pfif:text>
    </pfif:note>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
        # Fetch a PFIF 1.2 document, with full read authorization.
        # With full_read_key, the sensitive fields are included.
        doc = self.go('/api/read?subdomain=haiti&key=full_read_key' +
                      '&id=test.google.com/person.123&version=1.2')
        expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>_read_author_name</pfif:author_name>
    <pfif:author_email>_read_author_email</pfif:author_email>
    <pfif:author_phone>_read_author_phone</pfif:author_phone>
    <pfif:source_name>_read_source_name</pfif:source_name>
    <pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
    <pfif:source_url>_read_source_url</pfif:source_url>
    <pfif:first_name>_read_first_name</pfif:first_name>
    <pfif:last_name>_read_last_name</pfif:last_name>
    <pfif:sex>female</pfif:sex>
    <pfif:date_of_birth>1970-01-01</pfif:date_of_birth>
    <pfif:age>40-50</pfif:age>
    <pfif:home_street>_read_home_street</pfif:home_street>
    <pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
    <pfif:home_city>_read_home_city</pfif:home_city>
    <pfif:home_state>_read_home_state</pfif:home_state>
    <pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
    <pfif:home_country>_read_home_country</pfif:home_country>
    <pfif:photo_url>_read_photo_url</pfif:photo_url>
    <pfif:other>_read_other &amp; &lt; &gt; "</pfif:other>
    <pfif:note>
      <pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
      <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
      <pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
      <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
      <pfif:author_name>_read_author_name</pfif:author_name>
      <pfif:author_email>_read_author_email</pfif:author_email>
      <pfif:author_phone>_read_author_phone</pfif:author_phone>
      <pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
      <pfif:found>true</pfif:found>
      <pfif:status>believed_missing</pfif:status>
      <pfif:email_of_found_person>_read_email_of_found_person</pfif:email_of_found_person>
      <pfif:phone_of_found_person>_read_phone_of_found_person</pfif:phone_of_found_person>
      <pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
      <pfif:text>_read_text</pfif:text>
    </pfif:note>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
    def test_read_key(self):
        """Verifies that when read_auth_key_required is set, an authorization
        key is required to read data from the API or feeds.

        Each of /api/read, /feeds/person and /feeds/note is tried with no
        key, with a non-read key, and with a valid read key; only the last
        should succeed.  The config flag is restored in the finally block.
        """
        db.put(Person(
            key_name='haiti:test.google.com/person.123',
            subdomain='haiti',
            entry_date=utils.get_utcnow(),
            author_email='_read_author_email',
            author_name='_read_author_name',
            author_phone='_read_author_phone',
            first_name='_read_first_name',
            last_name='_read_last_name',
            sex='female',
            date_of_birth='1970-01-01',
            age='40-50',
            home_city='_read_home_city',
            home_neighborhood='_read_home_neighborhood',
            home_state='_read_home_state',
            home_street='_read_home_street',
            home_postal_code='_read_home_postal_code',
            home_country='_read_home_country',
            other='_read_other & < > "',
            photo_url='_read_photo_url',
            source_name='_read_source_name',
            source_url='_read_source_url',
            source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
        ))
        db.put(Note(
            key_name='haiti:test.google.com/note.456',
            subdomain='haiti',
            author_email='_read_author_email',
            author_name='_read_author_name',
            author_phone='_read_author_phone',
            email_of_found_person='_read_email_of_found_person',
            last_known_location='_read_last_known_location',
            person_record_id='test.google.com/person.123',
            linked_person_record_id='test.google.com/person.888',
            phone_of_found_person='_read_phone_of_found_person',
            text='_read_text',
            source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
            entry_date=datetime.datetime(2006, 6, 6, 6, 6, 6),
            found=True,
            status='believed_missing'
        ))
        config.set_for_subdomain('haiti', read_auth_key_required=True)
        try:
            # Fetch a PFIF 1.2 document from a domain that requires a read key.
            # Without an authorization key, the request should fail.
            doc = self.go('/api/read?subdomain=haiti' +
                          '&id=test.google.com/person.123&version=1.1')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a non-read authorization key, the request should fail.
            doc = self.go('/api/read?subdomain=haiti&key=test_key' +
                          '&id=test.google.com/person.123&version=1.1')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a valid read authorization key, the request should succeed.
            doc = self.go('/api/read?subdomain=haiti&key=read_key' +
                          '&id=test.google.com/person.123&version=1.2')
            assert '_read_first_name' in doc.content
            # Fetch the person feed from a domain that requires a read key.
            # Without an authorization key, the request should fail.
            doc = self.go('/feeds/person?subdomain=haiti')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a non-read authorization key, the request should fail.
            doc = self.go('/feeds/person?subdomain=haiti&key=test_key')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a valid read authorization key, the request should succeed.
            doc = self.go('/feeds/person?subdomain=haiti&key=read_key')
            assert '_read_author_name' in doc.content
            # Fetch the note feed from a domain that requires a read key.
            # Without an authorization key, the request should fail.
            doc = self.go('/feeds/note?subdomain=haiti')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a non-read authorization key, the request should fail.
            doc = self.go('/feeds/note?subdomain=haiti&key=test_key')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a valid read authorization key, the request should succeed.
            doc = self.go('/feeds/note?subdomain=haiti&key=read_key')
            assert '_read_text' in doc.content
        finally:
            # Restore the config so later tests are unaffected.
            config.set_for_subdomain('haiti', read_auth_key_required=False)
    def test_api_read_with_non_ascii(self):
        """Fetch a record containing non-ASCII characters using the read API.
        This tests both PFIF 1.1 and 1.2.

        The expected documents are byte strings: each \\xNN pair below is one
        byte of the UTF-8 encoding of the corresponding Unicode character.
        """
        # Freeze "now" so that serialized entry_date values are predictable.
        self.set_utcnow_for_test(self.default_test_time)
        expiry_date = self.default_test_time + datetime.timedelta(1,0,0)
        db.put(Person(
            key_name='haiti:test.google.com/person.123',
            subdomain='haiti',
            entry_date=utils.get_utcnow(),
            expiry_date=expiry_date,
            author_name=u'a with acute = \u00e1',
            source_name=u'c with cedilla = \u00e7',
            source_url=u'e with acute = \u00e9',
            full_name=u'arabic alif = \u0627',
            first_name=u'greek alpha = \u03b1',
            last_name=u'hebrew alef = \u05d0'
        ))
        # Fetch a PFIF 1.1 document.
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.1')
        expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.1">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
    <pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
    <pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
    <pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
    <pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
        # Fetch a PFIF 1.2 document.
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.2')
        assert re.match(r'''<\?xml version="1.0" encoding="UTF-8"\?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
    <pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
    <pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
    <pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
    <pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
  </pfif:person>
</pfif:pfif>
''', doc.content)
        # Verify that PFIF 1.2 is the default version.
        default_doc = self.go(
            '/api/read?subdomain=haiti&id=test.google.com/person.123')
        assert default_doc.content == doc.content, \
            text_diff(default_doc.content, doc.content)
        # Fetch a PFIF 1.3 document.
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=test.google.com/person.123&version=1.3')
        expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
    <pfif:expiry_date>2010-01-03T03:04:05Z</pfif:expiry_date>
    <pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
    <pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
    <pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
    <pfif:full_name>arabic alif = \xd8\xa7</pfif:full_name>
    <pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
    <pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
  </pfif:person>
</pfif:pfif>
'''
        assert expected_content == doc.content, \
            text_diff(expected_content, doc.content)
        # Verify that PFIF 1.3 is not the default version.
        default_doc = self.go(
            '/api/read?subdomain=haiti&id=test.google.com/person.123')
        assert default_doc.content != doc.content
    def test_search_api(self):
        """Verifies that search API works and returns person and notes correctly.
        Also check that it optionally requires search_auth_key_.

        Creates two people sharing a first name (each with one note), then
        exercises /api/search with no key, a non-search key, a valid key,
        a multi-match query, a no-match query, and finally without any key
        once search_auth_key_required is turned off.
        """
        # Add a first person to datastore.
        self.go('/create?subdomain=haiti')
        self.s.submit(self.s.doc.first('form'),
                      first_name='_search_first_name',
                      last_name='_search_lastname',
                      author_name='_search_author_name')
        # Add a note for this person.
        self.s.submit(self.s.doc.first('form'),
                      found='yes',
                      text='this is text for first person',
                      author_name='_search_note_author_name')
        # Add a 2nd person with same firstname but different lastname.
        self.go('/create?subdomain=haiti')
        self.s.submit(self.s.doc.first('form'),
                      first_name='_search_first_name',
                      last_name='_search_2ndlastname',
                      author_name='_search_2nd_author_name')
        # Add a note for this 2nd person.
        self.s.submit(self.s.doc.first('form'),
                      found='yes',
                      text='this is text for second person',
                      author_name='_search_note_2nd_author_name')
        config.set_for_subdomain('haiti', search_auth_key_required=True)
        try:
            # Make a search without a key, it should fail as config requires
            # a search_key.
            doc = self.go('/api/search?subdomain=haiti' +
                          '&q=_search_lastname')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a non-search authorization key, the request should fail.
            doc = self.go('/api/search?subdomain=haiti&key=test_key' +
                          '&q=_search_lastname')
            assert self.s.status == 403
            assert 'Missing or invalid authorization key' in doc.content
            # With a valid search authorization key, the request should succeed.
            doc = self.go('/api/search?subdomain=haiti&key=search_key' +
                          '&q=_search_lastname')
            assert self.s.status not in [403,404]
            # Make sure we return the first record and not the 2nd one.
            assert '_search_first_name' in doc.content
            assert '_search_2ndlastname' not in doc.content
            # Check we also retrieved the first note and not the second one.
            assert '_search_note_author_name' in doc.content
            assert '_search_note_2nd_author_name' not in doc.content
            # Check that we can retrieve several persons matching a query
            # and check their notes are also retrieved.
            doc = self.go('/api/search?subdomain=haiti&key=search_key' +
                          '&q=_search_first_name')
            assert self.s.status not in [403,404]
            # Check we found the 2 records.
            assert '_search_lastname' in doc.content
            assert '_search_2ndlastname' in doc.content
            # Check we also retrieved the notes.
            assert '_search_note_author_name' in doc.content
            assert '_search_note_2nd_author_name' in doc.content
            # If no results are found we return an empty pfif file
            doc = self.go('/api/search?subdomain=haiti&key=search_key' +
                          '&q=_wrong_last_name')
            assert self.s.status not in [403,404]
            empty_pfif = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
</pfif:pfif>
'''
            assert (empty_pfif == doc.content)
            # Check that we can get results without a key if no key is required.
            config.set_for_subdomain('haiti', search_auth_key_required=False)
            doc = self.go('/api/search?subdomain=haiti' +
                          '&q=_search_first_name')
            assert self.s.status not in [403,404]
            # Check we found 2 records.
            assert '_search_lastname' in doc.content
            assert '_search_2ndlastname' in doc.content
            # Check we also retrieved the notes.
            assert '_search_note_author_name' in doc.content
            assert '_search_note_2nd_author_name' in doc.content
        finally:
            # Restore the config so later tests are unaffected.
            config.set_for_subdomain('haiti', search_auth_key_required=False)
def test_person_feed(self):
    """Fetch a single person using the PFIF Atom feed.

    Exercises three access levels against the same stored record: the
    default feed (sensitive fields filtered), the omit_notes=yes variant
    (nested notes excluded), and a full read key (sensitive fields such
    as author_email and date_of_birth included).
    """
    self.set_utcnow_for_test(self.default_test_time)
    # Store a Person with every field populated so each PFIF 1.2 person
    # element appears in the feed output.
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        author_email='_feed_author_email',
        author_name='_feed_author_name',
        author_phone='_feed_author_phone',
        first_name='_feed_first_name',
        last_name='_feed_last_name',
        sex='male',
        date_of_birth='1975',
        age='30-40',
        home_street='_feed_home_street',
        home_neighborhood='_feed_home_neighborhood',
        home_city='_feed_home_city',
        home_state='_feed_home_state',
        home_postal_code='_feed_home_postal_code',
        home_country='_feed_home_country',
        other='_feed_other & < > "',
        photo_url='_feed_photo_url',
        source_name='_feed_source_name',
        source_url='_feed_source_url',
        source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
    ))
    # An associated Note that should be nested inside the person entry.
    db.put(Note(
        key_name='haiti:test.google.com/note.456',
        subdomain='haiti',
        author_email='_feed_author_email',
        author_name='_feed_author_name',
        author_phone='_feed_author_phone',
        email_of_found_person='_feed_email_of_found_person',
        last_known_location='_feed_last_known_location',
        person_record_id='test.google.com/person.123',
        linked_person_record_id='test.google.com/person.888',
        phone_of_found_person='_feed_phone_of_found_person',
        text='_feed_text',
        source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
        entry_date=utils.get_utcnow(),
        found=True,
        status='is_note_author'
    ))
    # sanity check: the stored note's entry_date matches the test clock.
    note = Note.get('haiti', 'test.google.com/note.456')
    self.debug_print('Note entry_date: %s' % note.entry_date)
    self.assertEqual(note.entry_date, utils.get_utcnow())
    note = None
    # Feeds use PFIF 1.2.
    # Note that date_of_birth, author_email, author_phone,
    # email_of_found_person, and phone_of_found_person are omitted
    # intentionally (see utils.filter_sensitive_fields).
    doc = self.go('/feeds/person?subdomain=haiti')
    # The feed body is compared byte-for-byte against this template.
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>is_note_author</pfif:status>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
    # Test the omit_notes parameter: same person entry, no <pfif:note>.
    doc = self.go('/feeds/person?subdomain=haiti&omit_notes=yes')
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti&omit_notes=yes</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&omit_notes=yes</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
    # Fetch the entry, with full read authorization: sensitive fields
    # (author_email, author_phone, date_of_birth, *_of_found_person)
    # are now present.
    doc = self.go('/feeds/person?subdomain=haiti&key=full_read_key')
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti&key=full_read_key</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&key=full_read_key</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:author_email>_feed_author_email</pfif:author_email>
<pfif:author_phone>_feed_author_phone</pfif:author_phone>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:date_of_birth>1975</pfif:date_of_birth>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:author_email>_feed_author_email</pfif:author_email>
<pfif:author_phone>_feed_author_phone</pfif:author_phone>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>is_note_author</pfif:status>
<pfif:email_of_found_person>_feed_email_of_found_person</pfif:email_of_found_person>
<pfif:phone_of_found_person>_feed_phone_of_found_person</pfif:phone_of_found_person>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
<email>_feed_author_email</email>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
def test_note_feed(self):
    """Fetch a single note using the PFIF Atom feed."""
    # Minimal person record for the note to attach to.
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        first_name='_feed_first_name',
        last_name='_feed_last_name',
    ))
    db.put(Note(
        key_name='haiti:test.google.com/note.456',
        subdomain='haiti',
        person_record_id='test.google.com/person.123',
        linked_person_record_id='test.google.com/person.888',
        author_email='_feed_author_email',
        author_name='_feed_author_name',
        author_phone='_feed_author_phone',
        email_of_found_person='_feed_email_of_found_person',
        last_known_location='_feed_last_known_location',
        phone_of_found_person='_feed_phone_of_found_person',
        text='_feed_text',
        source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
        entry_date=datetime.datetime(2006, 6, 6, 6, 6, 6),
        found=True,
        status='believed_dead'
    ))
    # Feeds use PFIF 1.2.
    # Note that author_email, author_phone, email_of_found_person, and
    # phone_of_found_person are omitted intentionally (see
    # utils.filter_sensitive_fields).
    doc = self.go('/feeds/note?subdomain=haiti')
    # The feed body is compared byte-for-byte against this template.
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/note?subdomain=haiti</id>
<title>%s</title>
<updated>2006-06-06T06:06:06Z</updated>
<link rel="self">http://%s/feeds/note?subdomain=haiti</link>
<entry>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2006-06-06T06:06:06Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>believed_dead</pfif:status>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
<id>pfif:test.google.com/note.456</id>
<title>_feed_text</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2006-06-06T06:06:06Z</updated>
<content>_feed_text</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
def test_person_feed_with_bad_chars(self):
    """Fetch a person whose fields contain characters that are not
    legally representable in XML, using the PFIF Atom feed."""
    # See: http://www.w3.org/TR/REC-xml/#charsets
    self.set_utcnow_for_test(self.default_test_time)
    # \x01 and \x1a are control characters outside the XML Char range;
    # \ud800 is an unpaired surrogate.
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        author_name=u'illegal character (\x01)',
        first_name=u'illegal character (\x1a)',
        last_name=u'illegal character (\ud800)',
        source_date=datetime.datetime(2001, 2, 3, 4, 5, 6)
    ))
    # Note that author_email, author_phone, email_of_found_person, and
    # phone_of_found_person are omitted intentionally (see
    # utils.filter_sensitive_fields).
    doc = self.go('/feeds/person?subdomain=haiti')
    # The expected output shows the illegal characters dropped entirely,
    # leaving 'illegal character ()' in each affected field.
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>illegal character ()</pfif:author_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:first_name>illegal character ()</pfif:first_name>
<pfif:last_name>illegal character ()</pfif:last_name>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>illegal character () illegal character ()</title>
<author>
<name>illegal character ()</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>illegal character () illegal character ()</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
def test_person_feed_with_non_ascii(self):
    """Fetch a person whose fields contain non-ASCII characters,
    using the PFIF Atom feed."""
    self.set_utcnow_for_test(self.default_test_time)
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        author_name=u'a with acute = \u00e1',
        source_name=u'c with cedilla = \u00e7',
        source_url=u'e with acute = \u00e9',
        first_name=u'greek alpha = \u03b1',
        last_name=u'hebrew alef = \u05d0',
        source_date=datetime.datetime(2001, 2, 3, 4, 5, 6)
    ))
    # Note that author_email, author_phone, email_of_found_person, and
    # phone_of_found_person are omitted intentionally (see
    # utils.filter_sensitive_fields).
    doc = self.go('/feeds/person?subdomain=haiti')
    # The expected byte strings below (\xc3\xa1 etc.) are the UTF-8
    # encodings of the unicode characters stored above.
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
<pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
<pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
<pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>greek alpha = \xce\xb1 hebrew alef = \xd7\x90</title>
<author>
<name>a with acute = \xc3\xa1</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>greek alpha = \xce\xb1 hebrew alef = \xd7\x90</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
def test_person_feed_parameters(self):
    """Test the max_results, skip, and min_entry_date parameters."""
    # Create 20 persons whose entry times are hour/minute/second i
    # on 2000-01-01, so chronological order matches record number.
    persons = []
    for i in range(1, 21):
        persons.append(Person(
            key_name='haiti:test.google.com/person.%d' % i,
            subdomain='haiti',
            entry_date=datetime.datetime(2000, 1, 1, i, i, i),
            first_name='first.%d' % i,
            last_name='last.%d' % i
        ))
    db.put(persons)

    def assert_ids(*expected):
        # Compare the numeric record ids, in the order the feed emits them.
        found = re.findall(r'record_id>test.google.com/person.(\d+)',
                           self.s.doc.content)
        assert [int(number) for number in found] == list(expected)

    # Should get records in reverse chronological order by default.
    doc = self.go('/feeds/person?subdomain=haiti')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11)
    # Fewer results.
    doc = self.go('/feeds/person?subdomain=haiti&max_results=1')
    assert_ids(20)
    doc = self.go('/feeds/person?subdomain=haiti&max_results=9')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12)
    # More results.
    doc = self.go('/feeds/person?subdomain=haiti&max_results=12')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9)
    # Skip some results.
    doc = self.go('/feeds/person?subdomain=haiti&skip=12&max_results=5')
    assert_ids(8, 7, 6, 5, 4)
    # Should get records in forward chronological order with min_entry_date.
    doc = self.go('/feeds/person?subdomain=haiti'
                  '&min_entry_date=2000-01-01T18:18:18Z')
    assert_ids(18, 19, 20)
    doc = self.go('/feeds/person?subdomain=haiti'
                  '&min_entry_date=2000-01-01T03:03:03Z')
    assert_ids(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    doc = self.go('/feeds/person?subdomain=haiti'
                  '&min_entry_date=2000-01-01T03:03:04Z')
    assert_ids(4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
def test_note_feed_parameters(self):
    """Test the max_results, skip, min_entry_date, and person_record_id
    parameters."""
    # person.1 owns notes 1-5 and 18-20; person.2 owns notes 6-17.
    # Every record's entry time is hour/minute/second i on 2000-01-01.
    records = []
    for i in range(1, 3):  # Create person.1 and person.2.
        records.append(Person(
            key_name='haiti:test.google.com/person.%d' % i,
            subdomain='haiti',
            entry_date=datetime.datetime(2000, 1, 1, i, i, i),
            first_name='first',
            last_name='last'
        ))
    note_batches = [
        (range(1, 6), 'test.google.com/person.1'),    # notes 1-5
        (range(6, 18), 'test.google.com/person.2'),   # notes 6-17
        (range(18, 21), 'test.google.com/person.1'),  # notes 18-20
    ]
    for numbers, owner_record_id in note_batches:
        for i in numbers:
            records.append(Note(
                key_name='haiti:test.google.com/note.%d' % i,
                subdomain='haiti',
                person_record_id=owner_record_id,
                entry_date=datetime.datetime(2000, 1, 1, i, i, i)
            ))
    db.put(records)

    def assert_ids(*expected):
        # Compare the numeric note ids, in the order the feed emits them.
        found = re.findall(r'record_id>test.google.com/note.(\d+)',
                           self.s.doc.content)
        assert [int(number) for number in found] == list(expected)

    # Should get records in reverse chronological order by default.
    doc = self.go('/feeds/note?subdomain=haiti')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11)
    # Fewer results.
    doc = self.go('/feeds/note?subdomain=haiti&max_results=1')
    assert_ids(20)
    doc = self.go('/feeds/note?subdomain=haiti&max_results=9')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12)
    # More results.
    doc = self.go('/feeds/note?subdomain=haiti&max_results=12')
    assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9)
    # Skip some results.
    doc = self.go('/feeds/note?subdomain=haiti&skip=12&max_results=5')
    assert_ids(8, 7, 6, 5, 4)
    # Should get records in forward chronological order.
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&min_entry_date=2000-01-01T18:18:18Z')
    assert_ids(18, 19, 20)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&min_entry_date=2000-01-01T03:03:03Z')
    assert_ids(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&min_entry_date=2000-01-01T03:03:04Z')
    assert_ids(4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
    # Filter by person_record_id.
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.1')
    assert_ids(20, 19, 18, 5, 4, 3, 2, 1)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.2')
    assert_ids(17, 16, 15, 14, 13, 12, 11, 10, 9, 8)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.2'
                  '&max_results=11')
    assert_ids(17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.1'
                  '&min_entry_date=2000-01-01T03:03:03Z')
    assert_ids(3, 4, 5, 18, 19, 20)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.1'
                  '&min_entry_date=2000-01-01T03:03:04Z')
    assert_ids(4, 5, 18, 19, 20)
    doc = self.go('/feeds/note?subdomain=haiti'
                  '&person_record_id=test.google.com/person.2'
                  '&min_entry_date=2000-01-01T06:06:06Z')
    assert_ids(6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
def test_api_read_status(self):
    """Test the reading of the note status field at /api/read and /feeds."""
    # The three outgoing formats that should agree on status handling.
    read_urls = ['/api/read?subdomain=haiti'
                 '&id=test.google.com/person.1001',
                 '/feeds/person?subdomain=haiti',
                 '/feeds/note?subdomain=haiti']

    def assert_no_status_tag():
        # None of the outgoing formats should emit a status element.
        for url in read_urls:
            assert '<pfif:status>' not in self.go(url).content

    # A missing status should not appear as a tag.
    db.put(Person(
        key_name='haiti:test.google.com/person.1001',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        first_name='_status_first_name',
        last_name='_status_last_name',
        author_name='_status_author_name'
    ))
    assert_no_status_tag()
    # An unspecified status should not appear as a tag.
    db.put(Note(
        key_name='haiti:test.google.com/note.2002',
        subdomain='haiti',
        person_record_id='test.google.com/person.1001',
        entry_date=utils.get_utcnow()
    ))
    assert_no_status_tag()
    # An empty status should not appear as a tag.
    db.put(Note(
        key_name='haiti:test.google.com/note.2002',
        subdomain='haiti',
        person_record_id='test.google.com/person.1001',
        status='',
        entry_date=utils.get_utcnow()
    ))
    assert_no_status_tag()
    # When the status is specified, it should appear in the feed.
    db.put(Note(
        key_name='haiti:test.google.com/note.2002',
        subdomain='haiti',
        person_record_id='test.google.com/person.1001',
        entry_date=utils.get_utcnow(),
        status='believed_alive'
    ))
    for url in read_urls:
        assert ('<pfif:status>believed_alive</pfif:status>'
                in self.go(url).content)
def test_delete_clone(self):
    """Confirms that attempting to delete clone records produces the
    appropriate UI message."""
    now, person, note = self.setup_person_and_note('test.google.com')
    # The view page should offer a Delete button for the record.
    page = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
    delete_button = page.firsttag('input', value='Delete this record')
    # The confirmation page should warn that a clone may come back.
    page = self.s.submit(delete_button)
    assert 'we might later receive another copy' in page.text
    # Confirm the deletion.
    confirm_button = page.firsttag('input', value='Yes, delete the record')
    page = self.s.submit(confirm_button)
    # Without a valid captcha, the user should be redirected back to the
    # same confirmation page.
    assert ('delete the record for "_test_first_name '
            '_test_last_name"') in page.text
    assert 'incorrect-captcha-sol' in page.content
    # Continue with a valid captcha (faked, for purpose of test). Check the
    # sent messages for proper notification of related e-mail accounts.
    page = self.s.go(
        '/delete',
        data='subdomain=haiti&id=test.google.com/person.123&'
             'reason_for_deletion=spam_received&test_mode=yes')
    # Both entities should be gone.
    assert not db.get(person.key())
    assert not db.get(note.key())
    # Clone deletion cannot be undone, so no e-mail should have been sent.
    assert len(MailThread.messages) == 0
def setup_person_and_note(self, domain='haiti.person-finder.appspot.com'):
    """Puts a Person with associated Note into the datastore, returning
    (now, person, note) for testing. This creates an original record
    by default; to make a clone record, pass in a domain name."""
    # Freeze the clock so source/entry dates are predictable.
    test_time = datetime.datetime(2010, 1, 1, 0, 0, 0)
    self.set_utcnow_for_test(test_time)
    person = Person(
        key_name='haiti:%s/person.123' % domain,
        subdomain='haiti',
        first_name='_test_first_name',
        last_name='_test_last_name',
        author_name='_test_author_name',
        author_email='test@example.com',
        source_date=test_time,
        entry_date=test_time
    )
    person.update_index(['old', 'new'])
    note = Note(
        key_name='haiti:%s/note.456' % domain,
        subdomain='haiti',
        person_record_id='%s/person.123' % domain,
        author_email='test2@example.com',
        text='Testing',
        source_date=test_time,
        entry_date=test_time
    )
    db.put([person, note])
    return test_time, person, note
def setup_photo(self, person):
    """Stores a Photo for the given person, for testing."""
    # Store the image entity first, then attach it to the person record.
    stored_photo = Photo(bin_data='xyz')
    stored_photo.put()
    person.photo_url = '_test_photo_url'
    person.photo = stored_photo
    person.put()
    return stored_photo
def test_photo(self):
    """Checks that a stored photo can be retrieved."""
    now, person, note = self.setup_person_and_note()
    photo = self.setup_photo(person)
    # Fetch the photo by its datastore id and check the raw bytes.
    response = self.go('/photo?id=%s&subdomain=haiti' % photo.key().id())
    assert response.content == 'xyz'
def test_delete_and_restore(self):
"""Checks that deleting a record through the UI, then undeleting
it using the link in the deletion notification, causes the record to
disappear and reappear correctly, produces e-mail notifications,
and has the correct effect on the outgoing API and feeds."""
now, person, note = self.setup_person_and_note()
photo = self.setup_photo(person)
# Advance time by one day.
now = datetime.datetime(2010, 1, 2, 0, 0, 0)
self.set_utcnow_for_test(now)
# Visit the page and click the button to delete a record.
doc = self.go('/view?subdomain=haiti&' +
'id=haiti.person-finder.appspot.com/person.123')
button = doc.firsttag('input', value='Delete this record')
doc = self.s.submit(button)
assert 'delete the record for "_test_first_name ' + \
'_test_last_name"' in doc.text
button = doc.firsttag('input', value='Yes, delete the record')
doc = self.s.submit(button)
# Check to make sure that the user was redirected to the same page due
# to an invalid captcha.
assert 'delete the record for "_test_first_name ' + \
'_test_last_name"' in doc.text
assert 'incorrect-captcha-sol' in doc.content
# Continue with a valid captcha (faked, for purpose of test). Check the
# sent messages for proper notification of related e-mail accounts.
doc = self.s.go(
'/delete',
data='subdomain=haiti&' +
'id=haiti.person-finder.appspot.com/person.123&' +
'reason_for_deletion=spam_received&test_mode=yes')
assert len(MailThread.messages) == 2
messages = sorted(MailThread.messages, key=lambda m: m['to'][0])
# After sorting by recipient, the second message should be to the
# person author, test@example.com (sorts after test2@example.com).
assert messages[1]['to'] == ['test@example.com']
words = ' '.join(messages[1]['data'].split())
assert ('Subject: [Person Finder] Deletion notice for ' +
'"_test_first_name _test_last_name"' in words)
assert 'the author of this record' in words
assert 'restore it by following this link' in words
restore_url = re.search('(/restore.*)', messages[1]['data']).group(1)
# The first message should be to the note author, test2@example.com.
assert messages[0]['to'] == ['test2@example.com']
words = ' '.join(messages[0]['data'].split())
assert ('Subject: [Person Finder] Deletion notice for ' +
'"_test_first_name _test_last_name"' in words)
assert 'the author of a note on this record' in words
assert 'restore it by following this link' not in words
# The Person and Note records should now be marked expired.
person = db.get(person.key())
assert person.is_expired
assert person.source_date == now
assert person.entry_date == now
assert person.expiry_date == now
note = db.get(note.key())
assert note.is_expired
# The Person and Note records should be inaccessible.
assert not Person.get('haiti', person.record_id)
assert not Note.get('haiti', note.record_id)
# Make sure that a UserActionLog row was created.
last_log_entry = UserActionLog.all().order('-time').get()
assert last_log_entry
assert last_log_entry.action == 'delete'
assert last_log_entry.entity_kind == 'Person'
assert (last_log_entry.entity_key_name ==
'haiti:haiti.person-finder.appspot.com/person.123')
assert last_log_entry.reason == 'spam_received'
assert Photo.get_by_id(photo.key().id())
# Search for the record. Make sure it does not show up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' in doc.text
# The read API should expose an expired record.
doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# The outgoing person feed should contain an expired record.
doc = self.go('/feeds/person?subdomain=haiti&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.3">
<id>http://%s/feeds/person?subdomain=haiti&version=1.3</id>
<title>%s</title>
<updated>2010-01-02T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&version=1.3</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<author>
</author>
<updated>2010-01-02T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
doc = self.go('/feeds/person?subdomain=haiti') # PFIF 1.2
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:first_name></pfif:first_name>
<pfif:last_name></pfif:last_name>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<author>
</author>
<updated>2010-01-02T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Advance time by one day.
now = datetime.datetime(2010, 1, 3, 0, 0, 0)
self.set_utcnow_for_test(now)
# Restore the record using the URL in the e-mail. Clicking the link
# should take you to a CAPTCHA page to confirm.
doc = self.go(restore_url)
assert 'captcha' in doc.content
# Fake a valid captcha and actually reverse the deletion
url = restore_url + '&test_mode=yes'
doc = self.s.submit(button, url=url)
assert 'Identifying information' in doc.text
assert '_test_first_name _test_last_name' in doc.text
assert Person.get('haiti', 'haiti.person-finder.appspot.com/person.123')
note = Note.get('haiti', 'haiti.person-finder.appspot.com/note.456')
assert note
self.assertEquals([note.record_id],
[n.record_id for n in person.get_notes()])
assert 'Testing' in doc.text, \
'Testing not in: %s' % str(doc.text.encode('ascii', 'ignore'))
new_id = self.s.url[
self.s.url.find('haiti'):self.s.url.find('&subdomain')]
new_id = new_id.replace('%2F', '/')
# Make sure that Person/Note records are now visible, with all
# of their original attributes from prior to deletion.
person = Person.get_by_key_name('haiti:' + new_id)
notes = Note.get_by_person_record_id('haiti', person.record_id)
assert person
assert len(notes) == 1
assert person.author_name == '_test_author_name'
assert person.author_email == 'test@example.com'
assert person.first_name == '_test_first_name'
assert person.last_name == '_test_last_name'
assert person.photo_url == '_test_photo_url'
assert person.subdomain == 'haiti'
assert person.source_date == now
assert person.entry_date == now
assert person.expiry_date == now + datetime.timedelta(60, 0, 0)
assert not person.is_expired
assert notes[0].author_email == 'test2@example.com'
assert notes[0].text == 'Testing'
assert notes[0].person_record_id == new_id
assert not notes[0].is_expired
# Search for the record. Make sure it shows up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' not in doc.text
# The read API should show a record with all the fields present,
# as if the record was just written with new field values.
doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-03-04T00:00:00Z</pfif:expiry_date>
<pfif:author_name>_test_author_name</pfif:author_name>
<pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
<pfif:first_name>_test_first_name</pfif:first_name>
<pfif:last_name>_test_last_name</pfif:last_name>
<pfif:photo_url>_test_photo_url</pfif:photo_url>
<pfif:note>
<pfif:note_record_id>haiti.person-finder.appspot.com/note.456</pfif:note_record_id>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-01T00:00:00Z</pfif:entry_date>
<pfif:author_name></pfif:author_name>
<pfif:source_date>2010-01-01T00:00:00Z</pfif:source_date>
<pfif:text>Testing</pfif:text>
</pfif:note>
</pfif:pfif>
'''
# The outgoing feed should contain a complete record also.
doc = self.go('/feeds/person?subdomain=haiti&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.3">
<id>http://%s/feeds/person?subdomain=haiti&version=1.3</id>
<title>%s</title>
<updated>2010-01-03T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&version=1.3</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-03-04T00:00:00Z</pfif:expiry_date>
<pfif:author_name>_test_author_name</pfif:author_name>
<pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
<pfif:first_name>_test_first_name</pfif:first_name>
<pfif:last_name>_test_last_name</pfif:last_name>
<pfif:photo_url>_test_photo_url</pfif:photo_url>
<pfif:note>
<pfif:note_record_id>haiti.person-finder.appspot.com/note.456</pfif:note_record_id>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-01T00:00:00Z</pfif:entry_date>
<pfif:author_name></pfif:author_name>
<pfif:source_date>2010-01-01T00:00:00Z</pfif:source_date>
<pfif:text>Testing</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<title>_test_first_name _test_last_name</title>
<author>
<name>_test_author_name</name>
</author>
<updated>2010-01-03T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
<content>_test_first_name _test_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Confirm that restoration notifications were sent.
assert len(MailThread.messages) == 4
messages = sorted(MailThread.messages[2:], key=lambda m: m['to'][0])
# After sorting by recipient, the second message should be to the
# person author, test@example.com (sorts after test2@example.com).
assert messages[1]['to'] == ['test@example.com']
words = ' '.join(messages[1]['data'].split())
assert ('Subject: [Person Finder] Record restoration notice for ' +
'"_test_first_name _test_last_name"' in words)
# The first message should be to the note author, test2@example.com.
assert messages[0]['to'] == ['test2@example.com']
words = ' '.join(messages[0]['data'].split())
assert ('Subject: [Person Finder] Record restoration notice for ' +
'"_test_first_name _test_last_name"' in words)
def test_delete_and_wipe(self):
    """Checks that deleting a record through the UI, then waiting until
    after the expiration grace period ends, causes the record to
    disappear and be deleted permanently from the datastore, leaving
    behind the appropriate placeholder in the outgoing API and feeds."""
    now, person, note = self.setup_person_and_note()
    photo = self.setup_photo(person)

    # Advance time by one day.
    now = datetime.datetime(2010, 1, 2, 0, 0, 0)
    self.set_utcnow_for_test(now)

    # Simulate a deletion request with a valid Turing test response.
    # (test_delete_and_restore already tests this flow in more detail.)
    doc = self.s.go('/delete',
                    data='subdomain=haiti&' +
                         'id=haiti.person-finder.appspot.com/person.123&' +
                         'reason_for_deletion=spam_received&test_mode=yes')

    # Run the DeleteExpired task.
    doc = self.s.go('/tasks/delete_expired')

    # The Person and Note records should be marked expired but retain data.
    person = db.get(person.key())
    assert person.is_expired
    assert person.first_name == '_test_first_name'
    assert person.source_date == now
    assert person.entry_date == now
    assert person.expiry_date == now
    note = db.get(note.key())
    assert note.is_expired
    assert note.text == 'Testing'

    # The Photo should still be there.
    assert db.get(photo.key())

    # The Person and Note records should be inaccessible.
    assert not Person.get('haiti', person.record_id)
    assert not Note.get('haiti', note.record_id)

    # Search for the record.  Make sure it does not show up.
    doc = self.go('/results?subdomain=haiti&role=seek&' +
                  'query=_test_first_name+_test_last_name')
    assert 'No results found' in doc.text

    # The read API should expose an expired record (placeholder only:
    # just the record ID, timestamps, and an empty full_name).
    doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3')  # PFIF 1.3
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
  <pfif:person>
    <pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
    <pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
    <pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
    <pfif:full_name></pfif:full_name>
  </pfif:person>
</pfif:pfif>
'''
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)

    # Advance time past the end of the expiration grace period.
    now = datetime.datetime(2010, 1, 6, 0, 0, 0)
    self.set_utcnow_for_test(now)

    # Run the DeleteExpired task.
    doc = self.s.go('/tasks/delete_expired')

    # The Person record should still exist but now be empty.
    # The timestamps should be unchanged.
    person = db.get(person.key())
    assert person.is_expired
    assert person.first_name == None
    assert person.source_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
    assert person.entry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
    assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)

    # The Note and Photo should be gone.
    assert not db.get(note.key())
    assert not db.get(photo.key())

    # The placeholder exposed by the read API should be unchanged.
    doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3')  # PFIF 1.3
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)

    # The Person and Note records should be inaccessible.
    assert not Person.get('haiti', person.record_id)
    assert not Note.get('haiti', note.record_id)

    # Search for the record.  Make sure it does not show up.
    doc = self.go('/results?subdomain=haiti&role=seek&' +
                  'query=_test_first_name+_test_last_name')
    assert 'No results found' in doc.text
def test_incoming_expired_record(self):
    """Tests that an incoming expired record can cause an existing record
    to expire and be deleted."""
    now, person, note = self.setup_person_and_note('test.google.com')
    assert person.first_name == '_test_first_name'

    # Advance time by one day.
    now = datetime.datetime(2010, 1, 2, 0, 0, 0)
    self.set_utcnow_for_test(now)

    # Simulate the arrival of an update that expires this record.
    # NOTE(review): the incoming source_date is in 2001 while the other
    # timestamps are 2010 -- presumably intentional (the asserts below
    # only depend on expiry_date); confirm.
    data = '''\
<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
    <pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
    <pfif:source_date>2001-01-02T00:00:00Z</pfif:source_date>
    <pfif:full_name></pfif:full_name>
  </pfif:person>
</pfif:pfif>
'''
    self.go('/api/write?subdomain=haiti&key=test_key',
            data=data, type='application/xml')

    # Advance time by one day.
    now = datetime.datetime(2010, 1, 3, 0, 0, 0)
    self.set_utcnow_for_test(now)

    # Run the DeleteExpired task.
    self.s.go('/tasks/delete_expired').content

    # The Person record should be hidden but not yet gone.
    # The timestamps should reflect the time that the record was hidden.
    assert not Person.get('haiti', person.record_id)
    person = db.get(person.key())
    assert person.is_expired
    assert person.first_name == ''
    assert person.source_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
    assert person.entry_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
    assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)

    # The Note record should be hidden but not yet gone.
    assert not Note.get('haiti', note.record_id)
    assert db.get(note.key())

    # The read API should expose an expired record.
    doc = self.go('/api/read?subdomain=haiti&id=test.google.com/person.123&version=1.3')  # PFIF 1.3
    expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
  <pfif:person>
    <pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
    <pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
    <pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
    <pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
    <pfif:full_name></pfif:full_name>
  </pfif:person>
</pfif:pfif>
'''
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)

    # Advance time by three more days (past the expiration grace period).
    now = datetime.datetime(2010, 1, 6, 0, 0, 0)
    self.set_utcnow_for_test(now)

    # Run the DeleteExpired task.
    self.s.go('/tasks/delete_expired').content

    # The Person record should still exist but now be empty.
    # The timestamps should be unchanged.
    person = db.get(person.key())
    assert person.is_expired
    assert person.first_name is None
    assert person.source_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
    assert person.entry_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
    assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)

    # The Note record should be gone.
    assert not db.get(note.key())

    # The read API should show the same expired record as before.
    doc = self.go('/api/read?subdomain=haiti&id=test.google.com/person.123&version=1.3')  # PFIF 1.3
    assert expected_content == doc.content, \
        text_diff(expected_content, doc.content)
def test_mark_notes_as_spam(self):
    """Tests flagging a note as spam and then unflagging it, including
    the captcha flow and the UserActionLog entries created by each
    action."""
    # Fix: entry_date previously used datetime.datetime.now(), which is
    # naive *local* time and bypasses the utils.get_utcnow() test clock
    # used by the rest of this file (cf. the Note fixture below).
    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        author_name='_test_author_name',
        author_email='test@example.com',
        first_name='_test_first_name',
        last_name='_test_last_name',
        entry_date=utils.get_utcnow()
    ))
    db.put(Note(
        key_name='haiti:test.google.com/note.456',
        subdomain='haiti',
        author_email='test2@example.com',
        person_record_id='test.google.com/person.123',
        entry_date=utils.get_utcnow(),
        text='Testing'
    ))
    person = Person.get('haiti', 'test.google.com/person.123')
    assert len(person.get_notes()) == 1
    assert Note.get('haiti', 'test.google.com/note.456')

    # Visit the page and click the button to mark a note as spam.
    # Bring up confirmation page.
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
    doc = self.s.follow('Report spam')
    assert 'Are you sure' in doc.text
    assert 'Testing' in doc.text
    # Flagging a note as spam requires no captcha.
    assert 'captcha' not in doc.content

    button = doc.firsttag('input', value='Yes, update the note')
    doc = self.s.submit(button)
    assert 'Status updates for this person' in doc.text
    assert 'This note has been marked as spam.' in doc.text
    assert 'Not spam' in doc.text
    assert 'Reveal note' in doc.text

    # When a note is flagged, these new links appear.
    assert doc.first('a', id='reveal-note')
    assert doc.first('a', id='hide-note')
    # When a note is flagged, the contents of the note are hidden.
    assert doc.first('div', class_='contents')['style'] == 'display: none;'

    # Make sure that a UserActionLog entry was created.
    assert len(UserActionLog.all().fetch(10)) == 1

    # Unmark the note as spam; this flow does require a captcha.
    doc = self.s.follow('Not spam')
    assert 'Are you sure' in doc.text
    assert 'Testing' in doc.text
    assert 'captcha' in doc.content

    # An incorrect captcha response redirects back with an error.
    doc = self.s.submit(button)
    assert 'incorrect-captcha-sol' in doc.content
    assert 'Are you sure' in doc.text
    assert 'Testing' in doc.text

    # Fake a valid captcha response via test_mode and complete the unflag.
    url = '/flag_note?subdomain=haiti&id=test.google.com/note.456&' + \
          'test_mode=yes'
    doc = self.s.submit(button, url=url)
    assert 'This note has been marked as spam.' not in doc.text
    assert 'Status updates for this person' in doc.text
    assert 'Report spam' in doc.text

    # Make sure that a second UserActionLog entry was created.
    assert len(UserActionLog.all().fetch(10)) == 2
def test_subscriber_notifications(self):
    """Tests that a notification e-mail is sent to a subscriber, in the
    subscriber's language, when a note is added to a record."""
    SUBSCRIBER = 'example1@example.com'

    db.put(Person(
        key_name='haiti:test.google.com/person.123',
        subdomain='haiti',
        author_name='_test_author_name',
        author_email='test@example.com',
        first_name='_test_first_name',
        last_name='_test_last_name',
        entry_date=datetime.datetime.utcnow(),
    ))
    db.put(Note(
        key_name='haiti:test.google.com/note.456',
        subdomain='haiti',
        person_record_id='test.google.com/person.123',
        text='Testing',
        entry_date=datetime.datetime.utcnow(),
    ))
    # A French-language subscription to updates on the person record.
    db.put(Subscription(
        key_name='haiti:test.google.com/person.123:example1@example.com',
        subdomain='haiti',
        person_record_id='test.google.com/person.123',
        email=SUBSCRIBER,
        language='fr'
    ))

    # Reset the MailThread queue _before_ making any requests
    # to the server, else risk errantly deleting messages
    MailThread.messages = []

    # Visit the details page and add a note, triggering notification
    # to the subscriber
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
    self.verify_details_page(1)
    self.verify_note_form()
    self.verify_update_notes(False, '_test A note body',
                             '_test A note author',
                             status='information_sought')
    self.verify_email_sent()

    # Verify the notification's recipient, sender, and contents.
    message = MailThread.messages[0]
    assert message['to'] == [SUBSCRIBER]
    assert 'do-not-reply@' in message['from']
    assert '_test_first_name _test_last_name' in message['data']
    # Subscription is French, email should be, too
    assert 'recherche des informations' in message['data']
    assert '_test A note body' in message['data']
    assert 'view?id=test.google.com%2Fperson.123' in message['data']
def test_subscribe_and_unsubscribe(self):
    """Tests subscribing to notifications on status updating, covering
    validation errors, the captcha flow, re-subscription, language
    changes, and the unsubscribe link in the confirmation e-mail."""
    SUBSCRIBE_EMAIL = 'testsubscribe@example.com'
    db.put(Person(
        key_name='haiti:test.google.com/person.111',
        subdomain='haiti',
        author_name='_test_author_name',
        author_email='test@example.com',
        first_name='_test_first_name',
        last_name='_test_last_name',
        entry_date=datetime.datetime.utcnow()
    ))
    person = Person.get('haiti', 'test.google.com/person.111')

    # Reset the MailThread queue _before_ making any requests
    # to the server, else risk errantly deleting messages
    MailThread.messages = []

    # Creating a record with 'subscribe' checked leads to the
    # subscription page for the new record.
    d = self.go('/create?subdomain=haiti')
    doc = self.s.submit(d.first('form'),
                        first_name='_test_first',
                        last_name='_test_last',
                        author_name='_test_author',
                        subscribe='on')
    assert 'Subscribe to updates about _test_first _test_last' in doc.text

    # Empty email is an error.
    button = doc.firsttag('input', value='Subscribe')
    doc = self.s.submit(button)
    assert 'Invalid e-mail address. Please try again.' in doc.text
    assert len(person.get_subscriptions()) == 0

    # Invalid captcha response is an error
    button = doc.firsttag('input', value='Subscribe')
    doc = self.s.submit(button, subscribe_email=SUBSCRIBE_EMAIL)
    assert 'iframe' in doc.content
    assert 'recaptcha_response_field' in doc.content
    assert len(person.get_subscriptions()) == 0

    # Invalid email is an error (even with valid captcha)
    INVALID_EMAIL = 'test@example'
    url = ('/subscribe?subdomain=haiti&id=test.google.com/person.111&'
           'test_mode=yes')
    doc = self.s.submit(button, url=url, paramdict={'subscribe_email':
                                                    INVALID_EMAIL})
    assert 'Invalid e-mail address. Please try again.' in doc.text
    assert len(person.get_subscriptions()) == 0

    # Valid email and captcha is success
    url = ('/subscribe?subdomain=haiti&id=test.google.com/person.111&'
           'test_mode=yes')
    doc = self.s.submit(button, url=url, paramdict={'subscribe_email':
                                                    SUBSCRIBE_EMAIL})
    assert 'successfully subscribed. ' in doc.text
    assert '_test_first_name _test_last_name' in doc.text
    subscriptions = person.get_subscriptions()
    assert len(subscriptions) == 1
    assert subscriptions[0].email == SUBSCRIBE_EMAIL
    assert subscriptions[0].language == 'en'

    # Already subscribed person is shown info page
    doc = self.s.submit(button, url=url, paramdict={'subscribe_email':
                                                    SUBSCRIBE_EMAIL})
    assert 'already subscribed. ' in doc.text
    assert 'for _test_first_name _test_last_name' in doc.text
    assert len(person.get_subscriptions()) == 1

    # A confirmation e-mail should have been sent to the subscriber.
    self.verify_email_sent()
    message = MailThread.messages[0]
    assert message['to'] == [SUBSCRIBE_EMAIL]
    assert 'do-not-reply@' in message['from']
    assert '_test_first_name _test_last_name' in message['data']
    assert 'view?id=test.google.com%2Fperson.111' in message['data']

    # Already subscribed person with new language is success
    url = url + '&lang=fr'
    doc = self.s.submit(button, url=url, paramdict={'subscribe_email':
                                                    SUBSCRIBE_EMAIL})
    assert 'successfully subscribed. ' in doc.text
    assert '_test_first_name _test_last_name' in doc.text
    subscriptions = person.get_subscriptions()
    assert len(subscriptions) == 1
    assert subscriptions[0].email == SUBSCRIBE_EMAIL
    assert subscriptions[0].language == 'fr'

    # Test the unsubscribe link in the email
    unsub_url = re.search('(/unsubscribe.*)', message['data']).group(1)
    doc = self.go(unsub_url)
    assert 'successfully unsubscribed' in doc.content
    assert len(person.get_subscriptions()) == 0
def test_config_use_family_name(self):
    """Checks that the use_family_name config setting controls whether
    separate given-name and family-name fields appear on the create and
    view pages."""
    # use_family_name=True
    d = self.go('/create?subdomain=haiti')
    assert d.first('label', for_='first_name').text.strip() == 'Given name:'
    assert d.first('label', for_='last_name').text.strip() == 'Family name:'
    assert d.firsttag('input', name='first_name')
    assert d.firsttag('input', name='last_name')
    self.s.submit(d.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  author_name='_test_author')
    person = Person.all().get()
    d = self.go('/view?id=%s&subdomain=haiti' % person.record_id)
    f = d.first('table', class_='fields').all('tr')
    assert f[0].first('td', class_='label').text.strip() == 'Given name:'
    assert f[0].first('td', class_='field').text.strip() == '_test_first'
    assert f[1].first('td', class_='label').text.strip() == 'Family name:'
    assert f[1].first('td', class_='field').text.strip() == '_test_last'
    person.delete()

    # use_family_name=False
    d = self.go('/create?subdomain=pakistan')
    assert d.first('label', for_='first_name').text.strip() == 'Name:'
    assert not d.all('label', for_='last_name')
    assert d.firsttag('input', name='first_name')
    assert not d.alltags('input', name='last_name')
    assert 'Given name' not in d.text
    assert 'Family name' not in d.text
    self.s.submit(d.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  author_name='_test_author')
    person = Person.all().get()
    d = self.go(
        '/view?id=%s&subdomain=pakistan' % person.record_id)
    f = d.first('table', class_='fields').all('tr')
    assert f[0].first('td', class_='label').text.strip() == 'Name:'
    assert f[0].first('td', class_='field').text.strip() == '_test_first'
    assert 'Given name' not in d.text
    assert 'Family name' not in d.text
    # The submitted family name must not appear anywhere on the page.
    assert '_test_last' not in d.first('body').text
    person.delete()
def test_config_family_name_first(self):
    """Checks that the family_name_first config setting controls the
    order of the given-name and family-name fields on the create page
    and of the corresponding rows on the view page."""
    # family_name_first=True: family-name label and input come first.
    page = self.go('/create?subdomain=china')
    label_given = page.first('label', for_='first_name')
    label_family = page.first('label', for_='last_name')
    assert label_given.text.strip() == 'Given name:'
    assert label_family.text.strip() == 'Family name:'
    assert label_family.start < label_given.start
    input_given = page.firsttag('input', name='first_name')
    input_family = page.firsttag('input', name='last_name')
    assert input_family.start < input_given.start

    self.s.submit(page.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  author_name='_test_author')
    record = Person.all().get()
    page = self.go('/view?id=%s&subdomain=china' % record.record_id)
    rows = page.first('table', class_='fields').all('tr')
    assert rows[0].first('td', class_='label').text.strip() == 'Family name:'
    assert rows[0].first('td', class_='field').text.strip() == '_test_last'
    assert rows[1].first('td', class_='label').text.strip() == 'Given name:'
    assert rows[1].first('td', class_='field').text.strip() == '_test_first'
    record.delete()

    # family_name_first=False: given-name label and input come first.
    page = self.go('/create?subdomain=haiti')
    label_given = page.first('label', for_='first_name')
    label_family = page.first('label', for_='last_name')
    assert label_given.text.strip() == 'Given name:'
    assert label_family.text.strip() == 'Family name:'
    assert label_given.start < label_family.start
    input_given = page.firsttag('input', name='first_name')
    input_family = page.firsttag('input', name='last_name')
    assert input_given.start < input_family.start

    self.s.submit(page.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  author_name='_test_author')
    record = Person.all().get()
    page = self.go('/view?id=%s&subdomain=haiti' % record.record_id)
    rows = page.first('table', class_='fields').all('tr')
    assert rows[0].first('td', class_='label').text.strip() == 'Given name:'
    assert rows[0].first('td', class_='field').text.strip() == '_test_first'
    assert rows[1].first('td', class_='label').text.strip() == 'Family name:'
    assert rows[1].first('td', class_='field').text.strip() == '_test_last'
    record.delete()
def test_config_use_postal_code(self):
    """Checks that the use_postal_code config setting controls whether
    the postal-code field appears on the create page and whether its
    value is shown on the view page."""
    # use_postal_code=True: the field exists and its value is displayed.
    page = self.go('/create?subdomain=haiti')
    assert page.first('label', for_='home_postal_code')
    assert page.firsttag('input', name='home_postal_code')
    self.s.submit(page.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  home_postal_code='_test_12345',
                  author_name='_test_author')
    record = Person.all().get()
    page = self.go('/view?id=%s&subdomain=haiti' % record.record_id)
    assert 'Postal or zip code' in page.text
    assert '_test_12345' in page.text
    record.delete()

    # use_postal_code=False: no field, and the value is never shown.
    page = self.go('/create?subdomain=pakistan')
    assert not page.all('label', for_='home_postal_code')
    assert not page.alltags('input', name='home_postal_code')
    self.s.submit(page.first('form'),
                  first_name='_test_first',
                  last_name='_test_last',
                  home_postal_code='_test_12345',
                  author_name='_test_author')
    record = Person.all().get()
    page = self.go('/view?id=%s&subdomain=pakistan' % record.record_id)
    assert 'Postal or zip code' not in page.text
    assert '_test_12345' not in page.text
    record.delete()
class PersonNoteCounterTests(TestsBase):
"""Tests that modify Person, Note, and Counter entities in the datastore
go here. The contents of the datastore will be reset for each test."""
kinds_written_by_tests = [Person, Note, Counter]
def test_tasks_count(self):
"""Tests the counting task."""
# Add two Persons and two Notes in the 'haiti' subdomain.
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
author_name='_test1_author_name',
entry_date=utils.get_utcnow(),
first_name='_test1_first_name',
last_name='_test1_last_name',
sex='male',
date_of_birth='1970-01-01',
age='50-60',
latest_status='believed_missing'
))
db.put(Note(
key_name='haiti:test.google.com/note.123',
subdomain='haiti',
person_record_id='haiti:test.google.com/person.123',
entry_date=utils.get_utcnow(),
status='believed_missing'
))
db.put(Person(
key_name='haiti:test.google.com/person.456',
subdomain='haiti',
author_name='_test2_author_name',
entry_date=utils.get_utcnow(),
first_name='_test2_first_name',
last_name='_test2_last_name',
sex='female',
date_of_birth='1970-02-02',
age='30-40',
latest_found=True
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
person_record_id='haiti:test.google.com/person.456',
entry_date=utils.get_utcnow(),
found=True
))
# Run the counting task (should finish counting in a single run).
doc = self.go('/tasks/count/person?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
# Check the resulting counters.
assert Counter.get_count('haiti', 'person.all') == 2
assert Counter.get_count('haiti', 'person.sex=male') == 1
assert Counter.get_count('haiti', 'person.sex=female') == 1
assert Counter.get_count('haiti', 'person.sex=other') == 0
assert Counter.get_count('haiti', 'person.found=TRUE') == 1
assert Counter.get_count('haiti', 'person.found=') == 1
assert Counter.get_count('haiti', 'person.status=believed_missing') == 1
assert Counter.get_count('haiti', 'person.status=') == 1
assert Counter.get_count('pakistan', 'person.all') == 0
# Add a Person in the 'pakistan' subdomain.
db.put(Person(
key_name='pakistan:test.google.com/person.789',
subdomain='pakistan',
author_name='_test3_author_name',
entry_date=utils.get_utcnow(),
first_name='_test3_first_name',
last_name='_test3_last_name',
sex='male',
date_of_birth='1970-03-03',
age='30-40',
))
# Re-run the counting tasks for both subdomains.
doc = self.go('/tasks/count/person?subdomain=haiti')
doc = self.go('/tasks/count/person?subdomain=pakistan')
# Check the resulting counters.
assert Counter.get_count('haiti', 'person.all') == 2
assert Counter.get_count('pakistan', 'person.all') == 1
# Check that the counted value shows up correctly on the main page.
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=5L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=86L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=278L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking about 300 records' in doc.text
def test_admin_dashboard(self):
"""Visits the dashboard page and makes sure it doesn't crash."""
db.put(Counter(scan_name='Person', subdomain='haiti', last_key='',
count_all=278))
db.put(Counter(scan_name='Person', subdomain='pakistan', last_key='',
count_all=127))
db.put(Counter(scan_name='Note', subdomain='haiti', last_key='',
count_all=12))
db.put(Counter(scan_name='Note', subdomain='pakistan', last_key='',
count_all=8))
assert self.get_url_as_admin('/admin/dashboard')
assert self.s.status == 200
class ConfigTests(TestsBase):
"""Tests that modify ConfigEntry entities in the datastore go here.
The contents of the datastore will be reset for each test."""
def tearDown(self):
reset_data() # This is very expensive due to all the put()s in setup.
def test_admin_page(self):
# Load the administration page.
doc = self.go('/admin?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
assert self.s.status == 200
# Activate a new subdomain.
assert not Subdomain.get_by_key_name('xyz')
create_form = doc.first('form', id='subdomain_create')
doc = self.s.submit(create_form, subdomain_new='xyz')
assert Subdomain.get_by_key_name('xyz')
# Change some settings for the new subdomain.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["no"]',
subdomain_titles='{"no": "Jordskjelv"}',
keywords='foo, bar',
use_family_name='false',
family_name_first='false',
use_postal_code='false',
min_query_word_length='1',
map_default_zoom='6',
map_default_center='[4, 5]',
map_size_pixels='[300, 300]',
read_auth_key_required='false'
)
cfg = config.Configuration('xyz')
assert cfg.language_menu_options == ['no']
assert cfg.subdomain_titles == {'no': 'Jordskjelv'}
assert cfg.keywords == 'foo, bar'
assert not cfg.use_family_name
assert not cfg.family_name_first
assert not cfg.use_postal_code
assert cfg.min_query_word_length == 1
assert cfg.map_default_zoom == 6
assert cfg.map_default_center == [4, 5]
assert cfg.map_size_pixels == [300, 300]
assert not cfg.read_auth_key_required
# Change settings again and make sure they took effect.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["nl"]',
subdomain_titles='{"nl": "Aardbeving"}',
keywords='spam, ham',
use_family_name='true',
family_name_first='true',
use_postal_code='true',
min_query_word_length='2',
map_default_zoom='7',
map_default_center='[-3, -7]',
map_size_pixels='[123, 456]',
read_auth_key_required='true'
)
cfg = config.Configuration('xyz')
assert cfg.language_menu_options == ['nl']
assert cfg.subdomain_titles == {'nl': 'Aardbeving'}
assert cfg.keywords == 'spam, ham'
assert cfg.use_family_name
assert cfg.family_name_first
assert cfg.use_postal_code
assert cfg.min_query_word_length == 2
assert cfg.map_default_zoom == 7
assert cfg.map_default_center == [-3, -7]
assert cfg.map_size_pixels == [123, 456]
assert cfg.read_auth_key_required
def test_deactivation(self):
# Load the administration page.
doc = self.go('/admin?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
assert self.s.status == 200
# Deactivate an existing subdomain.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["en"]',
subdomain_titles='{"en": "Foo"}',
keywords='foo, bar',
deactivated='true',
deactivation_message_html='de<i>acti</i>vated',
)
cfg = config.Configuration('haiti')
assert cfg.deactivated
assert cfg.deactivation_message_html == 'de<i>acti</i>vated'
# Ensure all paths listed in app.yaml are inaccessible, except /admin.
for path in ['/', '/query', '/results', '/create', '/view',
'/multiview', '/reveal', '/photo', '/embed',
'/gadget', '/delete', '/sitemap', '/api/read',
'/api/write', '/feeds/note', '/feeds/person']:
doc = self.go(path + '?subdomain=haiti')
assert 'de<i>acti</i>vated' in doc.content
assert doc.alltags('form') == []
assert doc.alltags('input') == []
assert doc.alltags('table') == []
assert doc.alltags('td') == []
def test_custom_messages(self):
    """Custom HTML messages saved via the admin page appear on the main,
    results and view pages."""
    # Load the administration page.
    doc = self.go('/admin?subdomain=haiti')
    button = doc.firsttag('input', value='Login')
    doc = self.s.submit(button, admin='True')
    assert self.s.status == 200
    # Edit the custom text fields.
    settings_form = doc.first('form', id='subdomain_save')
    doc = self.s.submit(settings_form,
                        language_menu_options='["en"]',
                        subdomain_titles='{"en": "Foo"}',
                        keywords='foo, bar',
                        main_page_custom_html='<b>main page</b> message',
                        results_page_custom_html='<u>results page</u> message',
                        view_page_custom_html='<a href="http://test">view page</a> message'
                        )
    # The saved configuration must round-trip the raw HTML unchanged.
    cfg = config.Configuration('haiti')
    assert cfg.main_page_custom_html == '<b>main page</b> message'
    assert cfg.results_page_custom_html == '<u>results page</u> message'
    assert cfg.view_page_custom_html == \
        '<a href="http://test">view page</a> message'
    # Add a person record so the view page has something to show.
    db.put(Person(
        key_name='haiti:test.google.com/person.1001',
        subdomain='haiti',
        entry_date=utils.get_utcnow(),
        first_name='_status_first_name',
        last_name='_status_last_name',
        author_name='_status_author_name'
    ))
    # Check for custom message on main page (flush_cache forces a reload
    # of the cached configuration).
    doc = self.go('/?subdomain=haiti&flush_cache=yes')
    assert 'main page message' in doc.text
    # Check for custom message on results page.
    doc = self.go('/results?subdomain=haiti&query=xy')
    assert 'results page message' in doc.text
    # Check for custom message on view page.
    doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
    assert 'view page message' in doc.text
class SecretTests(TestsBase):
    """Tests that modify Secret entities in the datastore go here.
    The contents of the datastore will be reset for each test."""
    kinds_written_by_tests = [Secret]

    def test_analytics_id(self):
        """Checks that the analytics_id Secret is used for analytics."""
        # Without the Secret, no tracker snippet is emitted.
        doc = self.go('/create?subdomain=haiti')
        assert 'getTracker(' not in doc.content
        db.put(Secret(key_name='analytics_id', secret='analytics_id_xyz'))
        doc = self.go('/create?subdomain=haiti')
        assert "getTracker('analytics_id_xyz')" in doc.content

    def test_maps_api_key(self):
        """Checks that maps don't appear when there is no maps_api_key."""
        db.put(Person(
            key_name='haiti:test.google.com/person.1001',
            subdomain='haiti',
            entry_date=utils.get_utcnow(),
            first_name='_status_first_name',
            last_name='_status_last_name',
            author_name='_status_author_name'
        ))
        # No Secret yet: neither page may render map markup.
        doc = self.go('/create?subdomain=haiti&role=provide')
        assert 'map_canvas' not in doc.content
        doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
        assert 'map_canvas' not in doc.content
        assert 'id="map_' not in doc.content
        # After storing the Secret, the key and the map markup appear.
        db.put(Secret(key_name='maps_api_key', secret='maps_api_key_xyz'))
        doc = self.go('/create?subdomain=haiti&role=provide')
        assert 'maps_api_key_xyz' in doc.content
        assert 'map_canvas' in doc.content
        doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
        assert 'maps_api_key_xyz' in doc.content
        assert 'map_canvas' in doc.content
        assert 'id="map_' in doc.content
def main():
    """Start the helper servers, connect to the datastore and run the tests.

    Starts a fresh dev appserver (only when testing against localhost) and
    an SMTP capture thread, then hands control to unittest.main().  All
    helper threads are stopped and joined on the way out.
    """
    parser = optparse.OptionParser()
    parser.add_option('-a', '--address', default='localhost',
                      help='appserver hostname (default: localhost)')
    parser.add_option('-p', '--port', type='int', default=8081,
                      help='appserver port number (default: 8081)')
    parser.add_option('-m', '--mail_port', type='int', default=8025,
                      help='SMTP server port number (default: 8025)')
    parser.add_option('-v', '--verbose', action='store_true')
    options, args = parser.parse_args()
    try:
        threads = []
        if options.address == 'localhost':
            # We need to start up a clean new appserver for testing.
            threads.append(AppServerRunner(options.port, options.mail_port))
        # NOTE(review): MailThread placement relative to the 'if' above is
        # reconstructed; it appears to start unconditionally -- confirm.
        threads.append(MailThread(options.mail_port))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.wait_until_ready()
        # Connect to the datastore.
        hostport = '%s:%d' % (options.address, options.port)
        remote_api.connect(hostport, remote_api.get_app_id(), 'test', 'test')
        TestsBase.hostport = hostport
        TestsBase.verbose = options.verbose
        reset_data()  # Reset the datastore for the first test.
        unittest.main()  # You can select tests using command-line arguments.
    except Exception, e:
        # Something went wrong during testing: dump any buffered server
        # output before reporting the failure.
        for thread in threads:
            if hasattr(thread, 'flush_output'):
                thread.flush_output()
        traceback.print_exc()
        raise SystemExit
    finally:
        # Always shut the helper threads down, pass or fail.
        for thread in threads:
            thread.stop()
            thread.join()

if __name__ == '__main__':
    main()
| Python |
from google.appengine.ext import db
class Realtor(db.Model):
    """Contact record for a realtor; referenced by Listing.realtor."""
    name = db.StringProperty()
    email = db.StringProperty()
    phone = db.StringProperty()
    date = db.DateTimeProperty(auto_now_add=True)  # Creation timestamp.
class Listing(db.Model):
    """A property listing, optionally linked to a Google Web Album."""
    name = db.StringProperty()
    author = db.UserProperty()  # Account that created/last saved the listing.
    realtor = db.ReferenceProperty(Realtor)
    address = db.StringProperty(multiline=True)
    city = db.StringProperty()
    state = db.StringProperty()
    zip_code = db.StringProperty()
    content = db.TextProperty()  # Free-form listing description.
    gphoto = db.StringProperty()  # Full URL to default photo for listing
    galbum_id = db.StringProperty()  # Picasa album id; created lazily on save.
    galbum_url = db.LinkProperty()  # Link to Google Web Album
    available_date = db.DateTimeProperty()
    date = db.DateTimeProperty(auto_now_add=True)  # Creation timestamp.
| Python |
import os
import wsgiref.handlers
from google.appengine.ext.webapp.util import run_wsgi_app, login_required
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
import gdata.gauth
import gdata.photos.service
from config import *
from models import *
def render_page(webob, tmpl_file, params):
    """Render tmpl_file with params into webob's response.

    Populates params with common template variables (company info, current
    user, sign-in/out link, admin flag), then renders the 'custom/'
    override of the template if present, falling back to the stock
    template.  Note: params is mutated in place.
    """
    params['company_name'] = config['company_name']
    params['company_email'] = config['gapp_email']
    params['gapp_uid'] = config['gapp_uid']
    params['request'] = webob.request
    params['user'] = users.get_current_user()
    if params['user']:
        params['user_link'] = '<a href="%s">Sign out</a>' % \
            users.create_logout_url(webob.request.uri)
    else:
        params['user_link'] = '<a href="%s">Sign in</a>' % \
            users.create_login_url(webob.request.uri)
    params['is_admin'] = users.is_current_user_admin()
    path = os.path.join(os.path.dirname(__file__),
                        config['template_path'] + 'custom/' + tmpl_file)
    try:
        webob.response.out.write(template.render(path, params))
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt.  Any failure to render the custom
        # override falls back to the stock template.
        path = os.path.join(os.path.dirname(__file__),
                            config['template_path'] + tmpl_file)
        webob.response.out.write(template.render(path, params))
class MainPage(webapp.RequestHandler):
    """Front page: a static page (?page=name) or the index with the five
    most recent listings."""
    def get(self):
        # For static pages pass ?page=the_name and place the HTML template
        # in templates/pages/the_name.html.
        if self.request.get('page'):
            render_page(self, 'pages/' + self.request.get('page') + '.html', {})
        else:
            listings = Listing.all().order('-available_date')[:5]
            params = {'listings': listings}
            # BUG FIX: params was built but an empty dict was passed, so
            # the index template never received the listings.
            render_page(self, 'index.html', params)
class ListingSearch(webapp.RequestHandler):
    """Shows the most recent listings (up to 100), newest first."""
    def get(self):
        recent = Listing.all().order('-available_date')[:100]
        render_page(self, 'listing_results.html', {'listings': recent})
class ListingView(webapp.RequestHandler):
    """Displays a single listing identified by the ?key= parameter."""
    def get(self):
        try:
            listing = Listing.get(self.request.get('key'))
        except db.BadKeyError:
            # Malformed key: bounce back to the front page.
            self.redirect("/")
            return
        render_page(self, 'listing_view.html', {'listing': listing})
class ListingModify(webapp.RequestHandler):
    """Add or modify a listing (admin only)."""

    def get(self, action):
        """Render the listing form; pre-populate it when action=='modify'."""
        if not users.is_current_user_admin():
            self.redirect(users.create_login_url("/"))
            return  # BUG FIX: the form was still rendered after the redirect.
        params = {}
        if action == 'modify':
            try:
                params['listing'] = Listing.get(self.request.get('key'))
            except Exception:
                self.redirect("/")
                return  # BUG FIX: stop processing after the redirect.
        params['realtors'] = Realtor.all()
        render_page(self, 'listing_form.html', params)

    def post(self, action):
        """Create or update a listing; lazily create its web album."""
        if users.is_current_user_admin():
            if self.request.get('key'):
                l = Listing.get(self.request.get('key'))
            else:
                l = Listing()
            l.author = users.get_current_user()
            l.realtor = Realtor.get(self.request.get('realtor')).key()
            # BUG FIX: the model declares 'name', not 'title'; assigning
            # l.title only set a transient attribute that was never stored.
            l.name = self.request.get('title')
            l.address = self.request.get('address')
            l.city = self.request.get('city')
            l.state = self.request.get('state')
            l.zip_code = self.request.get('zip_code')
            l.content = self.request.get('content')
            # NOTE(review): available_date is a DateTimeProperty but a raw
            # request string is assigned; db.put rejects non-datetime
            # values -- confirm what the form actually submits.
            l.available_date = self.request.get('available_date')
            l.gphoto = self.request.get('gphoto')
            l.put()
            # Check for a web album; if none, create one.
            if not l.galbum_id:
                gd_client = gdata.photos.service.PhotosService()
                gd_client.email = config['gapp_email']
                gd_client.password = config['gapp_password']
                gd_client.source = 'gae-realty-listing'
                gd_client.ProgrammaticLogin()
                # BUG FIX: l.key is a method and must be called; the album
                # summary previously embedded a bound-method repr.
                galbum_summary = 'Available at %s/listing?key=%s' % \
                    (LISTING_URL, l.key())
                galbum_title = '%s Listing: %s' % \
                    (config['company_name'], l.name)
                album = gd_client.InsertAlbum(title=galbum_title,
                                              summary=galbum_summary)
                l.galbum_id = album.gphoto_id.text
                l.galbum_url = album.GetHtmlLink().href
                l.put()
            self.redirect('/')
        else:
            self.redirect(users.create_login_url(self.request.uri))
class RealtorModify(webapp.RequestHandler):
    """Add or modify a realtor (admin only)."""

    def get(self, action):
        """Render the realtor form; pre-populate it when a key is given."""
        if not users.is_current_user_admin():
            self.redirect(users.create_login_url(self.request.uri))
            return  # BUG FIX: the form was rendered even for anonymous users.
        if self.request.get('key'):
            realtor = Realtor.get(self.request.get('key'))
        else:
            realtor = None
        render_page(self, 'realtor_form.html', {'realtor': realtor})

    def post(self, action):
        """Create or update a realtor record."""
        if users.is_current_user_admin():
            # BUG FIX: a posted key was ignored, so 'modify' always created
            # a duplicate entity instead of updating the existing one
            # (mirrors ListingModify.post).
            if self.request.get('key'):
                r = Realtor.get(self.request.get('key'))
            else:
                r = Realtor()
            r.name = self.request.get('name')
            r.email = self.request.get('email')
            r.phone = self.request.get('phone')
            r.put()
            self.redirect('/listing/add')
        else:
            self.redirect(users.create_login_url(self.request.uri))
def main():
    """Build the WSGI application and serve it through the CGI adapter."""
    routes = [
        ('/', MainPage),
        ('/listings', ListingSearch),
        ('/listing', ListingView),
        ('/listing/(modify|add)', ListingModify),
        ('/realtor/(modify|add)', RealtorModify),
    ]
    app = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(app)

if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
"""
asciidocapi - AsciiDoc API wrapper class.
The AsciiDocAPI class provides an API for executing asciidoc. Minimal example
compiles `mydoc.txt` to `mydoc.html`:
import asciidocapi
asciidoc = asciidocapi.AsciiDocAPI()
asciidoc.execute('mydoc.txt')
- Full documentation in asciidocapi.txt.
- See the doctests below for more examples.
Doctests:
1. Check execution:
>>> import StringIO
>>> infile = StringIO.StringIO('Hello *{author}*')
>>> outfile = StringIO.StringIO()
>>> asciidoc = AsciiDocAPI()
>>> asciidoc.options('--no-header-footer')
>>> asciidoc.attributes['author'] = 'Joe Bloggs'
>>> asciidoc.execute(infile, outfile, backend='html4')
>>> print outfile.getvalue()
<p>Hello <strong>Joe Bloggs</strong></p>
>>> asciidoc.attributes['author'] = 'Bill Smith'
>>> infile = StringIO.StringIO('Hello _{author}_')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile, backend='docbook')
>>> print outfile.getvalue()
<simpara>Hello <emphasis>Bill Smith</emphasis></simpara>
2. Check error handling:
>>> import StringIO
>>> asciidoc = AsciiDocAPI()
>>> infile = StringIO.StringIO('---------')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "asciidocapi.py", line 189, in execute
raise AsciiDocError(self.messages[-1])
AsciiDocError: ERROR: <stdin>: line 1: [blockdef-listing] missing closing delimiter
Copyright (C) 2009 Stuart Rackham. Free use of this software is granted
under the terms of the GNU General Public License (GPL).
"""
import sys,os,re,imp
API_VERSION = '0.1.2'
MIN_ASCIIDOC_VERSION = '8.4.1' # Minimum acceptable AsciiDoc version.
def find_in_path(fname, path=None):
    """
    Search each directory of *path* (default: the PATH environment
    variable) for *fname*; return the full path or None if not found.
    """
    if path is None:
        path = os.environ.get('PATH', '')
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, fname)
        if os.path.isfile(candidate):
            return candidate
    return None
class AsciiDocError(Exception):
    """Raised when asciidoc cannot be located, imported or executed."""
    pass
class Options(object):
    """
    Stores asciidoc(1) command options as a list of (name, value) pairs.
    """
    def __init__(self, values=[]):
        # Copy so neither the caller's list nor the shared default is
        # ever mutated.
        self.values = values[:]
    def __call__(self, name, value=None):
        """Calling the instance is a shortcut for append()."""
        self.append(name, value)
    def append(self, name, value=None):
        """Record option *name*; numeric values are stringified."""
        coerced = str(value) if type(value) in (int, float) else value
        self.values.append((name, coerced))
class Version(object):
    """
    Parse and compare AsciiDoc version numbers. Instance attributes:
    string: String version number '<major>.<minor>[.<micro>][suffix]'.
    major: Integer major version number.
    minor: Integer minor version number.
    micro: Integer micro version number.
    suffix: Suffix (begins with non-numeric character) is ignored when
    comparing.
    """
    def __init__(self, version):
        self.string = version
        match = re.match(r'^(\d+)\.(\d+)(\.(\d+))?\s*(.*?)\s*$', self.string)
        if not match:
            raise ValueError('invalid version number: %s' % self.string)
        major, minor, _, micro, suffix = match.groups()
        self.major = int(major)
        self.minor = int(minor)
        self.micro = int(micro or '0')
        self.suffix = suffix or ''
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback: compare the numeric triple
        # (major, minor, micro); the suffix is deliberately ignored.
        for mine, theirs in ((self.major, other.major),
                             (self.minor, other.minor),
                             (self.micro, other.micro)):
            outcome = cmp(mine, theirs)
            if outcome != 0:
                return outcome
        return 0
class AsciiDocAPI(object):
    """
    AsciiDoc API class: locate, import and drive asciidoc(1) in-process.
    """
    def __init__(self, asciidoc_py=None):
        """
        Locate and import asciidoc.py.
        Initialize instance attributes.
        Raises AsciiDocError if asciidoc cannot be found or its version is
        older than MIN_ASCIIDOC_VERSION.
        """
        self.options = Options()  # Command options passed to asciidoc.
        self.attributes = {}      # Attribute values passed to asciidoc.
        self.messages = []        # Messages captured from the last run.
        # Search for the asciidoc command file.
        # Try ASCIIDOC_PY environment variable first.
        cmd = os.environ.get('ASCIIDOC_PY')
        if cmd:
            if not os.path.isfile(cmd):
                raise AsciiDocError('missing ASCIIDOC_PY file: %s' % cmd)
        elif asciidoc_py:
            # Next try path specified by caller.
            cmd = asciidoc_py
            if not os.path.isfile(cmd):
                raise AsciiDocError('missing file: %s' % cmd)
        else:
            # Try shell search paths.
            for fname in ['asciidoc.py','asciidoc.pyc','asciidoc']:
                cmd = find_in_path(fname)
                if cmd: break
            else:
                # Finally try current working directory.
                for cmd in ['asciidoc.py','asciidoc.pyc','asciidoc']:
                    if os.path.isfile(cmd): break
                else:
                    raise AsciiDocError('failed to locate asciidoc')
        self.cmd = os.path.realpath(cmd)
        self.__import_asciidoc()
    def __import_asciidoc(self, reload=False):
        '''
        Import asciidoc module (script or compiled .pyc).
        See
        http://groups.google.com/group/asciidoc/browse_frm/thread/66e7b59d12cd2f91
        for an explanation of why a seemingly straight-forward job turned out
        quite complicated.
        '''
        if os.path.splitext(self.cmd)[1] in ['.py','.pyc']:
            # Temporarily put the command's directory first on sys.path so
            # the plain 'import asciidoc' resolves to self.cmd.
            sys.path.insert(0, os.path.dirname(self.cmd))
            try:
                try:
                    if reload:
                        import __builtin__  # Because reload() is shadowed.
                        __builtin__.reload(self.asciidoc)
                    else:
                        import asciidoc
                        self.asciidoc = asciidoc
                except ImportError:
                    raise AsciiDocError('failed to import ' + self.cmd)
            finally:
                del sys.path[0]
        else:
            # The import statement can only handle .py or .pyc files, have to
            # use imp.load_source() for scripts with other names.
            try:
                imp.load_source('asciidoc', self.cmd)
                import asciidoc
                self.asciidoc = asciidoc
            except ImportError:
                raise AsciiDocError('failed to import ' + self.cmd)
        if Version(self.asciidoc.VERSION) < Version(MIN_ASCIIDOC_VERSION):
            raise AsciiDocError(
                'asciidocapi %s requires asciidoc %s or better'
                % (API_VERSION, MIN_ASCIIDOC_VERSION))
    def execute(self, infile, outfile=None, backend=None):
        """
        Compile infile to outfile using backend format.
        infile and outfile can be file path strings or file like objects.
        Raises AsciiDocError when asciidoc exits with a non-zero status;
        the run's messages are kept in self.messages either way.
        """
        self.messages = []
        opts = Options(self.options.values)
        if outfile is not None:
            opts('--out-file', outfile)
        if backend is not None:
            opts('--backend', backend)
        for k,v in self.attributes.items():
            if v == '' or k[-1] in '!@':
                s = k
            elif v is None: # A None value undefines the attribute.
                s = k + '!'
            else:
                s = '%s=%s' % (k,v)
            opts('--attribute', s)
        args = [infile]
        # The AsciiDoc command was designed to process source text then
        # exit, there are globals and statics in asciidoc.py that have
        # to be reinitialized before each run -- hence the reload.
        self.__import_asciidoc(reload=True)
        try:
            try:
                self.asciidoc.execute(self.cmd, opts.values, args)
            finally:
                self.messages = self.asciidoc.messages[:]
        except SystemExit, e:
            if e.code:
                raise AsciiDocError(self.messages[-1])
if __name__ == "__main__":
    """
    Run module doctests.
    """
    # NOTE: the string above is a no-op expression statement, not a real
    # docstring; kept for fidelity with the original.
    import doctest
    options = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS
    doctest.testmod(optionflags=options)
| Python |
#!/usr/bin/env python
"""
asciidocapi - AsciiDoc API wrapper class.
The AsciiDocAPI class provides an API for executing asciidoc. Minimal example
compiles `mydoc.txt` to `mydoc.html`:
import asciidocapi
asciidoc = asciidocapi.AsciiDocAPI()
asciidoc.execute('mydoc.txt')
- Full documentation in asciidocapi.txt.
- See the doctests below for more examples.
Doctests:
1. Check execution:
>>> import StringIO
>>> infile = StringIO.StringIO('Hello *{author}*')
>>> outfile = StringIO.StringIO()
>>> asciidoc = AsciiDocAPI()
>>> asciidoc.options('--no-header-footer')
>>> asciidoc.attributes['author'] = 'Joe Bloggs'
>>> asciidoc.execute(infile, outfile, backend='html4')
>>> print outfile.getvalue()
<p>Hello <strong>Joe Bloggs</strong></p>
>>> asciidoc.attributes['author'] = 'Bill Smith'
>>> infile = StringIO.StringIO('Hello _{author}_')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile, backend='docbook')
>>> print outfile.getvalue()
<simpara>Hello <emphasis>Bill Smith</emphasis></simpara>
2. Check error handling:
>>> import StringIO
>>> asciidoc = AsciiDocAPI()
>>> infile = StringIO.StringIO('---------')
>>> outfile = StringIO.StringIO()
>>> asciidoc.execute(infile, outfile)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "asciidocapi.py", line 189, in execute
raise AsciiDocError(self.messages[-1])
AsciiDocError: ERROR: <stdin>: line 1: [blockdef-listing] missing closing delimiter
Copyright (C) 2009 Stuart Rackham. Free use of this software is granted
under the terms of the GNU General Public License (GPL).
"""
import sys,os,re,imp
API_VERSION = '0.1.2'
MIN_ASCIIDOC_VERSION = '8.4.1' # Minimum acceptable AsciiDoc version.
def find_in_path(fname, path=None):
    """
    Search each directory of *path* (default: the PATH environment
    variable) for *fname*; return the full path or None if not found.
    """
    if path is None:
        path = os.environ.get('PATH', '')
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, fname)
        if os.path.isfile(candidate):
            return candidate
    return None
class AsciiDocError(Exception):
    """Raised when asciidoc cannot be located, imported or executed."""
    pass
class Options(object):
    """
    Stores asciidoc(1) command options as a list of (name, value) pairs.
    """
    def __init__(self, values=[]):
        # Copy so neither the caller's list nor the shared default is
        # ever mutated.
        self.values = values[:]
    def __call__(self, name, value=None):
        """Calling the instance is a shortcut for append()."""
        self.append(name, value)
    def append(self, name, value=None):
        """Record option *name*; numeric values are stringified."""
        coerced = str(value) if type(value) in (int, float) else value
        self.values.append((name, coerced))
class Version(object):
    """
    Parse and compare AsciiDoc version numbers. Instance attributes:
    string: String version number '<major>.<minor>[.<micro>][suffix]'.
    major: Integer major version number.
    minor: Integer minor version number.
    micro: Integer micro version number.
    suffix: Suffix (begins with non-numeric character) is ignored when
    comparing.
    """
    def __init__(self, version):
        self.string = version
        match = re.match(r'^(\d+)\.(\d+)(\.(\d+))?\s*(.*?)\s*$', self.string)
        if not match:
            raise ValueError('invalid version number: %s' % self.string)
        major, minor, _, micro, suffix = match.groups()
        self.major = int(major)
        self.minor = int(minor)
        self.micro = int(micro or '0')
        self.suffix = suffix or ''
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback: compare the numeric triple
        # (major, minor, micro); the suffix is deliberately ignored.
        for mine, theirs in ((self.major, other.major),
                             (self.minor, other.minor),
                             (self.micro, other.micro)):
            outcome = cmp(mine, theirs)
            if outcome != 0:
                return outcome
        return 0
class AsciiDocAPI(object):
    """
    AsciiDoc API class: locate, import and drive asciidoc(1) in-process.
    """
    def __init__(self, asciidoc_py=None):
        """
        Locate and import asciidoc.py.
        Initialize instance attributes.
        Raises AsciiDocError if asciidoc cannot be found or its version is
        older than MIN_ASCIIDOC_VERSION.
        """
        self.options = Options()  # Command options passed to asciidoc.
        self.attributes = {}      # Attribute values passed to asciidoc.
        self.messages = []        # Messages captured from the last run.
        # Search for the asciidoc command file.
        # Try ASCIIDOC_PY environment variable first.
        cmd = os.environ.get('ASCIIDOC_PY')
        if cmd:
            if not os.path.isfile(cmd):
                raise AsciiDocError('missing ASCIIDOC_PY file: %s' % cmd)
        elif asciidoc_py:
            # Next try path specified by caller.
            cmd = asciidoc_py
            if not os.path.isfile(cmd):
                raise AsciiDocError('missing file: %s' % cmd)
        else:
            # Try shell search paths.
            for fname in ['asciidoc.py','asciidoc.pyc','asciidoc']:
                cmd = find_in_path(fname)
                if cmd: break
            else:
                # Finally try current working directory.
                for cmd in ['asciidoc.py','asciidoc.pyc','asciidoc']:
                    if os.path.isfile(cmd): break
                else:
                    raise AsciiDocError('failed to locate asciidoc')
        self.cmd = os.path.realpath(cmd)
        self.__import_asciidoc()
    def __import_asciidoc(self, reload=False):
        '''
        Import asciidoc module (script or compiled .pyc).
        See
        http://groups.google.com/group/asciidoc/browse_frm/thread/66e7b59d12cd2f91
        for an explanation of why a seemingly straight-forward job turned out
        quite complicated.
        '''
        if os.path.splitext(self.cmd)[1] in ['.py','.pyc']:
            # Temporarily put the command's directory first on sys.path so
            # the plain 'import asciidoc' resolves to self.cmd.
            sys.path.insert(0, os.path.dirname(self.cmd))
            try:
                try:
                    if reload:
                        import __builtin__  # Because reload() is shadowed.
                        __builtin__.reload(self.asciidoc)
                    else:
                        import asciidoc
                        self.asciidoc = asciidoc
                except ImportError:
                    raise AsciiDocError('failed to import ' + self.cmd)
            finally:
                del sys.path[0]
        else:
            # The import statement can only handle .py or .pyc files, have to
            # use imp.load_source() for scripts with other names.
            try:
                imp.load_source('asciidoc', self.cmd)
                import asciidoc
                self.asciidoc = asciidoc
            except ImportError:
                raise AsciiDocError('failed to import ' + self.cmd)
        if Version(self.asciidoc.VERSION) < Version(MIN_ASCIIDOC_VERSION):
            raise AsciiDocError(
                'asciidocapi %s requires asciidoc %s or better'
                % (API_VERSION, MIN_ASCIIDOC_VERSION))
    def execute(self, infile, outfile=None, backend=None):
        """
        Compile infile to outfile using backend format.
        infile and outfile can be file path strings or file like objects.
        Raises AsciiDocError when asciidoc exits with a non-zero status;
        the run's messages are kept in self.messages either way.
        """
        self.messages = []
        opts = Options(self.options.values)
        if outfile is not None:
            opts('--out-file', outfile)
        if backend is not None:
            opts('--backend', backend)
        for k,v in self.attributes.items():
            if v == '' or k[-1] in '!@':
                s = k
            elif v is None: # A None value undefines the attribute.
                s = k + '!'
            else:
                s = '%s=%s' % (k,v)
            opts('--attribute', s)
        args = [infile]
        # The AsciiDoc command was designed to process source text then
        # exit, there are globals and statics in asciidoc.py that have
        # to be reinitialized before each run -- hence the reload.
        self.__import_asciidoc(reload=True)
        try:
            try:
                self.asciidoc.execute(self.cmd, opts.values, args)
            finally:
                self.messages = self.asciidoc.messages[:]
        except SystemExit, e:
            if e.code:
                raise AsciiDocError(self.messages[-1])
if __name__ == "__main__":
    """
    Run module doctests.
    """
    # NOTE: the string above is a no-op expression statement, not a real
    # docstring; kept for fidelity with the original.
    import doctest
    options = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS
    doctest.testmod(optionflags=options)
| Python |
#!/usr/bin/env python
USAGE = '''Usage: testasciidoc.py [OPTIONS] COMMAND
Run AsciiDoc conformance tests specified in configuration FILE.
Commands:
list List tests
run [NUMBER] [BACKEND] Execute tests
update [NUMBER] [BACKEND] Regenerate and update test data
Options:
-f, --conf-file=CONF_FILE
Use configuration file CONF_FILE (default configuration file is
testasciidoc.conf in testasciidoc.py directory)
--force
Update all test data overwriting existing data'''
__version__ = '0.1.1'
__copyright__ = 'Copyright (C) 2009 Stuart Rackham'
import os, sys, re, StringIO, difflib
import asciidocapi
BACKENDS = ('html4','xhtml11','docbook','wordpress','html5') # Default backends.
BACKEND_EXT = {'html4':'.html', 'xhtml11':'.html', 'docbook':'.xml',
'wordpress':'.html','slidy':'.html','html5':'.html'}
def iif(condition, iftrue, iffalse=None):
    """
    Immediate if c.f. ternary ?: operator.
    False value defaults to '' if the true value is a string.
    False value defaults to 0 if the true value is a number.
    """
    # Derive the default false value from the type of iftrue before
    # dispatching on the condition (preserves original evaluation order).
    if iffalse is None:
        if isinstance(iftrue, basestring):
            iffalse = ''
        if type(iftrue) in (int, float):
            iffalse = 0
    if condition:
        return iftrue
    return iffalse
def message(msg=''):
    """Write *msg* plus a newline to stderr (keeps stdout clean for
    test results)."""
    sys.stderr.write('%s\n' % msg)
def strip_end(lines):
    """
    Strip blank strings from the end of *lines*, modifying it in place.
    """
    while lines and not lines[-1]:
        del lines[-1]
def normalize_data(lines):
    """
    Return *lines* with comment lines ('#'-prefixed) removed and trailing
    blank strings stripped.
    """
    result = [line for line in lines if not line.startswith('#')]
    # Trailing-blank removal, inlined (equivalent to strip_end()).
    while result and not result[-1]:
        del result[-1]
    return result
class AsciiDocTest(object):
    """One conformance test: an AsciiDoc source document plus expected
    per-backend output files, parsed from a configuration file section."""
    def __init__(self):
        self.number = None      # Test number (1..).
        self.name = ''          # Optional test name.
        self.title = ''         # Optional test title.
        self.description = []   # List of lines following title.
        self.source = None      # AsciiDoc test source file name.
        self.options = []       # asciidoc(1) command options.
        self.attributes = {}    # asciidoc(1) attribute values.
        self.backends = BACKENDS
        self.datadir = None     # Where output files are stored.
        self.disabled = False   # True when the title is prefixed with '!'.
    def backend_filename(self, backend):
        """
        Return the path name of the backend output file that is generated from
        the test name and output file type.
        """
        return '%s-%s%s' % (
            os.path.normpath(os.path.join(self.datadir, self.name)),
            backend,
            BACKEND_EXT[backend])
    def parse(self, lines, confdir, datadir):
        """
        Parse conf file test section from list of text lines.
        """
        self.__init__()  # Reset all attributes before (re)parsing.
        self.confdir = confdir
        self.datadir = datadir
        lines = Lines(lines)
        while not lines.eol():
            l = lines.read_until(r'^%')
            if l:
                if not l[0].startswith('%'):
                    # Free text before any '%' directive: title (optionally
                    # '!'-prefixed to disable the test) plus description.
                    if l[0][0] == '!':
                        self.disabled = True
                        self.title = l[0][1:]
                    else:
                        self.title = l[0]
                    self.description = l[1:]
                    continue
                reo = re.match(r'^%\s*(?P<directive>[\w_-]+)', l[0])
                if not reo:
                    # NOTE(review): tuple-style raise is Python 2 only; it
                    # is a TypeError on Python 3.
                    raise (ValueError, 'illegal directive: %s' % l[0])
                directive = reo.groupdict()['directive']
                data = normalize_data(l[1:])
                if directive == 'source':
                    if data:
                        self.source = os.path.normpath(os.path.join(
                            self.confdir, os.path.normpath(data[0])))
                elif directive == 'options':
                    self.options = eval(' '.join(data))
                    for i,v in enumerate(self.options):
                        if isinstance(v, basestring):
                            # Normalize bare option names to (name, None).
                            self.options[i] = (v,None)
                elif directive == 'attributes':
                    self.attributes = eval(' '.join(data))
                elif directive == 'backends':
                    self.backends = eval(' '.join(data))
                elif directive == 'name':
                    self.name = data[0].strip()
                else:
                    raise (ValueError, 'illegal directive: %s' % l[0])
        # Fall back to source-derived title and name when not given.
        if not self.title:
            self.title = self.source
        if not self.name:
            self.name = os.path.basename(os.path.splitext(self.source)[0])
    def is_missing(self, backend):
        """
        Returns True if there is no output test data file for backend.
        """
        return not os.path.isfile(self.backend_filename(backend))
    def is_missing_or_outdated(self, backend):
        """
        Returns True if the output test data file is missing or out of date.
        """
        return self.is_missing(backend) or (
            os.path.getmtime(self.source)
            > os.path.getmtime(self.backend_filename(backend)))
    def get_expected(self, backend):
        """
        Return expected test data output for backend.
        """
        f = open(self.backend_filename(backend))
        try:
            result = f.readlines()
            # Strip line terminators.
            result = [ s.rstrip() for s in result ]
        finally:
            f.close()
        return result
    def generate_expected(self, backend):
        """
        Generate and return test data output for backend.
        """
        asciidoc = asciidocapi.AsciiDocAPI()
        asciidoc.options.values = self.options
        asciidoc.attributes = self.attributes
        infile = self.source
        outfile = StringIO.StringIO()
        asciidoc.execute(infile, outfile, backend)
        return outfile.getvalue().splitlines()
    def update_expected(self, backend):
        """
        Generate and write backend data.
        """
        lines = self.generate_expected(backend)
        if not os.path.isdir(self.datadir):
            print('CREATING: %s' % self.datadir)
            os.mkdir(self.datadir)
        f = open(self.backend_filename(backend),'w+')
        try:
            print('WRITING: %s' % f.name)
            f.writelines([ s + os.linesep for s in lines])
        finally:
            f.close()
    def update(self, backend=None, force=False):
        """
        Regenerate and update expected test data outputs.
        Only stale/missing files are rewritten unless force is True.
        """
        if backend is None:
            backends = self.backends
        else:
            backends = [backend]
        for backend in backends:
            if force or self.is_missing_or_outdated(backend):
                self.update_expected(backend)
    def run(self, backend=None):
        """
        Execute test.
        Return True if test passes.
        Per-backend pass/fail/skip counts accumulate in self.passed,
        self.failed and self.skipped.
        """
        if backend is None:
            backends = self.backends
        else:
            backends = [backend]
        result = True # Assume success.
        self.passed = self.failed = self.skipped = 0
        print('%d: %s' % (self.number, self.title))
        if self.source and os.path.isfile(self.source):
            print('SOURCE: asciidoc: %s' % self.source)
            for backend in backends:
                fromfile = self.backend_filename(backend)
                if not self.is_missing(backend):
                    expected = self.get_expected(backend)
                    strip_end(expected)
                    got = self.generate_expected(backend)
                    strip_end(got)
                    lines = []
                    # n=0: no context lines in the diff output.
                    for line in difflib.unified_diff(got, expected, n=0):
                        lines.append(line)
                    if lines:
                        result = False
                        self.failed +=1
                        lines = lines[3:]  # Drop the diff header lines.
                        print('FAILED: %s: %s' % (backend, fromfile))
                        message('+++ %s' % fromfile)
                        message('--- got')
                        for line in lines:
                            message(line)
                        message()
                    else:
                        self.passed += 1
                        print('PASSED: %s: %s' % (backend, fromfile))
                else:
                    # No expected data file for this backend.
                    self.skipped += 1
                    print('SKIPPED: %s: %s' % (backend, fromfile))
        else:
            # Source file missing or unspecified: skip every backend.
            self.skipped += len(backends)
            if self.source:
                msg = 'MISSING: %s' % self.source
            else:
                msg = 'NO ASCIIDOC SOURCE FILE SPECIFIED'
            print(msg)
        print('')
        return result
class AsciiDocTests(object):
    """The whole test suite parsed from a testasciidoc configuration file."""
    def __init__(self, conffile):
        """
        Parse configuration file.
        """
        self.conffile = os.path.normpath(conffile)
        # All file names are relative to configuration file directory.
        self.confdir = os.path.dirname(self.conffile)
        self.datadir = self.confdir # Default expected files directory.
        self.tests = []             # List of parsed AsciiDocTest objects.
        self.globals = {}           # Optional '%globals' section values.
        f = open(self.conffile)
        try:
            lines = Lines(f.readlines())
        finally:
            f.close()
        first = True
        while not lines.eol():
            s = lines.read_until(r'^%+$')
            if s:
                # Optional globals precede all tests.
                if first and re.match(r'^%\s*globals$',s[0]):
                    self.globals = eval(' '.join(normalize_data(s[1:])))
                    if 'datadir' in self.globals:
                        self.datadir = os.path.join(
                            self.confdir,
                            os.path.normpath(self.globals['datadir']))
                else:
                    test = AsciiDocTest()
                    test.parse(s[1:], self.confdir, self.datadir)
                    self.tests.append(test)
                    test.number = len(self.tests)
                first = False
    def run(self, number=None, backend=None):
        """
        Run all tests.
        If number is specified run test number (1..).
        Totals accumulate in self.passed/failed/skipped.
        """
        self.passed = self.failed = self.skipped = 0
        for test in self.tests:
            # Disabled tests run only when selected explicitly by number.
            if (not test.disabled or number) and (not number or number == test.number) and (not backend or backend in test.backends):
                test.run(backend)
                self.passed += test.passed
                self.failed += test.failed
                self.skipped += test.skipped
        if self.passed > 0:
            print('TOTAL PASSED: %s' % self.passed)
        if self.failed > 0:
            print('TOTAL FAILED: %s' % self.failed)
        if self.skipped > 0:
            print('TOTAL SKIPPED: %s' % self.skipped)
    def update(self, number=None, backend=None, force=False):
        """
        Regenerate expected test data and update configuration file.
        """
        for test in self.tests:
            if (not test.disabled or number) and (not number or number == test.number):
                test.update(backend, force=force)
    def list(self):
        """
        Lists tests to stdout.
        """
        # Disabled tests are flagged with a '!' prefix.
        for test in self.tests:
            print '%d: %s%s' % (test.number, iif(test.disabled,'!'), test.title)
class Lines(list):
    """
    A list of right-stripped strings with a read cursor.
    Adds eol() and read_until() to the list type.
    """
    def __init__(self, lines):
        super(Lines, self).__init__()
        self.extend([line.rstrip() for line in lines])
        self.pos = 0  # Index of the next unread line.
    def eol(self):
        """True once the cursor has passed the last line."""
        return self.pos >= len(self)
    def read_until(self, regexp):
        """
        Return the lines from the cursor up to (but excluding) the next
        line matching *regexp*, leaving the cursor on the matching line.
        The line at the cursor is always consumed first.
        """
        result = []
        if not self.eol():
            result.append(self[self.pos])
            self.pos += 1
            while not self.eol() and not re.match(regexp, self[self.pos]):
                result.append(self[self.pos])
                self.pos += 1
        return result
def usage(msg=None):
    """Report an optional error *msg* followed by the usage text, both
    on stderr."""
    if msg:
        message('%s\n' % msg)
    message(USAGE)
if __name__ == '__main__':
    # Process command line options.
    import getopt
    try:
        opts,args = getopt.getopt(sys.argv[1:], 'f:', ['force'])
    except getopt.GetoptError:
        usage('illegal command options')
        sys.exit(1)
    if len(args) == 0:
        usage()
        sys.exit(1)
    # The default configuration file lives next to this script.
    conffile = os.path.join(os.path.dirname(sys.argv[0]), 'testasciidoc.conf')
    force = False
    for o,v in opts:
        if o == '--force':
            force = True
        if o in ('-f','--conf-file'):
            conffile = v
    if not os.path.isfile(conffile):
        message('missing CONF_FILE: %s' % conffile)
        sys.exit(1)
    tests = AsciiDocTests(conffile)
    cmd = args[0]
    number = None
    backend = None
    # Positional arguments after the command: an integer selects a test
    # number, anything else is interpreted as a backend name.
    for arg in args[1:3]:
        try:
            number = int(arg)
        except ValueError:
            backend = arg
    if backend and backend not in BACKENDS:
        message('illegal BACKEND: %s' % backend)
        sys.exit(1)
    if number is not None and number not in range(1, len(tests.tests)+1):
        message('illegal test NUMBER: %d' % number)
        sys.exit(1)
    if cmd == 'run':
        tests.run(number, backend)
        # Non-zero exit status when any test failed.
        if tests.failed:
            exit(1)
    elif cmd == 'update':
        tests.update(number, backend, force=force)
    elif cmd == 'list':
        tests.list()
    else:
        usage('illegal COMMAND: %s' % cmd)
| Python |
#!/usr/bin/env python
'''
a2x - A toolchain manager for AsciiDoc (converts Asciidoc text files to other
file formats)
Copyright: Stuart Rackham (c) 2009
License: MIT
Email: srackham@gmail.com
'''
import os
import fnmatch
import HTMLParser
import re
import shutil
import subprocess
import sys
import traceback
import urlparse
import zipfile
import xml.dom.minidom
import mimetypes
# Program name: this script's file name without the extension.
PROG = os.path.basename(os.path.splitext(__file__)[0])
VERSION = '8.6.4'
# AsciiDoc global configuration file directory.
# NOTE: CONF_DIR is "fixed up" by Makefile -- don't rename or change syntax.
CONF_DIR = '/etc/asciidoc'
######################################################################
# Default configuration file parameters.
# These module-level names may be overridden by a2x.conf files, which
# load_conf() executes in this module's global namespace.
######################################################################
# Optional environment variable dictionary passed to
# executing programs. If set to None the existing
# environment is used.
ENV = None
# External executables.
ASCIIDOC = 'asciidoc'
XSLTPROC = 'xsltproc'
DBLATEX = 'dblatex' # pdf generation.
FOP = 'fop' # pdf generation (--fop option).
W3M = 'w3m' # text generation.
LYNX = 'lynx' # text generation (if no w3m).
XMLLINT = 'xmllint' # Set to '' to disable.
EPUBCHECK = 'epubcheck' # Set to '' to disable.
# External executable default options.
ASCIIDOC_OPTS = ''
DBLATEX_OPTS = ''
FOP_OPTS = ''
XSLTPROC_OPTS = ''
######################################################################
# End of configuration file parameters.
######################################################################
#####################################################################
# Utility functions
#####################################################################
OPTIONS = None # These functions read verbose and dry_run command options.
def errmsg(msg):
    # Write a program-name-prefixed message to stderr.
    sys.stderr.write('%s: %s\n' % (PROG,msg))
def warning(msg):
    # Report a non-fatal problem on stderr.
    errmsg('WARNING: %s' % msg)
def infomsg(msg):
    # Print a program-name-prefixed informational message to stdout.
    print '%s: %s' % (PROG,msg)
def die(msg, exit_code=1):
    # Report a fatal error on stderr and terminate with exit_code.
    errmsg('ERROR: %s' % msg)
    sys.exit(exit_code)
def trace():
    """Print the current exception traceback to stderr, framed by rules."""
    errmsg('-'*60)
    traceback.print_exc(file=sys.stderr)
    errmsg('-'*60)
def verbose(msg):
    # Emit msg only when --verbose or --dry-run was given (reads the
    # global OPTIONS object set up in the main line).
    if OPTIONS.verbose or OPTIONS.dry_run:
        infomsg(msg)
class AttrDict(dict):
    """
    Like a dictionary except values can be accessed as attributes i.e. obj.foo
    can be used in addition to obj['foo'].
    If self._default has been set then it will be returned if a non-existent
    attribute is accessed (instead of raising an AttributeError).
    """
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError, k:
            # Fall back to the optional '_default' entry, if present.
            if self.has_key('_default'):
                return self['_default']
            else:
                raise AttributeError, k
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        try: del self[key]
        except KeyError, k: raise AttributeError, k
    def __repr__(self):
        return '<AttrDict ' + dict.__repr__(self) + '>'
    # Pickle/copy support: the state is just the dict contents.
    def __getstate__(self):
        return dict(self)
    def __setstate__(self,value):
        for k,v in value.items(): self[k]=v
def isexecutable(file_name):
    """Return True if file_name is an existing regular file with execute permission."""
    if not os.path.isfile(file_name):
        return False
    return os.access(file_name, os.X_OK)
def find_executable(file_name):
    '''
    Search for executable file_name in the system PATH.
    Return full path name or None if not found.
    '''
    def locate(name):
        head = os.path.split(name)[0]
        if head != '':
            # A name with a directory component is tested directly,
            # not searched for on the PATH.
            if isexecutable(name):
                return name
            return None
        for directory in os.environ.get('PATH', os.defpath).split(os.pathsep):
            candidate = os.path.join(directory, name)
            if isexecutable(candidate):
                return os.path.realpath(candidate)
        return None
    if os.name == 'nt' and os.path.splitext(file_name)[1] == '':
        # Windows: an extensionless name is tried with each of the
        # common executable extensions in turn.
        result = None
        for ext in ('.cmd','.bat','.exe'):
            result = locate(file_name + ext)
            if result:
                break
    else:
        result = locate(file_name)
    return result
def shell_cd(path):
    # Change the current working directory (logged; no-op under --dry-run).
    verbose('chdir %s' % path)
    if not OPTIONS.dry_run:
        os.chdir(path)
def shell_makedirs(path):
    # Create directory path (and intermediates); no-op if it already
    # exists or under --dry-run.
    if os.path.isdir(path):
        return
    verbose('creating %s' % path)
    if not OPTIONS.dry_run:
        os.makedirs(path)
def shell_copy(src, dst):
    # Copy file src to dst (logged; no-op under --dry-run).
    verbose('copying "%s" to "%s"' % (src,dst))
    if not OPTIONS.dry_run:
        shutil.copy(src, dst)
def shell_rm(path):
    # Delete file path; no-op if missing or under --dry-run.
    if not os.path.exists(path):
        return
    verbose('deleting %s' % path)
    if not OPTIONS.dry_run:
        os.unlink(path)
def shell_rmtree(path):
    # Recursively delete directory path; no-op if missing or under --dry-run.
    if not os.path.isdir(path):
        return
    verbose('deleting %s' % path)
    if not OPTIONS.dry_run:
        shutil.rmtree(path)
def shell(cmd, raise_error=True):
    '''
    Execute command cmd in shell and return resulting subprocess.Popen object.
    If raise_error is True then a non-zero return terminates the application.
    Returns None under --dry-run.
    '''
    if os.name == 'nt':
        # TODO: this is probably unnecessary, see:
        # http://groups.google.com/group/asciidoc/browse_frm/thread/9442ee0c419f1242
        # Windows doesn't like running scripts directly so explicitly
        # specify python interpreter.
        # Extract first (quoted or unquoted) argument.
        mo = re.match(r'^\s*"\s*(?P<arg0>[^"]+)\s*"', cmd)
        if not mo:
            mo = re.match(r'^\s*(?P<arg0>[^ ]+)', cmd)
        if mo.group('arg0').endswith('.py'):
            cmd = 'python ' + cmd
        # Remove redundant quoting -- this is not just cosmetic, quoting seems to
        # dramatically decrease the allowed command length in Windows XP.
        cmd = re.sub(r'"([^ ]+?)"', r'\1', cmd)
    verbose('executing: %s' % cmd)
    if OPTIONS.dry_run:
        return
    # In verbose mode let the child write straight to our stdout/stderr;
    # otherwise capture (and discard) its output.
    if OPTIONS.verbose:
        stdout = stderr = None
    else:
        stdout = stderr = subprocess.PIPE
    try:
        popen = subprocess.Popen(cmd, stdout=stdout, stderr=stderr,
                shell=True, env=ENV)
    except OSError, e:
        die('failed: %s: %s' % (cmd, e))
    popen.wait()
    if popen.returncode != 0 and raise_error:
        die('%s returned non-zero exit status %d' % (cmd, popen.returncode))
    return popen
def find_resources(files, tagname, attrname, filter=None):
    '''
    Search all files and return a list of local URIs from attrname attribute
    values in tagname tags.
    Handles HTML open and XHTML closed tags.
    Non-local URIs are skipped.
    files can be a file name or a list of file names.
    The filter function takes a dictionary of tag attributes and returns True if
    the URI is to be included.
    '''
    class FindResources(HTMLParser.HTMLParser):
        # Nested parser class shares locals with enclosing function.
        def handle_startendtag(self, tag, attrs):
            self.handle_starttag(tag, attrs)
        def handle_starttag(self, tag, attrs):
            attrs = dict(attrs)
            if tag == tagname and (filter is None or filter(attrs)):
                # Accept only local URIs.
                # uri[0]=scheme, uri[1]=netloc, uri[2]=path: keep URIs with
                # no scheme (or 'file'), no network location, non-empty path.
                uri = urlparse.urlparse(attrs[attrname])
                if uri[0] in ('','file') and not uri[1] and uri[2]:
                    result.append(uri[2])
    if isinstance(files, str):
        files = [files]
    result = []
    for f in files:
        verbose('finding resources in: %s' % f)
        if OPTIONS.dry_run:
            continue
        parser = FindResources()
        # HTMLParser has problems with non-ASCII strings.
        # See http://bugs.python.org/issue3932
        mo = re.search(r'^<\?xml.* encoding="(.*?)"', open(f).readline())
        if mo:
            encoding = mo.group(1)
            parser.feed(open(f).read().decode(encoding))
        else:
            parser.feed(open(f).read())
        parser.close()
    result = list(set(result)) # Drop duplicate values.
    result.sort()
    return result
# NOT USED.
def copy_files(files, src_dir, dst_dir):
    '''
    Copy list of relative file names from src_dir to dst_dir.
    Absolute names are skipped; a missing source file produces a warning
    rather than an error; existing destination files are left untouched.
    '''
    for f in files:
        f = os.path.normpath(f)
        if os.path.isabs(f):
            continue
        src = os.path.join(src_dir, f)
        dst = os.path.join(dst_dir, f)
        if not os.path.exists(dst):
            if not os.path.isfile(src):
                warning('missing file: %s' % src)
                continue
            dstdir = os.path.dirname(dst)
            shell_makedirs(dstdir)
            shell_copy(src, dst)
def find_files(path, pattern):
    '''
    Return a list of normalized file names under directory path whose
    base name matches the fnmatch pattern.
    '''
    matches = []
    for dirpath, _dirs, file_names in os.walk(path):
        matches.extend(
            os.path.normpath(os.path.join(dirpath, name))
            for name in file_names
            if fnmatch.fnmatch(name, pattern)
        )
    return matches
def exec_xsltproc(xsl_file, xml_file, dst_dir, opts = ''):
    # Run xsltproc from within dst_dir (so relative output paths land
    # there), always restoring the original working directory.
    cwd = os.getcwd()
    shell_cd(dst_dir)
    try:
        shell('"%s" %s "%s" "%s"' % (XSLTPROC, opts, xsl_file, xml_file))
    finally:
        shell_cd(cwd)
def get_source_options(asciidoc_file):
    '''
    Look for a2x command options in AsciiDoc source file.
    Options appear on '// a2x: ...' comment lines; all such lines are
    concatenated and split into a list of option strings.
    Limitation: options cannot contain double-quote characters.
    '''
    def split_options(text):
        # Tokenise text on spaces, honouring double-quoted groups
        # (a closing quote always emits a token, even an empty one).
        tokens = []
        current = ''
        quoted = False
        for ch in text:
            if ch == '"':
                if quoted:
                    tokens.append(current)
                    current = ''
                quoted = not quoted
            elif ch == ' ' and not quoted:
                if current:
                    tokens.append(current)
                    current = ''
            else:
                current += ch
        if current:
            tokens.append(current)
        return tokens
    result = []
    if os.path.isfile(asciidoc_file):
        options = ''
        for line in open(asciidoc_file):
            mo = re.search(r'^//\s*a2x:', line)
            if mo:
                options += ' ' + line[mo.end():].strip()
        result = split_options(options)
    return result
#####################################################################
# Application class
#####################################################################
class A2X(AttrDict):
    '''
    a2x options and conversion functions.
    Command options are stored as dictionary entries accessible as
    attributes (A2X subclasses AttrDict).
    '''
    def execute(self):
        '''
        Process a2x command.
        '''
        self.process_options()
        # Append configuration file options.
        self.asciidoc_opts += ' ' + ASCIIDOC_OPTS
        self.dblatex_opts += ' ' + DBLATEX_OPTS
        self.fop_opts += ' ' + FOP_OPTS
        self.xsltproc_opts += ' ' + XSLTPROC_OPTS
        # Execute to_* functions.
        # Dispatches to the to_<format>() conversion method.
        self.__getattribute__('to_'+self.format)()
        # The intermediate DocBook file is an artifact unless 'docbook'
        # was the requested format or the input was already DocBook XML.
        if not (self.keep_artifacts or self.format == 'docbook' or self.skip_asciidoc):
            shell_rm(self.dst_path('.xml'))
    def load_conf(self):
        '''
        Load a2x configuration file from default locations and --conf-file
        option.
        '''
        global ASCIIDOC
        CONF_FILE = 'a2x.conf'
        a2xdir = os.path.dirname(os.path.realpath(__file__))
        conf_files = []
        # From a2x.py directory.
        conf_files.append(os.path.join(a2xdir, CONF_FILE))
        # If the asciidoc executable and conf files are in the a2x directory
        # then use the local copy of asciidoc and skip the global a2x conf.
        asciidoc = os.path.join(a2xdir, 'asciidoc.py')
        asciidoc_conf = os.path.join(a2xdir, 'asciidoc.conf')
        if os.path.isfile(asciidoc) and os.path.isfile(asciidoc_conf):
            self.asciidoc = asciidoc
        else:
            self.asciidoc = None
        # From global conf directory.
        conf_files.append(os.path.join(CONF_DIR, CONF_FILE))
        # From $HOME directory.
        home_dir = os.environ.get('HOME')
        if home_dir is not None:
            conf_files.append(os.path.join(home_dir, '.asciidoc', CONF_FILE))
        # From --conf-file option.
        if self.conf_file is not None:
            if not os.path.isfile(self.conf_file):
                die('missing configuration file: %s' % self.conf_file)
            conf_files.append(self.conf_file)
        # From --xsl-file option.
        if self.xsl_file is not None:
            if not os.path.isfile(self.xsl_file):
                die('missing XSL file: %s' % self.xsl_file)
            self.xsl_file = os.path.abspath(self.xsl_file)
        # Load ordered files.
        # NOTE: conf files are executed as trusted Python code in this
        # module's global namespace.
        for f in conf_files:
            if os.path.isfile(f):
                verbose('loading conf file: %s' % f)
                execfile(f, globals())
        # If asciidoc is not local to a2x then search the PATH.
        if not self.asciidoc:
            self.asciidoc = find_executable(ASCIIDOC)
            if not self.asciidoc:
                die('unable to find asciidoc: %s' % ASCIIDOC)
    def process_options(self):
        '''
        Validate command options and set defaults.
        '''
        if not os.path.isfile(self.asciidoc_file):
            die('missing SOURCE_FILE: %s' % self.asciidoc_file)
        self.asciidoc_file = os.path.abspath(self.asciidoc_file)
        if not self.destination_dir:
            self.destination_dir = os.path.dirname(self.asciidoc_file)
        else:
            if not os.path.isdir(self.destination_dir):
                die('missing --destination-dir: %s' % self.destination_dir)
            self.destination_dir = os.path.abspath(self.destination_dir)
        self.resource_dirs = []
        self.resource_files = []
        if self.resource_manifest:
            if not os.path.isfile(self.resource_manifest):
                die('missing --resource-manifest: %s' % self.resource_manifest)
            for r in open(self.resource_manifest):
                self.resources.append(r.strip())
        # Classify each resource entry: a trailing separator mandates an
        # existing directory; '.ext=mimetype' registers a MIME type;
        # otherwise directories and plain files are told apart on disk.
        for r in self.resources:
            r = os.path.expanduser(r)
            r = os.path.expandvars(r)
            if r.endswith(('/','\\')):
                if os.path.isdir(r):
                    self.resource_dirs.append(r)
                else:
                    die('missing resource directory: %s' % r)
            elif os.path.isdir(r):
                self.resource_dirs.append(r)
            elif r.startswith('.') and '=' in r:
                ext, mimetype = r.split('=')
                mimetypes.add_type(mimetype, ext)
            else:
                self.resource_files.append(r)
        for p in (os.path.dirname(self.asciidoc), CONF_DIR):
            for d in ('images','stylesheets'):
                d = os.path.join(p,d)
                if os.path.isdir(d):
                    self.resource_dirs.append(d)
        verbose('resource files: %s' % self.resource_files)
        verbose('resource directories: %s' % self.resource_dirs)
        if not self.doctype and self.format == 'manpage':
            self.doctype = 'manpage'
        if self.doctype:
            self.asciidoc_opts += ' --doctype %s' % self.doctype
        for attr in self.attributes:
            self.asciidoc_opts += ' --attribute "%s"' % attr
#        self.xsltproc_opts += ' --nonet'
        if self.verbose:
            self.asciidoc_opts += ' --verbose'
            self.dblatex_opts += ' -V'
        if self.icons or self.icons_dir:
            params = [
                'callout.graphics 1',
                'navig.graphics 1',
                'admon.textlabel 0',
                'admon.graphics 1',
            ]
            if self.icons_dir:
                params += [
                    'admon.graphics.path "%s/"' % self.icons_dir,
                    'callout.graphics.path "%s/callouts/"' % self.icons_dir,
                    'navig.graphics.path "%s/"' % self.icons_dir,
                ]
        else:
            params = [
                'callout.graphics 0',
                'navig.graphics 0',
                'admon.textlabel 1',
                'admon.graphics 0',
            ]
        if self.stylesheet:
            params += ['html.stylesheet "%s"' % self.stylesheet]
        if self.format == 'htmlhelp':
            params += ['htmlhelp.chm "%s"' % self.basename('.chm'),
                    'htmlhelp.hhp "%s"' % self.basename('.hhp'),
                    'htmlhelp.hhk "%s"' % self.basename('.hhk'),
                    'htmlhelp.hhc "%s"' % self.basename('.hhc')]
        if self.doctype == 'book':
            params += ['toc.section.depth 1']
            # Books are chunked at chapter level.
            params += ['chunk.section.depth 0']
        # Only apply a stringparam if the user has not already set it.
        for o in params:
            if o.split()[0]+' ' not in self.xsltproc_opts:
                self.xsltproc_opts += ' --stringparam ' + o
        if self.fop_opts:
            self.fop = True
        # A .xml input file is already DocBook: skip the asciidoc pass.
        if os.path.splitext(self.asciidoc_file)[1].lower() == '.xml':
            self.skip_asciidoc = True
        else:
            self.skip_asciidoc = False
    def dst_path(self, ext):
        '''
        Return name of file or directory in the destination directory with
        the same name as the asciidoc source file but with extension ext.
        '''
        return os.path.join(self.destination_dir, self.basename(ext))
    def basename(self, ext):
        '''
        Return the base name of the asciidoc source file but with extension
        ext.
        '''
        return os.path.basename(os.path.splitext(self.asciidoc_file)[0]) + ext
    def asciidoc_conf_file(self, path):
        '''
        Return full path name of file in asciidoc configuration files directory.
        Search first the directory containing the asciidoc executable then
        the global configuration file directory.
        '''
        f = os.path.join(os.path.dirname(self.asciidoc), path)
        if not os.path.isfile(f):
            f = os.path.join(CONF_DIR, path)
            if not os.path.isfile(f):
                die('missing configuration file: %s' % f)
        return os.path.normpath(f)
    def xsl_stylesheet(self, file_name=None):
        '''
        Return full path name of file in asciidoc docbook-xsl configuration
        directory.
        If an XSL file was specified with the --xsl-file option then it is
        returned.
        '''
        if self.xsl_file is not None:
            return self.xsl_file
        if not file_name:
            file_name = self.format + '.xsl'
        return self.asciidoc_conf_file(os.path.join('docbook-xsl', file_name))
    def copy_resources(self, html_files, src_dir, dst_dir, resources=[]):
        '''
        Search html_files for images and CSS resource URIs (html_files can be a
        list of file names or a single file name).
        Copy them from the src_dir to the dst_dir.
        If not found in src_dir then recursively search all specified
        resource directories.
        Optional additional resources files can be passed in the resources list.
        '''
        resources = resources[:]  # Copy so the (mutable) default argument is never modified.
        resources += find_resources(html_files, 'link', 'href',
                lambda attrs: attrs.get('type') == 'text/css')
        resources += find_resources(html_files, 'img', 'src')
        resources += self.resource_files
        resources = list(set(resources))    # Drop duplicates.
        resources.sort()
        for f in resources:
            # 'src=dst' entries copy under a different destination name.
            if '=' in f:
                src, dst = f.split('=')
                if not dst:
                    dst = src
            else:
                src = dst = f
            src = os.path.normpath(src)
            dst = os.path.normpath(dst)
            if os.path.isabs(dst):
                die('absolute resource file name: %s' % dst)
            if dst.startswith(os.pardir):
                die('resource file outside destination directory: %s' % dst)
            src = os.path.join(src_dir, src)
            dst = os.path.join(dst_dir, dst)
            if not os.path.isfile(src):
                # Fall back to searching the resource directories; if still
                # not found it is only fatal when dst does not already exist.
                for d in self.resource_dirs:
                    d = os.path.join(src_dir, d)
                    found = find_files(d, os.path.basename(src))
                    if found:
                        src = found[0]
                        break
                else:
                    if not os.path.isfile(dst):
                        die('missing resource: %s' % src)
                    continue
            # Arrive here if resource file has been found.
            if os.path.normpath(src) != os.path.normpath(dst):
                dstdir = os.path.dirname(dst)
                shell_makedirs(dstdir)
                shell_copy(src, dst)
    def to_docbook(self):
        '''
        Use asciidoc to convert asciidoc_file to DocBook.
        args is a string containing additional asciidoc arguments.
        '''
        docbook_file = self.dst_path('.xml')
        if self.skip_asciidoc:
            if not os.path.isfile(docbook_file):
                die('missing docbook file: %s' % docbook_file)
            return
        shell('"%s" --backend docbook %s --out-file "%s" "%s"' %
             (self.asciidoc, self.asciidoc_opts, docbook_file, self.asciidoc_file))
        if not self.no_xmllint and XMLLINT:
            shell('"%s" --nonet --noout --valid "%s"' % (XMLLINT, docbook_file))
    def to_xhtml(self):
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xhtml_file = self.dst_path('.html')
        opts = '%s --output "%s"' % (self.xsltproc_opts, xhtml_file)
        exec_xsltproc(self.xsl_stylesheet(), docbook_file, self.destination_dir, opts)
        src_dir = os.path.dirname(self.asciidoc_file)
        self.copy_resources(xhtml_file, src_dir, self.destination_dir)
    def to_manpage(self):
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        opts = self.xsltproc_opts
        exec_xsltproc(self.xsl_stylesheet(), docbook_file, self.destination_dir, opts)
    def to_pdf(self):
        # PDF generation: FOP when --fop was given, dblatex otherwise.
        if self.fop:
            self.exec_fop()
        else:
            self.exec_dblatex()
    def exec_fop(self):
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xsl = self.xsl_stylesheet('fo.xsl')
        fo = self.dst_path('.fo')
        pdf = self.dst_path('.pdf')
        opts = '%s --output "%s"' % (self.xsltproc_opts, fo)
        exec_xsltproc(xsl, docbook_file, self.destination_dir, opts)
        shell('"%s" %s -fo "%s" -pdf "%s"' % (FOP, self.fop_opts, fo, pdf))
        if not self.keep_artifacts:
            shell_rm(fo)
    def exec_dblatex(self):
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        xsl = self.asciidoc_conf_file(os.path.join('dblatex','asciidoc-dblatex.xsl'))
        sty = self.asciidoc_conf_file(os.path.join('dblatex','asciidoc-dblatex.sty'))
        shell('"%s" -t %s -p "%s" -s "%s" %s "%s"' %
             (DBLATEX, self.format, xsl, sty, self.dblatex_opts, docbook_file))
    def to_dvi(self):
        self.exec_dblatex()
    def to_ps(self):
        self.exec_dblatex()
    def to_tex(self):
        self.exec_dblatex()
    def to_htmlhelp(self):
        self.to_chunked()
    def to_chunked(self):
        self.to_docbook()
        docbook_file = self.dst_path('.xml')
        opts = self.xsltproc_opts
        xsl_file = self.xsl_stylesheet()
        if self.format == 'chunked':
            dst_dir = self.dst_path('.chunked')
        elif self.format == 'htmlhelp':
            dst_dir = self.dst_path('.htmlhelp')
        if not 'base.dir ' in opts:
            opts += ' --stringparam base.dir "%s/"' % os.path.basename(dst_dir)
        # Create content.
        shell_rmtree(dst_dir)
        shell_makedirs(dst_dir)
        exec_xsltproc(xsl_file, docbook_file, self.destination_dir, opts)
        html_files = find_files(dst_dir, '*.html')
        src_dir = os.path.dirname(self.asciidoc_file)
        self.copy_resources(html_files, src_dir, dst_dir)
    def update_epub_manifest(self, opf_file):
        '''
        Scan the OEBPS directory for any files that have not been registered in
        the OPF manifest then add them to the manifest.
        '''
        opf_dir = os.path.dirname(opf_file)
        resource_files = []
        for (p,dirs,files) in os.walk(os.path.dirname(opf_file)):
            for f in files:
                f = os.path.join(p,f)
                if os.path.isfile(f):
                    assert f.startswith(opf_dir)
                    # Make the file name relative to the OPF directory.
                    f = '.' + f[len(opf_dir):]
                    f = os.path.normpath(f)
                    if f not in ['content.opf']:
                        resource_files.append(f)
        opf = xml.dom.minidom.parseString(open(opf_file).read())
        manifest_files = []
        manifest = opf.getElementsByTagName('manifest')[0]
        for el in manifest.getElementsByTagName('item'):
            f = el.getAttribute('href')
            f = os.path.normpath(f)
            manifest_files.append(f)
        count = 0
        for f in resource_files:
            if f not in manifest_files:
                count += 1
                verbose('adding to manifest: %s' % f)
                item = opf.createElement('item')
                item.setAttribute('href', f.replace(os.path.sep, '/'))
                item.setAttribute('id', 'a2x-%d' % count)
                mimetype = mimetypes.guess_type(f)[0]
                if mimetype is None:
                    die('unknown mimetype: %s' % f)
                item.setAttribute('media-type', mimetype)
                manifest.appendChild(item)
        if count > 0:
            # Only rewrite the OPF file if the manifest actually changed.
            open(opf_file, 'w').write(opf.toxml())
    def to_epub(self):
        self.to_docbook()
        xsl_file = self.xsl_stylesheet()
        docbook_file = self.dst_path('.xml')
        epub_file = self.dst_path('.epub')
        build_dir = epub_file + '.d'
        shell_rmtree(build_dir)
        shell_makedirs(build_dir)
        # Create content.
        exec_xsltproc(xsl_file, docbook_file, build_dir, self.xsltproc_opts)
        # Copy resources referenced in the OPF and resources referenced by the
        # generated HTML (in theory DocBook XSL should ensure they are
        # identical but this is not always the case).
        src_dir = os.path.dirname(self.asciidoc_file)
        dst_dir = os.path.join(build_dir, 'OEBPS')
        opf_file = os.path.join(dst_dir, 'content.opf')
        opf_resources = find_resources(opf_file, 'item', 'href')
        html_files = find_files(dst_dir, '*.html')
        self.copy_resources(html_files, src_dir, dst_dir, opf_resources)
        # Register any unregistered resources.
        self.update_epub_manifest(opf_file)
        # Build epub archive.
        cwd = os.getcwd()
        shell_cd(build_dir)
        try:
            if not self.dry_run:
                zip = zipfile.ZipFile(epub_file, 'w')
                try:
                    # Create and add uncompressed mimetype file.
                    verbose('archiving: mimetype')
                    open('mimetype','w').write('application/epub+zip')
                    zip.write('mimetype', compress_type=zipfile.ZIP_STORED)
                    # Compress all remaining files.
                    for (p,dirs,files) in os.walk('.'):
                        for f in files:
                            f = os.path.normpath(os.path.join(p,f))
                            if f != 'mimetype':
                                verbose('archiving: %s' % f)
                                zip.write(f, compress_type=zipfile.ZIP_DEFLATED)
                finally:
                    zip.close()
            verbose('created archive: %s' % epub_file)
        finally:
            shell_cd(cwd)
        if not self.keep_artifacts:
            shell_rmtree(build_dir)
        if self.epubcheck and EPUBCHECK:
            if not find_executable(EPUBCHECK):
                warning('epubcheck skipped: unable to find executable: %s' % EPUBCHECK)
            else:
                shell('"%s" "%s"' % (EPUBCHECK, epub_file))
    def to_text(self):
        text_file = self.dst_path('.text')
        html_file = self.dst_path('.text.html')
        if self.lynx:
            # lynx(1) path: render asciidoc HTML then dump it as text.
            shell('"%s" %s --conf-file "%s" -b html4 -o "%s" "%s"' %
                 (self.asciidoc, self.asciidoc_opts, self.asciidoc_conf_file('text.conf'),
                  html_file, self.asciidoc_file))
            shell('"%s" -dump "%s" > "%s"' %
                 (LYNX, html_file, text_file))
        else:
            # Use w3m(1).
            self.to_docbook()
            docbook_file = self.dst_path('.xml')
            opts = '%s --output "%s"' % (self.xsltproc_opts, html_file)
            exec_xsltproc(self.xsl_stylesheet(), docbook_file,
                    self.destination_dir, opts)
            shell('"%s" -cols 70 -dump -T text/html -no-graph "%s" > "%s"' %
                 (W3M, html_file, text_file))
        if not self.keep_artifacts:
            shell_rm(html_file)
#####################################################################
# Script main line.
#####################################################################
if __name__ == '__main__':
    description = '''A toolchain manager for AsciiDoc (converts Asciidoc text files to other file formats)'''
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [OPTIONS] SOURCE_FILE',
        version='%s %s' % (PROG,VERSION),
        description=description)
    parser.add_option('-a', '--attribute',
        action='append', dest='attributes', default=[], metavar='ATTRIBUTE',
        help='set asciidoc attribute value')
    parser.add_option('--asciidoc-opts',
        action='append', dest='asciidoc_opts', default=[],
        metavar='ASCIIDOC_OPTS', help='asciidoc options')
    #DEPRECATED
    parser.add_option('--copy',
        action='store_true', dest='copy', default=False,
        help='DEPRECATED: does nothing')
    parser.add_option('--conf-file',
        dest='conf_file', default=None, metavar='CONF_FILE',
        help='configuration file')
    parser.add_option('-D', '--destination-dir',
        action='store', dest='destination_dir', default=None, metavar='PATH',
        help='output directory (defaults to SOURCE_FILE directory)')
    parser.add_option('-d','--doctype',
        action='store', dest='doctype', metavar='DOCTYPE',
        choices=('article','manpage','book'),
        help='article, manpage, book')
    parser.add_option('--epubcheck',
        action='store_true', dest='epubcheck', default=False,
        help='check EPUB output with epubcheck')
    parser.add_option('-f','--format',
        action='store', dest='format', metavar='FORMAT', default = 'pdf',
        choices=('chunked','epub','htmlhelp','manpage','pdf', 'text',
                 'xhtml','dvi','ps','tex','docbook'),
        help='chunked, epub, htmlhelp, manpage, pdf, text, xhtml, dvi, ps, tex, docbook')
    parser.add_option('--icons',
        action='store_true', dest='icons', default=False,
        help='use admonition, callout and navigation icons')
    parser.add_option('--icons-dir',
        action='store', dest='icons_dir',
        default=None, metavar='PATH',
        help='admonition and navigation icon directory')
    parser.add_option('-k', '--keep-artifacts',
        action='store_true', dest='keep_artifacts', default=False,
        help='do not delete temporary build files')
    parser.add_option('--lynx',
        action='store_true', dest='lynx', default=False,
        help='use lynx to generate text files')
    parser.add_option('-L', '--no-xmllint',
        action='store_true', dest='no_xmllint', default=False,
        help='do not check asciidoc output with xmllint')
    parser.add_option('-n','--dry-run',
        action='store_true', dest='dry_run', default=False,
        help='just print the commands that would have been executed')
    parser.add_option('-r','--resource',
        action='append', dest='resources', default=[],
        metavar='PATH',
        help='resource file or directory containing resource files')
    parser.add_option('-m', '--resource-manifest',
        action='store', dest='resource_manifest', default=None, metavar='FILE',
        help='read resources from FILE')
    #DEPRECATED
    parser.add_option('--resource-dir',
        action='append', dest='resources', default=[],
        metavar='PATH',
        help='DEPRECATED: use --resource')
    #DEPRECATED
    parser.add_option('-s','--skip-asciidoc',
        action='store_true', dest='skip_asciidoc', default=False,
        help='DEPRECATED: redundant')
    parser.add_option('--stylesheet',
        action='store', dest='stylesheet', default=None,
        metavar='STYLESHEET',
        help='HTML CSS stylesheet file name')
    #DEPRECATED
    parser.add_option('--safe',
        action='store_true', dest='safe', default=False,
        help='DEPRECATED: does nothing')
    parser.add_option('--dblatex-opts',
        action='append', dest='dblatex_opts', default=[],
        metavar='DBLATEX_OPTS', help='dblatex options')
    parser.add_option('--fop',
        action='store_true', dest='fop', default=False,
        help='use FOP to generate PDF files')
    parser.add_option('--fop-opts',
        action='append', dest='fop_opts', default=[],
        metavar='FOP_OPTS', help='options for FOP pdf generation')
    parser.add_option('--xsltproc-opts',
        action='append', dest='xsltproc_opts', default=[],
        metavar='XSLTPROC_OPTS', help='xsltproc options for XSL stylesheets')
    parser.add_option('--xsl-file',
        action='store', dest='xsl_file', metavar='XSL_FILE',
        help='custom XSL stylesheet')
    parser.add_option('-v', '--verbose',
        action='count', dest='verbose', default=0,
        help='increase verbosity')
    if len(sys.argv) == 1:
        parser.parse_args(['--help'])
    # Pick up '// a2x: ...' options embedded in the source file; they
    # precede (and can be overridden by) command line options.
    source_options = get_source_options(sys.argv[-1])
    argv = source_options + sys.argv[1:]
    opts, args = parser.parse_args(argv)
    if len(args) != 1:
        parser.error('incorrect number of arguments')
    # Accumulating options are joined into single option strings.
    opts.asciidoc_opts = ' '.join(opts.asciidoc_opts)
    opts.dblatex_opts = ' '.join(opts.dblatex_opts)
    opts.fop_opts = ' '.join(opts.fop_opts)
    opts.xsltproc_opts = ' '.join(opts.xsltproc_opts)
    # Convert optparse.Values to a plain dict. vars() reads the instance
    # __dict__ directly instead of round-tripping through eval(str(opts)),
    # which is fragile (breaks on option values containing quotes) and an
    # unnecessary use of eval().
    opts = vars(opts)
    a2x = A2X(opts)
    OPTIONS = a2x   # verbose and dry_run used by utility functions.
    verbose('args: %r' % argv)
    a2x.asciidoc_file = args[0]
    try:
        a2x.load_conf()
        a2x.execute()
    except KeyboardInterrupt:
        sys.exit(1)
| Python |
#!/usr/bin/env python
"""
asciidoc - converts an AsciiDoc text file to HTML or DocBook
Copyright (C) 2002-2010 Stuart Rackham. Free use of this software is granted
under the terms of the GNU General Public License (GPL).
"""
import sys, os, re, time, traceback, tempfile, subprocess, codecs, locale, unicodedata
### Used by asciidocapi.py ###
VERSION = '8.6.4'  # See CHANGELOG file for version history.
MIN_PYTHON_VERSION = 2.4  # Require this version of Python or better.
#---------------------------------------------------------------------------
# Program constants.
#---------------------------------------------------------------------------
DEFAULT_BACKEND = 'html'
DEFAULT_DOCTYPE = 'article'
# Allowed substitution options for List, Paragraph and DelimitedBlock
# definition subs entry.
SUBS_OPTIONS = ('specialcharacters','quotes','specialwords',
    'replacements', 'attributes','macros','callouts','normal','verbatim',
    'none','replacements2')
# Default value for unspecified subs and presubs configuration file entries.
SUBS_NORMAL = ('specialcharacters','quotes','attributes',
    'specialwords','replacements','macros','replacements2')
SUBS_VERBATIM = ('specialcharacters','callouts')
NAME_RE = r'(?u)[^\W\d][-\w]*'  # Valid section or attribute name.
OR, AND = ',', '+'              # Attribute list separators.
#---------------------------------------------------------------------------
# Utility functions and classes.
#---------------------------------------------------------------------------
class EAsciiDoc(Exception):
    """Application exception raised for AsciiDoc processing errors."""
class OrderedDict(dict):
    """
    Dictionary ordered by insertion order.
    Python Cookbook: Ordered Dictionary, Submitter: David Benjamin.
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747

    keys(), values() and items() all return lists in key insertion order.
    """
    def __init__(self, d=None, **kwargs):
        # _keys records the dictionary keys in insertion order.
        self._keys = []
        if d is None: d = kwargs
        # BUG FIX: route through update() so the initial keys are
        # registered in _keys (dict.__init__ populated the dict but left
        # _keys empty, making the initial items invisible to keys()).
        self.update(d)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)
    def __setitem__(self, key, item):
        dict.__setitem__(self, key, item)
        if key not in self._keys: self._keys.append(key)
    def clear(self):
        dict.clear(self)
        self._keys = []
    def copy(self):
        # BUG FIX: dict.copy() returns a plain dict, which cannot carry
        # the _keys attribute (the old code raised AttributeError).
        d = OrderedDict(self)
        d._keys = self._keys[:]
        return d
    def items(self):
        # Return a list (not a lazy iterator) for Python 2/3 consistency.
        return [(key, self[key]) for key in self._keys]
    def keys(self):
        return self._keys
    def popitem(self):
        # Remove and return the most recently inserted (key, value) pair.
        try:
            key = self._keys[-1]
        except IndexError:
            raise KeyError('dictionary is empty')
        val = self[key]
        del self[key]
        return (key, val)
    def setdefault(self, key, failobj = None):
        # BUG FIX: return the stored value (the old code discarded
        # dict.setdefault's result and returned None).
        result = dict.setdefault(self, key, failobj)
        if key not in self._keys: self._keys.append(key)
        return result
    def update(self, d=None, **kwargs):
        if d is None:
            d = kwargs
        dict.update(self, d)
        for key in d.keys():
            if key not in self._keys: self._keys.append(key)
    def values(self):
        # Return a list (not a lazy iterator) for Python 2/3 consistency.
        return [self.get(key) for key in self._keys]
class AttrDict(dict):
    """
    Like a dictionary except values can be accessed as attributes i.e. obj.foo
    can be used in addition to obj['foo'].
    If an item is not present None is returned.
    """
    def __getattr__(self, key):
        try: return self[key]
        except KeyError: return None
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        try: del self[key]
        except KeyError, k: raise AttributeError, k
    def __repr__(self):
        return '<AttrDict ' + dict.__repr__(self) + '>'
    # Pickle/copy support: the state is just the dict contents.
    def __getstate__(self):
        return dict(self)
    def __setstate__(self,value):
        for k,v in value.items(): self[k]=v
class InsensitiveDict(dict):
    """
    A dictionary with case insensitive key access: every key is
    normalised to lower case both when stored and when looked up.
    """
    def __getitem__(self, key):
        lowered = key.lower()
        return dict.__getitem__(self, lowered)
    def __setitem__(self, key, value):
        lowered = key.lower()
        dict.__setitem__(self, lowered, value)
    def has_key(self, key):
        return dict.has_key(self,key.lower())
    def get(self, key, default=None):
        lowered = key.lower()
        return dict.get(self, lowered, default)
    def update(self, dict):
        # Route every item through __setitem__ so keys get lowercased.
        for k, v in dict.items():
            self[k] = v
    def setdefault(self, key, default = None):
        lowered = key.lower()
        return dict.setdefault(self, lowered, default)
class Trace(object):
    """
    Diagnostic tracing driven by the document 'trace' attribute (used in
    conjunction with the single global instance of this class named trace).
    """
    SUBS_NAMES = ('specialcharacters','quotes','specialwords',
                  'replacements', 'attributes','macros','callouts',
                  'replacements2')
    def __init__(self):
        self.name_re = ''     # Regexp pattern to match trace names.
        self.linenos = True   # Prefix trace messages with line numbers.
        self.offset = 0       # Line number offset.
    def __call__(self, name, before, after=None):
        """
        Print trace message if tracing is on and the trace 'name' matches the
        document 'trace' attribute (treated as a regexp).
        The 'before' and 'after' messages are only printed if they differ.
        """
        pattern = document.attributes.get('trace')
        if pattern == 'subs':   # Alias for all the inline substitutions.
            pattern = '|'.join(self.SUBS_NAMES)
        self.name_re = pattern
        if self.name_re is None:
            return
        msg = message.format(name, 'TRACE: ', self.linenos, offset=self.offset)
        if before == after or not re.match(self.name_re, name):
            return
        if is_array(before):
            before = '\n'.join(before)
        if after is None:
            msg += '\n%s\n' % before
        else:
            if is_array(after):
                after = '\n'.join(after)
            msg += '\n<<<\n%s\n>>>\n%s\n' % (before, after)
        message.stderr(msg)
class Message:
    """
    Program message reporting: errors, warnings and verbose diagnostics.
    All messages are accumulated in self.messages; they are echoed to
    stderr only when running as the command-line program.
    """
    PROG = os.path.basename(os.path.splitext(__file__)[0])
    def __init__(self):
        # Set to True or False to globally override line numbers method
        # argument. Has no effect when set to None.
        self.linenos = None
        self.messages = []
    def stdout(self, msg):
        print(msg)
    def stderr(self, msg=''):
        self.messages.append(msg)
        if __name__ == '__main__':
            sys.stderr.write('%s: %s%s' % (self.PROG, msg, os.linesep))
    def verbose(self, msg, linenos=True):
        if not config.verbose:
            return
        self.stderr(self.format(msg, linenos=linenos))
    def warning(self, msg, linenos=True, offset=0):
        msg = self.format(msg, 'WARNING: ', linenos, offset=offset)
        document.has_warnings = True
        self.stderr(msg)
    def deprecated(self, msg, linenos=True):
        self.stderr(self.format(msg, 'DEPRECATED: ', linenos))
    def format(self, msg, prefix='', linenos=True, cursor=None, offset=0):
        """Return formatted message string."""
        # Line numbers are emitted unless globally disabled, and only when
        # the reader has a current cursor (file name, line number).
        want_lineno = self.linenos is not False and \
                      ((linenos or self.linenos) and reader.cursor)
        if want_lineno:
            if cursor is None:
                cursor = reader.cursor
            prefix += '%s: line %d: ' % (os.path.basename(cursor[0]),
                                         cursor[1]+offset)
        return prefix + msg
    def error(self, msg, cursor=None, halt=False):
        """
        Report fatal error.
        If halt=True raise EAsciiDoc exception.
        If halt=False don't exit application, continue in the hope of reporting
        all fatal errors finishing with a non-zero exit code.
        """
        if halt:
            raise EAsciiDoc(self.format(msg, linenos=False, cursor=cursor))
        msg = self.format(msg, 'ERROR: ', cursor=cursor)
        self.stderr(msg)
        document.has_errors = True
    def unsafe(self, msg):
        self.error('unsafe: ' + msg)
def userdir():
    """
    Return the user's home directory or None if it cannot be determined.
    """
    home = os.path.expanduser('~')
    # expanduser() returns its argument unchanged when no home is defined.
    return None if home == '~' else home
def localapp():
    """
    Return True if we are not executing the system wide version
    i.e. the configuration is in the executable's directory.
    """
    conf = os.path.join(APP_DIR, 'asciidoc.conf')
    return os.path.isfile(conf)
def file_in(fname, directory):
    """Return True if existing file 'fname' resides inside 'directory' (or
    any of its subdirectories). An empty directory string (not to be
    confused with None) means the current directory."""
    assert os.path.isfile(fname)
    # Empty directory (not to be confused with None) is the current directory.
    if directory == '':
        directory = os.getcwd()
    else:
        assert os.path.isdir(directory)
        directory = os.path.realpath(directory)
    fname = os.path.realpath(fname)
    # Compare whole path components, not raw character prefixes:
    # os.path.commonprefix() works character-wise, so the old test wrongly
    # reported '/tmp/foobar/x' as residing inside '/tmp/foo'.
    prefix = os.path.join(directory, '')  # directory with trailing separator
    return fname.startswith(prefix)
def safe():
    """Return True if the current document is processed in safe mode."""
    return document.safe
def is_safe_file(fname, directory=None):
    """
    Return True if 'fname' may be accessed: either safe mode is off, or the
    file resides in 'directory' (default: the source file's directory) or in
    one of the application/configuration directories.
    """
    # A safe file must reside in directory directory (defaults to the source
    # file directory).
    if directory is None:
        if document.infile == '<stdin>':
            return not safe()
        directory = os.path.dirname(document.infile)
    elif directory == '':
        directory = '.'
    if not safe():
        return True
    return (file_in(fname, directory)
            or file_in(fname, APP_DIR)
            or file_in(fname, CONF_DIR))
def safe_filename(fname, parentdir):
    """
    Return file name which must reside in the parent file directory.
    Return None if file is not found or not safe.
    """
    if not os.path.isabs(fname):
        # Include files are relative to parent document
        # directory.
        fname = os.path.normpath(os.path.join(parentdir,fname))
    if not os.path.isfile(fname):
        message.warning('include file not found: %s' % fname)
        return None
    # Safe mode check: the file must live in the parent directory or one
    # of the application/configuration directories.
    if not is_safe_file(fname, parentdir):
        message.unsafe('include file: %s' % fname)
        return None
    return fname
def assign(dst, src):
    """Copy every instance attribute of object 'src' onto object 'dst'."""
    for name, value in src.__dict__.items():
        setattr(dst, name, value)
def strip_quotes(s):
    """Trim white space and, if necessary, quote characters from s."""
    s = s.strip()
    # Only strings of at least three characters that both start and end
    # with a double-quote are unquoted.
    if len(s) >= 3 and s.startswith('"') and s.endswith('"'):
        s = s[1:-1]
    return s
def is_re(s):
    """Return True if s is a valid regular expression else return False."""
    try:
        re.compile(s)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # not silently swallowed. re.error is raised for bad patterns and
        # TypeError for non-string arguments; both still return False.
        return False
    return True
def re_join(relist):
    """Join list of regular expressions re1,re2,... to single regular
    expression (re1)|(re2)|..."""
    if not relist:
        return None
    # Delete named groups to avoid ambiguity.
    anonymous = [re.sub(r'\?P<\S+?>', '', s) for s in relist]
    return '(' + ')|('.join(anonymous) + ')'
def validate(value, rule, errmsg):
    """Validate value against rule expression. Throw EAsciiDoc exception with
    errmsg if validation fails."""
    # '$' in the rule is a placeholder for the value being checked.
    try:
        ok = eval(rule.replace('$', str(value)))
    except Exception:
        raise EAsciiDoc(errmsg)
    if not ok:
        raise EAsciiDoc(errmsg)
    return value
def lstrip_list(s):
    """
    Return list with empty items from start of list removed.
    """
    n = len(s)
    i = 0
    # Advance past leading falsy items.
    while i < n and not s[i]:
        i += 1
    if i == n:
        return []
    return s[i:]
def rstrip_list(s):
    """
    Return list with empty items from end of list removed.
    """
    i = len(s)
    # Back up past trailing falsy items.
    while i > 0 and not s[i-1]:
        i -= 1
    if i == 0:
        return []
    return s[:i]
def strip_list(s):
    """
    Return list with empty items from start and end of list removed.
    """
    return rstrip_list(lstrip_list(s))
def is_array(obj):
    """
    Return True if object is list or tuple type.
    """
    return isinstance(obj, (list, tuple))
def dovetail(lines1, lines2):
    """
    Append list or tuple of strings 'lines2' to list 'lines1'. Join the last
    non-blank item in 'lines1' with the first non-blank item in 'lines2' into
    a single string.
    """
    assert is_array(lines1)
    assert is_array(lines2)
    lines1 = strip_list(lines1)
    lines2 = strip_list(lines2)
    # With either side blank there is nothing to join.
    if not lines1 or not lines2:
        return list(lines1) + list(lines2)
    seam = [lines1[-1] + lines2[0]]
    return list(lines1[:-1]) + seam + list(lines2[1:])
def dovetail_tags(stag,content,etag):
    """Merge the start tag with the first content line and the last
    content line with the end tag. This ensures verbatim elements don't
    include extraneous opening and closing line breaks."""
    return dovetail(dovetail(stag,content), etag)
def parse_attributes(attrs, dict):
    """Update a dictionary with name/value attributes from the attrs string.
    The attrs string is a comma separated list of values and keyword name=value
    pairs. Values must precede keywords and are named '1','2'... The entire
    attributes list is named '0'. If keywords are specified string values must
    be quoted. Examples:
    attrs: ''
    dict: {}
    attrs: 'hello,world'
    dict: {'2': 'world', '0': 'hello,world', '1': 'hello'}
    attrs: '"hello", planet="earth"'
    dict: {'planet': 'earth', '0': '"hello",planet="earth"', '1': 'hello'}
    """
    def f(*args, **keywords):
        # Name positional arguments '1','2'... and merge them into keywords.
        for i, arg in enumerate(args):
            if str(i+1) not in keywords:
                keywords[str(i+1)] = arg
        return keywords
    if not attrs:
        return
    dict['0'] = attrs
    # Replace line separators with spaces so line spanning works.
    s = re.sub(r'\s', ' ', attrs)
    try:
        d = eval('f('+s+')')
        # Attributes must evaluate to strings, numbers or None.
        for v in d.values():
            if not (v is None or isinstance(v, (str, int, float))):
                raise Exception
    except Exception:
        # Not a valid Python argument list: fall back to treating the whole
        # string as a comma separated list of unquoted positional values.
        s = s.replace('"', '\\"')
        quoted = ['"' + x.strip() + '"' for x in s.split(',')]
        try:
            d = eval('f(' + ','.join(quoted) + ')')
        except Exception:
            return  # If there's a syntax error leave with {0}=attrs.
    for k in d.keys():  # Drop any empty positional arguments.
        if d[k] == '': del d[k]
    dict.update(d)
    assert len(d) > 0
def parse_named_attributes(s, attrs):
    """Update an attrs dictionary with name="value" attributes from the s
    string. Returns False if invalid syntax.
    Example:
    attrs: 'star="sun",planet="earth"'
    dict: {'planet':'earth', 'star':'sun'}
    """
    def capture(**keywords):
        return keywords
    try:
        parsed = eval('capture(' + s + ')')
        attrs.update(parsed)
        return True
    except Exception:
        return False
def parse_list(s):
    """Parse comma separated string of Python literals. Return a tuple of
    parsed values."""
    try:
        return eval('tuple([' + s + '])')
    except Exception:
        raise EAsciiDoc('malformed list: ' + s)
def parse_options(options, allowed, errmsg):
    """Parse comma separated string of unquoted option names and return as a
    tuple of valid options. 'allowed' is a list of allowed option values.
    If allowed=() then all legitimate names are allowed.
    'errmsg' is an error message prefix if an illegal option error is thrown."""
    if not options:
        return ()
    result = []
    for option in re.split(r'\s*,\s*', options):
        if (allowed and option not in allowed) or not is_name(option):
            raise EAsciiDoc('%s: %s' % (errmsg, option))
        result.append(option)
    return tuple(result)
def symbolize(s):
    """Drop non-symbol characters and convert to lowercase."""
    cleaned = re.sub(r'(?u)[^\w\-_]', '', s)
    return cleaned.lower()
def is_name(s):
    """Return True if s is valid attribute, macro or tag name
    (starts with alpha containing alphanumeric and dashes only)."""
    # NAME_RE is the module-level name pattern; anchor both ends so the
    # whole string must match.
    return re.match(r'^'+NAME_RE+r'$',s) is not None
def subs_quotes(text):
    """Quoted text is marked up using the config.quotes table and the
    resulting text is returned."""
    keys = config.quotes.keys()
    for q in keys:
        i = q.find('|')
        # A 'lq|rq' specification defines distinct left and right quote
        # strings; '|' and '||' themselves are literal quote characters.
        if i != -1 and q != '|' and q != '||':
            lq = q[:i] # Left quote.
            rq = q[i+1:] # Right quote.
        else:
            lq = rq = q
        tag = config.quotes[q]
        # An empty tag disables the quote.
        if not tag: continue
        # Unconstrained quotes prefix the tag name with a hash.
        if tag[0] == '#':
            tag = tag[1:]
            # Unconstrained quotes can appear anywhere.
            reo = re.compile(r'(?msu)(^|.)(\[(?P<attrlist>[^[\]]+?)\])?' \
                    + r'(?:' + re.escape(lq) + r')' \
                    + r'(?P<content>.+?)(?:'+re.escape(rq)+r')')
        else:
            # The text within constrained quotes must be bounded by white space.
            # Non-word (\W) characters are allowed at boundaries to accomodate
            # enveloping quotes and punctuation e.g. a='x', ('x'), 'x', ['x'].
            reo = re.compile(r'(?msu)(^|[^\w;:}])(\[(?P<attrlist>[^[\]]+?)\])?' \
                + r'(?:' + re.escape(lq) + r')' \
                + r'(?P<content>\S|\S.*?\S)(?:'+re.escape(rq)+r')(?=\W|$)')
        pos = 0
        while True:
            mo = reo.search(text,pos)
            if not mo: break
            if text[mo.start()] == '\\':
                # Delete leading backslash.
                text = text[:mo.start()] + text[mo.start()+1:]
                # Skip past start of match.
                pos = mo.start() + 1
            else:
                # Optional [attrlist] before the quote customizes the tag.
                attrlist = {}
                parse_attributes(mo.group('attrlist'), attrlist)
                stag,etag = config.tag(tag, attrlist)
                s = mo.group(1) + stag + mo.group('content') + etag
                text = text[:mo.start()] + s + text[mo.end():]
                # Resume searching just past the replacement.
                pos = mo.start() + len(s)
    return text
def subs_tag(tag, dict=None):
    """Perform attribute substitution and split tag string returning start, end
    tag tuple (c.f. Config.tag()). Returns [None,None] for an empty tag or a
    tag containing an undefined attribute."""
    # Use a fresh dictionary per call: the old mutable '{}' default was
    # shared between calls, and subs_attrs() can modify the dictionary it
    # is passed (it deletes and rewrites entries).
    if dict is None:
        dict = {}
    if not tag:
        return [None, None]
    s = subs_attrs(tag, dict)
    if not s:
        message.warning('tag \'%s\' dropped: contains undefined attribute' % tag)
        return [None, None]
    result = s.split('|')
    if len(result) == 1:
        return result + [None]
    elif len(result) == 2:
        return result
    else:
        raise EAsciiDoc('malformed tag: %s' % tag)
def parse_entry(entry, dict=None, unquote=False, unique_values=False,
        allow_name_only=False, escape_delimiter=True):
    """Parse name=value entry to dictionary 'dict'. Return tuple (name,value)
    or None if illegal entry.
    If name= then value is set to ''.
    If name and allow_name_only=True then value is set to ''.
    If name! and allow_name_only=True then value is set to None.
    Leading and trailing white space is stripped from 'name' and 'value'.
    'name' can contain any printable characters.
    If the '=' delimiter character is allowed in the 'name' then
    it must be escaped with a backslash and escape_delimiter must be True.
    If 'unquote' is True leading and trailing double-quotes are stripped from
    'name' and 'value'.
    If 'unique_values' is True then dictionary entries with the same value are
    removed before the parsed entry is added."""
    if escape_delimiter:
        # Only an '=' not preceded by a backslash acts as the delimiter.
        mo = re.search(r'(?:[^\\](=))', entry)
    else:
        mo = re.search(r'(=)', entry)
    if mo:  # name=value entry.
        name = entry[:mo.start(1)]
        if escape_delimiter:
            name = name.replace(r'\=', '=')  # Unescape \= in name.
        value = entry[mo.end(1):]
    elif allow_name_only and entry:  # name or name! entry.
        name = entry
        if name.endswith('!'):
            name = name[:-1]
            value = None
        else:
            value = ''
    else:
        return None
    if unquote:
        name = strip_quotes(name)
        if value is not None:
            value = strip_quotes(value)
    else:
        name = name.strip()
        if value is not None:
            value = value.strip()
    if not name:
        return None
    if dict is not None:
        if unique_values:
            # Ensure the value maps back to a single name.
            for k, v in dict.items():
                if v == value: del dict[k]
        dict[name] = value
    return name, value
def parse_entries(entries, dict, unquote=False, unique_values=False,
        allow_name_only=False, escape_delimiter=True):
    """Parse name=value entries from lines of text in 'entries' into
    dictionary 'dict'. Blank lines are skipped."""
    for entry in config.expand_templates(entries):
        if not entry:
            continue
        if not parse_entry(entry, dict, unquote, unique_values,
                allow_name_only, escape_delimiter):
            raise EAsciiDoc('malformed section entry: %s' % entry)
def dump_section(name, dict, f=sys.stdout):
    """Write parameters in 'dict' as in configuration file section format with
    section 'name'."""
    f.write('[%s]%s' % (name, writer.newline))
    for k, v in dict.items():
        if v is None:
            # Don't dump undefined attributes.
            continue
        k = str(k).replace('=', r'\=')  # Escape = in name.
        # Quote if necessary.
        if len(k) != len(k.strip()):
            k = '"' + k + '"'
        if v and len(v) != len(v.strip()):
            v = '"' + v + '"'
        s = k + '=' + v
        if s.startswith('#'):
            s = '\\' + s  # Escape so not treated as comment lines.
        f.write('%s%s' % (s, writer.newline))
    f.write(writer.newline)
def update_attrs(attrs, dict):
    """Update 'attrs' dictionary with parsed attributes in dictionary 'dict'.
    Raises EAsciiDoc on an illegal attribute name."""
    for name, value in dict.items():
        if not is_name(name):
            raise EAsciiDoc('illegal attribute name: %s' % name)
        attrs[name] = value
def is_attr_defined(attrs, dic):
    """
    Check if the sequence of attributes is defined in dictionary 'dic'.
    Valid 'attrs' sequence syntax:
    <attr> Return True if single attribute is defined.
    <attr1>,<attr2>,... Return True if one or more attributes are defined.
    <attr1>+<attr2>+... Return True if all the attributes are defined.
    """
    if OR in attrs:
        # OR expression: at least one attribute must be defined.
        return any(dic.get(a.strip()) is not None for a in attrs.split(OR))
    elif AND in attrs:
        # AND expression: every attribute must be defined.
        return all(dic.get(a.strip()) is not None for a in attrs.split(AND))
    else:
        return dic.get(attrs.strip()) is not None
def filter_lines(filter_cmd, lines, attrs={}):
    """
    Run 'lines' through the 'filter_cmd' shell command and return the result.
    The 'attrs' dictionary contains additional filter attributes.
    NOTE(review): 'attrs' has a mutable default argument; it is only read
    here (attrs.get), so the usual shared-state hazard does not bite.
    """
    def findfilter(name,dir,filter):
        """Find filter file 'filter' with style name 'name' in directory
        'dir'. Return found file path or None if not found."""
        # Style-specific filter takes precedence over the generic one.
        if name:
            result = os.path.join(dir,'filters',name,filter)
            if os.path.isfile(result):
                return result
        result = os.path.join(dir,'filters',filter)
        if os.path.isfile(result):
            return result
        return None
    # Return input lines if there's no filter.
    if not filter_cmd or not filter_cmd.strip():
        return lines
    # Perform attributes substitution on the filter command.
    s = subs_attrs(filter_cmd, attrs)
    if not s:
        message.error('undefined filter attribute in command: %s' % filter_cmd)
        return []
    filter_cmd = s.strip()
    # Parse for quoted and unquoted command and command tail.
    # Double quoted.
    mo = re.match(r'^"(?P<cmd>[^"]+)"(?P<tail>.*)$', filter_cmd)
    if not mo:
        # Single quoted.
        mo = re.match(r"^'(?P<cmd>[^']+)'(?P<tail>.*)$", filter_cmd)
        if not mo:
            # Unquoted catch all.
            mo = re.match(r'^(?P<cmd>\S+)(?P<tail>.*)$', filter_cmd)
    cmd = mo.group('cmd').strip()
    found = None
    if not os.path.dirname(cmd):
        # Filter command has no directory path so search filter directories.
        # Search order: document directory, user directory, then the
        # application or global configuration directory.
        filtername = attrs.get('style')
        d = document.attributes.get('docdir')
        if d:
            found = findfilter(filtername, d, cmd)
        if not found:
            if USER_DIR:
                found = findfilter(filtername, USER_DIR, cmd)
            if not found:
                if localapp():
                    found = findfilter(filtername, APP_DIR, cmd)
                else:
                    found = findfilter(filtername, CONF_DIR, cmd)
    else:
        if os.path.isfile(cmd):
            found = cmd
        else:
            message.warning('filter not found: %s' % cmd)
    if found:
        filter_cmd = '"' + found + '"' + mo.group('tail')
    if sys.platform == 'win32':
        # Windows doesn't like running scripts directly so explicitly
        # specify interpreter.
        if found:
            if cmd.endswith('.py'):
                filter_cmd = 'python ' + filter_cmd
            elif cmd.endswith('.rb'):
                filter_cmd = 'ruby ' + filter_cmd
    message.verbose('filtering: ' + filter_cmd)
    try:
        # shell=True: the command string may contain a tail with arguments.
        p = subprocess.Popen(filter_cmd, shell=True,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        output = p.communicate(os.linesep.join(lines))[0]
    except Exception:
        raise EAsciiDoc,'filter error: %s: %s' % (filter_cmd, sys.exc_info()[1])
    if output:
        result = [s.rstrip() for s in output.split(os.linesep)]
    else:
        result = []
    filter_status = p.wait()
    if filter_status:
        message.warning('filter non-zero exit code: %s: returned %d' %
               (filter_cmd, filter_status))
    if lines and not result:
        message.warning('no output from filter: %s' % filter_cmd)
    return result
def system(name, args, is_macro=False, attrs=None):
    """
    Evaluate a system attribute ({name:args}) or system block macro
    (name::[args]).
    If is_macro is True then we are processing a system block macro otherwise
    it's a system attribute.
    The attrs dictionary is updated by the counter and set system attributes.
    NOTE: The include1 attribute is used internally by the include1::[] macro
    and is not for public use.
    """
    if is_macro:
        syntax = '%s::[%s]' % (name,args)
        separator = '\n'
    else:
        syntax = '{%s:%s}' % (name,args)
        separator = writer.newline
    if name not in ('eval','eval3','sys','sys2','sys3','include','include1','counter','counter2','set','set2','template'):
        if is_macro:
            msg = 'illegal system macro name: %s' % name
        else:
            msg = 'illegal system attribute name: %s' % name
        message.warning(msg)
        return None
    if is_macro:
        # Macro arguments are subject to attribute substitution first.
        s = subs_attrs(args)
        if s is None:
            message.warning('skipped %s: undefined attribute in: %s' % (name,args))
            return None
        args = s
    if name != 'include1':
        message.verbose('evaluating: %s' % syntax)
    # In safe mode only the include actions proceed (they perform their own
    # is_safe_file() checks below); everything else is rejected.
    if safe() and name not in ('include','include1'):
        message.unsafe(syntax)
        return None
    result = None
    if name in ('eval','eval3'):
        # NOTE: 'args' is evaluated as a Python expression -- unsafe on
        # untrusted documents, which is why safe mode blocks it above.
        try:
            result = eval(args)
            if result is True:
                result = ''
            elif result is False:
                result = None
            elif result is not None:
                result = str(result)
        except Exception:
            message.warning('%s: evaluation error' % syntax)
    elif name in ('sys','sys2','sys3'):
        result = ''
        # Capture the shell command's stdout via a temporary file.
        fd,tmp = tempfile.mkstemp()
        os.close(fd)
        try:
            cmd = args
            cmd = cmd + (' > %s' % tmp)
            if name == 'sys2':
                # sys2 also captures stderr.
                cmd = cmd + ' 2>&1'
            if os.system(cmd):
                message.warning('%s: non-zero exit status' % syntax)
            try:
                if os.path.isfile(tmp):
                    lines = [s.rstrip() for s in open(tmp)]
                else:
                    lines = []
            except Exception:
                raise EAsciiDoc,'%s: temp file read error' % syntax
            result = separator.join(lines)
        finally:
            if os.path.isfile(tmp):
                os.remove(tmp)
    elif name in ('counter','counter2'):
        # {counter:attr[:seed]} -- create or increment a document attribute.
        mo = re.match(r'^(?P<attr>[^:]*?)(:(?P<seed>.*))?$', args)
        attr = mo.group('attr')
        seed = mo.group('seed')
        # Seed must be a number or a single character.
        if seed and (not re.match(r'^\d+$', seed) and len(seed) > 1):
            message.warning('%s: illegal counter seed: %s' % (syntax,seed))
            return None
        if not is_name(attr):
            message.warning('%s: illegal attribute name' % syntax)
            return None
        value = document.attributes.get(attr)
        if value:
            # Numeric counters increment; single characters advance to the
            # next character code.
            if not re.match(r'^\d+$', value) and len(value) > 1:
                message.warning('%s: illegal counter value: %s'
                                % (syntax,value))
                return None
            if re.match(r'^\d+$', value):
                expr = value + '+1'
            else:
                expr = 'chr(ord("%s")+1)' % value
            try:
                result = str(eval(expr))
            except Exception:
                message.warning('%s: evaluation error: %s' % (syntax, expr))
        else:
            if seed:
                result = seed
            else:
                result = '1'
        document.attributes[attr] = result
        if attrs is not None:
            attrs[attr] = result
        if name == 'counter2':
            # counter2 increments silently -- it expands to nothing.
            result = ''
    elif name in ('set','set2'):
        # {set:attr[:value]} -- define an attribute; 'attr!' undefines it.
        mo = re.match(r'^(?P<attr>[^:]*?)(:(?P<value>.*))?$', args)
        attr = mo.group('attr')
        value = mo.group('value')
        if value is None:
            value = ''
        if attr.endswith('!'):
            attr = attr[:-1]
            value = None
        if not is_name(attr):
            message.warning('%s: illegal attribute name' % syntax)
        else:
            if attrs is not None:
                attrs[attr] = value
            if name != 'set2': # set2 only updates local attributes.
                document.attributes[attr] = value
        if value is None:
            # Undefining expands to None so the containing line is dropped.
            result = None
        else:
            result = ''
    elif name == 'include':
        if not os.path.exists(args):
            message.warning('%s: file does not exist' % syntax)
        elif not is_safe_file(args):
            message.unsafe(syntax)
        else:
            result = [s.rstrip() for s in open(args)]
            if result:
                result = subs_attrs(result)
                result = separator.join(result)
                result = result.expandtabs(reader.tabsize)
            else:
                result = ''
    elif name == 'include1':
        result = separator.join(config.include1[args])
    elif name == 'template':
        if not args in config.sections:
            message.warning('%s: template does not exist' % syntax)
        else:
            result = []
            for line in config.sections[args]:
                line = subs_attrs(line)
                if line is not None:
                    result.append(line)
            result = '\n'.join(result)
    else:
        assert False
    if result and name in ('eval3','sys3'):
        # eval3/sys3 output bypasses further substitution: stash it as a
        # passthrough and return an index wrapped in \x07 placeholders.
        macros.passthroughs.append(result)
        result = '\x07' + str(len(macros.passthroughs)-1) + '\x07'
    return result
def subs_attrs(lines, dictionary=None):
    """Substitute 'lines' of text with attributes from the global
    document.attributes dictionary and from 'dictionary' ('dictionary'
    entries take precedence). Return a tuple of the substituted lines. 'lines'
    containing undefined attributes are deleted. If 'lines' is a string then
    return a string.
    - Attribute references are substituted in the following order: simple,
      conditional, system.
    - Attribute references inside 'dictionary' entry values are substituted.
    """
    def end_brace(text,start):
        """Return index following end brace that matches brace at start in
        text."""
        assert text[start] == '{'
        n = 0
        result = start
        for c in text[start:]:
            # Skip braces that are followed by a backslash.
            if result == len(text)-1 or text[result+1] != '\\':
                if c == '{': n = n + 1
                elif c == '}': n = n - 1
            result = result + 1
            if n == 0: break
        return result
    if type(lines) == str:
        string_result = True
        lines = [lines]
    else:
        string_result = False
    if dictionary is None:
        attrs = document.attributes
    else:
        # Remove numbered document attributes so they don't clash with
        # attribute list positional attributes.
        attrs = {}
        for k,v in document.attributes.items():
            if not re.match(r'^\d+$', k):
                attrs[k] = v
        # Substitute attribute references inside dictionary values.
        # NOTE: 'dictionary' is modified in place (None-valued and
        # undefined entries are deleted).
        for k,v in dictionary.items():
            if v is None:
                del dictionary[k]
            else:
                v = subs_attrs(str(v))
                if v is None:
                    del dictionary[k]
                else:
                    dictionary[k] = v
        attrs.update(dictionary)
    # Substitute all attributes in all lines.
    result = []
    for line in lines:
        # Make it easier for regular expressions: move the backslash of an
        # escaped brace to the other side so '(?!\\)' look-aheads work.
        line = line.replace('\\{','{\\')
        line = line.replace('\\}','}\\')
        # Expand simple attributes ({name}).
        # Nested attributes not allowed.
        reo = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w]*?)\}(?!\\)')
        pos = 0
        while True:
            mo = reo.search(line,pos)
            if not mo: break
            s = attrs.get(mo.group('name'))
            if s is None:
                # Leave undefined references in place for now; the line is
                # dropped later if any remain.
                pos = mo.end()
            else:
                s = str(s)
                line = line[:mo.start()] + s + line[mo.end():]
                pos = mo.start() + len(s)
        # Expand conditional attributes.
        # Single name -- higher precedence.
        reo1 = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w]*?)' \
            r'(?P<op>\=|\?|!|#|%|@|\$)' \
            r'(?P<value>.*?)\}(?!\\)')
        # Multiple names (n1,n2,... or n1+n2+...) -- lower precedence.
        reo2 = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w'+OR+AND+r']*?)' \
            r'(?P<op>\=|\?|!|#|%|@|\$)' \
            r'(?P<value>.*?)\}(?!\\)')
        for reo in [reo1,reo2]:
            pos = 0
            while True:
                mo = reo.search(line,pos)
                if not mo: break
                attr = mo.group()
                name = mo.group('name')
                if reo == reo2:
                    # Evaluate the OR/AND name expression to '' (defined)
                    # or None (undefined).
                    if OR in name:
                        sep = OR
                    else:
                        sep = AND
                    names = [s.strip() for s in name.split(sep) if s.strip() ]
                    for n in names:
                        if not re.match(r'^[^\\\W][-\w]*$',n):
                            message.error('illegal attribute syntax: %s' % attr)
                    if sep == OR:
                        # Process OR name expression: n1,n2,...
                        for n in names:
                            if attrs.get(n) is not None:
                                lval = ''
                                break
                        else:
                            lval = None
                    else:
                        # Process AND name expression: n1+n2+...
                        for n in names:
                            if attrs.get(n) is None:
                                lval = None
                                break
                        else:
                            lval = ''
                else:
                    lval = attrs.get(name)
                op = mo.group('op')
                # mo.end() not good enough because '{x={y}}' matches '{x={y}'.
                end = end_brace(line,mo.start())
                rval = line[mo.start('value'):end-1]
                UNDEFINED = '{zzzzz}'
                if lval is None:
                    # Attribute undefined: apply the operator's
                    # undefined-side result.
                    if op == '=': s = rval
                    elif op == '?': s = ''
                    elif op == '!': s = rval
                    elif op == '#': s = UNDEFINED # So the line is dropped.
                    elif op == '%': s = rval
                    elif op in ('@','$'):
                        s = UNDEFINED # So the line is dropped.
                    else:
                        assert False, 'illegal attribute: %s' % attr
                else:
                    # Attribute defined: apply the operator's defined-side
                    # result ('@' substitutes, '$' matches/drops).
                    if op == '=': s = lval
                    elif op == '?': s = rval
                    elif op == '!': s = ''
                    elif op == '#': s = rval
                    elif op == '%': s = UNDEFINED # So the line is dropped.
                    elif op in ('@','$'):
                        # rval is '<re>:<v1>[:<v2>]' with escaped colons.
                        v = re.split(r'(?<!\\):',rval)
                        if len(v) not in (2,3):
                            message.error('illegal attribute syntax: %s' % attr)
                            s = ''
                        elif not is_re('^'+v[0]+'$'):
                            message.error('illegal attribute regexp: %s' % attr)
                            s = ''
                        else:
                            v = [s.replace('\\:',':') for s in v]
                            re_mo = re.match('^'+v[0]+'$',lval)
                            if op == '@':
                                if re_mo:
                                    s = v[1]         # {<name>@<re>:<v1>[:<v2>]}
                                else:
                                    if len(v) == 3:   # {<name>@<re>:<v1>:<v2>}
                                        s = v[2]
                                    else:             # {<name>@<re>:<v1>}
                                        s = ''
                            else:
                                if re_mo:
                                    if len(v) == 2:   # {<name>$<re>:<v1>}
                                        s = v[1]
                                    elif v[1] == '':  # {<name>$<re>::<v2>}
                                        s = UNDEFINED # So the line is dropped.
                                    else:             # {<name>$<re>:<v1>:<v2>}
                                        s = v[1]
                                else:
                                    if len(v) == 2:   # {<name>$<re>:<v1>}
                                        s = UNDEFINED # So the line is dropped.
                                    else:             # {<name>$<re>:<v1>:<v2>}
                                        s = v[2]
                    else:
                        assert False, 'illegal attribute: %s' % attr
                s = str(s)
                line = line[:mo.start()] + s + line[end:]
                pos = mo.start() + len(s)
        # Drop line if it contains unsubstituted {name} references.
        skipped = re.search(r'(?su)\{[^\\\W][-\w]*?\}(?!\\)', line)
        if skipped:
            continue;
        # Expand system attributes (eval has precedence).
        reos = [
            re.compile(r'(?su)\{(?P<action>eval):(?P<expr>.*?)\}(?!\\)'),
            re.compile(r'(?su)\{(?P<action>[^\\\W][-\w]*?):(?P<expr>.*?)\}(?!\\)'),
        ]
        skipped = False
        for reo in reos:
            pos = 0
            while True:
                mo = reo.search(line,pos)
                if not mo: break
                expr = mo.group('expr')
                action = mo.group('action')
                # Undo the escaped-brace shuffle before evaluation.
                expr = expr.replace('{\\','{')
                expr = expr.replace('}\\','}')
                s = system(action, expr, attrs=dictionary)
                if dictionary is not None and action in ('counter','counter2','set','set2'):
                    # These actions create and update attributes.
                    attrs.update(dictionary)
                if s is None:
                    # Drop line if the action returns None.
                    skipped = True
                    break
                line = line[:mo.start()] + s + line[mo.end():]
                pos = mo.start() + len(s)
            if skipped:
                break
        if not skipped:
            # Remove backslash from escaped entries.
            line = line.replace('{\\','{')
            line = line.replace('}\\','}')
            result.append(line)
    if string_result:
        if result:
            return '\n'.join(result)
        else:
            return None
    else:
        return tuple(result)
def char_encoding():
    """Return the document 'encoding' attribute or None when it is unset.
    Raises EAsciiDoc if the attribute names an unknown codec."""
    encoding = document.attributes.get('encoding')
    if encoding:
        try:
            codecs.lookup(encoding)
        except LookupError,e:
            raise EAsciiDoc,str(e)
    return encoding
def char_len(s):
    """Return the length of 's' in characters (not bytes), decoding with the
    document's input encoding when one is set."""
    return len(char_decode(s))
# Terminal column widths keyed by unicodedata.east_asian_width() codes.
east_asian_widths = {'W': 2,   # Wide
                     'F': 2,   # Full-width (wide)
                     'Na': 1,  # Narrow
                     'H': 1,   # Half-width (narrow)
                     'N': 1,   # Neutral (not East Asian, treated as narrow)
                     'A': 1}   # Ambiguous (s/b wide in East Asian context,
                               # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def column_width(s):
    """Return the display width of 's' in terminal columns: East Asian
    wide/full-width characters count as two columns, others as one.
    Falls back to len() when the decoded text is not a unicode string."""
    text = char_decode(s)
    if isinstance(text, unicode):
        width = 0
        for c in text:
            width += east_asian_widths[unicodedata.east_asian_width(c)]
        return width
    else:
        return len(text)
def char_decode(s):
    """Decode byte string 's' using the document's input encoding.
    Return 's' unchanged when no encoding is set; raise EAsciiDoc when the
    string cannot be decoded."""
    if char_encoding():
        try:
            return s.decode(char_encoding())
        except Exception:
            raise EAsciiDoc, \
                "'%s' codec can't decode \"%s\"" % (char_encoding(), s)
    else:
        return s
def char_encode(s):
    """Encode 's' using the document's input encoding; return 's' unchanged
    when no encoding is set."""
    if char_encoding():
        return s.encode(char_encoding())
    else:
        return s
def time_str(t):
    """Convert seconds since the Epoch to formatted local time string."""
    t = time.localtime(t)
    s = time.strftime('%H:%M:%S',t)
    # Append the time zone name, honouring daylight saving when active.
    if time.daylight and t.tm_isdst == 1:
        result = s + ' ' + time.tzname[1]
    else:
        result = s + ' ' + time.tzname[0]
    # Attempt to convert the localtime to the output encoding.
    try:
        result = char_encode(result.decode(locale.getdefaultlocale()[1]))
    except Exception:
        # Best effort only: leave the undecoded string on any failure.
        pass
    return result
def date_str(t):
    """Convert seconds since the Epoch to formatted local date string."""
    return time.strftime('%Y-%m-%d', time.localtime(t))
class Lex:
    """Lexical analysis routines. Static methods and attributes only."""
    # Cache of the last next() answer so repeated calls at the same reader
    # position are cheap.
    prev_element = None
    prev_cursor = None
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def next():
        """Returns class of next element on the input (None if EOF). The
        reader is assumed to be at the first line following a previous element,
        end of file or line one. Exits with the reader pointing to the first
        line of the next element or EOF (leading blank lines are skipped)."""
        reader.skip_blank_lines()
        if reader.eof(): return None
        # Optimization: If we've already checked for an element at this
        # position return the element.
        if Lex.prev_element and Lex.prev_cursor == reader.cursor:
            return Lex.prev_element
        # The order of these isnext() tests establishes element precedence.
        if AttributeEntry.isnext():
            result = AttributeEntry
        elif AttributeList.isnext():
            result = AttributeList
        elif BlockTitle.isnext() and not tables_OLD.isnext():
            result = BlockTitle
        elif Title.isnext():
            if AttributeList.style() == 'float':
                result = FloatingTitle
            else:
                result = Title
        elif macros.isnext():
            result = macros.current
        elif lists.isnext():
            result = lists.current
        elif blocks.isnext():
            result = blocks.current
        elif tables_OLD.isnext():
            result = tables_OLD.current
        elif tables.isnext():
            result = tables.current
        else:
            # Paragraph is the catch-all element.
            if not paragraphs.isnext():
                raise EAsciiDoc,'paragraph expected'
            result = paragraphs.current
        # Optimization: Cache answer.
        Lex.prev_cursor = reader.cursor
        Lex.prev_element = result
        return result
    @staticmethod
    def canonical_subs(options):
        """Translate composite subs values ('none', 'normal', 'verbatim')
        to the configured substitution lists."""
        if len(options) == 1:
            if options[0] == 'none':
                options = ()
            elif options[0] == 'normal':
                options = config.subsnormal
            elif options[0] == 'verbatim':
                options = config.subsverbatim
        return options
    @staticmethod
    def subs_1(s,options):
        """Perform substitution specified in 'options' (in 'options' order)."""
        if not s:
            return s
        # The 'plaintext' attribute disables all inline markup except
        # special characters.
        if document.attributes.get('plaintext') is not None:
            options = ('specialcharacters',)
        result = s
        options = Lex.canonical_subs(options)
        for o in options:
            if o == 'specialcharacters':
                result = config.subs_specialchars(result)
            elif o == 'attributes':
                result = subs_attrs(result)
            elif o == 'quotes':
                result = subs_quotes(result)
            elif o == 'specialwords':
                result = config.subs_specialwords(result)
            elif o in ('replacements','replacements2'):
                result = config.subs_replacements(result,o)
            elif o == 'macros':
                result = macros.subs(result)
            elif o == 'callouts':
                result = macros.subs(result,callouts=True)
            else:
                raise EAsciiDoc,'illegal substitution option: %s' % o
            trace(o, s, result)
            # A substitution that dropped all text short-circuits the rest.
            if not result:
                break
        return result
    @staticmethod
    def subs(lines,options):
        """Perform inline processing specified by 'options' (in 'options'
        order) on sequence of 'lines'."""
        if not lines or not options:
            return lines
        options = Lex.canonical_subs(options)
        # Join lines so quoting can span multiple lines.
        para = '\n'.join(lines)
        if 'macros' in options:
            para = macros.extract_passthroughs(para)
        for o in options:
            if o == 'attributes':
                # If we don't substitute attributes line-by-line then a single
                # undefined attribute will drop the entire paragraph.
                lines = subs_attrs(para.split('\n'))
                para = '\n'.join(lines)
            else:
                para = Lex.subs_1(para,(o,))
        if 'macros' in options:
            para = macros.restore_passthroughs(para)
        return para.splitlines()
    @staticmethod
    def set_margin(lines, margin=0):
        """Utility routine that sets the left margin to 'margin' space in a
        block of non-blank lines."""
        # Calculate width of block margin.
        lines = list(lines)
        width = len(lines[0])
        for s in lines:
            i = re.search(r'\S',s).start()
            if i < width: width = i
        # Strip margin width from all lines.
        for i in range(len(lines)):
            lines[i] = ' '*margin + lines[i][width:]
        return lines
#---------------------------------------------------------------------------
# Document element classes parse AsciiDoc reader input and write DocBook writer
# output.
#---------------------------------------------------------------------------
class Document(object):
    """
    The AsciiDoc document being processed: holds the document attributes,
    the doctype/backend properties and the top-level header-parsing and
    translation driver methods.
    """
    # doctype property (stored in the 'doctype' attribute).
    def getdoctype(self):
        return self.attributes.get('doctype')
    def setdoctype(self,doctype):
        self.attributes['doctype'] = doctype
    doctype = property(getdoctype,setdoctype)
    # backend property (stored in the 'backend' attribute).
    def getbackend(self):
        return self.attributes.get('backend')
    def setbackend(self,backend):
        if backend:
            # Translate a backend alias to its canonical name if a
            # 'backend-alias-<name>' attribute defines one.
            backend = self.attributes.get('backend-alias-' + backend, backend)
        self.attributes['backend'] = backend
    backend = property(getbackend,setbackend)
    def __init__(self):
        self.infile = None          # Source file name.
        self.outfile = None         # Output file name.
        self.attributes = InsensitiveDict()
        self.level = 0              # 0 => front matter. 1,2,3 => sect1,2,3.
        self.has_errors = False     # Set true if processing errors were flagged.
        self.has_warnings = False   # Set true if warnings were flagged.
        self.safe = False           # Default safe mode.
    def update_attributes(self,attrs=None):
        """
        Set implicit attributes and attributes in 'attrs'.
        """
        t = time.time()
        self.attributes['localtime'] = time_str(t)
        self.attributes['localdate'] = date_str(t)
        self.attributes['asciidoc-version'] = VERSION
        self.attributes['asciidoc-file'] = APP_FILE
        self.attributes['asciidoc-dir'] = APP_DIR
        self.attributes['asciidoc-confdir'] = CONF_DIR
        self.attributes['user-dir'] = USER_DIR
        if config.verbose:
            self.attributes['verbose'] = ''
        # Update with configuration file attributes.
        if attrs:
            self.attributes.update(attrs)
        # Update with command-line attributes.
        self.attributes.update(config.cmd_attrs)
        # Extract miscellaneous configuration section entries from attributes.
        if attrs:
            config.load_miscellaneous(attrs)
        config.load_miscellaneous(config.cmd_attrs)
        self.attributes['newline'] = config.newline
        # File name related attributes can't be overridden.
        if self.infile is not None:
            if self.infile and os.path.exists(self.infile):
                t = os.path.getmtime(self.infile)
            elif self.infile == '<stdin>':
                t = time.time()
            else:
                t = None
            if t:
                self.attributes['doctime'] = time_str(t)
                self.attributes['docdate'] = date_str(t)
            if self.infile != '<stdin>':
                self.attributes['infile'] = self.infile
                self.attributes['indir'] = os.path.dirname(self.infile)
                self.attributes['docfile'] = self.infile
                self.attributes['docdir'] = os.path.dirname(self.infile)
                self.attributes['docname'] = os.path.splitext(
                        os.path.basename(self.infile))[0]
        if self.outfile:
            if self.outfile != '<stdout>':
                self.attributes['outfile'] = self.outfile
                self.attributes['outdir'] = os.path.dirname(self.outfile)
                if self.infile == '<stdin>':
                    self.attributes['docname'] = os.path.splitext(
                            os.path.basename(self.outfile))[0]
                ext = os.path.splitext(self.outfile)[1][1:]
            elif config.outfilesuffix:
                ext = config.outfilesuffix[1:]
            else:
                ext = ''
            if ext:
                self.attributes['filetype'] = ext
                self.attributes['filetype-'+ext] = ''
    def load_lang(self):
        """
        Load language configuration file.
        """
        lang = self.attributes.get('lang')
        if lang is None:
            filename = 'lang-en.conf'   # Default language file.
        else:
            filename = 'lang-' + lang + '.conf'
        if config.load_from_dirs(filename):
            self.attributes['lang'] = lang  # Reinstate new lang attribute.
        else:
            if lang is None:
                # The default language file must exist.
                message.error('missing conf file: %s' % filename, halt=True)
            else:
                message.warning('missing language conf file: %s' % filename)
    def set_deprecated_attribute(self,old,new):
        """
        Ensures the 'old' name of an attribute that was renamed to 'new' is
        still honored.
        """
        if self.attributes.get(new) is None:
            if self.attributes.get(old) is not None:
                self.attributes[new] = self.attributes[old]
        else:
            self.attributes[old] = self.attributes[new]
    def consume_attributes_and_comments(self,comments_only=False,noblanks=False):
        """
        Returns True if one or more attributes or comments were consumed.
        If 'noblanks' is True then consumption halts if a blank line is
        encountered.
        """
        result = False
        finished = False
        # Keep consuming until a full pass finds nothing to discard.
        while not finished:
            finished = True
            if noblanks and not reader.read_next(): return result
            if blocks.isnext() and 'skip' in blocks.current.options:
                result = True
                finished = False
                blocks.current.translate()
            if noblanks and not reader.read_next(): return result
            if macros.isnext() and macros.current.name == 'comment':
                result = True
                finished = False
                macros.current.translate()
            if not comments_only:
                if AttributeEntry.isnext():
                    result = True
                    finished = False
                    AttributeEntry.translate()
                if AttributeList.isnext():
                    result = True
                    finished = False
                    AttributeList.translate()
        return result
    def parse_header(self,doctype,backend):
        """
        Parses header, sets corresponding document attributes and finalizes
        document doctype and backend properties.
        Returns False if the document does not have a header.
        'doctype' and 'backend' are the doctype and backend option values
        passed on the command-line, None if the command-line option was not
        specified.
        """
        assert self.level == 0
        # Skip comments and attribute entries that preceed the header.
        self.consume_attributes_and_comments()
        if doctype is not None:
            # Command-line overrides header.
            self.doctype = doctype
        elif self.doctype is None:
            # Was not set on command-line or in document header.
            self.doctype = DEFAULT_DOCTYPE
        # Process document header.
        has_header = (Title.isnext() and Title.level == 0
                      and AttributeList.style() != 'float')
        if self.doctype == 'manpage' and not has_header:
            message.error('manpage document title is mandatory',halt=True)
        if has_header:
            Header.parse()
        # Command-line entries override header derived entries.
        self.attributes.update(config.cmd_attrs)
        # DEPRECATED: revision renamed to revnumber.
        self.set_deprecated_attribute('revision','revnumber')
        # DEPRECATED: date renamed to revdate.
        self.set_deprecated_attribute('date','revdate')
        if doctype is not None:
            # Command-line overrides header.
            self.doctype = doctype
        if backend is not None:
            # Command-line overrides header.
            self.backend = backend
        elif self.backend is None:
            # Was not set on command-line or in document header.
            self.backend = DEFAULT_BACKEND
        else:
            # Has been set in document header.
            self.backend = self.backend # Translate alias in header.
        assert self.doctype in ('article','manpage','book'), 'illegal document type'
        return has_header
    def translate(self,has_header):
        """Translate the document body, writing header, sections and footer
        to the backend writer."""
        if self.doctype == 'manpage':
            # Translate mandatory NAME section.
            if Lex.next() is not Title:
                message.error('name section expected')
            else:
                Title.translate()
                if Title.level != 1:
                    message.error('name section title must be at level 1')
                if not isinstance(Lex.next(),Paragraph):
                    message.error('malformed name section body')
                lines = reader.read_until(r'^$')
                s = ' '.join(lines)
                mo = re.match(r'^(?P<manname>.*?)\s+-\s+(?P<manpurpose>.*)$',s)
                if not mo:
                    message.error('malformed name section body')
                # NOTE(review): if the match failed and message.error did not
                # halt, the group() calls below raise AttributeError -- confirm
                # message.error semantics.
                self.attributes['manname'] = mo.group('manname').strip()
                self.attributes['manpurpose'] = mo.group('manpurpose').strip()
                names = [s.strip() for s in self.attributes['manname'].split(',')]
                if len(names) > 9:
                    # Fixed message typo ('to many' -> 'too many').
                    message.warning('too many manpage names')
                for i,name in enumerate(names):
                    self.attributes['manname%d' % (i+1)] = name
        if has_header:
            # Do postponed substitutions (backend confs have been loaded).
            self.attributes['doctitle'] = Title.dosubs(self.attributes['doctitle'])
            if config.header_footer:
                hdr = config.subs_section('header',{})
                writer.write(hdr,trace='header')
            self.consume_attributes_and_comments()
            if self.doctype in ('article','book'):
                # Translate 'preamble' (untitled elements between header
                # and first section title).
                if Lex.next() is not Title:
                    stag,etag = config.section2tags('preamble')
                    writer.write(stag,trace='preamble open')
                    Section.translate_body()
                    writer.write(etag,trace='preamble close')
            elif self.doctype == 'manpage' and 'name' in config.sections:
                writer.write(config.subs_section('name',{}), trace='name')
        else:
            self.process_author_names()
            if config.header_footer:
                hdr = config.subs_section('header',{})
                writer.write(hdr,trace='header')
            if Lex.next() is not Title:
                Section.translate_body()
        # Process remaining sections.
        while not reader.eof():
            if Lex.next() is not Title:
                # Call syntax works in Python 2 and 3 (was 'raise E,msg').
                raise EAsciiDoc('section title expected')
            Section.translate()
        Section.setlevel(0) # Write remaining unwritten section close tags.
        # Substitute document parameters and write document footer.
        if config.header_footer:
            ftr = config.subs_section('footer',{})
            writer.write(ftr,trace='footer')
    def parse_author(self,s):
        """ Return False if the author is malformed."""
        attrs = self.attributes # Alias for readability.
        s = s.strip()
        # Formal author line: 'first [middle] last [<email>]'; underscores
        # in name parts stand for spaces.
        mo = re.match(r'^(?P<name1>[^<>\s]+)'
                      r'(\s+(?P<name2>[^<>\s]+))?'
                      r'(\s+(?P<name3>[^<>\s]+))?'
                      r'(\s+<(?P<email>\S+)>)?$',s)
        if not mo:
            # Names that don't match the formal specification.
            if s:
                attrs['firstname'] = s
            return
        firstname = mo.group('name1')
        if mo.group('name3'):
            middlename = mo.group('name2')
            lastname = mo.group('name3')
        else:
            middlename = None
            lastname = mo.group('name2')
        firstname = firstname.replace('_',' ')
        if middlename:
            middlename = middlename.replace('_',' ')
        if lastname:
            lastname = lastname.replace('_',' ')
        email = mo.group('email')
        if firstname:
            attrs['firstname'] = firstname
        if middlename:
            attrs['middlename'] = middlename
        if lastname:
            attrs['lastname'] = lastname
        if email:
            attrs['email'] = email
        return
    def process_author_names(self):
        """ Calculate any missing author related attributes."""
        attrs = self.attributes # Alias for readability.
        firstname = attrs.get('firstname','')
        middlename = attrs.get('middlename','')
        lastname = attrs.get('lastname','')
        author = attrs.get('author')
        initials = attrs.get('authorinitials')
        if author and not (firstname or middlename or lastname):
            # Only a composite 'author' is set: split it into name parts
            # then recurse to derive the remaining attributes.
            self.parse_author(author)
            attrs['author'] = author.replace('_',' ')
            self.process_author_names()
            return
        if not author:
            author = '%s %s %s' % (firstname, middlename, lastname)
            author = author.strip()
            author = re.sub(r'\s+',' ', author)
        if not initials:
            initials = (char_decode(firstname)[:1] +
                        char_decode(middlename)[:1] + char_decode(lastname)[:1])
            initials = char_encode(initials).upper()
        names = [firstname,middlename,lastname,author,initials]
        for i,v in enumerate(names):
            v = config.subs_specialchars(v)
            v = subs_attrs(v)
            names[i] = v
        firstname,middlename,lastname,author,initials = names
        if firstname:
            attrs['firstname'] = firstname
        if middlename:
            attrs['middlename'] = middlename
        if lastname:
            attrs['lastname'] = lastname
        if author:
            attrs['author'] = author
        if initials:
            attrs['authorinitials'] = initials
        if author:
            attrs['authored'] = ''
class Header:
    """Static methods and attributes only."""
    # Matches an implicit revision line: '[revnumber,]revdate[:revremark]'.
    REV_LINE_RE = r'^(\D*(?P<revnumber>.*?),)?(?P<revdate>.*?)(:\s*(?P<revremark>.*))?$'
    # Matches an RCS/CVS $Id$ keyword line.
    RCS_ID_RE = r'^\$Id: \S+ (?P<revnumber>\S+) (?P<revdate>\S+) \S+ (?P<author>\S+) (\S+ )?\$$'
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def parse():
        """Parse the document header (title, author line, revision line)
        and set the corresponding document attributes."""
        assert Lex.next() is Title and Title.level == 0
        attrs = document.attributes # Alias for readability.
        # Postpone title subs until backend conf files have been loaded.
        Title.translate(skipsubs=True)
        attrs['doctitle'] = Title.attributes['title']
        document.consume_attributes_and_comments(noblanks=True)
        s = reader.read_next()
        mo = None
        if s:
            # Process first header line after the title that is not a comment
            # or an attribute entry.
            s = reader.read()
            mo = re.match(Header.RCS_ID_RE,s)
            if not mo:
                document.parse_author(s)
                document.consume_attributes_and_comments(noblanks=True)
                if reader.read_next():
                    # Process second header line after the title that is not a
                    # comment or an attribute entry.
                    s = reader.read()
                    s = subs_attrs(s)
                    if s:
                        mo = re.match(Header.RCS_ID_RE,s)
                        if not mo:
                            mo = re.match(Header.REV_LINE_RE,s)
            document.consume_attributes_and_comments(noblanks=True)
        s = attrs.get('revnumber')
        if s:
            # An explicit revnumber attribute may itself be an RCS $Id$.
            mo = re.match(Header.RCS_ID_RE,s)
        if mo:
            revnumber = mo.group('revnumber')
            if revnumber:
                attrs['revnumber'] = revnumber.strip()
            author = mo.groupdict().get('author')
            if author and 'firstname' not in attrs:
                document.parse_author(author)
            revremark = mo.groupdict().get('revremark')
            if revremark is not None:
                revremark = [revremark]
                # Revision remarks can continue on following lines.
                while reader.read_next():
                    if document.consume_attributes_and_comments(noblanks=True):
                        break
                    revremark.append(reader.read())
                revremark = Lex.subs(revremark,['normal'])
                revremark = '\n'.join(revremark).strip()
                attrs['revremark'] = revremark
            revdate = mo.group('revdate')
            if revdate:
                attrs['revdate'] = revdate.strip()
            elif revnumber or revremark:
                # Set revision date to ensure valid DocBook revision.
                attrs['revdate'] = attrs['docdate']
        document.process_author_names()
        if document.doctype == 'manpage':
            # manpage title formatted like mantitle(manvolnum).
            mo = re.match(r'^(?P<mantitle>.*)\((?P<manvolnum>.*)\)$',
                attrs['doctitle'])
            if not mo:
                message.error('malformed manpage title')
            else:
                mantitle = mo.group('mantitle').strip()
                mantitle = subs_attrs(mantitle)
                if mantitle is None:
                    message.error('undefined attribute in manpage title')
                # mantitle is lowered only if in ALL CAPS
                if mantitle == mantitle.upper():
                    mantitle = mantitle.lower()
                attrs['mantitle'] = mantitle;
                attrs['manvolnum'] = mo.group('manvolnum').strip()
class AttributeEntry:
    """Static methods and attributes only."""
    pattern = None  # Regexp matching an attribute entry line.
    subs = None     # Substitutions applied to the attribute value.
    name = None     # Parsed attribute name.
    name2 = None    # Parsed second name (:name.name2: conf file syntax).
    value = None    # Parsed attribute value.
    attributes = {} # Accumulates all the parsed attribute entries.
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def isnext():
        """Return True if an attribute entry is next up in the reader,
        parsing its name(s) and value into class attributes."""
        result = False  # Assume not next.
        if not AttributeEntry.pattern:
            pat = document.attributes.get('attributeentry-pattern')
            if not pat:
                message.error("[attributes] missing 'attributeentry-pattern' entry")
            AttributeEntry.pattern = pat
        line = reader.read_next()
        if line:
            # Attribute entry formatted like :<name>[.<name2>]:[ <value>]
            mo = re.match(AttributeEntry.pattern,line)
            if mo:
                AttributeEntry.name = mo.group('attrname')
                AttributeEntry.name2 = mo.group('attrname2')
                AttributeEntry.value = mo.group('attrvalue') or ''
                AttributeEntry.value = AttributeEntry.value.strip()
                result = True
        return result
    @staticmethod
    def translate():
        """Consume the attribute entry from the reader and apply it:
        set/undefine a document attribute or load configuration."""
        assert Lex.next() is AttributeEntry
        attr = AttributeEntry   # Alias for brevity.
        reader.read()   # Discard attribute entry from reader.
        # A value ending in ' +' continues on the following line.
        while attr.value.endswith(' +'):
            if not reader.read_next(): break
            attr.value = attr.value[:-1] + reader.read().strip()
        if attr.name2 is not None:
            # Configuration file attribute.
            if attr.name2 != '':
                # Section entry attribute.
                section = {}
                # Some sections can have name! syntax.
                if attr.name in ('attributes','miscellaneous') and attr.name2[-1] == '!':
                    section[attr.name] = [attr.name2]
                else:
                    section[attr.name] = ['%s=%s' % (attr.name2,attr.value)]
                config.load_sections(section)
                config.load_miscellaneous(config.conf_attrs)
            else:
                # Markup template section attribute.
                if attr.name in config.sections:
                    config.sections[attr.name] = [attr.value]
                else:
                    message.warning('missing configuration section: %s' % attr.name)
        else:
            # Normal attribute.
            if attr.name[-1] == '!':
                # Names like name! undefine the attribute.
                attr.name = attr.name[:-1]
                attr.value = None
            # Strip white space and illegal name chars.
            attr.name = re.sub(r'(?u)[^\w\-_]', '', attr.name).lower()
            # Don't override most command-line attributes.
            if attr.name in config.cmd_attrs \
                    and attr.name not in ('trace','numbered'):
                return
            # Update document attributes with attribute value.
            if attr.value is not None:
                mo = re.match(r'^pass:(?P<attrs>.*)\[(?P<value>.*)\]$', attr.value)
                if mo:
                    # Inline passthrough syntax.
                    attr.subs = mo.group('attrs')
                    attr.value = mo.group('value')  # Passthrough.
                else:
                    # Default substitution.
                    # DEPRECATED: attributeentry-subs
                    attr.subs = document.attributes.get('attributeentry-subs',
                                'specialcharacters,attributes')
                attr.subs = parse_options(attr.subs, SUBS_OPTIONS,
                            'illegal substitution option')
                attr.value = Lex.subs((attr.value,), attr.subs)
                attr.value = writer.newline.join(attr.value)
                document.attributes[attr.name] = attr.value
            elif attr.name in document.attributes:
                del document.attributes[attr.name]
            attr.attributes[attr.name] = attr.value
class AttributeList:
    """Static methods and attributes only."""
    pattern = None  # Regexp matching an attribute list line.
    match = None    # Match object from the last successful isnext().
    attrs = {}      # Attributes accumulated by translate(), cleared by consume().
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def initialize():
        """Pick up the attribute list pattern from the document attributes."""
        if not 'attributelist-pattern' in document.attributes:
            message.error("[attributes] missing 'attributelist-pattern' entry")
        AttributeList.pattern = document.attributes['attributelist-pattern']
    @staticmethod
    def isnext():
        """Return True if an attribute list is next up in the reader."""
        result = False  # Assume not next.
        line = reader.read_next()
        if line:
            mo = re.match(AttributeList.pattern, line)
            if mo:
                AttributeList.match = mo
                result = True
        return result
    @staticmethod
    def translate():
        """Consume the attribute list from the reader and merge its parsed
        attributes into AttributeList.attrs."""
        assert Lex.next() is AttributeList
        reader.read()   # Discard attribute list from reader.
        attrs = {}
        d = AttributeList.match.groupdict()
        for k,v in d.items():
            if v is not None:
                if k == 'attrlist':
                    v = subs_attrs(v)
                    if v:
                        parse_attributes(v, attrs)
                else:
                    AttributeList.attrs[k] = v
        AttributeList.subs(attrs)
        AttributeList.attrs.update(attrs)
    @staticmethod
    def subs(attrs):
        '''Substitute single quoted attribute values normally.'''
        reo = re.compile(r"^'.*'$")
        for k,v in attrs.items():
            if reo.match(str(v)):
                # Strip the quotes and apply the normal substitutions.
                attrs[k] = Lex.subs_1(v[1:-1],SUBS_NORMAL)
    @staticmethod
    def style():
        """Return the pending style ('style' attribute or first positional)."""
        return AttributeList.attrs.get('style') or AttributeList.attrs.get('1')
    @staticmethod
    def consume(d):
        """Add attribute list to the dictionary 'd' and reset the
        list."""
        if AttributeList.attrs:
            d.update(AttributeList.attrs)
            AttributeList.attrs = {}
            # Generate option attributes.
            if 'options' in d:
                options = parse_options(d['options'], (), 'illegal option name')
                for option in options:
                    d[option+'-option'] = ''
class BlockTitle:
"""Static methods and attributes only."""
title = None
pattern = None
def __init__(self):
raise AssertionError,'no class instances allowed'
@staticmethod
def isnext():
result = False # Assume not next.
line = reader.read_next()
if line:
mo = re.match(BlockTitle.pattern,line)
if mo:
BlockTitle.title = mo.group('title')
result = True
return result
@staticmethod
def translate():
assert Lex.next() is BlockTitle
reader.read() # Discard title from reader.
# Perform title substitutions.
if not Title.subs:
Title.subs = config.subsnormal
s = Lex.subs((BlockTitle.title,), Title.subs)
s = writer.newline.join(s)
if not s:
message.warning('blank block title')
BlockTitle.title = s
@staticmethod
def consume(d):
"""If there is a title add it to dictionary 'd' then reset title."""
if BlockTitle.title:
d['title'] = BlockTitle.title
BlockTitle.title = None
class Title:
    """Processes Header and Section titles. Static methods and attributes
    only."""
    # Class variables
    underlines = ('==','--','~~','^^','++') # Levels 0,1,2,3,4.
    subs = ()               # Substitutions applied to title text.
    pattern = None          # Regexp matching the two-line title text.
    level = 0               # Level of the last parsed title.
    attributes = {}         # Attributes parsed from the last title.
    sectname = None         # Section name set by setsectname().
    section_numbers = [0]*len(underlines)   # Auto section-number counters.
    dump_dict = {}          # [titles] entries, kept for dump().
    linecount = None # Number of lines in title (1 or 2).
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def translate(skipsubs=False):
        """Parse the Title.attributes and Title.level from the reader. The
        real work has already been done by parse()."""
        assert Lex.next() in (Title,FloatingTitle)
        # Discard title from reader.
        for i in range(Title.linecount):
            reader.read()
        Title.setsectname()
        if not skipsubs:
            Title.attributes['title'] = Title.dosubs(Title.attributes['title'])
    @staticmethod
    def dosubs(title):
        """
        Perform title substitutions.
        """
        if not Title.subs:
            Title.subs = config.subsnormal
        title = Lex.subs((title,), Title.subs)
        title = writer.newline.join(title)
        if not title:
            message.warning('blank section title')
        return title
    @staticmethod
    def isnext():
        """Return True if a section title is next up in the reader."""
        lines = reader.read_ahead(2)
        return Title.parse(lines)
    @staticmethod
    def parse(lines):
        """Parse title at start of lines tuple."""
        if len(lines) == 0: return False
        if len(lines[0]) == 0: return False # Title can't be blank.
        # Check for single-line titles.
        result = False
        for level in range(len(Title.underlines)):
            k = 'sect%s' % level
            if k in Title.dump_dict:
                mo = re.match(Title.dump_dict[k], lines[0])
                if mo:
                    Title.attributes = mo.groupdict()
                    Title.level = level
                    Title.linecount = 1
                    result = True
                    break
        if not result:
            # Check for double-line titles.
            if not Title.pattern: return False  # Single-line titles only.
            if len(lines) < 2: return False
            title,ul = lines[:2]
            title_len = column_width(title)
            ul_len = char_len(ul)
            if ul_len < 2: return False
            # Fast elimination check.
            if ul[:2] not in Title.underlines: return False
            # Length of underline must be within +-3 of title.
            if not ((ul_len-3 < title_len < ul_len+3)
                    # Next test for backward compatibility.
                    or (ul_len-3 < char_len(title) < ul_len+3)):
                return False
            # Check for valid repetition of underline character pairs.
            # (Python 2 integer division.)
            s = ul[:2]*((ul_len+1)/2)
            if ul != s[:ul_len]: return False
            # Don't be fooled by back-to-back delimited blocks, require at
            # least one alphanumeric character in title.
            if not re.search(r'(?u)\w',title): return False
            mo = re.match(Title.pattern, title)
            if mo:
                Title.attributes = mo.groupdict()
                Title.level = list(Title.underlines).index(ul[:2])
                Title.linecount = 2
                result = True
        # Check for expected pattern match groups.
        if result:
            if not 'title' in Title.attributes:
                message.warning('[titles] entry has no <title> group')
                Title.attributes['title'] = lines[0]
            # (Python 2: items() returns a list, so deleting during
            # iteration is safe here.)
            for k,v in Title.attributes.items():
                if v is None: del Title.attributes[k]
            # Apply 'leveloffset'; non-integer values are silently ignored.
            try:
                Title.level += int(document.attributes.get('leveloffset','0'))
            except:
                pass
            Title.attributes['level'] = str(Title.level)
        return result
    @staticmethod
    def load(entries):
        """Load and validate [titles] section entries dictionary."""
        if 'underlines' in entries:
            errmsg = 'malformed [titles] underlines entry'
            try:
                underlines = parse_list(entries['underlines'])
            except Exception:
                raise EAsciiDoc,errmsg
            if len(underlines) != len(Title.underlines):
                raise EAsciiDoc,errmsg
            for s in underlines:
                if len(s) !=2:
                    raise EAsciiDoc,errmsg
            Title.underlines = tuple(underlines)
            Title.dump_dict['underlines'] = entries['underlines']
        if 'subs' in entries:
            Title.subs = parse_options(entries['subs'], SUBS_OPTIONS,
                'illegal [titles] subs entry')
            Title.dump_dict['subs'] = entries['subs']
        if 'sectiontitle' in entries:
            pat = entries['sectiontitle']
            if not pat or not is_re(pat):
                raise EAsciiDoc,'malformed [titles] sectiontitle entry'
            Title.pattern = pat
            Title.dump_dict['sectiontitle'] = pat
        if 'blocktitle' in entries:
            pat = entries['blocktitle']
            if not pat or not is_re(pat):
                raise EAsciiDoc,'malformed [titles] blocktitle entry'
            BlockTitle.pattern = pat
            Title.dump_dict['blocktitle'] = pat
        # Load single-line title patterns.
        for k in ('sect0','sect1','sect2','sect3','sect4'):
            if k in entries:
                pat = entries[k]
                if not pat or not is_re(pat):
                    raise EAsciiDoc,'malformed [titles] %s entry' % k
                Title.dump_dict[k] = pat
        # TODO: Check we have either a Title.pattern or at least one
        # single-line title pattern -- can this be done here or do we need
        # check routine like the other block checkers?
    @staticmethod
    def dump():
        """Dump the loaded [titles] entries."""
        dump_section('titles',Title.dump_dict)
    @staticmethod
    def setsectname():
        """
        Set Title section name:
        If the first positional or 'template' attribute is set use it,
        next search for section title in [specialsections],
        if not found use default 'sect<level>' name.
        """
        sectname = AttributeList.attrs.get('1')
        if sectname and sectname != 'float':
            Title.sectname = sectname
        elif 'template' in AttributeList.attrs:
            Title.sectname = AttributeList.attrs['template']
        else:
            for pat,sect in config.specialsections.items():
                mo = re.match(pat,Title.attributes['title'])
                if mo:
                    title = mo.groupdict().get('title')
                    if title is not None:
                        Title.attributes['title'] = title.strip()
                    else:
                        Title.attributes['title'] = mo.group().strip()
                    Title.sectname = sect
                    break
            else:
                Title.sectname = 'sect%d' % Title.level
    @staticmethod
    def getnumber(level):
        """Return next section number at section 'level' formatted like
        1.2.3.4."""
        number = ''
        for l in range(len(Title.section_numbers)):
            n = Title.section_numbers[l]
            if l == 0:
                # Level 0 never contributes a number.
                continue
            elif l < level:
                number = '%s%d.' % (number, n)
            elif l == level:
                number = '%s%d.' % (number, n + 1)
                Title.section_numbers[l] = n + 1
            elif l > level:
                # Reset unprocessed section levels.
                Title.section_numbers[l] = 0
        return number
class FloatingTitle(Title):
    '''Floated titles are translated differently.'''
    @staticmethod
    def isnext():
        # A floating title is an ordinary title whose attribute list
        # carries the 'float' style.
        if not Title.isnext():
            return False
        return AttributeList.style() == 'float'
    @staticmethod
    def translate():
        """Translate the floating title with the 'floatingtitle' template."""
        assert Lex.next() is FloatingTitle
        Title.translate()
        Section.set_id()
        AttributeList.consume(Title.attributes)
        template = 'floatingtitle'
        if template not in config.sections:
            message.warning('missing template section: [%s]' % template)
        else:
            stag,etag = config.section2tags(template,Title.attributes)
            writer.write(stag,trace='floating title')
class Section:
    """Static methods and attributes only."""
    endtags = []  # Stack of currently open section (level,endtag) tuples.
    ids = [] # List of already used ids.
    def __init__(self):
        raise AssertionError,'no class instances allowed'
    @staticmethod
    def savetag(level,etag):
        """Save section end."""
        Section.endtags.append((level,etag))
    @staticmethod
    def setlevel(level):
        """Set document level and write open section close tags up to level."""
        while Section.endtags and Section.endtags[-1][0] >= level:
            writer.write(Section.endtags.pop()[1],trace='section close')
        document.level = level
    @staticmethod
    def gen_id(title):
        """
        The normalized value of the id attribute is an NCName according to
        the 'Namespaces in XML' Recommendation:
        NCName ::= NCNameStartChar NCNameChar*
        NCNameChar ::= NameChar - ':'
        NCNameStartChar ::= Letter | '_'
        NameChar ::= Letter | Digit | '.' | '-' | '_' | ':'
        """
        # Replace non-alpha numeric characters in title with underscores and
        # convert to lower case.
        base_ident = char_encode(re.sub(r'(?u)\W+', '_',
            char_decode(title)).strip('_').lower())
        # Prefix the ID name with idprefix attribute or underscore if not
        # defined. Prefix ensures the ID does not clash with existing IDs.
        idprefix = document.attributes.get('idprefix','_')
        base_ident = idprefix + base_ident
        # Append '_<n>' suffixes until the id is unique in this document.
        i = 1
        while True:
            if i == 1:
                ident = base_ident
            else:
                ident = '%s_%d' % (base_ident, i)
            if ident not in Section.ids:
                Section.ids.append(ident)
                return ident
            else:
                # NOTE(review): redundant -- 'ident' is recomputed at the
                # top of the next iteration.
                ident = base_ident
            i += 1
    @staticmethod
    def set_id():
        # Generate an id only when 'sectids' is defined and the attribute
        # list hasn't supplied an explicit one.
        if not document.attributes.get('sectids') is None \
                and 'id' not in AttributeList.attrs:
            # Generate ids for sections.
            AttributeList.attrs['id'] = Section.gen_id(Title.attributes['title'])
    @staticmethod
    def translate():
        """Translate the section title and body, writing the section open
        tag and stacking the close tag."""
        assert Lex.next() is Title
        prev_sectname = Title.sectname
        Title.translate()
        if Title.level == 0 and document.doctype != 'book':
            message.error('only book doctypes can contain level 0 sections')
        if Title.level > document.level \
            and 'basebackend-docbook' in document.attributes \
            and prev_sectname in ('colophon','abstract', \
            'dedication','glossary','bibliography'):
            message.error('%s section cannot contain sub-sections' % prev_sectname)
        if Title.level > document.level+1:
            # Sub-sections of multi-part book level zero Preface and Appendices
            # are meant to be out of sequence.
            if document.doctype == 'book' \
            and document.level == 0 \
            and Title.level == 2 \
            and prev_sectname in ('preface','appendix'):
                pass
            else:
                message.warning('section title out of sequence: '
                    'expected level %d, got level %d'
                    % (document.level+1, Title.level))
        Section.set_id()
        Section.setlevel(Title.level)
        if 'numbered' in document.attributes:
            Title.attributes['sectnum'] = Title.getnumber(document.level)
        else:
            Title.attributes['sectnum'] = ''
        AttributeList.consume(Title.attributes)
        stag,etag = config.section2tags(Title.sectname,Title.attributes)
        Section.savetag(Title.level,etag)
        writer.write(stag,trace='section open: level %d: %s' %
                (Title.level, Title.attributes['title']))
        Section.translate_body()
    @staticmethod
    def translate_body(terminator=Title):
        """Translate section body elements until 'terminator' (or EOF) is
        next up in the reader."""
        isempty = True
        next = Lex.next()
        while next and next is not terminator:
            if isinstance(terminator,DelimitedBlock) and next is Title:
                message.error('section title not permitted in delimited block')
            next.translate()
            next = Lex.next()
            isempty = False
        # The section is not empty if contains a subsection.
        if next and isempty and Title.level > document.level:
            isempty = False
        # Report empty sections if invalid markup will result.
        if isempty:
            if document.backend == 'docbook' and Title.sectname != 'index':
                message.error('empty section is not valid')
class AbstractBlock:
    """
    Base class for block elements: holds the configuration file parameters
    common to all blocks plus the machinery that merges them with a block's
    attribute list into its processing parameters.
    """
    def __init__(self):
        # Configuration parameter names common to all blocks.
        self.CONF_ENTRIES = ('delimiter','options','subs','presubs','postsubs',
            'posattrs','style','.*-style','template','filter')
        self.start = None   # File reader cursor at start delimiter.
        self.name=None      # Configuration file section name.
        # Configuration parameters.
        self.delimiter=None # Regular expression matching block delimiter.
        self.delimiter_reo=None # Compiled delimiter.
        self.template=None  # template section entry.
        self.options=()     # options entry list.
        self.presubs=None   # presubs/subs entry list.
        self.postsubs=()    # postsubs entry list.
        self.filter=None    # filter entry.
        self.posattrs=()    # posattrs entry list.
        self.style=None     # Default style.
        self.styles=OrderedDict() # Each entry is a styles dictionary.
        # Before a block is processed its attributes (from its
        # attributes list) are merged with the block configuration parameters
        # (by self.merge_attributes()) resulting in the template substitution
        # dictionary (self.attributes) and the block's processing parameters
        # (self.parameters).
        self.attributes={}
        # The names of block parameters.
        self.PARAM_NAMES=('template','options','presubs','postsubs','filter')
        self.parameters=None
        # Leading delimiter match object.
        self.mo=None
def short_name(self):
""" Return the text following the last dash in the section name."""
i = self.name.rfind('-')
if i == -1:
return self.name
else:
return self.name[i+1:]
def error(self, msg, cursor=None, halt=False):
message.error('[%s] %s' % (self.name,msg), cursor, halt)
def is_conf_entry(self,param):
"""Return True if param matches an allowed configuration file entry
name."""
for s in self.CONF_ENTRIES:
if re.match('^'+s+'$',param):
return True
return False
    def load(self,name,entries):
        """Update block definition from section 'entries' dictionary."""
        self.name = name
        # all=True copies every entry onto self, not just the processing
        # parameter names.
        self.update_parameters(entries, self, all=True)
    def update_parameters(self, src, dst=None, all=False):
        """
        Parse processing parameters from src dictionary to dst object.
        dst defaults to self.parameters.
        If all is True then copy src entries that aren't parameter names.
        """
        dst = dst or self.parameters
        msg = '[%s] malformed entry %%s: %%s' % self.name
        def copy(obj,k,v):
            # dst may be a plain dictionary or an arbitrary object.
            if isinstance(obj,dict):
                obj[k] = v
            else:
                setattr(obj,k,v)
        for k,v in src.items():
            # Keys must be numeric (positional) or legal names.
            if not re.match(r'\d+',k) and not is_name(k):
                raise EAsciiDoc, msg % (k,v)
            if k == 'template':
                if not is_name(v):
                    raise EAsciiDoc, msg % (k,v)
                copy(dst,k,v)
            elif k == 'filter':
                copy(dst,k,v)
            elif k == 'options':
                if isinstance(v,str):
                    v = parse_options(v, (), msg % (k,v))
                    # Merge with existing options.
                    v = tuple(set(dst.options).union(set(v)))
                copy(dst,k,v)
            elif k in ('subs','presubs','postsubs'):
                # Subs is an alias for presubs.
                if k == 'subs': k = 'presubs'
                if isinstance(v,str):
                    v = parse_options(v, SUBS_OPTIONS, msg % (k,v))
                copy(dst,k,v)
            elif k == 'delimiter':
                if v and is_re(v):
                    copy(dst,k,v)
                else:
                    raise EAsciiDoc, msg % (k,v)
            elif k == 'style':
                if is_name(v):
                    copy(dst,k,v)
                else:
                    raise EAsciiDoc, msg % (k,v)
            elif k == 'posattrs':
                v = parse_options(v, (), msg % (k,v))
                copy(dst,k,v)
            else:
                # '<name>-style' entries define a named style dictionary.
                mo = re.match(r'^(?P<style>.*)-style$',k)
                if mo:
                    if not v:
                        raise EAsciiDoc, msg % (k,v)
                    style = mo.group('style')
                    if not is_name(style):
                        raise EAsciiDoc, msg % (k,v)
                    d = {}
                    if not parse_named_attributes(v,d):
                        raise EAsciiDoc, msg % (k,v)
                    if 'subs' in d:
                        # Subs is an alias for presubs.
                        d['presubs'] = d['subs']
                        del d['subs']
                    self.styles[style] = d
                elif all or k in self.PARAM_NAMES:
                    copy(dst,k,v) # Derived class specific entries.
def get_param(self,name,params=None):
"""
Return named processing parameter from params dictionary.
If the parameter is not in params look in self.parameters.
"""
if params and name in params:
return params[name]
elif name in self.parameters:
return self.parameters[name]
else:
return None
def get_subs(self,params=None):
"""
Return (presubs,postsubs) tuple.
"""
presubs = self.get_param('presubs',params)
postsubs = self.get_param('postsubs',params)
return (presubs,postsubs)
    def dump(self):
        """Write block definition to stdout."""
        # Output uses configuration file syntax so it can be read back in;
        # entry order and conditional presence mirror the conf file format.
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('['+self.name+']')
        if self.is_conf_entry('delimiter'):
            write('delimiter='+self.delimiter)
        if self.template:
            write('template='+self.template)
        if self.options:
            write('options='+','.join(self.options))
        if self.presubs:
            if self.postsubs:
                write('presubs='+','.join(self.presubs))
            else:
                # 'subs' shorthand is only valid when there are no postsubs.
                write('subs='+','.join(self.presubs))
        if self.postsubs:
            write('postsubs='+','.join(self.postsubs))
        if self.filter:
            write('filter='+self.filter)
        if self.posattrs:
            write('posattrs='+','.join(self.posattrs))
        if self.style:
            write('style='+self.style)
        if self.styles:
            for style,d in self.styles.items():
                s = ''
                for k,v in d.items(): s += '%s=%r,' % (k,v)
                # s[:-1] drops the trailing comma added by the loop above.
                write('%s-style=%s' % (style,s[:-1]))
    def validate(self):
        """Validate block after the complete configuration has been loaded."""
        # A block that declares a 'delimiter' conf entry must define one.
        if self.is_conf_entry('delimiter') and not self.delimiter:
            raise EAsciiDoc,'[%s] missing delimiter' % self.name
        if self.style:
            if not is_name(self.style):
                raise EAsciiDoc, 'illegal style name: %s' % self.style
            if not self.style in self.styles:
                if not isinstance(self,List): # Lists don't have templates.
                    message.warning('[%s] \'%s\' style not in %s' % (
                        self.name,self.style,self.styles.keys()))
        # Check all styles for missing templates.
        all_styles_have_template = True
        for k,v in self.styles.items():
            t = v.get('template')
            if t and not t in config.sections:
                # Defer check if template name contains attributes.
                if not re.search(r'{.+}',t):
                    message.warning('missing template section: [%s]' % t)
            if not t:
                all_styles_have_template = False
        # Check we have a valid template entry or alternatively that all the
        # styles have templates.
        if self.is_conf_entry('template') and not 'skip' in self.options:
            if self.template:
                if not self.template in config.sections:
                    # Defer check if template name contains attributes.
                    if not re.search(r'{.+}',self.template):
                        message.warning('missing template section: [%s]'
                                        % self.template)
            elif not all_styles_have_template:
                if not isinstance(self,List): # Lists don't have templates.
                    message.warning('missing styles templates: [%s]' % self.name)
def isnext(self):
"""Check if this block is next in document reader."""
result = False
reader.skip_blank_lines()
if reader.read_next():
if not self.delimiter_reo:
# Cache compiled delimiter optimization.
self.delimiter_reo = re.compile(self.delimiter)
mo = self.delimiter_reo.match(reader.read_next())
if mo:
self.mo = mo
result = True
return result
    def translate(self):
        """Translate block from document reader."""
        if not self.presubs:
            # Fall back to the configured normal substitutions set.
            self.presubs = config.subsnormal
        if reader.cursor:
            # Remember the block's start position for error reporting.
            self.start = reader.cursor[:]
    def merge_attributes(self,attrs,params=[]):
        """
        Use the current blocks attribute list (attrs dictionary) to build a
        dictionary of block processing parameters (self.parameters) and tag
        substitution attributes (self.attributes).
        1. Copy the default parameters (self.*) to self.parameters.
        self.parameters are used internally to render the current block.
        Optional params array of additional parameters.
        2. Copy attrs to self.attributes. self.attributes are used for template
        and tag substitution in the current block.
        3. If a style attribute was specified update self.parameters with the
        corresponding style parameters; if there are any style parameters
        remaining add them to self.attributes (existing attribute list entries
        take precedence).
        4. Set named positional attributes in self.attributes if self.posattrs
        was specified.
        5. Finally self.parameters is updated with any corresponding parameters
        specified in attrs.
        """
        def check_array_parameter(param):
            # Check the parameter is a sequence type.
            # NOTE(review): the message always says 'presubs' even when the
            # offending parameter is 'options' or 'postsubs'.
            if not is_array(self.parameters[param]):
                message.error('malformed presubs attribute: %s' %
                        self.parameters[param])
                # Revert to default value.
                self.parameters[param] = getattr(self,param)
        # NOTE: the mutable default [] is safe here -- 'params' is rebound
        # (never mutated in place) on the next line.
        params = list(self.PARAM_NAMES) + params
        self.attributes = {}
        if self.style:
            # If a default style is defined make it available in the template.
            self.attributes['style'] = self.style
        self.attributes.update(attrs)
        # Calculate dynamic block parameters.
        # Start with configuration file defaults.
        self.parameters = AttrDict()
        for name in params:
            self.parameters[name] = getattr(self,name)
        # Load the selected style attributes.
        posattrs = self.posattrs
        if posattrs and posattrs[0] == 'style':
            # Positional attribute style has highest precedence.
            style = self.attributes.get('1')
        else:
            style = None
        if not style:
            # Use explicit style attribute, fall back to default style.
            style = self.attributes.get('style',self.style)
            if style:
                if not is_name(style):
                    message.error('illegal style name: %s' % style)
                    style = self.style
                # Lists have implicit styles and do their own style checks.
                elif style not in self.styles and not isinstance(self,List):
                    message.warning('missing style: [%s]: %s' % (self.name,style))
                    style = self.style
        if style in self.styles:
            self.attributes['style'] = style
            for k,v in self.styles[style].items():
                if k == 'posattrs':
                    posattrs = v
                elif k in params:
                    self.parameters[k] = v
                elif not k in self.attributes:
                    # Style attributes don't take precedence over explicit.
                    self.attributes[k] = v
        # Set named positional attributes.
        for i,v in enumerate(posattrs):
            if str(i+1) in self.attributes:
                self.attributes[v] = self.attributes[str(i+1)]
        # Override config and style attributes with attribute list attributes.
        self.update_parameters(attrs)
        # The three array-valued parameters must end up as sequences.
        check_array_parameter('options')
        check_array_parameter('presubs')
        check_array_parameter('postsubs')
class AbstractBlocks:
    """Ordered collection of block definitions of one BLOCK_TYPE."""
    PREFIX = ''         # Conf file section name prefix set in derived classes.
    BLOCK_TYPE = None   # Block type set in derived classes.
    def __init__(self):
        self.current = None     # Block that most recently matched isnext().
        self.blocks = []        # List of Block objects.
        self.default = None     # Default Block.
        self.delimiters = None  # Combined delimiters regular expression.
    def load(self,sections):
        """Load block definition from 'sections' dictionary."""
        section_reo = re.compile(r'^'+ self.PREFIX + r'.+$')
        for name in sections.keys():
            if not section_reo.match(name):
                continue
            entries = {}
            parse_entries(sections.get(name,()),entries)
            # Reuse an existing block of the same name, else create one.
            block = None
            for candidate in self.blocks:
                if candidate.name == name:
                    block = candidate
                    break
            if block is None:
                block = self.BLOCK_TYPE()
                self.blocks.append(block)
            try:
                block.load(name,entries)
            except EAsciiDoc:
                # Prefix the error message with the section name.
                e = sys.exc_info()[1]
                raise EAsciiDoc('[%s] %s' % (name,str(e)))
    def dump(self):
        for block in self.blocks:
            block.dump()
    def isnext(self):
        """Set self.current to the first block matching the reader input."""
        for block in self.blocks:
            if block.isnext():
                self.current = block
                return True
        return False
    def validate(self):
        """Validate the block definitions."""
        # Validate delimiters and build combined lists delimiter pattern.
        delimiters = []
        for block in self.blocks:
            assert block.__class__ is self.BLOCK_TYPE
            block.validate()
            if block.delimiter:
                delimiters.append(block.delimiter)
        self.delimiters = re_join(delimiters)
class Paragraph(AbstractBlock):
    """
    Paragraph block definition ([paradef-*] configuration file sections).
    """
    def __init__(self):
        AbstractBlock.__init__(self)
        self.text=None # Text in first line of paragraph.
    def load(self,name,entries):
        AbstractBlock.load(self,name,entries)
    def dump(self):
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('')
    def isnext(self):
        # A paragraph match also captures the first line's 'text' group.
        result = AbstractBlock.isnext(self)
        if result:
            self.text = self.mo.groupdict().get('text')
        return result
    def translate(self):
        """Read the paragraph body, apply substitutions and the optional
        filter, then write the tagged result to the output writer."""
        AbstractBlock.translate(self)
        attrs = self.mo.groupdict().copy()
        if 'text' in attrs: del attrs['text']
        BlockTitle.consume(attrs)
        AttributeList.consume(attrs)
        self.merge_attributes(attrs)
        reader.read() # Discard (already parsed item first line).
        body = reader.read_until(paragraphs.terminators)
        body = [self.text] + list(body)
        presubs = self.parameters.presubs
        postsubs = self.parameters.postsubs
        if document.attributes.get('plaintext') is None:
            body = Lex.set_margin(body) # Move body to left margin.
        body = Lex.subs(body,presubs)
        template = self.parameters.template
        template = subs_attrs(template,attrs)
        stag = config.section2tags(template, self.attributes,skipend=True)[0]
        if self.parameters.filter:
            body = filter_lines(self.parameters.filter,body,self.attributes)
        body = Lex.subs(body,postsubs)
        etag = config.section2tags(template, self.attributes,skipstart=True)[1]
        # Write start tag, content, end tag.
        writer.write(dovetail_tags(stag,body,etag),trace='paragraph')
class Paragraphs(AbstractBlocks):
    """List of paragraph definitions."""
    BLOCK_TYPE = Paragraph
    PREFIX = 'paradef-'
    def __init__(self):
        AbstractBlocks.__init__(self)
        self.terminators = None # List of compiled re's.
    def initialize(self):
        # Patterns that terminate a paragraph body: an explicit '+'
        # continuation or blank line, an attribute list, or any block,
        # table or old-style table delimiter.
        self.terminators = [re.compile(pattern) for pattern in (
            r'^\+$|^$',
            AttributeList.pattern,
            blocks.delimiters,
            tables.delimiters,
            tables_OLD.delimiters,
        )]
    def load(self,sections):
        AbstractBlocks.load(self,sections)
    def validate(self):
        AbstractBlocks.validate(self)
        # The default paragraph definition must exist and is moved to the
        # end of the list so it is only matched as a last resort.
        default = None
        for block in self.blocks:
            if block.name == 'paradef-default':
                default = block
                break
        if default is None:
            raise EAsciiDoc('missing section: [paradef-default]')
        self.blocks.remove(default)
        self.blocks.append(default)
        self.default = default
class List(AbstractBlock):
    """
    List block definition ([listdef-*] configuration file sections) for
    bulleted, numbered, labeled and callout lists.
    """
    NUMBER_STYLES= ('arabic','loweralpha','upperalpha','lowerroman',
                    'upperroman')
    def __init__(self):
        AbstractBlock.__init__(self)
        self.CONF_ENTRIES += ('type','tags')
        self.PARAM_NAMES += ('tags',)
        # tabledef conf file parameters.
        self.type=None
        self.tags=None # Name of listtags-<tags> conf section.
        # Calculated parameters.
        self.tag=None # Current tags AttrDict.
        self.label=None # List item label (labeled lists).
        self.text=None # Text in first line of list item.
        self.index=None # Matched delimiter 'index' group (numbered lists).
        self.type=None # List type ('numbered','bulleted','labeled').
        self.ordinal=None # Current list item ordinal number (1..)
        self.number_style=None # Current numbered list style ('arabic'..)
    def load(self,name,entries):
        AbstractBlock.load(self,name,entries)
    def dump(self):
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('type='+self.type)
        write('tags='+self.tags)
        write('')
    def validate(self):
        AbstractBlock.validate(self)
        # Every tags name used by the list or its styles must have a
        # corresponding [listtags-<name>] section.
        tags = [self.tags]
        tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
        for t in tags:
            if t not in lists.tags:
                self.error('missing section: [listtags-%s]' % t,halt=True)
    def isnext(self):
        # Capture the label/text/index groups from the delimiter match.
        result = AbstractBlock.isnext(self)
        if result:
            self.label = self.mo.groupdict().get('label')
            self.text = self.mo.groupdict().get('text')
            self.index = self.mo.groupdict().get('index')
        return result
    def translate_entry(self):
        """Translate a labeled list entry: one or more term labels followed
        by the item text."""
        assert self.type == 'labeled'
        entrytag = subs_tag(self.tag.entry, self.attributes)
        labeltag = subs_tag(self.tag.label, self.attributes)
        writer.write(entrytag[0],trace='list entry open')
        writer.write(labeltag[0],trace='list label open')
        # Write labels.
        while Lex.next() is self:
            reader.read()   # Discard (already parsed item first line).
            writer.write_tag(self.tag.term, [self.label],
                             self.presubs, self.attributes,trace='list term')
            if self.text: break
        writer.write(labeltag[1],trace='list label close')
        # Write item text.
        self.translate_item()
        writer.write(entrytag[1],trace='list entry close')
    def translate_item(self):
        """Translate a list item's text plus any continued elements."""
        if self.type == 'callout':
            self.attributes['coids'] = calloutmap.calloutids(self.ordinal)
        itemtag = subs_tag(self.tag.item, self.attributes)
        writer.write(itemtag[0],trace='list item open')
        # Write ItemText.
        text = reader.read_until(lists.terminators)
        if self.text:
            text = [self.text] + list(text)
        if text:
            writer.write_tag(self.tag.text, text, self.presubs, self.attributes,trace='list text')
        # Process explicit and implicit list item continuations.
        while True:
            continuation = reader.read_next() == '+'
            if continuation: reader.read()  # Discard continuation line.
            while Lex.next() in (BlockTitle,AttributeList):
                # Consume continued element title and attributes.
                Lex.next().translate()
            if not continuation and BlockTitle.title:
                # Titled elements terminate the list.
                break
            next = Lex.next()
            if next in lists.open:
                break
            elif isinstance(next,List):
                next.translate()
            elif isinstance(next,Paragraph) and 'listelement' in next.options:
                next.translate()
            elif continuation:
                # This is where continued elements are processed.
                if next is Title:
                    message.error('section title not allowed in list item',halt=True)
                next.translate()
            else:
                break
        writer.write(itemtag[1],trace='list item close')
    @staticmethod
    def calc_style(index):
        """Return the numbered list style ('arabic'...) inferred from the
        list item index. An unrecognized index format fails the assertion
        below (this never returns None)."""
        if re.match(r'^\d+[\.>]$', index):
            style = 'arabic'
        elif re.match(r'^[ivx]+\)$', index):
            style = 'lowerroman'
        elif re.match(r'^[IVX]+\)$', index):
            style = 'upperroman'
        elif re.match(r'^[a-z]\.$', index):
            style = 'loweralpha'
        elif re.match(r'^[A-Z]\.$', index):
            style = 'upperalpha'
        else:
            assert False
        return style
    @staticmethod
    def calc_index(index,style):
        """Return the ordinal number of (1...) of the list item index
        for the given list style."""
        def roman_to_int(roman):
            # Subtractive roman numeral decoding, e.g. 'ix' -> 9.
            roman = roman.lower()
            digits = {'i':1,'v':5,'x':10}
            result = 0
            for i in range(len(roman)):
                digit = digits[roman[i]]
                # If next digit is larger this digit is negative.
                if i+1 < len(roman) and digits[roman[i+1]] > digit:
                    result -= digit
                else:
                    result += digit
            return result
        index = index[:-1]  # Drop the trailing '.', '>' or ')'.
        if style == 'arabic':
            ordinal = int(index)
        elif style == 'lowerroman':
            ordinal = roman_to_int(index)
        elif style == 'upperroman':
            ordinal = roman_to_int(index)
        elif style == 'loweralpha':
            ordinal = ord(index) - ord('a') + 1
        elif style == 'upperalpha':
            ordinal = ord(index) - ord('A') + 1
        else:
            assert False
        return ordinal
    def check_index(self):
        """Check calculated self.ordinal (1,2,...) against the item number
        in the document (self.index) and check the number style is the same as
        the first item (self.number_style)."""
        assert self.type in ('numbered','callout')
        if self.index:
            style = self.calc_style(self.index)
            if style != self.number_style:
                message.warning('list item style: expected %s got %s' %
                        (self.number_style,style), offset=1)
            ordinal = self.calc_index(self.index,style)
            if ordinal != self.ordinal:
                message.warning('list item index: expected %s got %s' %
                        (self.ordinal,ordinal), offset=1)
    def check_tags(self):
        """ Check that all necessary tags are present. """
        tags = set(Lists.TAGS)
        if self.type != 'labeled':
            # Only labeled lists use the entry/label/term tags.
            tags = tags.difference(['entry','label','term'])
        missing = tags.difference(self.tag.keys())
        if missing:
            self.error('missing tag(s): %s' % ','.join(missing), halt=True)
    def translate(self):
        """Translate the whole list: write the open tag, each item (recursing
        into nested lists via translate_item), then the close tag."""
        AbstractBlock.translate(self)
        if self.short_name() in ('bibliography','glossary','qanda'):
            message.deprecated('old %s list syntax' % self.short_name())
        lists.open.append(self)
        attrs = self.mo.groupdict().copy()
        for k in ('label','text','index'):
            if k in attrs: del attrs[k]
        if self.index:
            # Set the numbering style from first list item.
            attrs['style'] = self.calc_style(self.index)
        BlockTitle.consume(attrs)
        AttributeList.consume(attrs)
        self.merge_attributes(attrs,['tags'])
        if self.type in ('numbered','callout'):
            self.number_style = self.attributes.get('style')
            if self.number_style not in self.NUMBER_STYLES:
                message.error('illegal numbered list style: %s' % self.number_style)
                # Fall back to default style.
                self.attributes['style'] = self.number_style = self.style
        self.tag = lists.tags[self.parameters.tags]
        self.check_tags()
        if 'width' in self.attributes:
            # Set horizontal list 'labelwidth' and 'itemwidth' attributes.
            v = str(self.attributes['width'])
            mo = re.match(r'^(\d{1,2})%?$',v)
            if mo:
                labelwidth = int(mo.group(1))
                self.attributes['labelwidth'] = str(labelwidth)
                self.attributes['itemwidth'] = str(100-labelwidth)
            else:
                self.error('illegal attribute value: width="%s"' % v)
        stag,etag = subs_tag(self.tag.list, self.attributes)
        if stag:
            writer.write(stag,trace='list open')
        self.ordinal = 0
        # Process list till list syntax changes or there is a new title.
        while Lex.next() is self and not BlockTitle.title:
            self.ordinal += 1
            document.attributes['listindex'] = str(self.ordinal)
            if self.type in ('numbered','callout'):
                self.check_index()
            if self.type in ('bulleted','numbered','callout'):
                reader.read() # Discard (already parsed item first line).
                self.translate_item()
            elif self.type == 'labeled':
                self.translate_entry()
            else:
                raise AssertionError,'illegal [%s] list type' % self.name
        if etag:
            writer.write(etag,trace='list close')
        if self.type == 'callout':
            calloutmap.validate(self.ordinal)
            calloutmap.listclose()
        lists.open.pop()
        if len(lists.open):
            # Restore the enclosing (parent) list's item index.
            document.attributes['listindex'] = str(lists.open[-1].ordinal)
class Lists(AbstractBlocks):
    """List of List objects."""
    BLOCK_TYPE = List
    PREFIX = 'listdef-'
    TYPES = ('bulleted','numbered','labeled','callout')
    TAGS = ('list', 'entry','item','text', 'label','term')
    def __init__(self):
        AbstractBlocks.__init__(self)
        self.open = []          # A stack of the current and parent lists.
        self.tags = {}          # Maps tags name to its tags AttrDict.
        self.terminators = None # List of compiled re's.
    def initialize(self):
        # Patterns that terminate a list item body.
        self.terminators = [re.compile(pattern) for pattern in (
            r'^\+$|^$',
            AttributeList.pattern,
            lists.delimiters,
            blocks.delimiters,
            tables.delimiters,
            tables_OLD.delimiters,
        )]
    def load(self,sections):
        AbstractBlocks.load(self,sections)
        self.load_tags(sections)
    def load_tags(self,sections):
        """
        Load listtags-* conf file sections to self.tags.
        """
        listtags_reo = re.compile(r'^listtags-(?P<name>\w+)$')
        for section in sections.keys():
            mo = listtags_reo.match(section)
            if not mo:
                continue
            name = mo.group('name')
            # Update an existing tags entry if one was loaded earlier.
            d = self.tags.get(name)
            if d is None:
                d = AttrDict()
            parse_entries(sections.get(section,()),d)
            for k in d.keys():
                if k not in self.TAGS:
                    message.warning('[%s] contains illegal list tag: %s' %
                            (section,k))
            self.tags[name] = d
    def validate(self):
        AbstractBlocks.validate(self)
        for block in self.blocks:
            # Check list has valid type.
            if block.type not in Lists.TYPES:
                raise EAsciiDoc('[%s] illegal type' % block.name)
            block.validate()
    def dump(self):
        AbstractBlocks.dump(self)
        for name,tags in self.tags.items():
            dump_section('listtags-'+name, tags)
class DelimitedBlock(AbstractBlock):
    """
    Delimited block definition ([blockdef-*] configuration file sections).
    """
    def __init__(self):
        AbstractBlock.__init__(self)
    def load(self,name,entries):
        AbstractBlock.load(self,name,entries)
    def dump(self):
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('')
    def isnext(self):
        return AbstractBlock.isnext(self)
    def translate(self):
        """Consume the block up to its closing delimiter and (unless the
        block is skipped or unsafe) write the marked-up content."""
        AbstractBlock.translate(self)
        reader.read() # Discard delimiter.
        attrs = {}
        # Comment blocks don't consume the pending title/attribute list.
        if self.short_name() != 'comment':
            BlockTitle.consume(attrs)
            AttributeList.consume(attrs)
        self.merge_attributes(attrs)
        options = self.parameters.options
        if 'skip' in options:
            reader.read_until(self.delimiter,same_file=True)
        elif safe() and self.name == 'blockdef-backend':
            # Backend blocks inject raw backend markup -- disallowed in
            # safe mode.
            message.unsafe('Backend Block')
            reader.read_until(self.delimiter,same_file=True)
        else:
            template = self.parameters.template
            template = subs_attrs(template,attrs)
            name = self.short_name()+' block'
            if 'sectionbody' in options:
                # The body is treated like a section body.
                stag,etag = config.section2tags(template,self.attributes)
                writer.write(stag,trace=name+' open')
                Section.translate_body(self)
                writer.write(etag,trace=name+' close')
            else:
                stag = config.section2tags(template,self.attributes,skipend=True)[0]
                body = reader.read_until(self.delimiter,same_file=True)
                presubs = self.parameters.presubs
                postsubs = self.parameters.postsubs
                body = Lex.subs(body,presubs)
                if self.parameters.filter:
                    body = filter_lines(self.parameters.filter,body,self.attributes)
                body = Lex.subs(body,postsubs)
                # Write start tag, content, end tag.
                etag = config.section2tags(template,self.attributes,skipstart=True)[1]
                writer.write(dovetail_tags(stag,body,etag),trace=name)
                trace(self.short_name()+' block close',etag)
        if reader.eof():
            self.error('missing closing delimiter',self.start)
        else:
            delimiter = reader.read() # Discard delimiter line.
            assert re.match(self.delimiter,delimiter)
class DelimitedBlocks(AbstractBlocks):
    """List of delimited blocks."""
    BLOCK_TYPE = DelimitedBlock
    PREFIX = 'blockdef-'
    def __init__(self):
        AbstractBlocks.__init__(self)
    def load(self,sections):
        """Update blocks defined in 'sections' dictionary."""
        AbstractBlocks.load(self,sections)
    def validate(self):
        # All validation is inherited from AbstractBlocks.
        AbstractBlocks.validate(self)
class Column:
    """Table column."""
    def __init__(self, width=None, align_spec=None, style=None):
        # Proportional or percentage width string (defaults to '1').
        self.width = width or '1'
        self.halign, self.valign = Table.parse_align_spec(align_spec)
        self.style = style # Style name or None.
        # Calculated attribute values.
        self.abswidth = None # 1.. (page units).
        self.pcwidth = None # 1..99 (percentage).
class Cell:
    """A single table cell: source data plus span/alignment/style."""
    def __init__(self, data, span_spec=None, align_spec=None, style=None):
        self.data = data
        self.span, self.vspan = Table.parse_span_spec(span_spec)
        self.halign, self.valign = Table.parse_align_spec(align_spec)
        self.style = style
    def __repr__(self):
        return '<Cell: %d.%d %s.%s %s "%s">' % (
                self.span, self.vspan,
                self.halign, self.valign,
                self.style or '',
                self.data)
class Table(AbstractBlock):
ALIGN = {'<':'left', '>':'right', '^':'center'}
VALIGN = {'<':'top', '>':'bottom', '^':'middle'}
FORMATS = ('psv','csv','dsv')
SEPARATORS = dict(
csv=',',
dsv=r':|\n',
# The count and align group matches are not exact.
psv=r'((?<!\S)((?P<span>[\d.]+)(?P<op>[*+]))?(?P<align>[<\^>.]{,3})?(?P<style>[a-z])?)?\|'
)
    def __init__(self):
        AbstractBlock.__init__(self)
        # Table specific configuration file entries.
        self.CONF_ENTRIES += ('format','tags','separator')
        # tabledef conf file parameters.
        self.format='psv'
        self.separator=None
        self.tags=None # Name of tabletags-<tags> conf section.
        # Calculated parameters.
        self.abswidth=None # 1.. (page units).
        self.pcwidth = None # 1..99 (percentage).
        self.rows=[] # Parsed rows, each row is a list of Cells.
        self.columns=[] # List of Columns.
@staticmethod
def parse_align_spec(align_spec):
"""
Parse AsciiDoc cell alignment specifier and return 2-tuple with
horizonatal and vertical alignment names. Unspecified alignments
set to None.
"""
result = (None, None)
if align_spec:
mo = re.match(r'^([<\^>])?(\.([<\^>]))?$', align_spec)
if mo:
result = (Table.ALIGN.get(mo.group(1)),
Table.VALIGN.get(mo.group(3)))
return result
@staticmethod
def parse_span_spec(span_spec):
"""
Parse AsciiDoc cell span specifier and return 2-tuple with horizonatal
and vertical span counts. Set default values (1,1) if not
specified.
"""
result = (None, None)
if span_spec:
mo = re.match(r'^(\d+)?(\.(\d+))?$', span_spec)
if mo:
result = (mo.group(1) and int(mo.group(1)),
mo.group(3) and int(mo.group(3)))
return (result[0] or 1, result[1] or 1)
    def load(self,name,entries):
        # Delegate to the generic block section loader.
        AbstractBlock.load(self,name,entries)
    def dump(self):
        """Write table definition to stdout in conf file format."""
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('format='+self.format)
        write('')
    def validate(self):
        """Validate table definition after the configuration is loaded."""
        AbstractBlock.validate(self)
        if self.format not in Table.FORMATS:
            self.error('illegal format=%s' % self.format,halt=True)
        self.tags = self.tags or 'default'
        # Every tags name used by the table or its styles must have a
        # corresponding [tabletags-<name>] section.
        tags = [self.tags]
        tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
        for t in tags:
            if t not in tables.tags:
                self.error('missing section: [tabletags-%s]' % t,halt=True)
        if self.separator:
            # Evaluate escape characters.
            # NOTE(review): eval of conf file text -- acceptable only while
            # configuration files are trusted input.
            self.separator = eval('"'+self.separator+'"')
        #TODO: Move to class Tables
        # Check global table parameters.
        elif config.pagewidth is None:
            self.error('missing [miscellaneous] entry: pagewidth')
        elif config.pageunits is None:
            self.error('missing [miscellaneous] entry: pageunits')
    def validate_attributes(self):
        """Validate and parse table attributes."""
        # Set defaults.
        format = self.format
        tags = self.tags
        separator = self.separator
        abswidth = float(config.pagewidth)
        pcwidth = 100.0
        for k,v in self.attributes.items():
            if k == 'format':
                if v not in self.FORMATS:
                    self.error('illegal %s=%s' % (k,v))
                else:
                    format = v
            elif k == 'tags':
                if v not in tables.tags:
                    self.error('illegal %s=%s' % (k,v))
                else:
                    tags = v
            elif k == 'separator':
                separator = v
            elif k == 'width':
                # Table width expressed as a percentage of the page width.
                if not re.match(r'^\d{1,3}%$',v) or int(v[:-1]) > 100:
                    self.error('illegal %s=%s' % (k,v))
                else:
                    abswidth = float(v[:-1])/100 * config.pagewidth
                    pcwidth = float(v[:-1])
        # Calculate separator if it has not been specified.
        if not separator:
            separator = Table.SEPARATORS[format]
        if format == 'csv':
            # The csv module requires a single-character delimiter.
            if len(separator) > 1:
                self.error('illegal csv separator=%s' % separator)
                separator = ','
        else:
            if not is_re(separator):
                self.error('illegal regular expression: separator=%s' %
                        separator)
        # Store the validated results for use by the parsing methods.
        self.parameters.format = format
        self.parameters.tags = tags
        self.parameters.separator = separator
        self.abswidth = abswidth
        self.pcwidth = pcwidth
    def get_tags(self,params):
        # Resolve the 'tags' parameter name to its tabletags AttrDict.
        tags = self.get_param('tags',params)
        assert(tags and tags in tables.tags)
        return tables.tags[tags]
def get_style(self,prefix):
"""
Return the style dictionary whose name starts with 'prefix'.
"""
if prefix is None:
return None
names = self.styles.keys()
names.sort()
for name in names:
if name.startswith(prefix):
return self.styles[name]
else:
self.error('missing style: %s*' % prefix)
return None
    def parse_cols(self, cols, halign, valign):
        """
        Build list of column objects from table 'cols', 'halign' and 'valign'
        attributes. Missing widths are filled in and each column's
        absolute and percentage widths are calculated.
        """
        # [<multiplier>*][<align>][<width>][<style>]
        COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[<\^>.]{,3})?(?P<width>\d+%?)?(?P<style>[a-z]\w*)?$'
        # [<multiplier>*][<width>][<align>][<style>]
        COLS_RE2 = r'^((?P<count>\d+)\*)?(?P<width>\d+%?)?(?P<align>[<\^>.]{,3})?(?P<style>[a-z]\w*)?$'
        reo1 = re.compile(COLS_RE1)
        reo2 = re.compile(COLS_RE2)
        cols = str(cols)
        if re.match(r'^\d+$',cols):
            # A bare number means that many default columns.
            for i in range(int(cols)):
                self.columns.append(Column())
        else:
            for col in re.split(r'\s*,\s*',cols):
                mo = reo1.match(col)
                if not mo:
                    mo = reo2.match(col)
                if mo:
                    count = int(mo.groupdict().get('count') or 1)
                    for i in range(count):
                        self.columns.append(
                            Column(mo.group('width'), mo.group('align'),
                                   self.get_style(mo.group('style')))
                        )
                else:
                    self.error('illegal column spec: %s' % col,self.start)
        # Set column (and indirectly cell) default alignments.
        for col in self.columns:
            col.halign = col.halign or halign or document.attributes.get('halign') or 'left'
            col.valign = col.valign or valign or document.attributes.get('valign') or 'top'
        # Validate widths and calculate missing widths.
        n = 0; percents = 0; props = 0
        for col in self.columns:
            if col.width:
                if col.width[-1] == '%': percents += int(col.width[:-1])
                else: props += int(col.width)
                n += 1
        if percents > 0 and props > 0:
            self.error('mixed percent and proportional widths: %s'
                    % cols,self.start)
        pcunits = percents > 0
        # Fill in missing widths.
        if n < len(self.columns) and percents < 100:
            if pcunits:
                # Distribute the unused percentage over unsized columns.
                width = float(100 - percents)/float(len(self.columns) - n)
            else:
                width = 1
            for col in self.columns:
                if not col.width:
                    if pcunits:
                        col.width = str(int(width))+'%'
                        percents += width
                    else:
                        col.width = str(width)
                        props += width
        # Calculate column alignment and absolute and percent width values.
        percents = 0
        for col in self.columns:
            if pcunits:
                col.pcwidth = float(col.width[:-1])
            else:
                col.pcwidth = (float(col.width)/props)*100
            col.abswidth = self.abswidth * (col.pcwidth/100)
            if config.pageunits in ('cm','mm','in','em'):
                col.abswidth = '%.2f' % round(col.abswidth,2)
            else:
                col.abswidth = '%d' % round(col.abswidth)
            percents += col.pcwidth
            col.pcwidth = int(col.pcwidth)
        if round(percents) > 100:
            self.error('total width exceeds 100%%: %s' % cols,self.start)
        elif round(percents) < 100:
            self.error('total width less than 100%%: %s' % cols,self.start)
    def build_colspecs(self):
        """
        Generate column related substitution attributes.
        Sets the 'colspecs' attribute to the rendered per-column markup.
        """
        cols = []
        i = 1
        for col in self.columns:
            colspec = self.get_tags(col.style).colspec
            if colspec:
                # Expose this column's properties to the colspec markup.
                self.attributes['halign'] = col.halign
                self.attributes['valign'] = col.valign
                self.attributes['colabswidth'] = col.abswidth
                self.attributes['colpcwidth'] = col.pcwidth
                self.attributes['colnumber'] = str(i)
                s = subs_attrs(colspec, self.attributes)
                if not s:
                    message.warning('colspec dropped: contains undefined attribute')
                else:
                    cols.append(s)
            i += 1
        if cols:
            self.attributes['colspecs'] = writer.newline.join(cols)
def parse_rows(self, text):
"""
Parse the table source text into self.rows (a list of rows, each row
is a list of Cells.
"""
reserved = {} # Cols reserved by rowspans (indexed by row number).
if self.parameters.format in ('psv','dsv'):
ri = 0 # Current row index 0..
cells = self.parse_psv_dsv(text)
row = []
ci = 0 # Column counter 0..colcount
for cell in cells:
colcount = len(self.columns) - reserved.get(ri,0)
if cell.vspan > 1:
# Reserve spanned columns from ensuing rows.
for i in range(1, cell.vspan):
reserved[ri+i] = reserved.get(ri+i, 0) + cell.span
ci += cell.span
if ci <= colcount:
row.append(cell)
if ci >= colcount:
self.rows.append(row)
ri += 1
row = []
ci = 0
if ci > colcount:
message.warning('table row %d: span exceeds number of columns'
% ri)
elif self.parameters.format == 'csv':
self.rows = self.parse_csv(text)
else:
assert True,'illegal table format'
# Check that all row spans match.
for ri,row in enumerate(self.rows):
row_span = 0
for cell in row:
row_span += cell.span
row_span += reserved.get(ri,0)
if ri == 0:
header_span = row_span
if row_span < header_span:
message.warning('table row %d: does not span all columns' % (ri+1))
if row_span > header_span:
message.warning('table row %d: exceeds columns span' % (ri+1))
# Check that now row spans exceed the number of rows.
if len([x for x in reserved.keys() if x >= len(self.rows)]) > 0:
message.warning('one or more cell spans exceed the available rows')
def subs_rows(self, rows, rowtype='body'):
"""
Return a string of output markup from a list of rows, each row
is a list of raw data text.
"""
tags = tables.tags[self.parameters.tags]
if rowtype == 'header':
rtag = tags.headrow
elif rowtype == 'footer':
rtag = tags.footrow
else:
rtag = tags.bodyrow
result = []
stag,etag = subs_tag(rtag,self.attributes)
for row in rows:
result.append(stag)
result += self.subs_row(row,rowtype)
result.append(etag)
return writer.newline.join(result)
    def subs_row(self, row, rowtype):
        """
        Substitute the list of Cells using the data tag.
        Returns a list of marked up table cell elements.
        """
        result = []
        i = 0
        for cell in row:
            if i >= len(self.columns):
                break # Skip cells outside the header width.
            col = self.columns[i]
            # Per-cell substitution attributes: alignment, widths and spans.
            self.attributes['halign'] = cell.halign or col.halign
            self.attributes['valign'] = cell.valign or col.valign
            self.attributes['colabswidth'] = col.abswidth
            self.attributes['colpcwidth'] = col.pcwidth
            self.attributes['colnumber'] = str(i+1)
            self.attributes['colspan'] = str(cell.span)
            self.attributes['colstart'] = self.attributes['colnumber']
            self.attributes['colend'] = str(i+cell.span)
            self.attributes['rowspan'] = str(cell.vspan)
            self.attributes['morerows'] = str(cell.vspan-1)
            # Fill missing column data with blanks.
            # NOTE(review): this branch is unreachable -- the 'break' above
            # guarantees i < len(self.columns) here.
            if i > len(self.columns) - 1:
                data = ''
            else:
                data = cell.data
            if rowtype == 'header':
                # Use table style unless overriden by cell style.
                colstyle = cell.style
            else:
                # If the cell style is not defined use the column style.
                colstyle = cell.style or col.style
            tags = self.get_tags(colstyle)
            presubs,postsubs = self.get_subs(colstyle)
            data = [data]
            data = Lex.subs(data, presubs)
            data = filter_lines(self.get_param('filter',colstyle),
                    data, self.attributes)
            data = Lex.subs(data, postsubs)
            if rowtype != 'header':
                ptag = tags.paragraph
                if ptag:
                    # Wrap each blank-line-separated chunk in paragraph tags.
                    stag,etag = subs_tag(ptag,self.attributes)
                    text = '\n'.join(data).strip()
                    data = []
                    for para in re.split(r'\n{2,}',text):
                        data += dovetail_tags([stag],para.split('\n'),[etag])
            if rowtype == 'header':
                dtag = tags.headdata
            elif rowtype == 'footer':
                dtag = tags.footdata
            else:
                dtag = tags.bodydata
            stag,etag = subs_tag(dtag,self.attributes)
            result = result + dovetail_tags([stag],data,[etag])
            i += cell.span
        return result
def parse_csv(self,text):
"""
Parse the table source text and return a list of rows, each row
is a list of Cells.
"""
import StringIO
import csv
rows = []
rdr = csv.reader(StringIO.StringIO('\r\n'.join(text)),
delimiter=self.parameters.separator, skipinitialspace=True)
try:
for row in rdr:
rows.append([Cell(data) for data in row])
except Exception:
self.error('csv parse error: %s' % row)
return rows
    def parse_psv_dsv(self,text):
        """
        Parse list of PSV or DSV table source text lines and return a list of
        Cells.

        Scans the joined source with the table's separator regexp; the
        separator's named groups (span, op, align, style) describe the cell
        that FOLLOWS each separator.
        """
        def append_cell(data, span_spec, op, align_spec, style):
            # Append one parsed cell to the enclosing 'cells' list.
            # '*' repeats the cell span_spec times, '+' makes it span columns.
            op = op or '+'
            if op == '*': # Cell multiplier.
                span = Table.parse_span_spec(span_spec)[0]
                for i in range(span):
                    cells.append(Cell(data, '1', align_spec, style))
            elif op == '+': # Column spanner.
                cells.append(Cell(data, span_spec, align_spec, style))
            else:
                self.error('illegal table cell operator')
        text = '\n'.join(text)
        # (?msu): multiline, dotall, unicode matching for the separator.
        separator = '(?msu)'+self.parameters.separator
        format = self.parameters.format
        start = 0
        span = None
        op = None
        align = None
        style = None
        cells = []
        data = ''
        for mo in re.finditer(separator,text):
            data += text[start:mo.start()]
            if data.endswith('\\'):
                data = data[:-1]+mo.group() # Reinstate escaped separators.
            else:
                append_cell(data, span, op, align, style)
                span = mo.groupdict().get('span')
                op = mo.groupdict().get('op')
                align = mo.groupdict().get('align')
                style = mo.groupdict().get('style')
                if style:
                    style = self.get_style(style)
                data = ''
            start = mo.end()
        # Last cell follows final separator.
        data += text[start:]
        append_cell(data, span, op, align, style)
        # We expect a dummy blank item preceding first PSV cell.
        if format == 'psv':
            if cells[0].data.strip() != '':
                self.error('missing leading separator: %s' % separator,
                        self.start)
            else:
                cells.pop(0)
        return cells
    def translate(self):
        """Read the table from the input stream, parse it and write the
        marked up table to the output stream."""
        AbstractBlock.translate(self)
        reader.read() # Discard delimiter.
        # Reset instance specific properties.
        self.columns = []
        self.rows = []
        attrs = {}
        BlockTitle.consume(attrs)
        # Mix in document attribute list.
        AttributeList.consume(attrs)
        self.merge_attributes(attrs)
        self.validate_attributes()
        # Add global and calculated configuration parameters.
        self.attributes['pagewidth'] = config.pagewidth
        self.attributes['pageunits'] = config.pageunits
        self.attributes['tableabswidth'] = int(self.abswidth)
        self.attributes['tablepcwidth'] = int(self.pcwidth)
        # Read the entire table.
        text = reader.read_until(self.delimiter)
        if reader.eof():
            self.error('missing closing delimiter',self.start)
        else:
            delimiter = reader.read() # Discard closing delimiter.
            assert re.match(self.delimiter,delimiter)
        if len(text) == 0:
            message.warning('[%s] table is empty' % self.name)
            return
        cols = attrs.get('cols')
        if not cols:
            # Calculate column count from number of items in first line.
            if self.parameters.format == 'csv':
                cols = text[0].count(self.parameters.separator) + 1
            else:
                cols = 0
                for cell in self.parse_psv_dsv(text[:1]):
                    cols += cell.span
        self.parse_cols(cols, attrs.get('halign'), attrs.get('valign'))
        # Set calculated attributes.
        self.attributes['colcount'] = len(self.columns)
        self.build_colspecs()
        self.parse_rows(text)
        # The 'rowcount' attribute is used by the experimental LaTeX backend.
        self.attributes['rowcount'] = str(len(self.rows))
        # Generate headrows, footrows, bodyrows.
        # Headrow, footrow and bodyrow data replaces same named attributes in
        # the table markup template. In order to ensure this data does not get
        # a second attribute substitution (which would interfere with any
        # already substituted inline passthroughs) unique placeholders are used
        # (the tab character does not appear elsewhere since it is expanded on
        # input) which are replaced after template attribute substitution.
        headrows = footrows = bodyrows = None
        if self.rows and 'header' in self.parameters.options:
            headrows = self.subs_rows(self.rows[0:1],'header')
            self.attributes['headrows'] = '\x07headrows\x07'
            self.rows = self.rows[1:]
        if self.rows and 'footer' in self.parameters.options:
            footrows = self.subs_rows( self.rows[-1:], 'footer')
            self.attributes['footrows'] = '\x07footrows\x07'
            self.rows = self.rows[:-1]
        if self.rows:
            bodyrows = self.subs_rows(self.rows)
            self.attributes['bodyrows'] = '\x07bodyrows\x07'
        table = subs_attrs(config.sections[self.parameters.template],
                           self.attributes)
        table = writer.newline.join(table)
        # Before we finish replace the table head, foot and body place holders
        # with the real data.
        if headrows:
            table = table.replace('\x07headrows\x07', headrows, 1)
        if footrows:
            table = table.replace('\x07footrows\x07', footrows, 1)
        if bodyrows:
            table = table.replace('\x07bodyrows\x07', bodyrows, 1)
        writer.write(table,trace='table')
class Tables(AbstractBlocks):
    """List of tables."""
    BLOCK_TYPE = Table
    PREFIX = 'tabledef-'
    # Tag names that a [tabletags-*] configuration section may define.
    TAGS = ('colspec', 'headrow','footrow','bodyrow',
            'headdata','footdata', 'bodydata','paragraph')
    def __init__(self):
        AbstractBlocks.__init__(self)
        # Table tags dictionary. Each entry is a tags dictionary.
        self.tags={}
    def load(self,sections):
        """Load [tabledef-*] block definitions and [tabletags-*] tag
        sections from the conf file sections."""
        AbstractBlocks.load(self,sections)
        self.load_tags(sections)
    def load_tags(self,sections):
        """
        Load tabletags-* conf file sections to self.tags.
        """
        for section in sections.keys():
            mo = re.match(r'^tabletags-(?P<name>\w+)$',section)
            if mo:
                name = mo.group('name')
                # Merge into an existing tags entry if one was loaded earlier.
                if name in self.tags:
                    d = self.tags[name]
                else:
                    d = AttrDict()
                parse_entries(sections.get(section,()),d)
                for k in d.keys():
                    if k not in self.TAGS:
                        message.warning('[%s] contains illegal table tag: %s' %
                                (section,k))
                self.tags[name] = d
    def validate(self):
        """Validate table definitions, propagating defaults from
        [tabledef-default] and [tabletags-default] to unspecified entries."""
        AbstractBlocks.validate(self)
        # Check we have a default table definition.
        for i in range(len(self.blocks)):
            if self.blocks[i].name == 'tabledef-default':
                default = self.blocks[i]
                break
        else:
            raise EAsciiDoc,'missing section: [tabledef-default]'
        # Propagate defaults to unspecified table parameters.
        for b in self.blocks:
            if b is not default:
                if b.format is None: b.format = default.format
                if b.template is None: b.template = default.template
        # Check tags and propagate default tags.
        if not 'default' in self.tags:
            raise EAsciiDoc,'missing section: [tabletags-default]'
        default = self.tags['default']
        for tag in ('bodyrow','bodydata','paragraph'): # Mandatory default tags.
            if tag not in default:
                raise EAsciiDoc,'missing [tabletags-default] entry: %s' % tag
        for t in self.tags.values():
            if t is not default:
                if t.colspec is None: t.colspec = default.colspec
                if t.headrow is None: t.headrow = default.headrow
                if t.footrow is None: t.footrow = default.footrow
                if t.bodyrow is None: t.bodyrow = default.bodyrow
                if t.headdata is None: t.headdata = default.headdata
                if t.footdata is None: t.footdata = default.footdata
                if t.bodydata is None: t.bodydata = default.bodydata
                if t.paragraph is None: t.paragraph = default.paragraph
        # Use body tags if header and footer tags are not specified.
        for t in self.tags.values():
            if not t.headrow: t.headrow = t.bodyrow
            if not t.footrow: t.footrow = t.bodyrow
            if not t.headdata: t.headdata = t.bodydata
            if not t.footdata: t.footdata = t.bodydata
        # Check table definitions are valid.
        for b in self.blocks:
            b.validate()
    def dump(self):
        """Dump table block definitions and tag sections to stdout."""
        AbstractBlocks.dump(self)
        for k,v in self.tags.items():
            dump_section('tabletags-'+k, v)
class Macros:
    """Registry of all configured macros (inline, block and system) plus
    the built-in system macro; dispatches macro matching and substitution."""
    # Default system macro syntax.
    SYS_RE = r'(?u)^(?P<name>[\\]?\w(\w|-)*?)::(?P<target>\S*?)' + \
             r'(\[(?P<attrlist>.*?)\])$'
    def __init__(self):
        self.macros = [] # List of Macros.
        self.current = None # The last matched block macro.
        self.passthroughs = []
        # Initialize default system macro.
        m = Macro()
        m.pattern = self.SYS_RE
        m.prefix = '+'
        m.reo = re.compile(m.pattern)
        self.macros.append(m)
    def load(self,entries):
        """Load macro definitions from [macros] section entries. An entry
        with a pattern but no name deletes the macro with that pattern."""
        for entry in entries:
            m = Macro()
            m.load(entry)
            if m.name is None:
                # Delete undefined macro.
                for i,m2 in enumerate(self.macros):
                    if m2.pattern == m.pattern:
                        del self.macros[i]
                        break
                else:
                    message.warning('unable to delete missing macro: %s' % m.pattern)
            else:
                # Check for duplicates.
                for m2 in self.macros:
                    if m2.pattern == m.pattern:
                        message.verbose('macro redefinition: %s%s' % (m.prefix,m.name))
                        break
                else:
                    self.macros.append(m)
    def dump(self):
        """Write the loaded macro definitions to stdout in conf file form."""
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('[macros]')
        # Dump all macros except the first (built-in system) macro.
        for m in self.macros[1:]:
            # Escape = in pattern.
            macro = '%s=%s%s' % (m.pattern.replace('=',r'\='), m.prefix, m.name)
            if m.subslist is not None:
                macro += '[' + ','.join(m.subslist) + ']'
            write(macro)
        write('')
    def validate(self):
        # Check all named sections exist.
        if config.verbose:
            for m in self.macros:
                if m.name and m.prefix != '+':
                    m.section_name()
    def subs(self,text,prefix='',callouts=False):
        """Substitute all macros with the given prefix in text."""
        # If callouts is True then only callout macros are processed, if False
        # then all non-callout macros are processed.
        result = text
        for m in self.macros:
            if m.prefix == prefix:
                # XOR: selects callout macros when callouts=True and
                # non-callout macros when callouts=False.
                if callouts ^ (m.name != 'callout'):
                    result = m.subs(result)
        return result
    def isnext(self):
        """Return matching macro if block macro is next on reader."""
        reader.skip_blank_lines()
        line = reader.read_next()
        if line:
            for m in self.macros:
                if m.prefix == '#':
                    if m.reo.match(line):
                        self.current = m
                        return m
        return False
    def match(self,prefix,name,text):
        """Return re match object matching 'text' with macro type 'prefix',
        macro name 'name'."""
        for m in self.macros:
            if m.prefix == prefix:
                mo = m.reo.match(text)
                if mo:
                    if m.name == name:
                        return mo
                    # 'name' may itself be a regexp matching the name group.
                    if re.match(name,mo.group('name')):
                        return mo
        return None
    def extract_passthroughs(self,text,prefix=''):
        """ Extract the passthrough text and replace with temporary
        placeholders."""
        self.passthroughs = []
        for m in self.macros:
            if m.has_passthrough() and m.prefix == prefix:
                text = m.subs_passthroughs(text, self.passthroughs)
        return text
    def restore_passthroughs(self,text):
        """ Replace passthough placeholders with the original passthrough
        text."""
        for i,v in enumerate(self.passthroughs):
            text = text.replace('\x07'+str(i)+'\x07', self.passthroughs[i])
        return text
class Macro:
    """A single macro definition: matching regexp, template section name,
    prefix type and default passthrough substitutions."""
    def __init__(self):
        self.pattern = None # Matching regular expression.
        self.name = '' # Conf file macro name (None if implicit).
        self.prefix = '' # '' if inline, '+' if system, '#' if block.
        self.reo = None # Compiled pattern re object.
        self.subslist = [] # Default subs for macros passtext group.
    def has_passthrough(self):
        """True if the macro pattern captures a 'passtext' group."""
        return self.pattern.find(r'(?P<passtext>') >= 0
    def section_name(self,name=None):
        """Return macro markup template section name based on macro name and
        prefix. Return None section not found."""
        assert self.prefix != '+'
        if not name:
            assert self.name
            name = self.name
        if self.prefix == '#':
            suffix = '-blockmacro'
        else:
            suffix = '-inlinemacro'
        if name+suffix in config.sections:
            return name+suffix
        else:
            message.warning('missing macro section: [%s]' % (name+suffix))
            return None
    def load(self,entry):
        """Parse a [macros] conf entry (pattern=prefix+name[subslist]) into
        this macro's attributes."""
        e = parse_entry(entry)
        if e is None:
            # Only the macro pattern was specified, mark for deletion.
            self.name = None
            self.pattern = entry
            return
        if not is_re(e[0]):
            raise EAsciiDoc,'illegal macro regular expression: %s' % e[0]
        pattern, name = e
        if name and name[0] in ('+','#'):
            prefix, name = name[0], name[1:]
        else:
            prefix = ''
        # Parse passthrough subslist.
        mo = re.match(r'^(?P<name>[^[]*)(\[(?P<subslist>.*)\])?$', name)
        name = mo.group('name')
        if name and not is_name(name):
            raise EAsciiDoc,'illegal section name in macro entry: %s' % entry
        subslist = mo.group('subslist')
        if subslist is not None:
            # Parse and validate passthrough subs.
            subslist = parse_options(subslist, SUBS_OPTIONS,
                    'illegal subs in macro entry: %s' % entry)
        self.pattern = pattern
        self.reo = re.compile(pattern)
        self.prefix = prefix
        self.name = name
        self.subslist = subslist or []
    def subs(self,text):
        """Substitute all occurrences of this macro in text and return the
        result."""
        def subs_func(mo):
            """Function called to perform macro substitution.
            Uses matched macro regular expression object and returns string
            containing the substituted macro body."""
            # Check if macro reference is escaped.
            if mo.group()[0] == '\\':
                return mo.group()[1:] # Strip leading backslash.
            d = mo.groupdict()
            # Delete groups that didn't participate in match.
            for k,v in d.items():
                if v is None: del d[k]
            if self.name:
                name = self.name
            else:
                if not 'name' in d:
                    message.warning('missing macro name group: %s' % mo.re.pattern)
                    return ''
                name = d['name']
            section_name = self.section_name(name)
            if not section_name:
                return ''
            # If we're dealing with a block macro get optional block ID and
            # block title.
            if self.prefix == '#' and self.name != 'comment':
                AttributeList.consume(d)
                BlockTitle.consume(d)
            # Parse macro attributes.
            if 'attrlist' in d:
                if d['attrlist'] in (None,''):
                    del d['attrlist']
                else:
                    if self.prefix == '':
                        # Unescape ] characters in inline macros.
                        d['attrlist'] = d['attrlist'].replace('\\]',']')
                    parse_attributes(d['attrlist'],d)
            # Generate option attributes.
            if 'options' in d:
                options = parse_options(d['options'], (),
                        '%s: illegal option name' % name)
                for option in options:
                    d[option+'-option'] = ''
            # Substitute single quoted attribute values in block macros.
            if self.prefix == '#':
                AttributeList.subs(d)
            if name == 'callout':
                listindex =int(d['index'])
                d['coid'] = calloutmap.add(listindex)
            # The alt attribute is the first image macro positional attribute.
            if name == 'image' and '1' in d:
                d['alt'] = d['1']
            # Unescape special characters in LaTeX target file names.
            if document.backend == 'latex' and 'target' in d and d['target']:
                if not '0' in d:
                    d['0'] = d['target']
                d['target']= config.subs_specialchars_reverse(d['target'])
            # BUG: We've already done attribute substitution on the macro which
            # means that any escaped attribute references are now unescaped and
            # will be substituted by config.subs_section() below. As a partial
            # fix have withheld {0} from substitution but this kludge doesn't
            # fix it for other attributes containing unescaped references.
            # Passthrough macros don't have this problem.
            a0 = d.get('0')
            if a0:
                d['0'] = chr(0) # Replace temporarily with unused character.
            body = config.subs_section(section_name,d)
            if len(body) == 0:
                result = ''
            elif len(body) == 1:
                result = body[0]
            else:
                if self.prefix == '#':
                    result = writer.newline.join(body)
                else:
                    # Internally processed inline macros use UNIX line
                    # separator.
                    result = '\n'.join(body)
            if a0:
                # Restore the withheld {0} attribute value.
                result = result.replace(chr(0), a0)
            return result
        return self.reo.sub(subs_func, text)
    def translate(self):
        """ Block macro translation."""
        assert self.prefix == '#'
        s = reader.read()
        before = s
        if self.has_passthrough():
            s = macros.extract_passthroughs(s,'#')
        s = subs_attrs(s)
        if s:
            s = self.subs(s)
            if self.has_passthrough():
                s = macros.restore_passthroughs(s)
            if s:
                trace('macro',before,s)
                writer.write(s)
    def subs_passthroughs(self, text, passthroughs):
        """ Replace macro attribute lists in text with placeholders.
        Substitute and append the passthrough attribute lists to the
        passthroughs list."""
        def subs_func(mo):
            """Function called to perform inline macro substitution.
            Uses matched macro regular expression object and returns string
            containing the substituted macro body."""
            # Don't process escaped macro references.
            if mo.group()[0] == '\\':
                return mo.group()
            d = mo.groupdict()
            if not 'passtext' in d:
                message.warning('passthrough macro %s: missing passtext group' %
                        d.get('name',''))
                return mo.group()
            passtext = d['passtext']
            if re.search('\x07\\d+\x07', passtext):
                message.warning('nested inline passthrough')
                return mo.group()
            if d.get('subslist'):
                if d['subslist'].startswith(':'):
                    message.error('block macro cannot occur here: %s' % mo.group(),
                            halt=True)
                subslist = parse_options(d['subslist'], SUBS_OPTIONS,
                        'illegal passthrough macro subs option')
            else:
                subslist = self.subslist
            passtext = Lex.subs_1(passtext,subslist)
            if passtext is None: passtext = ''
            if self.prefix == '':
                # Unescape ] characters in inline macros.
                passtext = passtext.replace('\\]',']')
            passthroughs.append(passtext)
            # Tabs guarantee the placeholders are unambiguous.
            result = (
                text[mo.start():mo.start('passtext')] +
                '\x07' + str(len(passthroughs)-1) + '\x07' +
                text[mo.end('passtext'):mo.end()]
            )
            return result
        return self.reo.sub(subs_func, text)
class CalloutMap:
    """Tracks which callout markers reference which items of the current
    callout list, and generates the callout id strings."""
    def __init__(self):
        self.comap = {}         # key = list index, value = callouts list.
        self.calloutindex = 0   # Current callout index number.
        self.listnumber = 1     # Current callout list number.
    def listclose(self):
        # Called when callout list is closed: advance to the next list and
        # reset the per-list state.
        self.listnumber += 1
        self.calloutindex = 0
        self.comap = {}
    def add(self,listindex):
        """Register the next callout against listindex and return its id."""
        self.calloutindex += 1
        # Append the coindex to a list in the comap dictionary.
        self.comap.setdefault(listindex, []).append(self.calloutindex)
        return self.calloutid(self.listnumber, self.calloutindex)
    @staticmethod
    def calloutid(listnumber,calloutindex):
        """Return the id string for a callout in the given list."""
        return 'CO%d-%d' % (listnumber,calloutindex)
    def calloutids(self,listindex):
        """Return space separated ids of callouts referring to listindex."""
        indexes = self.comap.get(listindex)
        if indexes is None:
            message.warning('no callouts refer to list item '+str(listindex))
            return ''
        return ' '.join(self.calloutid(self.listnumber, coindex)
                        for coindex in indexes)
    def validate(self,maxlistindex):
        """Warn about callouts referencing non-existent list items."""
        for listindex in self.comap.keys():
            if listindex > maxlistindex:
                message.warning('callout refers to non-existent list item '
                        + str(listindex))
#---------------------------------------------------------------------------
# Input stream Reader and output stream writer classes.
#---------------------------------------------------------------------------
# UTF-8 byte order mark, stripped from the first line of input files by
# Reader1.open() and optionally re-emitted by Writer.open().
UTF8_BOM = '\xef\xbb\xbf'
class Reader1:
    """Line oriented AsciiDoc input file reader. Processes include and
    conditional inclusion system macros. Tabs are expanded and lines are right
    trimmed."""
    # This class is not used directly, use Reader class instead.
    READ_BUFFER_MIN = 10 # Read buffer low level.
    def __init__(self):
        self.f = None # Input file object.
        self.fname = None # Input file name.
        self.next = [] # Read ahead buffer containing
                       # [filename,linenumber,linetext] lists.
        self.cursor = None # Last read() [filename,linenumber,linetext].
        self.tabsize = 8 # Tab expansion number of spaces.
        self.parent = None # Included reader's parent reader.
        self._lineno = 0 # The last line read from file object f.
        self.current_depth = 0 # Current include depth.
        self.max_depth = 5 # Initial maximum allowed include depth.
        self.bom = None # Byte order mark (BOM).
        self.infile = None # Saved document 'infile' attribute.
        self.indir = None # Saved document 'indir' attribute.
    def open(self,fname):
        """Open fname ('<stdin>' selects standard input), set the document
        infile/indir attributes and prime the read ahead buffer (stripping a
        leading UTF-8 BOM if present)."""
        self.fname = fname
        message.verbose('reading: '+fname)
        if fname == '<stdin>':
            self.f = sys.stdin
            self.infile = None
            self.indir = None
        else:
            self.f = open(fname,'rb')
            self.infile = fname
            self.indir = os.path.dirname(fname)
        document.attributes['infile'] = self.infile
        document.attributes['indir'] = self.indir
        self._lineno = 0 # The last line read from file object f.
        self.next = []
        # Prefill buffer by reading the first line and then pushing it back.
        if Reader1.read(self):
            if self.cursor[2].startswith(UTF8_BOM):
                self.cursor[2] = self.cursor[2][len(UTF8_BOM):]
                self.bom = UTF8_BOM
            self.unread(self.cursor)
            self.cursor = None
    def closefile(self):
        """Used by class methods to close nested include files."""
        self.f.close()
        self.next = []
    def close(self):
        self.closefile()
        self.__init__()
    def read(self, skip=False):
        """Read next line. Return None if EOF. Expand tabs. Strip trailing
        white space. Maintain self.next read ahead buffer. If skip=True then
        conditional exclusion is active (ifdef and ifndef macros)."""
        # Top up buffer.
        if len(self.next) <= self.READ_BUFFER_MIN:
            s = self.f.readline()
            if s:
                self._lineno = self._lineno + 1
            while s:
                if self.tabsize != 0:
                    s = s.expandtabs(self.tabsize)
                s = s.rstrip()
                self.next.append([self.fname,self._lineno,s])
                if len(self.next) > self.READ_BUFFER_MIN:
                    break
                s = self.f.readline()
                if s:
                    self._lineno = self._lineno + 1
        # Return first (oldest) buffer entry.
        if len(self.next) > 0:
            self.cursor = self.next[0]
            del self.next[0]
            result = self.cursor[2]
            # Check for include macro.
            mo = macros.match('+',r'include[1]?',result)
            if mo and not skip:
                # Don't process include macro once the maximum depth is reached.
                if self.current_depth >= self.max_depth:
                    return result
                # Perform attribute substitution on include macro file name.
                fname = subs_attrs(mo.group('target'))
                if not fname:
                    return Reader1.read(self) # Return next input line.
                if self.fname != '<stdin>':
                    fname = os.path.expandvars(os.path.expanduser(fname))
                    fname = safe_filename(fname, os.path.dirname(self.fname))
                    if not fname:
                        return Reader1.read(self) # Return next input line.
                    if mo.group('name') == 'include1':
                        if not config.dumping:
                            # Store the include file in memory for later
                            # retrieval by the {include1:} system attribute.
                            config.include1[fname] = [
                                s.rstrip() for s in open(fname)]
                            return '{include1:%s}' % fname
                        else:
                            # This is a configuration dump, just pass the macro
                            # call through.
                            return result
                # Parse include macro attributes.
                attrs = {}
                parse_attributes(mo.group('attrlist'),attrs)
                # Clone self and set as parent (self assumes the role of child).
                parent = Reader1()
                assign(parent,self)
                self.parent = parent
                # Set attributes in child.
                if 'tabsize' in attrs:
                    self.tabsize = int(validate(attrs['tabsize'],
                        'int($)>=0',
                        'illegal include macro tabsize argument'))
                else:
                    self.tabsize = config.tabsize
                if 'depth' in attrs:
                    attrs['depth'] = int(validate(attrs['depth'],
                        'int($)>=1',
                        'illegal include macro depth argument'))
                    self.max_depth = self.current_depth + attrs['depth']
                # Process included file.
                self.open(fname)
                self.current_depth = self.current_depth + 1
                result = Reader1.read(self)
        else:
            if not Reader1.eof(self):
                result = Reader1.read(self)
            else:
                result = None
        return result
    def eof(self):
        """Returns True if all lines have been read."""
        if len(self.next) == 0:
            # End of current file.
            if self.parent:
                self.closefile()
                assign(self,self.parent) # Restore parent reader.
                document.attributes['infile'] = self.infile
                document.attributes['indir'] = self.indir
                return Reader1.eof(self)
            else:
                return True
        else:
            return False
    def read_next(self):
        """Like read() but does not advance file pointer."""
        if Reader1.eof(self):
            return None
        else:
            return self.next[0][2]
    def unread(self,cursor):
        """Push the line (filename,linenumber,linetext) tuple back into the read
        buffer. Note that it's up to the caller to restore the previous
        cursor."""
        assert cursor
        self.next.insert(0,cursor)
class Reader(Reader1):
""" Wraps (well, sought of) Reader1 class and implements conditional text
inclusion."""
def __init__(self):
Reader1.__init__(self)
self.depth = 0 # if nesting depth.
self.skip = False # true if we're skipping ifdef...endif.
self.skipname = '' # Name of current endif macro target.
self.skipto = -1 # The depth at which skipping is reenabled.
def read_super(self):
result = Reader1.read(self,self.skip)
if result is None and self.skip:
raise EAsciiDoc,'missing endif::%s[]' % self.skipname
return result
def read(self):
result = self.read_super()
if result is None:
return None
while self.skip:
mo = macros.match('+',r'ifdef|ifndef|ifeval|endif',result)
if mo:
name = mo.group('name')
target = mo.group('target')
attrlist = mo.group('attrlist')
if name == 'endif':
self.depth -= 1
if self.depth < 0:
raise EAsciiDoc,'mismatched macro: %s' % result
if self.depth == self.skipto:
self.skip = False
if target and self.skipname != target:
raise EAsciiDoc,'mismatched macro: %s' % result
else:
if name in ('ifdef','ifndef'):
if not target:
raise EAsciiDoc,'missing macro target: %s' % result
if not attrlist:
self.depth += 1
elif name == 'ifeval':
if not attrlist:
raise EAsciiDoc,'missing ifeval condition: %s' % result
self.depth += 1
result = self.read_super()
if result is None:
return None
mo = macros.match('+',r'ifdef|ifndef|ifeval|endif',result)
if mo:
name = mo.group('name')
target = mo.group('target')
attrlist = mo.group('attrlist')
if name == 'endif':
self.depth = self.depth-1
else:
if not target and name in ('ifdef','ifndef'):
raise EAsciiDoc,'missing macro target: %s' % result
defined = is_attr_defined(target, document.attributes)
if name == 'ifdef':
if attrlist:
if defined: return attrlist
else:
self.skip = not defined
elif name == 'ifndef':
if attrlist:
if not defined: return attrlist
else:
self.skip = defined
elif name == 'ifeval':
if not attrlist:
raise EAsciiDoc,'missing ifeval condition: %s' % result
cond = False
attrlist = subs_attrs(attrlist)
if attrlist:
try:
cond = eval(attrlist)
except Exception,e:
raise EAsciiDoc,'error evaluating ifeval condition: %s: %s' % (result, str(e))
self.skip = not cond
if not attrlist or name == 'ifeval':
if self.skip:
self.skipto = self.depth
self.skipname = target
self.depth = self.depth+1
result = self.read()
if result:
# Expand executable block macros.
mo = macros.match('+',r'eval|sys|sys2',result)
if mo:
action = mo.group('name')
cmd = mo.group('attrlist')
s = system(action, cmd, is_macro=True)
if s is not None:
self.cursor[2] = s # So we don't re-evaluate.
result = s
if result:
# Unescape escaped system macros.
if macros.match('+',r'\\eval|\\sys|\\sys2|\\ifdef|\\ifndef|\\endif|\\include|\\include1',result):
result = result[1:]
return result
def eof(self):
return self.read_next() is None
def read_next(self):
save_cursor = self.cursor
result = self.read()
if result is not None:
self.unread(self.cursor)
self.cursor = save_cursor
return result
def read_lines(self,count=1):
"""Return tuple containing count lines."""
result = []
i = 0
while i < count and not self.eof():
result.append(self.read())
return tuple(result)
def read_ahead(self,count=1):
"""Same as read_lines() but does not advance the file pointer."""
result = []
putback = []
save_cursor = self.cursor
try:
i = 0
while i < count and not self.eof():
result.append(self.read())
putback.append(self.cursor)
i = i+1
while putback:
self.unread(putback.pop())
finally:
self.cursor = save_cursor
return tuple(result)
def skip_blank_lines(self):
reader.read_until(r'\s*\S+')
def read_until(self,terminators,same_file=False):
"""Like read() but reads lines up to (but not including) the first line
that matches the terminator regular expression, regular expression
object or list of regular expression objects. If same_file is True then
the terminating pattern must occur in the file the was being read when
the routine was called."""
if same_file:
fname = self.cursor[0]
result = []
if not isinstance(terminators,list):
if isinstance(terminators,basestring):
terminators = [re.compile(terminators)]
else:
terminators = [terminators]
while not self.eof():
save_cursor = self.cursor
s = self.read()
if not same_file or fname == self.cursor[0]:
for reo in terminators:
if reo.match(s):
self.unread(self.cursor)
self.cursor = save_cursor
return tuple(result)
result.append(s)
return tuple(result)
class Writer:
    """Writes lines to the output file, appending self.newline to each line
    and counting the number of lines written in self.lines_out."""
    def __init__(self):
        self.newline = '\r\n' # End of line terminator.
        self.f = None # Output file object.
        self.fname = None # Output file name.
        self.lines_out = 0 # Number of lines written.
        self.skip_blank_lines = False # If True don't output blank lines.
    def open(self,fname,bom=None):
        '''
        Open fname for writing ('<stdout>' selects standard output).
        bom is optional byte order mark.
        http://en.wikipedia.org/wiki/Byte-order_mark
        '''
        self.fname = fname
        if fname == '<stdout>':
            self.f = sys.stdout
        else:
            self.f = open(fname,'wb+')
        # Report this instance's own file name (previously read the global
        # 'writer' alias which is wrong for any other Writer instance).
        message.verbose('writing: '+self.fname,False)
        if bom:
            self.f.write(bom)
        self.lines_out = 0
    def close(self):
        if self.fname != '<stdout>':
            self.f.close()
    def write_line(self, line=None):
        """Write line (blank line if None) plus self.newline, honoring the
        skip_blank_lines setting; increments lines_out on output."""
        if not (self.skip_blank_lines and (not line or not line.strip())):
            self.f.write((line or '') + self.newline)
            self.lines_out = self.lines_out + 1
    def write(self,*args,**kwargs):
        """Iterates arguments, writes tuple and list arguments one line per
        element, else writes argument as single line. If no arguments writes
        blank line. If argument is None nothing is written. self.newline is
        appended to each line."""
        if 'trace' in kwargs and len(args) > 0:
            trace(kwargs['trace'],args[0])
        if len(args) == 0:
            # write_line() maintains lines_out itself; the extra increment
            # that used to follow here double-counted blank lines.
            self.write_line()
        else:
            for arg in args:
                if is_array(arg):
                    for s in arg:
                        self.write_line(s)
                elif arg is not None:
                    self.write_line(arg)
    def write_tag(self,tag,content,subs=None,d=None,**kwargs):
        """Write content enveloped by tag.
        Substitutions specified in the 'subs' list are perform on the
        'content'."""
        if subs is None:
            subs = config.subsnormal
        stag,etag = subs_tag(tag,d)
        content = Lex.subs(content,subs)
        if 'trace' in kwargs:
            trace(kwargs['trace'],[stag]+content+[etag])
        if stag:
            self.write(stag)
        if content:
            self.write(content)
        if etag:
            self.write(etag)
#---------------------------------------------------------------------------
# Configuration file processing.
#---------------------------------------------------------------------------
def _subs_specialwords(mo):
    """Special word substitution function called by
    Config.subs_specialwords().

    Returns the markup template section for the matched word rendered with
    the match's named groups as attributes, or the unescaped word if the
    match was backslash escaped.
    """
    word = mo.re.pattern # The special word.
    template = config.specialwords[word] # The corresponding markup template.
    if not template in config.sections:
        raise EAsciiDoc,'missing special word template [%s]' % template
    if mo.group()[0] == '\\':
        return mo.group()[1:] # Return escaped word.
    args = {}
    args['words'] = mo.group() # The full match string is argument 'words'.
    args.update(mo.groupdict()) # Add other named match groups to the arguments.
    # Delete groups that didn't participate in match.
    # NOTE(review): deleting while iterating items() is only safe on
    # Python 2, where items() returns a list copy.
    for k,v in args.items():
        if v is None: del args[k]
    lines = subs_attrs(config.sections[template],args)
    if len(lines) == 0:
        result = ''
    elif len(lines) == 1:
        result = lines[0]
    else:
        result = writer.newline.join(lines)
    return result
class Config:
"""Methods to process configuration files."""
# Non-template section name regexp's.
ENTRIES_SECTIONS= ('tags','miscellaneous','attributes','specialcharacters',
'specialwords','macros','replacements','quotes','titles',
r'paradef-.+',r'listdef-.+',r'blockdef-.+',r'tabledef-.+',
r'tabletags-.+',r'listtags-.+','replacements2',
r'old_tabledef-.+')
    def __init__(self):
        """Set built-in defaults; real values are loaded later from
        asciidoc(1) configuration files."""
        self.sections = OrderedDict() # Keyed by section name containing
                                      # lists of section lines.
        # Command-line options.
        self.verbose = False
        self.header_footer = True # -s, --no-header-footer option.
        # [miscellaneous] section.
        self.tabsize = 8
        self.textwidth = 70 # DEPRECATED: Old tables only.
        self.newline = '\r\n'
        self.pagewidth = None
        self.pageunits = None
        self.outfilesuffix = ''
        self.subsnormal = SUBS_NORMAL
        self.subsverbatim = SUBS_VERBATIM
        self.tags = {} # Values contain (stag,etag) tuples.
        self.specialchars = {} # Values of special character substitutions.
        self.specialwords = {} # Name is special word pattern, value is macro.
        self.replacements = OrderedDict() # Key is find pattern, value is
                                          # replace pattern.
        self.replacements2 = OrderedDict()
        self.specialsections = {} # Name is special section name pattern, value
                                  # is corresponding section name.
        self.quotes = OrderedDict() # Values contain corresponding tag name.
        self.fname = '' # Most recently loaded configuration file name.
        self.conf_attrs = {} # Attributes entries from conf files.
        self.cmd_attrs = {} # Attributes from command-line -a options.
        self.loaded = [] # Loaded conf files.
        self.include1 = {} # Holds include1::[] files for {include1:}.
        self.dumping = False # True if asciidoc -c option specified.
    def init(self, cmd):
        """
        Check Python version and locate the executable and configuration files
        directory.
        cmd is the asciidoc command or asciidoc.py path.
        Side effects: sets the module globals APP_FILE, APP_DIR and USER_DIR;
        exits the process if the environment is unusable.
        """
        if float(sys.version[:3]) < MIN_PYTHON_VERSION:
            message.stderr('FAILED: Python 2.3 or better required')
            sys.exit(1)
        if not os.path.exists(cmd):
            message.stderr('FAILED: Missing asciidoc command: %s' % cmd)
            sys.exit(1)
        global APP_FILE
        APP_FILE = os.path.realpath(cmd)
        global APP_DIR
        APP_DIR = os.path.dirname(APP_FILE)
        global USER_DIR
        USER_DIR = userdir()
        if USER_DIR is not None:
            USER_DIR = os.path.join(USER_DIR,'.asciidoc')
            # Only use ~/.asciidoc if the directory actually exists.
            if not os.path.isdir(USER_DIR):
                USER_DIR = None
    def load_file(self, fname, dir=None, include=[], exclude=[]):
        """
        Loads sections dictionary with sections from file fname.
        Existing sections are overlaid.
        The 'include' list contains the section names to be loaded.
        The 'exclude' list contains section names not to be loaded.
        Return False if no file was found in any of the locations.
        (The mutable default arguments are safe here: 'include' and 'exclude'
        are only read, never mutated.)
        """
        if dir:
            fname = os.path.join(dir, fname)
        # Silently skip missing configuration file.
        if not os.path.isfile(fname):
            return False
        # Don't load conf files twice (local and application conf files are the
        # same if the source file is in the application directory).
        if os.path.realpath(fname) in self.loaded:
            return True
        rdr = Reader()  # Reader processes system macros.
        message.linenos = False # Disable document line numbers.
        rdr.open(fname)
        message.linenos = None
        self.fname = fname
        # Section header: '[name]' where name starts with a non-digit word
        # character and continues with word characters or dashes.
        reo = re.compile(r'(?u)^\[(?P<section>[^\W\d][\w-]*)\]\s*$')
        sections = OrderedDict()
        section,contents = '',[]
        while not rdr.eof():
            s = rdr.read()
            if s and s[0] == '#': # Skip comment lines.
                continue
            if s[:2] == '\\#': # Unescape lines starting with '#'.
                s = s[1:]
            s = s.rstrip()
            found = reo.findall(s)
            if found:
                if section: # Store previous section.
                    if section in sections \
                            and self.entries_section(section):
                        if ''.join(contents):
                            # Merge entries.
                            sections[section] = sections[section] + contents
                        else:
                            # An all-blank body undefines the section.
                            del sections[section]
                    else:
                        sections[section] = contents
                section = found[0].lower()
                contents = []
            else:
                contents.append(s)
        if section and contents: # Store last section.
            if section in sections \
                    and self.entries_section(section):
                if ''.join(contents):
                    # Merge entries.
                    sections[section] = sections[section] + contents
                else:
                    del sections[section]
            else:
                sections[section] = contents
        rdr.close()
        if include:
            for s in set(sections) - set(include):
                del sections[s]
        if exclude:
            for s in set(sections) & set(exclude):
                del sections[s]
        attrs = {}
        self.load_sections(sections,attrs)
        if not include:
            # If all sections are loaded mark this file as loaded.
            self.loaded.append(os.path.realpath(fname))
        document.update_attributes(attrs) # So they are available immediately.
        return True
    def load_sections(self,sections,attrs=None):
        """
        Loads sections dictionary. Each dictionary entry contains a
        list of lines.
        Updates 'attrs' with parsed [attributes] section entries.
        """
        # Delete trailing blank lines from sections.  Entries sections are
        # stripped of all blank lines; markup templates only lose the
        # trailing run (interior blanks are significant).
        for k in sections.keys():
            for i in range(len(sections[k])-1,-1,-1):
                if not sections[k][i]:
                    del sections[k][i]
                elif not self.entries_section(k):
                    break
        # Add/overwrite new sections.
        self.sections.update(sections)
        self.parse_tags()
        # Internally [miscellaneous] section entries are just attributes.
        d = {}
        parse_entries(sections.get('miscellaneous',()), d, unquote=True,
                allow_name_only=True)
        parse_entries(sections.get('attributes',()), d, unquote=True,
                allow_name_only=True)
        update_attrs(self.conf_attrs,d)
        if attrs is not None:
            attrs.update(d)
        d = {}
        parse_entries(sections.get('titles',()),d)
        Title.load(d)
        parse_entries(sections.get('specialcharacters',()),self.specialchars,escape_delimiter=False)
        parse_entries(sections.get('quotes',()),self.quotes)
        self.parse_specialwords()
        self.parse_replacements()
        self.parse_replacements('replacements2')
        self.parse_specialsections()
        # Propagate the newly loaded sections to the block, list, table and
        # macro registries.
        paragraphs.load(sections)
        lists.load(sections)
        blocks.load(sections)
        tables_OLD.load(sections)
        tables.load(sections)
        macros.load(sections.get('macros',()))
def get_load_dirs(self):
"""
Return list of well known paths with conf files.
"""
result = []
if localapp():
# Load from folders in asciidoc executable directory.
result.append(APP_DIR)
else:
# Load from global configuration directory.
result.append(CONF_DIR)
# Load configuration files from ~/.asciidoc if it exists.
if USER_DIR is not None:
result.append(USER_DIR)
return result
def find_in_dirs(self, filename, dirs=None):
"""
Find conf files from dirs list.
Return list of found file paths.
Return empty list if not found in any of the locations.
"""
result = []
if dirs is None:
dirs = self.get_load_dirs()
for d in dirs:
f = os.path.join(d,filename)
if os.path.isfile(f):
result.append(f)
return result
def load_from_dirs(self, filename, dirs=None, include=[]):
"""
Load conf file from dirs list.
If dirs not specified try all the well known locations.
Return False if no file was sucessfully loaded.
"""
count = 0
for f in self.find_in_dirs(filename,dirs):
if self.load_file(f, include=include):
count += 1
return count != 0
def load_backend(self, dirs=None):
"""
Load the backend configuration files from dirs list.
If dirs not specified try all the well known locations.
"""
if dirs is None:
dirs = self.get_load_dirs()
for d in dirs:
conf = document.backend + '.conf'
self.load_file(conf,d)
conf = document.backend + '-' + document.doctype + '.conf'
self.load_file(conf,d)
def load_filters(self, dirs=None):
"""
Load filter configuration files from 'filters' directory in dirs list.
If dirs not specified try all the well known locations.
"""
if dirs is None:
dirs = self.get_load_dirs()
for d in dirs:
# Load filter .conf files.
filtersdir = os.path.join(d,'filters')
for dirpath,dirnames,filenames in os.walk(filtersdir):
for f in filenames:
if re.match(r'^.+\.conf$',f):
self.load_file(f,dirpath)
    def load_miscellaneous(self,d):
        """Set miscellaneous configuration entries from dictionary 'd'."""
        def set_misc(name,rule='True',intval=False):
            # Validate d[name] against 'rule' and assign it to self.<name>,
            # converting to int when 'intval' is set.
            if name in d:
                errmsg = 'illegal [miscellaneous] %s entry' % name
                if intval:
                    setattr(self, name, int(validate(d[name],rule,errmsg)))
                else:
                    setattr(self, name, validate(d[name],rule,errmsg))
        set_misc('tabsize','int($)>0',intval=True)
        set_misc('textwidth','int($)>0',intval=True) # DEPRECATED: Old tables only.
        set_misc('pagewidth','"%f" % $')
        if 'pagewidth' in d:
            self.pagewidth = float(self.pagewidth)
        set_misc('pageunits')
        set_misc('outfilesuffix')
        if 'newline' in d:
            # Convert escape sequences to their character values.
            # NOTE(review): eval() on a conf file value; conf files are
            # treated as trusted input here, but a malformed entry raises.
            self.newline = eval('"'+d['newline']+'"')
        if 'subsnormal' in d:
            self.subsnormal = parse_options(d['subsnormal'],SUBS_OPTIONS,
                    'illegal [%s] %s: %s' %
                    ('miscellaneous','subsnormal',d['subsnormal']))
        if 'subsverbatim' in d:
            self.subsverbatim = parse_options(d['subsverbatim'],SUBS_OPTIONS,
                    'illegal [%s] %s: %s' %
                    ('miscellaneous','subsverbatim',d['subsverbatim']))
    def validate(self):
        """Check the configuration for internal consistency. Called after all
        configuration files have been loaded."""
        message.linenos = False # Disable document line numbers.
        # Heuristic to validate that at least one configuration file was loaded.
        if not self.specialchars or not self.tags or not lists:
            raise EAsciiDoc,'incomplete configuration files'
        # Check special characters are only one character long.
        for k in self.specialchars.keys():
            if len(k) != 1:
                raise EAsciiDoc,'[specialcharacters] ' \
                        'must be a single character: %s' % k
        # Check all special words have a corresponding inline macro body.
        for macro in self.specialwords.values():
            if not is_name(macro):
                raise EAsciiDoc,'illegal special word name: %s' % macro
            if not macro in self.sections:
                message.warning('missing special word macro: [%s]' % macro)
        # Check all text quotes have a corresponding tag.
        # (keys()[:] copies the key list so deleting entries while
        # iterating is safe.)
        for q in self.quotes.keys()[:]:
            tag = self.quotes[q]
            if not tag:
                del self.quotes[q] # Undefine quote.
            else:
                # Strip the optional '#' prefix before looking up the tag.
                if tag[0] == '#':
                    tag = tag[1:]
                if not tag in self.tags:
                    message.warning('[quotes] %s missing tag definition: %s' % (q,tag))
        # Check all specialsections section names exist.
        # (Python 2 items() returns a list snapshot, so deletion is safe.)
        for k,v in self.specialsections.items():
            if not v:
                del self.specialsections[k]
            elif not v in self.sections:
                message.warning('missing specialsections section: [%s]' % v)
        paragraphs.validate()
        lists.validate()
        blocks.validate()
        tables_OLD.validate()
        tables.validate()
        macros.validate()
        message.linenos = None
def entries_section(self,section_name):
"""
Return True if conf file section contains entries, not a markup
template.
"""
for name in self.ENTRIES_SECTIONS:
if re.match(name,section_name):
return True
return False
    def dump(self):
        """Dump configuration to stdout in conf file format (the asciidoc -c
        option)."""
        # Header.
        hdr = ''
        hdr = hdr + '#' + writer.newline
        hdr = hdr + '# Generated by AsciiDoc %s for %s %s.%s' % \
            (VERSION,document.backend,document.doctype,writer.newline)
        t = time.asctime(time.localtime(time.time()))
        hdr = hdr + '# %s%s' % (t,writer.newline)
        hdr = hdr + '#' + writer.newline
        sys.stdout.write(hdr)
        # Dump special sections.
        # Dump only the configuration file and command-line attributes.
        # [miscellaneous] entries are dumped as part of the [attributes].
        d = {}
        d.update(self.conf_attrs)
        d.update(self.cmd_attrs)
        dump_section('attributes',d)
        Title.dump()
        dump_section('quotes',self.quotes)
        dump_section('specialcharacters',self.specialchars)
        # Invert specialwords (word pattern -> macro) into the conf file's
        # macro -> quoted word list form.
        d = {}
        for k,v in self.specialwords.items():
            if v in d:
                d[v] = '%s "%s"' % (d[v],k) # Append word list.
            else:
                d[v] = '"%s"' % k
        dump_section('specialwords',d)
        dump_section('replacements',self.replacements)
        dump_section('replacements2',self.replacements2)
        dump_section('specialsections',self.specialsections)
        # Tags are dumped in their 'stag|etag' conf file form.
        d = {}
        for k,v in self.tags.items():
            d[k] = '%s|%s' % v
        dump_section('tags',d)
        paragraphs.dump()
        lists.dump()
        blocks.dump()
        tables_OLD.dump()
        tables.dump()
        macros.dump()
        # Dump remaining sections.
        for k in self.sections.keys():
            if not self.entries_section(k):
                sys.stdout.write('[%s]%s' % (k,writer.newline))
                for line in self.sections[k]:
                    sys.stdout.write('%s%s' % (line,writer.newline))
                sys.stdout.write(writer.newline)
def subs_section(self,section,d):
"""Section attribute substitution using attributes from
document.attributes and 'd'. Lines containing undefinded
attributes are deleted."""
if section in self.sections:
return subs_attrs(self.sections[section],d)
else:
message.warning('missing section: [%s]' % section)
return ()
def parse_tags(self):
"""Parse [tags] section entries into self.tags dictionary."""
d = {}
parse_entries(self.sections.get('tags',()),d)
for k,v in d.items():
if v is None:
if k in self.tags:
del self.tags[k]
elif v == '':
self.tags[k] = (None,None)
else:
mo = re.match(r'(?P<stag>.*)\|(?P<etag>.*)',v)
if mo:
self.tags[k] = (mo.group('stag'), mo.group('etag'))
else:
raise EAsciiDoc,'[tag] %s value malformed' % k
def tag(self, name, d=None):
"""Returns (starttag,endtag) tuple named name from configuration file
[tags] section. Raise error if not found. If a dictionary 'd' is
passed then merge with document attributes and perform attribute
substitution on tags."""
if not name in self.tags:
raise EAsciiDoc, 'missing tag: %s' % name
stag,etag = self.tags[name]
if d is not None:
# TODO: Should we warn if substitution drops a tag?
if stag:
stag = subs_attrs(stag,d)
if etag:
etag = subs_attrs(etag,d)
if stag is None: stag = ''
if etag is None: etag = ''
return (stag,etag)
def parse_specialsections(self):
"""Parse specialsections section to self.specialsections dictionary."""
# TODO: This is virtually the same as parse_replacements() and should
# be factored to single routine.
d = {}
parse_entries(self.sections.get('specialsections',()),d,unquote=True)
for pat,sectname in d.items():
pat = strip_quotes(pat)
if not is_re(pat):
raise EAsciiDoc,'[specialsections] entry ' \
'is not a valid regular expression: %s' % pat
if sectname is None:
if pat in self.specialsections:
del self.specialsections[pat]
else:
self.specialsections[pat] = sectname
def parse_replacements(self,sect='replacements'):
"""Parse replacements section into self.replacements dictionary."""
d = OrderedDict()
parse_entries(self.sections.get(sect,()), d, unquote=True)
for pat,rep in d.items():
if not self.set_replacement(pat, rep, getattr(self,sect)):
raise EAsciiDoc,'[%s] entry in %s is not a valid' \
' regular expression: %s' % (sect,self.fname,pat)
@staticmethod
def set_replacement(pat, rep, replacements):
"""Add pattern and replacement to replacements dictionary."""
pat = strip_quotes(pat)
if not is_re(pat):
return False
if rep is None:
if pat in replacements:
del replacements[pat]
else:
replacements[pat] = strip_quotes(rep)
return True
def subs_replacements(self,s,sect='replacements'):
"""Substitute patterns from self.replacements in 's'."""
result = s
for pat,rep in getattr(self,sect).items():
result = re.sub(pat, rep, result)
return result
    def parse_specialwords(self):
        """Parse special words section into self.specialwords dictionary
        (word pattern -> macro name)."""
        # Matches either a double-quoted phrase or a run of non-space,
        # non-quote characters.
        reo = re.compile(r'(?:\s|^)(".+?"|[^"\s]+)(?=\s|$)')
        for line in self.sections.get('specialwords',()):
            e = parse_entry(line)
            if not e:
                raise EAsciiDoc,'[specialwords] entry in %s is malformed: %s' \
                    % (self.fname,line)
            name,wordlist = e
            if not is_name(name):
                raise EAsciiDoc,'[specialwords] name in %s is illegal: %s' \
                    % (self.fname,name)
            if wordlist is None:
                # Undefine all words associated with 'name'.
                # (Python 2 items() returns a list snapshot, so deleting
                # entries while iterating is safe.)
                for k,v in self.specialwords.items():
                    if v == name:
                        del self.specialwords[k]
            else:
                words = reo.findall(wordlist)
                for word in words:
                    word = strip_quotes(word)
                    if not is_re(word):
                        raise EAsciiDoc,'[specialwords] entry in %s ' \
                            'is not a valid regular expression: %s' \
                            % (self.fname,word)
                    self.specialwords[word] = name
def subs_specialchars(self,s):
"""Perform special character substitution on string 's'."""
"""It may seem like a good idea to escape special characters with a '\'
character, the reason we don't is because the escape character itself
then has to be escaped and this makes including code listings
problematic. Use the predefined {amp},{lt},{gt} attributes instead."""
result = ''
for ch in s:
result = result + self.specialchars.get(ch,ch)
return result
def subs_specialchars_reverse(self,s):
"""Perform reverse special character substitution on string 's'."""
result = s
for k,v in self.specialchars.items():
result = result.replace(v, k)
return result
def subs_specialwords(self,s):
"""Search for word patterns from self.specialwords in 's' and
substitute using corresponding macro."""
result = s
for word in self.specialwords.keys():
result = re.sub(word, _subs_specialwords, result)
return result
def expand_templates(self,entries):
"""Expand any template::[] macros in a list of section entries."""
result = []
for line in entries:
mo = macros.match('+',r'template',line)
if mo:
s = mo.group('attrlist')
if s in self.sections:
result += self.expand_templates(self.sections[s])
else:
message.warning('missing section: [%s]' % s)
result.append(line)
else:
result.append(line)
return result
def expand_all_templates(self):
for k,v in self.sections.items():
self.sections[k] = self.expand_templates(v)
    def section2tags(self, section, d={}, skipstart=False, skipend=False):
        """Perform attribute substitution on 'section' using document
        attributes plus 'd' attributes. Return tuple (stag,etag) containing
        pre and post | placeholder tags. 'skipstart' and 'skipend' are
        used to suppress substitution.
        NOTE: 'd' is a mutable default argument; d['title'] is temporarily
        replaced with a placeholder during substitution but restored before
        returning."""
        assert section is not None
        if section in self.sections:
            body = self.sections[section]
        else:
            message.warning('missing section: [%s]' % section)
            body = ()
        # Split macro body into start and end tag lists.
        stag = []
        etag = []
        in_stag = True
        for s in body:
            if in_stag:
                mo = re.match(r'(?P<stag>.*)\|(?P<etag>.*)',s)
                if mo:
                    # The '|' separator line: text before it ends the start
                    # tag, text after it begins the end tag.
                    if mo.group('stag'):
                        stag.append(mo.group('stag'))
                    if mo.group('etag'):
                        etag.append(mo.group('etag'))
                    in_stag = False
                else:
                    stag.append(s)
            else:
                etag.append(s)
        # Do attribute substitution last so {brkbar} can be used to escape |.
        # But don't do attribute substitution on title -- we've already done it.
        title = d.get('title')
        if title:
            d['title'] = chr(0) # Replace with unused character.
        if not skipstart:
            stag = subs_attrs(stag, d)
        if not skipend:
            etag = subs_attrs(etag, d)
        # Put the {title} back.
        if title:
            stag = map(lambda x: x.replace(chr(0), title), stag)
            etag = map(lambda x: x.replace(chr(0), title), etag)
            d['title'] = title
        return (stag,etag)
#---------------------------------------------------------------------------
# Deprecated old table classes follow.
# Naming convention is an _OLD name suffix.
# These will be removed from future versions of AsciiDoc
def join_lines_OLD(lines):
    """Return a copy of 'lines' in which every line ending with the
    backslash continuation character is joined (backslash dropped) to the
    following line.  A trailing unterminated continuation is emitted
    as-is."""
    joined = []
    buf = ''
    joining = False
    for text in lines:
        if text and text[-1] == '\\':
            # Accumulate the continued text without its trailing backslash.
            buf += text[:-1]
            joining = True
            continue
        if joining:
            joined.append(buf + text)
            buf = ''
            joining = False
        else:
            joined.append(text)
    if joining:
        # Input ended mid-continuation; flush what we have.
        joined.append(buf)
    return joined
class Column_OLD:
    """A single output column of a deprecated old-style table."""
    def __init__(self):
        # Horizontal alignment: 'left', 'right' or 'center'.
        self.colalign = None
        # Column width as measured from the table ruler line.
        self.rulerwidth = None
        # Final output width in page units.
        self.colwidth = None
class Table_OLD(AbstractBlock):
    """DEPRECATED old-style table block.  A table consists of a ruler line
    (defining column stops, alignments and widths), followed by optional
    head, body and foot row groups separated by underline lines.  See
    Tables_OLD for the definition registry."""
    COL_STOP = r"(`|'|\.)" # RE.
    ALIGNMENTS = {'`':'left', "'":'right', '.':'center'}
    FORMATS = ('fixed','csv','dsv')
    def __init__(self):
        AbstractBlock.__init__(self)
        self.CONF_ENTRIES += ('template','fillchar','format','colspec',
                              'headrow','footrow','bodyrow','headdata',
                              'footdata', 'bodydata')
        # Configuration parameters.
        self.fillchar=None
        self.format=None    # 'fixed','csv','dsv'
        self.colspec=None
        self.headrow=None
        self.footrow=None
        self.bodyrow=None
        self.headdata=None
        self.footdata=None
        self.bodydata=None
        # Calculated parameters.
        self.underline=None     # RE matching current table underline.
        self.isnumeric=False    # True if numeric ruler.
        self.tablewidth=None    # Optional table width scale factor.
        self.columns=[]         # List of Columns.
        # Other.
        self.check_msg=''       # Message set by previous self.validate() call.
    def load(self,name,entries):
        AbstractBlock.load(self,name,entries)
        # NOTE(review): the string below is a no-op statement, not a
        # docstring -- it appears after the first statement of the method.
        """Update table definition from section entries in 'entries'."""
        for k,v in entries.items():
            if k == 'fillchar':
                if v and len(v) == 1:
                    self.fillchar = v
                else:
                    raise EAsciiDoc,'malformed table fillchar: %s' % v
            elif k == 'format':
                if v in Table_OLD.FORMATS:
                    self.format = v
                else:
                    raise EAsciiDoc,'illegal table format: %s' % v
            elif k == 'colspec':
                self.colspec = v
            elif k == 'headrow':
                self.headrow = v
            elif k == 'footrow':
                self.footrow = v
            elif k == 'bodyrow':
                self.bodyrow = v
            elif k == 'headdata':
                self.headdata = v
            elif k == 'footdata':
                self.footdata = v
            elif k == 'bodydata':
                self.bodydata = v
    def dump(self):
        """Write this table definition to stdout in conf file form."""
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('fillchar='+self.fillchar)
        write('format='+self.format)
        if self.colspec:
            write('colspec='+self.colspec)
        if self.headrow:
            write('headrow='+self.headrow)
        if self.footrow:
            write('footrow='+self.footrow)
        write('bodyrow='+self.bodyrow)
        if self.headdata:
            write('headdata='+self.headdata)
        if self.footdata:
            write('footdata='+self.footdata)
        write('bodydata='+self.bodydata)
        write('')
    def validate(self):
        AbstractBlock.validate(self)
        # NOTE(review): like load(), the string below is a no-op statement,
        # not a docstring.
        """Check table definition and set self.check_msg if invalid else set
        self.check_msg to blank string."""
        # Check global table parameters.
        if config.textwidth is None:
            self.check_msg = 'missing [miscellaneous] textwidth entry'
        elif config.pagewidth is None:
            self.check_msg = 'missing [miscellaneous] pagewidth entry'
        elif config.pageunits is None:
            self.check_msg = 'missing [miscellaneous] pageunits entry'
        elif self.headrow is None:
            self.check_msg = 'missing headrow entry'
        elif self.footrow is None:
            self.check_msg = 'missing footrow entry'
        elif self.bodyrow is None:
            self.check_msg = 'missing bodyrow entry'
        elif self.headdata is None:
            self.check_msg = 'missing headdata entry'
        elif self.footdata is None:
            self.check_msg = 'missing footdata entry'
        elif self.bodydata is None:
            self.check_msg = 'missing bodydata entry'
        else:
            # No errors.
            self.check_msg = ''
    def isnext(self):
        return AbstractBlock.isnext(self)
    def parse_ruler(self,ruler):
        """Parse ruler calculating underline and ruler column widths."""
        fc = re.escape(self.fillchar)
        # Strip and save optional tablewidth from end of ruler.
        mo = re.match(r'^(.*'+fc+r'+)([\d\.]+)$',ruler)
        if mo:
            ruler = mo.group(1)
            self.tablewidth = float(mo.group(2))
            self.attributes['tablewidth'] = str(float(self.tablewidth))
        else:
            self.tablewidth = None
            self.attributes['tablewidth'] = '100.0'
        # Guess whether column widths are specified numerically or not.
        # (ruler[0] is a column stop character, so ruler[1] is the first
        # candidate fill character.)
        if ruler[1] != self.fillchar:
            # If the first column does not start with a fillchar then numeric.
            self.isnumeric = True
        elif ruler[1:] == self.fillchar*len(ruler[1:]):
            # The case of one column followed by fillchars is numeric.
            self.isnumeric = True
        else:
            self.isnumeric = False
        # Underlines must be 3 or more fillchars.
        self.underline = r'^' + fc + r'{3,}$'
        splits = re.split(self.COL_STOP,ruler)[1:]
        # Build self.columns.  'splits' alternates column stop characters
        # and the text up to the next stop.
        for i in range(0,len(splits),2):
            c = Column_OLD()
            c.colalign = self.ALIGNMENTS[splits[i]]
            s = splits[i+1]
            if self.isnumeric:
                # Strip trailing fillchars.
                s = re.sub(fc+r'+$','',s)
                if s == '':
                    c.rulerwidth = None
                else:
                    c.rulerwidth = int(validate(s,'int($)>0',
                        'malformed ruler: bad width'))
            else: # Calculate column width from inter-fillchar intervals.
                if not re.match(r'^'+fc+r'+$',s):
                    raise EAsciiDoc,'malformed ruler: illegal fillchars'
                c.rulerwidth = len(s)+1
            self.columns.append(c)
        # Fill in unspecified ruler widths.
        if self.isnumeric:
            if self.columns[0].rulerwidth is None:
                prevwidth = 1
            for c in self.columns:
                if c.rulerwidth is None:
                    c.rulerwidth = prevwidth
                prevwidth = c.rulerwidth
    def build_colspecs(self):
        """Generate colwidths and colspecs. This can only be done after the
        table arguments have been parsed since we use the table format."""
        self.attributes['cols'] = len(self.columns)
        # Calculate total ruler width.
        totalwidth = 0
        for c in self.columns:
            totalwidth = totalwidth + c.rulerwidth
        if totalwidth <= 0:
            raise EAsciiDoc,'zero width table'
        # Calculate marked up colwidths from rulerwidths.
        for c in self.columns:
            # Convert ruler width to output page width.
            width = float(c.rulerwidth)
            if self.format == 'fixed':
                if self.tablewidth is None:
                    # Size proportional to ruler width.
                    colfraction = width/config.textwidth
                else:
                    # Size proportional to page width.
                    colfraction = width/totalwidth
            else:
                # Size proportional to page width.
                colfraction = width/totalwidth
            c.colwidth = colfraction * config.pagewidth # To page units.
            if self.tablewidth is not None:
                c.colwidth = c.colwidth * self.tablewidth # Scale factor.
                if self.tablewidth > 1:
                    c.colwidth = c.colwidth/100 # tablewidth is in percent.
        # Build colspecs.
        if self.colspec:
            cols = []
            i = 0
            for c in self.columns:
                i += 1
                self.attributes['colalign'] = c.colalign
                self.attributes['colwidth'] = str(int(c.colwidth))
                # NOTE(review): 'i' was already incremented, so the first
                # column gets colnumber '2' here while subs_row() numbers
                # from '1' -- looks like an off-by-one; confirm before
                # changing (backend templates may depend on it).
                self.attributes['colnumber'] = str(i + 1)
                s = subs_attrs(self.colspec,self.attributes)
                if not s:
                    message.warning('colspec dropped: contains undefined attribute')
                else:
                    cols.append(s)
            self.attributes['colspecs'] = writer.newline.join(cols)
    def split_rows(self,rows):
        """Return a two item tuple containing a list of lines up to but not
        including the next underline (continued lines are joined ) and the
        tuple of all lines after the underline."""
        reo = re.compile(self.underline)
        i = 0
        # NOTE(review): if 'rows' contains no underline this loop raises
        # IndexError before the 'i >= len(rows)' check below can fire.
        while not reo.match(rows[i]):
            i = i+1
        if i == 0:
            raise EAsciiDoc,'missing table rows'
        if i >= len(rows):
            raise EAsciiDoc,'closing [%s] underline expected' % self.name
        return (join_lines_OLD(rows[:i]), rows[i+1:])
    def parse_rows(self, rows, rtag, dtag):
        """Parse rows list using the row and data tags. Returns a substituted
        list of output lines."""
        result = []
        # Source rows are parsed as single block, rather than line by line, to
        # allow the CSV reader to handle multi-line rows.
        if self.format == 'fixed':
            rows = self.parse_fixed(rows)
        elif self.format == 'csv':
            rows = self.parse_csv(rows)
        elif self.format == 'dsv':
            rows = self.parse_dsv(rows)
        else:
            # NOTE(review): 'assert True' can never fire -- presumably
            # 'assert False' was intended to trap an illegal format here.
            assert True,'illegal table format'
        # Substitute and indent all data in all rows.
        stag,etag = subs_tag(rtag,self.attributes)
        for row in rows:
            result.append('  '+stag)
            for data in self.subs_row(row,dtag):
                result.append('    '+data)
            result.append('  '+etag)
        return result
    def subs_row(self, data, dtag):
        """Substitute the list of source row data elements using the data tag.
        Returns a substituted list of output table data items."""
        result = []
        if len(data) < len(self.columns):
            message.warning('fewer row data items then table columns')
        if len(data) > len(self.columns):
            message.warning('more row data items than table columns')
        for i in range(len(self.columns)):
            if i > len(data) - 1:
                d = ''  # Fill missing column data with blanks.
            else:
                d = data[i]
            c = self.columns[i]
            self.attributes['colalign'] = c.colalign
            self.attributes['colwidth'] = str(int(c.colwidth))
            self.attributes['colnumber'] = str(i + 1)
            stag,etag = subs_tag(dtag,self.attributes)
            # Insert AsciiDoc line break (' +') where row data has newlines
            # ('\n'). This is really only useful when the table format is csv
            # and the output markup is HTML. It's also a bit dubious in that it
            # assumes the user has not modified the shipped line break pattern.
            subs = self.get_subs()[0]
            if 'replacements' in subs:
                # Insert line breaks in cell data.
                d = re.sub(r'(?m)\n',r' +\n',d)
                d = d.split('\n')   # So writer.newline is written.
            else:
                d = [d]
            result = result + [stag] + Lex.subs(d,subs) + [etag]
        return result
    def parse_fixed(self,rows):
        """Parse the list of source table rows. Each row item in the returned
        list contains a list of cell data elements."""
        result = []
        for row in rows:
            data = []
            start = 0
            # build an encoded representation
            row = char_decode(row)
            for c in self.columns:
                end = start + c.rulerwidth
                if c is self.columns[-1]:
                    # Text in last column can continue forever.
                    # Use the encoded string to slice, but convert back
                    # to plain string before further processing
                    data.append(char_encode(row[start:]).strip())
                else:
                    data.append(char_encode(row[start:end]).strip())
                start = end
            result.append(data)
        return result
    def parse_csv(self,rows):
        """Parse the list of source table rows. Each row item in the returned
        list contains a list of cell data elements."""
        import StringIO
        import csv
        result = []
        rdr = csv.reader(StringIO.StringIO('\r\n'.join(rows)),
            skipinitialspace=True)
        try:
            for row in rdr:
                result.append(row)
        except Exception:
            raise EAsciiDoc,'csv parse error: %s' % row
        return result
    def parse_dsv(self,rows):
        """Parse the list of source table rows. Each row item in the returned
        list contains a list of cell data elements."""
        separator = self.attributes.get('separator',':')
        # NOTE(review): eval() interprets escape sequences in the separator
        # and row text taken from the document -- document text is treated
        # as trusted input here.
        separator = eval('"'+separator+'"')
        if len(separator) != 1:
            raise EAsciiDoc,'malformed dsv separator: %s' % separator
        # TODO If separator is preceeded by an odd number of backslashes then
        # it is escaped and should not delimit.
        result = []
        for row in rows:
            # Skip blank lines
            if row == '': continue
            # Unescape escaped characters.
            row = eval('"'+row.replace('"','\\"')+'"')
            data = row.split(separator)
            data = [s.strip() for s in data]
            result.append(data)
        return result
    def translate(self):
        """Parse the table at the current reader position and write the
        marked-up result via 'writer'."""
        message.deprecated('old tables syntax')
        AbstractBlock.translate(self)
        # Reset instance specific properties.
        self.underline = None
        self.columns = []
        attrs = {}
        BlockTitle.consume(attrs)
        # Add relevant globals to table substitutions.
        attrs['pagewidth'] = str(config.pagewidth)
        attrs['pageunits'] = config.pageunits
        # Mix in document attribute list.
        AttributeList.consume(attrs)
        # Validate overridable attributes.
        for k,v in attrs.items():
            if k == 'format':
                if v not in self.FORMATS:
                    raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
                self.format = v
            elif k == 'tablewidth':
                try:
                    self.tablewidth = float(attrs['tablewidth'])
                except Exception:
                    raise EAsciiDoc, 'illegal [%s] %s: %s' % (self.name,k,v)
        self.merge_attributes(attrs)
        # Parse table ruler.
        ruler = reader.read()
        assert re.match(self.delimiter,ruler)
        self.parse_ruler(ruler)
        # Read the entire table.
        table = []
        while True:
            line = reader.read_next()
            # Table terminated by underline followed by a blank line or EOF.
            if len(table) > 0 and re.match(self.underline,table[-1]):
                if line in ('',None):
                    break;
            if line is None:
                raise EAsciiDoc,'closing [%s] underline expected' % self.name
            table.append(reader.read())
        # EXPERIMENTAL: The number of lines in the table, requested by Benjamin Klum.
        self.attributes['rows'] = str(len(table))
        if self.check_msg:  # Skip if table definition was marked invalid.
            message.warning('skipping %s table: %s' % (self.name,self.check_msg))
            return
        # Generate colwidths and colspecs.
        self.build_colspecs()
        # Generate headrows, footrows, bodyrows.
        # Headrow, footrow and bodyrow data replaces same named attributes in
        # the table markup template. In order to ensure this data does not get
        # a second attribute substitution (which would interfere with any
        # already substituted inline passthroughs) unique placeholders are used
        # (the tab character does not appear elsewhere since it is expanded on
        # input) which are replaced after template attribute substitution.
        # (NOTE(review): the placeholders actually use '\x07' (BEL), not tab.)
        headrows = footrows = []
        bodyrows,table = self.split_rows(table)
        if table:
            headrows = bodyrows
            bodyrows,table = self.split_rows(table)
            if table:
                footrows,table = self.split_rows(table)
        if headrows:
            headrows = self.parse_rows(headrows, self.headrow, self.headdata)
            headrows = writer.newline.join(headrows)
            self.attributes['headrows'] = '\x07headrows\x07'
        if footrows:
            footrows = self.parse_rows(footrows, self.footrow, self.footdata)
            footrows = writer.newline.join(footrows)
            self.attributes['footrows'] = '\x07footrows\x07'
        bodyrows = self.parse_rows(bodyrows, self.bodyrow, self.bodydata)
        bodyrows = writer.newline.join(bodyrows)
        self.attributes['bodyrows'] = '\x07bodyrows\x07'
        table = subs_attrs(config.sections[self.template],self.attributes)
        table = writer.newline.join(table)
        # Before we finish replace the table head, foot and body place holders
        # with the real data.
        if headrows:
            table = table.replace('\x07headrows\x07', headrows, 1)
        if footrows:
            table = table.replace('\x07footrows\x07', footrows, 1)
        table = table.replace('\x07bodyrows\x07', bodyrows, 1)
        writer.write(table,trace='table')
class Tables_OLD(AbstractBlocks):
    """List of deprecated old-style table definitions."""
    BLOCK_TYPE = Table_OLD
    PREFIX = 'old_tabledef-'
    def __init__(self):
        AbstractBlocks.__init__(self)
    def load(self,sections):
        """Load old table definitions from conf file 'sections'."""
        AbstractBlocks.load(self,sections)
    def validate(self):
        """Validate loaded table definitions and propagate defaults from the
        'old_tabledef-default' definition to the others.
        Does not call AbstractBlocks.validate()."""
        # Check we have a default table definition.
        for b in self.blocks:
            if b.name == 'old_tabledef-default':
                default = b
                break
        else:
            # Fixed: section names are lowercased on load, so report
            # 'old_tabledef-default' rather than 'OLD_tabledef-default'.
            raise EAsciiDoc('missing section: [old_tabledef-default]')
        # Set default table defaults.
        # Fixed: this previously assigned 'fixed' to default.subs instead of
        # default.format, so a missing format entry was never defaulted.
        if default.format is None: default.format = 'fixed'
        # Propagate defaults to unspecified table parameters.
        for b in self.blocks:
            if b is not default:
                if b.fillchar is None: b.fillchar = default.fillchar
                if b.format is None: b.format = default.format
                if b.template is None: b.template = default.template
                if b.colspec is None: b.colspec = default.colspec
                if b.headrow is None: b.headrow = default.headrow
                if b.footrow is None: b.footrow = default.footrow
                if b.bodyrow is None: b.bodyrow = default.bodyrow
                if b.headdata is None: b.headdata = default.headdata
                if b.footdata is None: b.footdata = default.footdata
                if b.bodydata is None: b.bodydata = default.bodydata
        # Check all tables have valid fill character.
        for b in self.blocks:
            if not b.fillchar or len(b.fillchar) != 1:
                raise EAsciiDoc('[%s] missing or illegal fillchar' % b.name)
        # Build combined tables delimiter patterns and assign defaults.
        delimiters = []
        for b in self.blocks:
            # Ruler is:
            #   (ColStop,(ColWidth,FillChar+)?)+, FillChar+, TableWidth?
            b.delimiter = r'^(' + Table_OLD.COL_STOP \
                + r'(\d*|' + re.escape(b.fillchar) + r'*)' \
                + r')+' \
                + re.escape(b.fillchar) + r'+' \
                + r'([\d\.]*)$'   # Raw string: avoids the invalid '\d' escape.
            delimiters.append(b.delimiter)
            # Head/foot tags fall back to the body tags.
            if not b.headrow:
                b.headrow = b.bodyrow
            if not b.footrow:
                b.footrow = b.bodyrow
            if not b.headdata:
                b.headdata = b.bodydata
            if not b.footdata:
                b.footdata = b.bodydata
        self.delimiters = re_join(delimiters)
        # Check table definitions are valid.
        for b in self.blocks:
            b.validate()
            if config.verbose:
                if b.check_msg:
                    message.warning('[%s] table definition: %s' % (b.name,b.check_msg))
# End of deprecated old table classes.
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Filter commands.
#---------------------------------------------------------------------------
import shutil, zipfile
def die(msg):
    """Write 'msg' to stderr and terminate the process with exit status 1."""
    message.stderr(msg)
    sys.exit(1)
def unzip(zip_file, destdir):
    """
    Unzip Zip file to destination directory.
    Throws exception if error occurs.
    """
    zipo = zipfile.ZipFile(zip_file, 'r')
    try:
        for zi in zipo.infolist():
            outfile = zi.filename
            # Entries ending with '/' are directories; they are created
            # implicitly by the os.makedirs() call below, so skip them.
            if not outfile.endswith('/'):
                d, outfile = os.path.split(outfile)
                # NOTE(review): the member path is joined to destdir without
                # sanitization -- a crafted zip with '..' components could
                # write outside destdir. Assumes trusted filter archives;
                # verify before relaxing that assumption.
                directory = os.path.normpath(os.path.join(destdir, d))
                if not os.path.isdir(directory):
                    os.makedirs(directory)
                outfile = os.path.join(directory, outfile)
                # The upper 16 bits of external_attr carry the Unix mode bits.
                perms = (zi.external_attr >> 16) & 0777
                message.verbose('extracting: %s' % outfile)
                # os.open() is used so the file is created with the archived
                # permission bits in one step.
                fh = os.open(outfile, os.O_CREAT | os.O_WRONLY, perms)
                try:
                    os.write(fh, zipo.read(zi.filename))
                finally:
                    os.close(fh)
    finally:
        zipo.close()
class Filter:
    """
    --filter option commands.

    Implements the install/remove/list sub-commands of asciidoc's
    --filter command-line option. All methods are static; errors are
    reported via die() which exits the process.
    """
    @staticmethod
    def get_filters_dir():
        """
        Return path of .asciidoc/filters in user's home directory or None if
        user home not defined.
        """
        result = userdir()
        if result:
            result = os.path.join(result,'.asciidoc','filters')
        return result
    @staticmethod
    def install(args):
        """
        Install filter Zip file.
        args[0] is filter zip file path.
        args[1] is optional destination filters directory.
        """
        if len(args) not in (1,2):
            die('invalid number of arguments: --filter install %s'
                % ' '.join(args))
        zip_file = args[0]
        if not os.path.isfile(zip_file):
            die('file not found: %s' % zip_file)
        # The filter name is the leading run of word characters in the zip
        # file name, e.g. 'foo-1.0.zip' installs filter 'foo'.
        reo = re.match(r'^\w+',os.path.split(zip_file)[1])
        if not reo:
            die('filter file name does not start with legal filter name: %s'
                % zip_file)
        filter_name = reo.group()
        if len(args) == 2:
            filters_dir = args[1]
            if not os.path.isdir(filters_dir):
                die('directory not found: %s' % filters_dir)
        else:
            filters_dir = Filter.get_filters_dir()
            if not filters_dir:
                die('user home directory is not defined')
        filter_dir = os.path.join(filters_dir, filter_name)
        if os.path.exists(filter_dir):
            die('filter is already installed: %s' % filter_dir)
        try:
            os.makedirs(filter_dir)
        except Exception,e:
            die('failed to create filter directory: %s' % str(e))
        try:
            unzip(zip_file, filter_dir)
        except Exception,e:
            die('failed to extract filter: %s' % str(e))
    @staticmethod
    def remove(args):
        """
        Delete filter from .asciidoc/filters/ in user's home directory.
        args[0] is filter name.
        args[1] is optional filters directory.
        """
        if len(args) not in (1,2):
            die('invalid number of arguments: --filter remove %s'
                % ' '.join(args))
        filter_name = args[0]
        if not re.match(r'^\w+$',filter_name):
            die('illegal filter name: %s' % filter_name)
        if len(args) == 2:
            d = args[1]
            if not os.path.isdir(d):
                die('directory not found: %s' % d)
        else:
            d = Filter.get_filters_dir()
            if not d:
                die('user directory is not defined')
        filter_dir = os.path.join(d, filter_name)
        if not os.path.isdir(filter_dir):
            die('cannot find filter: %s' % filter_dir)
        try:
            message.verbose('removing: %s' % filter_dir)
            shutil.rmtree(filter_dir)
        except Exception,e:
            die('failed to delete filter: %s' % str(e))
    @staticmethod
    def list():
        """
        List all filter directories (global and local).
        """
        # os.walk(d).next()[1] is the list of immediate sub-directories.
        for d in [os.path.join(d,'filters') for d in config.get_load_dirs()]:
            if os.path.isdir(d):
                for f in os.walk(d).next()[1]:
                    message.stdout(os.path.join(d,f))
#---------------------------------------------------------------------------
# Application code.
#---------------------------------------------------------------------------
# Constants
# ---------
APP_FILE = None             # This file's full path.
APP_DIR = None              # This file's directory.
USER_DIR = None             # ~/.asciidoc
# Global configuration files directory (set by Makefile build target).
CONF_DIR = '/etc/asciidoc'
HELP_FILE = 'help.conf'     # Default (English) help file.

# Globals
# -------
# Singleton processing objects shared by the whole module.
document = Document()       # The document being processed.
config = Config()           # Configuration file reader.
reader = Reader()           # Input stream line reader.
writer = Writer()           # Output stream line writer.
message = Message()         # Message functions.
paragraphs = Paragraphs()   # Paragraph definitions.
lists = Lists()             # List definitions.
blocks = DelimitedBlocks()  # DelimitedBlock definitions.
tables_OLD = Tables_OLD()   # Table_OLD definitions.
tables = Tables()           # Table definitions.
macros = Macros()           # Macro definitions.
calloutmap = CalloutMap()   # Coordinates callouts and callout list.
trace = Trace()             # Implements trace attribute processing.

### Used by asciidocapi.py ###
# List of message strings written to stderr.
messages = message.messages
def asciidoc(backend, doctype, confiles, infile, outfile, options):
    """Convert AsciiDoc document to DocBook document of type doctype.

    backend, doctype: output backend and document type (may be None).
    confiles: extra configuration files from -f command-line options.
    infile/outfile: file names, or '<stdin>'/'<stdout>' markers.
    options: processing flags ('-c', '-e', '-s', '-v').
    Raises SystemExit (after cleanup) on any processing error.
    """
    def load_conffiles(include=[], exclude=[]):
        # Load conf files specified on the command-line and by the conf-files attribute.
        files = document.attributes.get('conf-files','')
        files = [f.strip() for f in files.split('|') if f.strip()]
        files += confiles
        if files:
            for f in files:
                if os.path.isfile(f):
                    config.load_file(f, include=include, exclude=exclude)
                else:
                    raise EAsciiDoc,'configuration file %s missing' % f
    try:
        if doctype not in (None,'article','manpage','book'):
            raise EAsciiDoc,'illegal document type'
        # Set processing options.
        for o in options:
            if o == '-c': config.dumping = True
            if o == '-s': config.header_footer = False
            if o == '-v': config.verbose = True
        document.update_attributes()
        if '-e' not in options:
            # Load asciidoc.conf files in two passes: the first for attributes
            # the second for everything. This is so that locally set
            # attributes are available in the global asciidoc.conf.
            if not config.load_from_dirs('asciidoc.conf',include=['attributes']):
                raise EAsciiDoc,'configuration file asciidoc.conf missing'
            load_conffiles(include=['attributes'])
            config.load_from_dirs('asciidoc.conf')
            if infile != '<stdin>':
                indir = os.path.dirname(infile)
                config.load_file('asciidoc.conf', indir,
                    include=['attributes','titles','specialchars'])
        else:
            # -e (no-conf): only the explicitly specified conf files.
            load_conffiles(include=['attributes','titles','specialchars'])
        document.update_attributes()
        # Check the infile exists.
        if infile != '<stdin>':
            if not os.path.isfile(infile):
                raise EAsciiDoc,'input file %s missing' % infile
        document.infile = infile
        AttributeList.initialize()
        # Open input file and parse document header.
        reader.tabsize = config.tabsize
        reader.open(infile)
        has_header = document.parse_header(doctype,backend)
        # doctype is now finalized.
        document.attributes['doctype-'+document.doctype] = ''
        # Load backend configuration files.
        if '-e' not in options:
            f = document.backend + '.conf'
            if not config.find_in_dirs(f):
                message.warning('missing backend conf file: %s' % f, linenos=False)
            config.load_backend()
        # backend is now known.
        document.attributes['backend-'+document.backend] = ''
        document.attributes[document.backend+'-'+document.doctype] = ''
        if '-e' not in options:
            # Load filters and language file.
            config.load_filters()
            document.load_lang()
            if infile != '<stdin>':
                # Load local conf files (files in the source file directory).
                config.load_file('asciidoc.conf', indir)
                config.load_backend([indir])
                config.load_filters([indir])
            # Load document specific configuration files.
            f = os.path.splitext(infile)[0]
            config.load_file(f + '.conf')
            config.load_file(f + '-' + document.backend + '.conf')
        load_conffiles()
        # Build outfile name.
        if outfile is None:
            outfile = os.path.splitext(infile)[0] + '.' + document.backend
            if config.outfilesuffix:
                # Change file extension.
                outfile = os.path.splitext(outfile)[0] + config.outfilesuffix
        document.outfile = outfile
        # Document header attributes override conf file attributes.
        document.attributes.update(AttributeEntry.attributes)
        document.update_attributes()
        # Configuration is fully loaded so can expand templates.
        config.expand_all_templates()
        # Check configuration for consistency.
        config.validate()
        paragraphs.initialize()
        lists.initialize()
        if config.dumping:
            config.dump()
        else:
            writer.newline = config.newline
            try:
                writer.open(outfile, reader.bom)
                try:
                    document.translate(has_header) # Generate the output.
                finally:
                    writer.close()
            finally:
                reader.closefile()
    except KeyboardInterrupt:
        raise
    except Exception,e:
        # Cleanup: remove a partially written output file.
        if outfile and outfile != '<stdout>' and os.path.isfile(outfile):
            os.unlink(outfile)
        # Build and print error description.
        msg = 'FAILED: '
        if reader.cursor:
            # Prefix the message with the input file location.
            msg = message.format('', msg)
        if isinstance(e, EAsciiDoc):
            message.stderr('%s%s' % (msg,str(e)))
        else:
            # Unexpected exceptions get a traceback when run as a script.
            if __name__ == '__main__':
                message.stderr(msg+'unexpected error:')
                message.stderr('-'*60)
                traceback.print_exc(file=sys.stderr)
                message.stderr('-'*60)
            else:
                message.stderr('%sunexpected error: %s' % (msg,str(e)))
        sys.exit(1)
def usage(msg=''):
    """Print optional error *msg* then the default help topic to stderr."""
    if msg:
        message.stderr(msg)
    show_help('default', sys.stderr)
def show_help(topic, f=None):
    """Print help topic to file object f (defaults to stdout).

    The topic is matched as a prefix against section names in the help
    configuration file; exits non-zero if it is missing or no help file
    can be loaded.
    """
    if f is None:
        f = sys.stdout
    # Select help file: a language-specific one if the lang attribute is set.
    lang = config.cmd_attrs.get('lang')
    if lang and lang != 'en':
        help_file = 'help-' + lang + '.conf'
    else:
        help_file = HELP_FILE
    # Print [topic] section from help file.
    config.load_from_dirs(help_file)
    if len(config.sections) == 0:
        # Default to English if specified language help files not found.
        help_file = HELP_FILE
        config.load_from_dirs(help_file)
    if len(config.sections) == 0:
        message.stderr('no help topics found')
        sys.exit(1)
    # Count sections whose name starts with the requested topic.
    n = 0
    for k in config.sections:
        if re.match(re.escape(topic), k):
            n += 1
            lines = config.sections[k]
    if n == 0:
        message.stderr('help topic not found: [%s] in %s' % (topic, help_file))
        message.stderr('available help topics: %s' % ', '.join(config.sections.keys()))
        sys.exit(1)
    elif n > 1:
        # Ambiguous prefix: report it and return without printing a topic.
        message.stderr('ambiguous help topic: %s' % topic)
    else:
        for line in lines:
            print >>f, line
### Used by asciidocapi.py ###
def execute(cmd,opts,args):
    """
    Execute asciidoc with command-line options and arguments.
    cmd is asciidoc command or asciidoc.py path.
    opts and args conform to values returned by getopt.getopt().
    Raises SystemExit if an error occurs.
    Doctests:
    1. Check execution:
       >>> import StringIO
       >>> infile = StringIO.StringIO('Hello *{author}*')
       >>> outfile = StringIO.StringIO()
       >>> opts = []
       >>> opts.append(('--backend','html4'))
       >>> opts.append(('--no-header-footer',None))
       >>> opts.append(('--attribute','author=Joe Bloggs'))
       >>> opts.append(('--out-file',outfile))
       >>> execute(__file__, opts, [infile])
       >>> print outfile.getvalue()
       <p>Hello <strong>Joe Bloggs</strong></p>
       >>>
    """
    config.init(cmd)
    if len(args) > 1:
        # BUG FIX: error message previously read 'To many arguments'.
        usage('Too many arguments')
        sys.exit(1)
    # Accumulate option state before dispatching to asciidoc().
    backend = None
    doctype = None
    confiles = []
    outfile = None
    options = []
    help_option = False
    for o,v in opts:
        if o in ('--help','-h'):
            help_option = True
        #DEPRECATED: --unsafe option.
        if o == '--unsafe':
            document.safe = False
        if o == '--safe':
            document.safe = True
        if o == '--version':
            print('asciidoc %s' % VERSION)
            sys.exit(0)
        if o in ('-b','--backend'):
            backend = v
            # config.cmd_attrs['backend'] = v
        if o in ('-c','--dump-conf'):
            options.append('-c')
        if o in ('-d','--doctype'):
            doctype = v
            # config.cmd_attrs['doctype'] = v
        if o in ('-e','--no-conf'):
            options.append('-e')
        if o in ('-f','--conf-file'):
            confiles.append(v)
        if o in ('-n','--section-numbers'):
            # -n is shorthand for -a numbered; fall through to -a handling.
            o = '-a'
            v = 'numbered'
        if o in ('-a','--attribute'):
            e = parse_entry(v, allow_name_only=True)
            if not e:
                usage('Illegal -a option: %s' % v)
                sys.exit(1)
            k,v = e
            # A @ suffix denotes don't override existing document attributes.
            if v and v[-1] == '@':
                document.attributes[k] = v[:-1]
            else:
                config.cmd_attrs[k] = v
        if o in ('-o','--out-file'):
            outfile = v
        if o in ('-s','--no-header-footer'):
            options.append('-s')
        if o in ('-v','--verbose'):
            options.append('-v')
    if help_option:
        if len(args) == 0:
            show_help('default')
        else:
            show_help(args[-1])
        sys.exit(0)
    if len(args) == 0 and len(opts) == 0:
        usage()
        sys.exit(0)
    if len(args) == 0:
        usage('No source file specified')
        sys.exit(1)
    # if not backend:
    #     usage('No --backend option specified')
    #     sys.exit(1)
    # Save real stdin/stdout; they may be swapped for API file objects below.
    stdin,stdout = sys.stdin,sys.stdout
    try:
        infile = args[0]
        if infile == '-':
            infile = '<stdin>'
        elif isinstance(infile, str):
            infile = os.path.abspath(infile)
        else: # Input file is file object from API call.
            sys.stdin = infile
            infile = '<stdin>'
        if outfile == '-':
            outfile = '<stdout>'
        elif isinstance(outfile, str):
            outfile = os.path.abspath(outfile)
        elif outfile is None:
            if infile == '<stdin>':
                outfile = '<stdout>'
        else: # Output file is file object from API call.
            sys.stdout = outfile
            outfile = '<stdout>'
        # Do the work.
        asciidoc(backend, doctype, confiles, infile, outfile, options)
        if document.has_errors:
            sys.exit(1)
    finally:
        # Always restore the real streams for subsequent API calls.
        sys.stdin,sys.stdout = stdin,stdout
if __name__ == '__main__':
    # Process command line options.
    import getopt
    try:
        #DEPRECATED: --unsafe option.
        opts,args = getopt.getopt(sys.argv[1:],
            'a:b:cd:ef:hno:svw:',
            ['attribute=','backend=','conf-file=','doctype=','dump-conf',
            'help','no-conf','no-header-footer','out-file=',
            'section-numbers','verbose','version','safe','unsafe',
            'doctest','filter'])
    except getopt.GetoptError:
        message.stderr('illegal command options')
        sys.exit(1)
    # --doctest: run this module's embedded doctests and exit.
    if '--doctest' in [opt[0] for opt in opts]:
        # Run module doctests.
        import doctest
        options = doctest.NORMALIZE_WHITESPACE + doctest.ELLIPSIS
        failures,tries = doctest.testmod(optionflags=options)
        if failures == 0:
            message.stderr('All doctests passed')
            sys.exit(0)
        else:
            sys.exit(1)
    # --filter: dispatch to the Filter install/remove/list commands and exit.
    if '--filter' in [opt[0] for opt in opts]:
        config.init(sys.argv[0])
        config.verbose = bool(set(['-v','--verbose']) & set([opt[0] for opt in opts]))
        if not args:
            die('missing --filter command')
        elif args[0] == 'install':
            Filter.install(args[1:])
        elif args[0] == 'remove':
            Filter.remove(args[1:])
        elif args[0] == 'list':
            Filter.list()
        else:
            die('illegal --filter command: %s' % args[0])
        sys.exit(0)
    # Normal operation: convert the document.
    try:
        execute(sys.argv[0],opts,args)
    except KeyboardInterrupt:
        sys.exit(1)
| Python |
#!/usr/bin/env python
'''
NAME
latex2png - Converts LaTeX source to PNG file
SYNOPSIS
latex2png [options] INFILE
DESCRIPTION
This filter reads LaTeX source text from the input file
INFILE (or stdin if INFILE is -) and renders it to PNG image file.
Typically used to render math equations.
Requires latex(1), dvipng(1) commands and LaTeX math packages.
OPTIONS
-D DPI
Set the output resolution to DPI dots per inch. Use this option to
scale the output image size.
-o OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-m
Skip if the PNG output file is newer than the INFILE.
Compares timestamps on INFILE and OUTFILE. If
INFILE is - (stdin) then compares MD5 checksum stored in file
named like OUTFILE but with a .md5 file name extension.
The .md5 file is created if the -m option is used and the
INFILE is - (stdin).
-v
Verbosely print processing information to stderr.
--help, -h
Print this documentation.
--version
Print program version number.
SEE ALSO
latex(1), dvipng(1)
AUTHOR
Written by Stuart Rackham, <srackham@gmail.com>
The code was inspired by Kjell Magne Fauske's code:
http://fauskes.net/nb/htmleqII/
See also:
http://www.amk.ca/python/code/mt-math
http://code.google.com/p/latexmath2png/
COPYING
Copyright (C) 2010 Stuart Rackham. Free use of this software is
granted under the terms of the MIT License.
'''
# Suppress warning: "the md5 module is deprecated; use hashlib instead"
import warnings
warnings.simplefilter('ignore',DeprecationWarning)
import os, sys, tempfile, md5
VERSION = '0.1.0'

# Include LaTeX packages and commands here.
# TEX_HEADER/TEX_FOOTER wrap the snippet read from INFILE to form a complete
# LaTeX document; their content is written verbatim to the temp .tex file.
TEX_HEADER = r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\newcommand{\mx}[1]{\mathbf{\bm{#1}}} % Matrix command
\newcommand{\vc}[1]{\mathbf{\bm{#1}}} % Vector command
\newcommand{\T}{\text{T}} % Transpose
\pagestyle{empty}
\begin{document}'''

TEX_FOOTER = r'''\end{document}'''

# Globals.
verbose = False  # Set by the -v option; gates print_verbose() output.

class EApp(Exception): pass # Application specific exception.
def print_stderr(line):
    """Write *line* followed by the platform line separator to stderr."""
    text = '%s%s' % (line, os.linesep)
    sys.stderr.write(text)
def print_verbose(line):
    """Relay *line* to stderr, but only when the global verbose flag is set."""
    if not verbose:
        return
    print_stderr(line)
def run(cmd):
    """Run shell command *cmd*; raise EApp if it exits non-zero."""
    global verbose
    if verbose:
        # Show the command's own output on stderr.
        cmd += ' 1>&2'
    else:
        # Silence the command entirely.
        cmd += ' 2>%s 1>&2' % os.devnull
    print_verbose('executing: %s' % cmd)
    if os.system(cmd):
        raise EApp, 'failed command: %s' % cmd
def latex2png(infile, outfile, dpi, modified):
    '''Convert LaTeX input file infile to PNG file named outfile.

    infile: source file name or '-' for stdin.
    dpi: output resolution string or None for the dvipng default.
    modified: if True skip regeneration when the output is up to date
    (mtime comparison for files, stored MD5 checksum for stdin).
    '''
    outfile = os.path.abspath(outfile)
    outdir = os.path.dirname(outfile)
    if not os.path.isdir(outdir):
        raise EApp, 'directory does not exist: %s' % outdir
    # Temporary LaTeX file (and derived .dvi/.aux/.log) live next to outfile.
    texfile = tempfile.mktemp(suffix='.tex', dir=os.path.dirname(outfile))
    basefile = os.path.splitext(texfile)[0]
    dvifile = basefile + '.dvi'
    temps = [basefile + ext for ext in ('.tex','.dvi', '.aux', '.log')]
    skip = False
    if infile == '-':
        tex = sys.stdin.read()
        # For stdin input there is no mtime, so change detection uses an MD5
        # checksum stored beside the output file.
        checksum = md5.new(tex).digest()
        f = os.path.splitext(outfile)[0] + '.md5'
        if modified:
            if os.path.isfile(f) and os.path.isfile(outfile) and \
                    checksum == open(f,'rb').read():
                skip = True
            open(f,'wb').write(checksum)
    else:
        if not os.path.isfile(infile):
            raise EApp, 'input file does not exist: %s' % infile
        tex = open(infile).read()
        if modified and os.path.isfile(outfile) and \
                os.path.getmtime(infile) <= os.path.getmtime(outfile):
            skip = True
    if skip:
        print_verbose('skipped: no change: %s' % outfile)
        return
    # Wrap the snippet into a complete LaTeX document.
    tex = '%s\n%s\n%s\n' % (TEX_HEADER, tex.strip(), TEX_FOOTER)
    print_verbose('tex:\n%s' % tex)
    open(texfile, 'w').write(tex)
    saved_pwd = os.getcwd()
    os.chdir(outdir)
    try:
        # Compile LaTeX document to DVI file.
        run('latex %s' % texfile)
        # Convert DVI file to PNG.
        cmd = 'dvipng'
        if dpi:
            cmd += ' -D %s' % dpi
        cmd += ' -T tight -x 1000 -z 9 -bg Transparent -o "%s" "%s"' \
                % (outfile,dvifile)
        run(cmd)
    finally:
        os.chdir(saved_pwd)
    # Remove the intermediate LaTeX build files.
    for f in temps:
        if os.path.isfile(f):
            print_verbose('deleting: %s' % f)
            os.remove(f)
def usage(msg=''):
    """Print optional error *msg* plus a brief usage summary to stderr."""
    if msg:
        print_stderr(msg)
    print_stderr('\n'
        'usage:\n'
        '    latex2png [options] INFILE\n'
        '\n'
        'options:\n'
        '    -D DPI\n'
        '    -o OUTFILE\n'
        '    -m\n'
        '    -v\n'
        '    --help\n'
        '    --version')
def main():
    """Parse command-line options and run the LaTeX-to-PNG conversion."""
    # Process command line options.
    global verbose
    dpi = None
    outfile = None
    modified = False
    import getopt
    opts,args = getopt.getopt(sys.argv[1:], 'D:o:mhv', ['help','version'])
    for o,v in opts:
        if o in ('--help','-h'):
            print __doc__
            sys.exit(0)
        if o =='--version':
            print('latex2png version %s' % (VERSION,))
            sys.exit(0)
        if o == '-D': dpi = v
        if o == '-o': outfile = v
        if o == '-m': modified = True
        if o == '-v': verbose = True
    if len(args) != 1:
        usage()
        sys.exit(1)
    infile = args[0]
    if dpi and not dpi.isdigit():
        usage('invalid DPI')
        sys.exit(1)
    if outfile is None:
        # Stdin input has no name to derive the output file name from.
        if infile == '-':
            usage('OUTFILE must be specified')
            sys.exit(1)
        outfile = os.path.splitext(infile)[0] + '.png'
    # Do the work.
    latex2png(infile, outfile, dpi, modified)
    # Print something to suppress asciidoc 'no output from filter' warnings.
    if infile == '-':
        sys.stdout.write(' ')
if __name__ == "__main__":
try:
main()
except SystemExit:
raise
except KeyboardInterrupt:
sys.exit(1)
except Exception, e:
print_stderr("%s: %s" % (os.path.basename(sys.argv[0]), str(e)))
sys.exit(1)
| Python |
#!/usr/bin/env python
import os, sys, subprocess
from optparse import *
__AUTHOR__ = "Gouichi Iisaka <iisaka51@gmail.com>"
__VERSION__ = '1.1.4'
class EApp(Exception):
    '''Application specific exception.'''
    pass
class Application():
'''
NAME
graphviz2png - Converts textual graphviz notation to PNG file
SYNOPSIS
graphviz2png [options] INFILE
DESCRIPTION
This filter reads Graphviz notation text from the input file
INFILE (or stdin if INFILE is -), converts it to a PNG image file.
OPTIONS
-o OUTFILE, --outfile=OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-L LAYOUT, --layout=LAYOUT
Graphviz layout: dot, neato, twopi, circo, fdp
Default is 'dot'.
-F FORMAT, --format=FORMAT
Graphviz output format: png, svg, or any other format Graphviz
supports. Run dot -T? to get the full list.
Default is 'png'.
-v, --verbose
Verbosely print processing information to stderr.
-h, --help
Print this documentation.
-V, --version
Print program version number.
SEE ALSO
graphviz(1)
AUTHOR
Written by Gouichi Iisaka, <iisaka51@gmail.com>
Format support added by Elmo Todurov, <todurov@gmail.com>
THANKS
Stuart Rackham, <srackham@gmail.com>
This script was inspired by his music2png.py and AsciiDoc
LICENSE
Copyright (C) 2008-2009 Gouichi Iisaka.
Free use of this software is granted under the terms of
the GNU General Public License (GPL).
'''
def __init__(self, argv=None):
# Run dot, get the list of supported formats. It's prefixed by some junk.
format_output = subprocess.Popen(["dot", "-T?"], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[1]
# The junk contains : and ends with :. So we split it, then strip the final endline, then split the list for future usage.
supported_formats = format_output.split(": ")[2][:-1].split(" ")
if not argv:
argv = sys.argv
self.usage = '%prog [options] inputfile'
self.version = 'Version: %s\n' % __VERSION__
self.version += 'Copyright(c) 2008-2009: %s\n' % __AUTHOR__
self.option_list = [
Option("-o", "--outfile", action="store",
dest="outfile",
help="Output file"),
Option("-L", "--layout", action="store",
dest="layout", default="dot", type="choice",
choices=['dot','neato','twopi','circo','fdp'],
help="Layout type. LAYOUT=<dot|neato|twopi|circo|fdp>"),
Option("-F", "--format", action="store",
dest="format", default="png", type="choice",
choices=supported_formats,
help="Format type. FORMAT=<" + "|".join(supported_formats) + ">"),
Option("--debug", action="store_true",
dest="do_debug",
help=SUPPRESS_HELP),
Option("-v", "--verbose", action="store_true",
dest="do_verbose", default=False,
help="verbose output"),
]
self.parser = OptionParser( usage=self.usage, version=self.version,
option_list=self.option_list)
(self.options, self.args) = self.parser.parse_args()
if len(self.args) != 1:
self.parser.print_help()
sys.exit(1)
self.options.infile = self.args[0]
def systemcmd(self, cmd):
if self.options.do_verbose:
msg = 'Execute: %s' % cmd
sys.stderr.write(msg + os.linesep)
else:
cmd += ' 2>%s' % os.devnull
if os.system(cmd):
raise EApp, 'failed command: %s' % cmd
def graphviz2png(self, infile, outfile):
'''Convert Graphviz notation in file infile to
PNG file named outfile.'''
outfile = os.path.abspath(outfile)
outdir = os.path.dirname(outfile)
if not os.path.isdir(outdir):
raise EApp, 'directory does not exist: %s' % outdir
basefile = os.path.splitext(outfile)[0]
saved_cwd = os.getcwd()
os.chdir(outdir)
try:
cmd = '%s -T%s "%s" > "%s"' % (
self.options.layout, self.options.format, infile, outfile)
self.systemcmd(cmd)
finally:
os.chdir(saved_cwd)
if not self.options.do_debug:
os.unlink(infile)
def run(self):
if self.options.format == '':
self.options.format = 'png'
if self.options.infile == '-':
if self.options.outfile is None:
sys.stderr.write('OUTFILE must be specified')
sys.exit(1)
infile = os.path.splitext(self.options.outfile)[0] + '.txt'
lines = sys.stdin.readlines()
open(infile, 'w').writelines(lines)
if not os.path.isfile(infile):
raise EApp, 'input file does not exist: %s' % infile
if self.options.outfile is None:
outfile = os.path.splitext(infile)[0] + '.png'
else:
outfile = self.options.outfile
self.graphviz2png(infile, outfile)
# To suppress asciidoc 'no output from filter' warnings.
if self.options.infile == '-':
sys.stdout.write(' ')
if __name__ == "__main__":
app = Application()
app.run()
| Python |
#!/usr/bin/env python
'''
NAME
code-filter - AsciiDoc filter to highlight language keywords
SYNOPSIS
code-filter -b backend -l language [ -t tabsize ]
[ --help | -h ] [ --version | -v ]
DESCRIPTION
This filter reads source code from the standard input, highlights language
keywords and comments and writes to the standard output.
The purpose of this program is to demonstrate how to write an AsciiDoc
filter -- it's much too simplistic to be passed off as a code syntax
highlighter. Use the 'source-highlight-filter' instead.
OPTIONS
--help, -h
Print this documentation.
-b
Backend output file format: 'docbook', 'linuxdoc', 'html', 'css'.
-l
The name of the source code language: 'python', 'ruby', 'c++', 'c'.
-t tabsize
Expand source tabs to tabsize spaces.
--version, -v
Print program version number.
BUGS
- Code on the same line as a block comment is treated as comment.
Keywords inside literal strings are highlighted.
- There doesn't appear to be an easy way to accommodate linuxdoc so
just pass it through without markup.
AUTHOR
Written by Stuart Rackham, <srackham@gmail.com>
URLS
http://sourceforge.net/projects/asciidoc/
http://www.methods.co.nz/asciidoc/
COPYING
Copyright (C) 2002-2006 Stuart Rackham. Free use of this software is
granted under the terms of the GNU General Public License (GPL).
'''
import os, sys, re, string
VERSION = '1.1.2'
# Globals.
language = None   # Source language name, set by the -l option.
backend = None    # Output backend name, set by the -b option.
tabsize = 8       # Tab expansion width, overridable with -t.

# Backend markup wrapped around highlighted keywords.
keywordtags = {
    'html':
        ('<strong>','</strong>'),
    'css':
        ('<strong>','</strong>'),
    'docbook':
        ('<emphasis role="strong">','</emphasis>'),
    'linuxdoc':
        ('','')
}
# Backend markup wrapped around comment text.
commenttags = {
    'html':
        ('<i>','</i>'),
    'css':
        ('<i>','</i>'),
    'docbook':
        ('<emphasis>','</emphasis>'),
    'linuxdoc':
        ('','')
}
# Reserved-word lists keyed by language name, used by sub_keyword().
keywords = {
    'python':
        ('and', 'del', 'for', 'is', 'raise', 'assert', 'elif', 'from',
        'lambda', 'return', 'break', 'else', 'global', 'not', 'try', 'class',
        'except', 'if', 'or', 'while', 'continue', 'exec', 'import', 'pass',
        'yield', 'def', 'finally', 'in', 'print'),
    'ruby':
        # BUG FIX: a missing comma between 'defined?' and 'ensure' previously
        # concatenated them into the bogus keyword 'defined?ensure'.
        ('__FILE__', 'and', 'def', 'end', 'in', 'or', 'self', 'unless',
        '__LINE__', 'begin', 'defined?', 'ensure', 'module', 'redo', 'super',
        'until', 'BEGIN', 'break', 'do', 'false', 'next', 'rescue', 'then',
        'when', 'END', 'case', 'else', 'for', 'nil', 'retry', 'true', 'while',
        'alias', 'class', 'elsif', 'if', 'not', 'return', 'undef', 'yield'),
    'c++':
        ('asm', 'auto', 'bool', 'break', 'case', 'catch', 'char', 'class',
        'const', 'const_cast', 'continue', 'default', 'delete', 'do', 'double',
        'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern',
        'false', 'float', 'for', 'friend', 'goto', 'if', 'inline', 'int',
        'long', 'mutable', 'namespace', 'new', 'operator', 'private',
        'protected', 'public', 'register', 'reinterpret_cast', 'return',
        'short', 'signed', 'sizeof', 'static', 'static_cast', 'struct',
        'switch', 'template', 'this', 'throw', 'true', 'try', 'typedef',
        'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
        'volatile', 'wchar_t', 'while')
}
# Block comment (start, end) delimiter pairs; None when the language has none.
block_comments = {
    'python': ("'''","'''"),
    'ruby': None,
    'c++': ('/*','*/')
}
# Single-line comment introducers.
inline_comments = {
    'python': '#',
    'ruby': '#',
    'c++': '//'
}
def print_stderr(line):
    '''Write *line* plus the platform line separator to standard error.'''
    sys.stderr.write('%s%s' % (line, os.linesep))
def sub_keyword(mo):
    '''re.subs() argument: wrap recognized language keywords in backend tags.'''
    word = mo.group('word')
    if word not in keywords[language]:
        return word
    stag,etag = keywordtags[backend]
    return stag + word + etag
def code_filter():
    '''This function does all the work: read source from stdin, highlight
    keywords and comments for the selected backend, write to stdout.'''
    global language, backend
    inline_comment = inline_comments[language]
    blk_comment = block_comments[language]
    if blk_comment:
        blk_comment = (re.escape(block_comments[language][0]),
            re.escape(block_comments[language][1]))
    stag,etag = commenttags[backend]
    in_comment = 0  # True if we're inside a multi-line block comment.
    tag_comment = 0 # True if we should tag the current line as a comment.
    line = sys.stdin.readline()
    while line:
        line = string.rstrip(line)
        line = string.expandtabs(line,tabsize)
        # Escape special characters so the output is valid backend markup.
        # BUG FIX: these replacements were identity no-ops ('&' -> '&');
        # they must substitute the SGML entities.
        line = string.replace(line,'&','&amp;')
        line = string.replace(line,'<','&lt;')
        line = string.replace(line,'>','&gt;')
        # Process block comment.
        if blk_comment:
            if in_comment:
                if re.match(r'.*'+blk_comment[1]+r'$',line):
                    in_comment = 0
            else:
                if re.match(r'^\s*'+blk_comment[0]+r'.*'+blk_comment[1],line):
                    # Single line block comment.
                    tag_comment = 1
                elif re.match(r'^\s*'+blk_comment[0],line):
                    # Start of multi-line block comment.
                    tag_comment = 1
                    in_comment = 1
                else:
                    tag_comment = 0
        if tag_comment:
            if line: line = stag+line+etag
        else:
            if inline_comment:
                pos = string.find(line,inline_comment)
            else:
                pos = -1
            if pos >= 0:
                # Process inline comment: highlight keywords before the
                # comment, tag the comment text itself.
                line = re.sub(r'\b(?P<word>\w+)\b',sub_keyword,line[:pos]) \
                    + stag + line[pos:] + etag
            else:
                line = re.sub(r'\b(?P<word>\w+)\b',sub_keyword,line)
        sys.stdout.write(line + os.linesep)
        line = sys.stdin.readline()
def usage(msg=''):
    '''Print optional error *msg* and the command usage lines to stderr.'''
    if msg:
        print_stderr(msg)
    print_stderr('Usage: code-filter -b backend -l language [ -t tabsize ]')
    print_stderr('       [ --help | -h ] [ --version | -v ]')
def main():
    '''Parse command-line options, validate them and run code_filter().'''
    global language, backend, tabsize
    # Process command line options.
    import getopt
    opts,args = getopt.getopt(sys.argv[1:],
        'b:l:ht:v',
        ['help','version'])
    if len(args) > 0:
        # This filter takes no positional arguments; input comes from stdin.
        usage()
        sys.exit(1)
    for o,v in opts:
        if o in ('--help','-h'):
            print __doc__
            sys.exit(0)
        if o in ('--version','-v'):
            print('code-filter version %s' % (VERSION,))
            sys.exit(0)
        if o == '-b': backend = v
        if o == '-l':
            v = string.lower(v)
            # Plain C shares the C++ keyword table.
            if v == 'c': v = 'c++'
            language = v
        if o == '-t':
            try:
                tabsize = int(v)
            except:
                usage('illegal tabsize')
                sys.exit(1)
            if tabsize <= 0:
                usage('illegal tabsize')
                sys.exit(1)
    # Both -b and -l are mandatory and must name known tables.
    if backend is None:
        usage('backend option is mandatory')
        sys.exit(1)
    if not keywordtags.has_key(backend):
        usage('illegal backend option')
        sys.exit(1)
    if language is None:
        usage('language option is mandatory')
        sys.exit(1)
    if not keywords.has_key(language):
        usage('illegal language option')
        sys.exit(1)
    # Do the work.
    code_filter()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
except:
print_stderr("%s: unexpected exit status: %s" %
(os.path.basename(sys.argv[0]), sys.exc_info()[1]))
# Exit with previous sys.exit() status or zero if no sys.exit().
sys.exit(sys.exc_info()[1])
| Python |
#!/usr/bin/env python
'''
NAME
music2png - Converts textual music notation to classically notated PNG file
SYNOPSIS
music2png [options] INFILE
DESCRIPTION
This filter reads LilyPond or ABC music notation text from the input file
INFILE (or stdin if INFILE is -), converts it to classical music notation
and writes it to a trimmed PNG image file.
This script is a wrapper for LilyPond and ImageMagick commands.
OPTIONS
-f FORMAT
The INFILE music format. 'abc' for ABC notation, 'ly' for LilyPond
notation. Defaults to 'abc' unless source starts with backslash.
-o OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-m
Skip if the PNG output file is newer than the INFILE.
Compares timestamps on INFILE and OUTFILE. If
INFILE is - (stdin) then compares MD5 checksum stored in file
named like OUTFILE but with a .md5 file name extension.
The .md5 file is created if the -m option is used and the
INFILE is - (stdin).
-v
Verbosely print processing information to stderr.
--help, -h
Print this documentation.
--version
Print program version number.
SEE ALSO
lilypond(1), abc2ly(1), convert(1)
AUTHOR
Written by Stuart Rackham, <srackham@gmail.com>
COPYING
Copyright (C) 2006 Stuart Rackham. Free use of this software is
granted under the terms of the GNU General Public License (GPL).
'''
# Suppress warning: "the md5 module is deprecated; use hashlib instead"
import warnings
warnings.simplefilter('ignore',DeprecationWarning)
import os, sys, tempfile, md5
VERSION = '0.1.1'

# Globals.
verbose = False  # Set by the -v option; gates print_verbose() output.

class EApp(Exception): pass # Application specific exception.
def print_stderr(line):
    '''Emit *line* plus an OS-appropriate newline on standard error.'''
    text = line + os.linesep
    sys.stderr.write(text)
def print_verbose(line):
    '''Forward *line* to stderr only when the global verbose flag is on.'''
    if not verbose:
        return
    print_stderr(line)
def run(cmd):
    '''Run shell command *cmd*; raise EApp if it exits non-zero.'''
    global verbose
    if not verbose:
        # Silence the command's stderr unless verbose output was requested.
        cmd += ' 2>%s' % os.devnull
    print_verbose('executing: %s' % cmd)
    if os.system(cmd):
        raise EApp, 'failed command: %s' % cmd
def music2png(format, infile, outfile, modified):
    '''Convert ABC notation in file infile to cropped PNG file named outfile.

    Args:
        format: 'abc', 'ly', or None to guess from the source text.
        infile: Input file name, or '-' to read the notation from stdin.
        outfile: Output PNG file name; its directory must already exist.
        modified: If true, skip the conversion when the output is up to date.
    Raises:
        EApp: When a path is missing or an external command fails.
    '''
    outfile = os.path.abspath(outfile)
    outdir = os.path.dirname(outfile)
    if not os.path.isdir(outdir):
        raise EApp, 'directory does not exist: %s' % outdir
    # Random temporary base name in the output directory; LilyPond derives
    # its own output file names from this base.
    basefile = tempfile.mktemp(dir=os.path.dirname(outfile))
    temps = [basefile + ext for ext in ('.abc', '.ly', '.ps', '.midi')]
    skip = False
    if infile == '-':
        # Source arrives on stdin: an MD5 checksum stored next to the output
        # file detects whether the input changed since the last run.
        source = sys.stdin.read()
        checksum = md5.new(source).digest()
        f = os.path.splitext(outfile)[0] + '.md5'
        if modified:
            if os.path.isfile(f) and os.path.isfile(outfile) and \
                    checksum == open(f,'rb').read():
                skip = True
            open(f,'wb').write(checksum)
    else:
        if not os.path.isfile(infile):
            raise EApp, 'input file does not exist: %s' % infile
        # For a real input file, timestamps decide whether we can skip.
        if modified and os.path.isfile(outfile) and \
                os.path.getmtime(infile) <= os.path.getmtime(outfile):
            skip = True
        source = open(infile).read()
    if skip:
        print_verbose('skipped: no change: %s' % outfile)
        return
    if format is None:
        if source and source.startswith('\\'): # Guess input format.
            format = 'ly'
        else:
            format = 'abc'
    open('%s.%s' % (basefile,format), 'w').write(source) # Temp source file.
    abc = basefile + '.abc'
    ly = basefile + '.ly'
    png = basefile + '.png'
    saved_pwd = os.getcwd()
    # LilyPond writes auxiliary files into the current directory, so work in
    # the output directory and always restore the previous cwd afterwards.
    os.chdir(outdir)
    try:
        if format == 'abc':
            run('abc2ly --beams=None -o "%s" "%s"' % (ly,abc))
        run('lilypond --png -o "%s" "%s"' % (basefile,ly))
        os.rename(png, outfile)
    finally:
        os.chdir(saved_pwd)
    # Chop the bottom 75 pixels off to get rid of the page footer.
    run('convert "%s" -gravity South -crop 1000x10000+0+75 "%s"' % (outfile, outfile))
    # Trim all blank areas from sides, top and bottom.
    run('convert "%s" -trim "%s"' % (outfile, outfile))
    # Clean up the temporary files produced by the conversion chain.
    for f in temps:
        if os.path.isfile(f):
            print_verbose('deleting: %s' % f)
            os.remove(f)
def usage(msg=''):
    """Print an optional error message followed by the usage summary."""
    if msg:
        print_stderr(msg)
    summary = ('\n'
               'usage:\n'
               ' music2png [options] INFILE\n'
               '\n'
               'options:\n'
               ' -f FORMAT\n'
               ' -o OUTFILE\n'
               ' -m\n'
               ' -v\n'
               ' --help\n'
               ' --version')
    print_stderr(summary)
def main():
    """Parse command line options/arguments and run the conversion."""
    # Process command line options.
    global verbose
    format = None
    outfile = None
    modified = False
    import getopt
    opts,args = getopt.getopt(sys.argv[1:], 'f:o:mhv', ['help','version'])
    for o,v in opts:
        if o in ('--help','-h'):
            print __doc__
            sys.exit(0)
        if o =='--version':
            print('music2png version %s' % (VERSION,))
            sys.exit(0)
        if o == '-f': format = v
        if o == '-o': outfile = v
        if o == '-m': modified = True
        if o == '-v': verbose = True
    # Exactly one positional argument (INFILE) is required.
    if len(args) != 1:
        usage()
        sys.exit(1)
    infile = args[0]
    if format not in (None, 'abc', 'ly'):
        usage('invalid FORMAT')
        sys.exit(1)
    if outfile is None:
        if infile == '-':
            usage('OUTFILE must be specified')
            sys.exit(1)
        # Default output file: INFILE with a .png extension.
        outfile = os.path.splitext(infile)[0] + '.png'
    # Do the work.
    music2png(format, infile, outfile, modified)
    # Print something to suppress asciidoc 'no output from filter' warnings.
    if infile == '-':
        sys.stdout.write(' ')
if __name__ == "__main__":
    try:
        main()
    except SystemExit:
        raise
    except KeyboardInterrupt:
        # Interactive interrupt: exit quietly with an error status.
        sys.exit(1)
    except Exception, e:
        # Report any other failure as "program-name: message" and fail.
        print_stderr("%s: %s" % (os.path.basename(sys.argv[0]), str(e)))
        sys.exit(1)
| Python |
#!/bin/env python
import xml.dom.minidom as dom
import sys
import struct
# The HDT file stores WEAP_NUM+1 weapon records (see the toXML() loop).
WEAP_NUM = 780
# Binary layout of one weapon record: little-endian, no padding.
struct_fmt = "<H BBHBBBB 8B8B8b8b8b8b8H bbBBBB"
def pack_weapon(dict):
    """Serialize a weapon dictionary into its binary HDT record.

    Args:
        dict: Weapon description as produced by unpack_weapon()/DOMToDict().
            (The parameter name shadows the builtin; it is kept unchanged
            for backward compatibility with existing callers.)
    Returns:
        The packed binary record in struct_fmt layout.
    """
    l = [dict['drain'], dict['shotRepeat'], dict['multi'], dict['weapAni'],
         dict['max'], dict['tx'], dict['ty'], dict['aim']]
    patterns = dict['patterns']
    # The record stores each per-pattern field as a run of 8 values
    # (field-major order), not pattern-major.
    for field in ('attack', 'del', 'sx', 'sy', 'bx', 'by', 'sg'):
        for j in range(8):
            l.append(patterns[j][field])
    l.extend([dict['acceleration'], dict['accelerationx'], dict['circleSize'],
              dict['sound'], dict['trail'], dict['shipBlastFilter']])
    return struct.pack(struct_fmt, *l)
def unpack_weapon(str):
    """Deserialize a binary HDT weapon record into a dictionary.

    Args:
        str: One packed record in struct_fmt layout. (The parameter name
            shadows the builtin; it is kept unchanged for backward
            compatibility with existing callers.)
    Returns:
        Weapon dictionary with scalar fields plus a 'patterns' list of
        8 per-pattern dictionaries.
    """
    tup = struct.unpack(struct_fmt, str)
    weapon = {
        'drain': tup[0],
        'shotRepeat': tup[1],
        'multi': tup[2],
        'weapAni': tup[3],
        'max': tup[4],
        'tx': tup[5],
        'ty': tup[6],
        'aim': tup[7],
    }
    # Per-pattern fields are stored field-major: 8 consecutive values for
    # each field name, in this fixed order.
    patterns = [{} for _ in range(8)]
    i = 8
    for field in ('attack', 'del', 'sx', 'sy', 'bx', 'by', 'sg'):
        for j in range(8):
            patterns[j][field] = tup[i]
            i += 1
    weapon['patterns'] = patterns
    weapon['acceleration'] = tup[i]
    weapon['accelerationx'] = tup[i+1]
    weapon['circleSize'] = tup[i+2]
    weapon['sound'] = tup[i+3]
    weapon['trail'] = tup[i+4]
    weapon['shipBlastFilter'] = tup[i+5]
    return weapon
def DOMToDict(doc, weap_node):
    """Convert a <weapon> DOM element back into a weapon dictionary.

    Fixes the original implementation's reuse of the loop variable `i` by
    the nested attribute loops (it only worked by accident) and avoids
    shadowing the builtin `dict`.

    Args:
        doc: Owning DOM document (unused; kept for interface compatibility).
        weap_node: A <weapon> element as produced by dictToDOM().
    Returns:
        Weapon dictionary; pattern entries appear under 'patterns' as a
        list of 8 dictionaries (slots with no <entry> stay empty).
    """
    weapon = {}
    for child in weap_node.childNodes:
        if child.nodeType != child.ELEMENT_NODE:
            continue
        if child.hasAttribute("value"):
            # Simple scalar field: <name value="123"/>.
            weapon[child.tagName] = int(child.getAttribute("value"))
        elif child.tagName == "patterns":
            # Pattern table: one <entry .../> element per pattern slot,
            # every attribute becoming an integer field.
            weapon['patterns'] = [{} for _ in range(8)]
            index = 0
            for entry in child.childNodes:
                if entry.nodeType != entry.ELEMENT_NODE:
                    continue
                for k in range(entry.attributes.length):
                    attr = entry.attributes.item(k)
                    weapon['patterns'][index][attr.name] = int(attr.nodeValue)
                index += 1
    return weapon
def dictToDOM(doc, root, dict, index=None):
    """Append a <weapon> element describing *dict* to *root*.

    Fixes the original implementation's rebinding of `keys` and `i` inside
    the list branch while the outer loop was still iterating them (it only
    worked by accident).

    Args:
        doc: DOM document used as the element factory.
        root: Parent element that receives the new <weapon> element.
        dict: Weapon dictionary (parameter name kept for compatibility).
        index: Optional weapon number, stored as a 4-digit hex attribute.
    """
    entry = doc.createElement("weapon")
    if index is not None:
        entry.setAttribute("index", "%04X" % (index,))
    # Emit fields sorted by name so the output is deterministic/diffable.
    for field in sorted(dict.keys()):
        node = doc.createElement(field)
        value = dict[field]
        if isinstance(value, list):
            # List field (the pattern table): one <entry> element per item,
            # with the item's keys emitted as sorted attributes.
            for item in value:
                item_node = doc.createElement("entry")
                for attr in sorted(item.keys()):
                    item_node.setAttribute(attr, str(item[attr]))
                node.appendChild(item_node)
        else:
            node.setAttribute("value", str(value))
        entry.appendChild(node)
    root.appendChild(entry)
def toXML(hdt, output):
doc = dom.getDOMImplementation().createDocument(None, "TyrianHDT", None)
try:
f = file(hdt, "rb")
except IOError:
print "%s couldn't be opened for reading." % (hdt,)
sys.exit(1)
try:
outf = file(output, "w")
except IOError:
print "%s couldn't be opened for writing." % (outf,)
sys.exit(1)
f.seek(struct.unpack("<i", f.read(4))[0])
f.read(7*2)
sys.stdout.write("Converting weapons")
index = 0
for i in xrange(WEAP_NUM+1):
tmp = f.read(struct.calcsize(struct_fmt))
shot = unpack_weapon(tmp)
dictToDOM(doc, doc.documentElement, shot, index)
index += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("Done!\n")
sys.stdout.write("Writing XML...")
sys.stdout.flush()
doc.writexml(outf, addindent="\t", newl="\n")
sys.stdout.write("Done!\n")
def toHDT(input, hdt):
    """Read weapon XML from *input* and patch the records back into *hdt*."""
    try:
        f = file(input, "r")
    except IOError:
        print "%s couldn't be opened for reading." % (input,)
        sys.exit(1)
    try:
        # Open read/write so only the weapon table is overwritten in place.
        outf = file(hdt, "r+b")
    except IOError:
        print "%s couldn't be opened for writing." % (hdt,)
        sys.exit(1)
    # The first 4 bytes hold the absolute file offset of the weapon table.
    outf.seek(struct.unpack("<i", outf.read(4))[0])
    outf.read(7*2)  # Skip seven 16-bit values preceding the records.
    sys.stdout.write("Reading XML...")
    sys.stdout.flush()
    doc = dom.parse(f)
    sys.stdout.write("Done!\n")
    sys.stdout.write("Writing weapons")
    # Every element child of the root is expected to be a <weapon> node.
    for i in doc.documentElement.childNodes:
        if i.nodeType != i.ELEMENT_NODE:
            continue
        shot = DOMToDict(doc, i)
        str = pack_weapon(shot)
        outf.write(str)
        sys.stdout.write(".")
        sys.stdout.flush()
    sys.stdout.write("Done!\n")
def printHelp():
    """Print usage information and exit with an error status."""
    print "Usage: weapons.py toxml path/to/tyrian.hdt output.xml"
    print "       weapons.py tohdt input.xml path/to/tyrian.hdt"
    sys.exit(1)
##############################
if __name__ == "__main__":
    # Dispatch on the conversion direction given on the command line.
    if len(sys.argv) != 4:
        printHelp()
    if sys.argv[1] == "toxml":
        toXML(sys.argv[2], sys.argv[3])
    elif sys.argv[1] == "tohdt":
        toHDT(sys.argv[2], sys.argv[3])
    else:
        printHelp()
| Python |
#Fredkin gate structure program
#
# Reads gate parameters from Input.cfg (whitespace-separated fields at
# fixed positions on each line), then prompts the user for two function
# expressions.  DEBUG=1 echoes every parsed value.
#Constant Definitions
DEBUG = 1
cfgfileopen = open("Input.cfg","r");
# Line 1, field 4: number of control variables.
splitline = cfgfileopen.readline().split();
print splitline[4];
ctrlvarno = splitline[4];
# Lines 2-3, field 2: printed but not stored.
splitline = cfgfileopen.readline().split();
print splitline[2];
splitline = cfgfileopen.readline().split();
print splitline[2];
# Lines 4-5, field 4: input levels for functions 1 and 2.
splitline = cfgfileopen.readline().split();
print splitline[4];
func_level_1 = splitline[4];
splitline = cfgfileopen.readline().split();
print splitline[4];
func_level_2 = splitline[4];
# Lines 6-7, field 3: input positions for functions 1 and 2.
splitline = cfgfileopen.readline().split();
print splitline[3];
func_1_pos = splitline[3];
splitline = cfgfileopen.readline().split();
print splitline[3];
func_2_pos = splitline[3];
if DEBUG:
    print "ctrlvarno: ",ctrlvarno;
# The two function expressions are entered interactively.
func_1 = raw_input("Enter function 1: ");
if DEBUG:
    print "Function 1: ",func_1;
func_2 = raw_input("Enter function 2: ");
if DEBUG:
    print "Function 2: ",func_2;
if DEBUG:
    print "Function 1 Input Level:", func_level_1;
if DEBUG:
    print "Function 2 Input Level:", func_level_2;
if DEBUG:
    print "Function 1 Input Position: ",func_1_pos;
if DEBUG:
    print "Function 2 Input Position: ",func_2_pos;
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
    """Handler serving a small JSON greeting at the application root."""

    def get(self):
        """Return the greeting, permitting cross-origin AJAX callers."""
        # Allow any origin to read this response (CORS).
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        body = '{"message":"Hello World!"}\n'
        self.response.out.write(body)
def main():
    """Build the WSGI application and hand it to the App Engine runtime."""
    routes = [('/', MainHandler)]
    util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))

if __name__ == '__main__':
    main()
| Python |
# Batch two Freebase MQL read queries into a single HTTP round trip using
# the Google API client's BatchHttpRequest helper.
from apiclient import discovery, model
from apiclient.http import BatchHttpRequest
import json
import os
# The developer API key is kept outside the source tree.
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
query1 = [{"name~=":"*doubt*","name":None,"type":"/media_common/quotation","author":[{"name":"William Shakespeare"}]}]
query2 = [{"name~=":"*law*","name":None,"type":"/media_common/quotation","author":[{"name":"William Shakespeare"}]}]
# Clear the client's 'alt' request parameter so responses come back as
# plain JSON text that we decode ourselves below.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=api_key)
def display_results(request_id, response, exception):
    """Batch callback: print the name of every topic in one response."""
    for topic in json.loads(response)['result']:
        print topic['name']
batch = BatchHttpRequest(callback=display_results)
batch.add(freebase.mqlread(query=json.dumps(query1)))
batch.add(freebase.mqlread(query=json.dumps(query2)))
batch.execute()
| Python |
# Look up a Freebase topic and print every property with its values.
from apiclient import discovery
from apiclient import model
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
# Clear the client's 'alt' request parameter.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=api_key)
topic = freebase.topic.lookup(id='/en/san_francisco').execute()
for property in topic['property']:
    print property + ':'
    for value in topic['property'][property]['values']:
        print ' - ' + value['text']
| Python |
# Run one MQL read query via the Google API client and print every
# matching planet name.
from apiclient import discovery, model
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
query = [{'id': None, 'name': None, 'type': '/astronomy/planet'}]
# Clear the client's 'alt' request parameter; the raw response body is
# decoded with json.loads below.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=api_key)
response = json.loads(freebase.mqlread(query=json.dumps(query)).execute())
for planet in response['result']:
    print planet['name']
| Python |
# Search Freebase for 'John Smith' and print each matching topic id.
from apiclient import discovery
from apiclient import model
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
# Clear the client's 'alt' request parameter.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=api_key)
response = freebase.search(query='John Smith').execute()
for result in response['results']:
    print result['id']
| Python |
# Fetch a topic's image from the Freebase sandbox and save it as JPEG.
from apiclient import discovery
from apiclient import model
import json
import Image
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
# Clear the client's 'alt' request parameter.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1sandbox', developerKey=api_key)
response = freebase.image(id='/en/espresso').execute()
# NOTE(review): PIL's Image.open() expects a file-like object or a path;
# confirm the image service's response object is compatible here.
im = Image.open(response)
im.save('image.jpg', "JPEG")
| Python |
# Fetch a topic's text description via the Google API client and print it.
from apiclient import discovery
from apiclient import model
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
# Clear the client's 'alt' request parameter.
model.JsonModel.alt_param = ""
freebase = discovery.build('freebase', 'v1', developerKey=api_key)
# NOTE(review): the other samples pass topic ids with a leading slash
# ('/en/bob_dylan'); confirm 'en/bob_dylan' is intentional here.
response = freebase.text(id='en/bob_dylan').execute()
print response
| Python |
# Manually build a multipart/mixed batch request carrying two MQL read
# queries and POST it to the Google APIs batch endpoint.
import json
import MimeWriter
import mimetools
import urllib
import urllib2
import StringIO
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
def write_query_request(writer, query_name, query, service_url, api_key):
    """Append one application/http sub-part holding a GET mqlread request.

    The Content-ID header lets responses be matched back to requests.
    """
    params = {
        'query': json.dumps(query),
        'key': api_key
    }
    txtin = StringIO.StringIO("GET " + service_url + '?' + urllib.urlencode(params) + "\n")
    subpart = writer.nextpart()
    subpart.addheader("Content-Transfer-Encoding", "binary")
    subpart.addheader("Content-ID", "<" + query_name + ">")
    pout = subpart.startbody("application/http")
    mimetools.encode(txtin, pout, '8bit')
    txtin.close()
query1 = [{'id': None, 'name': None, 'type': '/astronomy/planet'}]
query2 = [{'id': None, 'name': None, 'type': '/location/country'}]
out = StringIO.StringIO()
writer = MimeWriter.MimeWriter(out)
boundary = "batch_boundary"
writer.startmultipartbody("mixed", boundary)
writer.flushheaders()
write_query_request(writer, "q1", query1, service_url, api_key)
write_query_request(writer, "q2", query2, service_url, api_key)
writer.lastpart()
msg = out.getvalue()
# Strip the first four lines (MimeWriter's top-level headers) so only the
# multipart body is POSTed; headers are supplied explicitly below.
msg = "\n".join(msg.split("\n")[4:])
out.close()
print msg
headers = {
    'Content-Type': 'multipart/mixed; boundary=' + boundary
}
request = urllib2.Request("https://www.googleapis.com/batch", msg, headers)
response = urllib2.urlopen(request).read()
print response
| Python |
# Fetch a Freebase topic over plain HTTP and print every property value.
import json
import urllib
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
service_url = 'https://www.googleapis.com/freebase/v1/topic'
# Machine id of the topic to look up.
topic_id = '/m/0d6lp'
params = {
  'key': api_key,
  'filter': 'suggest'
}
url = service_url + topic_id + '?' + urllib.urlencode(params)
topic = json.loads(urllib.urlopen(url).read())
for property in topic['property']:
    print property + ':'
    for value in topic['property'][property]['values']:
        print ' - ' + value['text']
| Python |
# Run one MQL read query over plain HTTP and print each planet name.
import json
import urllib
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
query = [{'id': None, 'name': None, 'type': '/astronomy/planet'}]
params = {
  'query': json.dumps(query),
  'key': api_key
}
url = service_url + '?' + urllib.urlencode(params)
response = json.loads(urllib.urlopen(url).read())
for planet in response['result']:
    print planet['name']
| Python |
# Perform an authenticated MQL write against the Freebase sandbox using a
# cached OAuth 2.0 token (obtained interactively on first run).
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
import json
from urllib import urlencode
import httplib2
import os
CLIENT_ID = open(os.environ['HOME'] + "/.freebase_client_id").read()
CLIENT_SECRET = open(os.environ['HOME'] + "/.freebase_client_secret").read()
def authenticated_http():
    """Return an httplib2.Http instance authorized for the Freebase scope.

    Credentials are cached in freebase.dat; when missing or invalid the
    OAuth 2.0 web-server flow is run to obtain fresh ones.
    """
    storage = Storage('freebase.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid == True:
        flow = OAuth2WebServerFlow(
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET,
            scope='https://www.googleapis.com/auth/freebase',
            user_agent='freebase-cmdline-sample/1.0',
            xoauth_displayname='Freebase Client Example App')
        credentials = run(flow, storage)
    http = httplib2.Http()
    return credentials.authorize(http)
http = authenticated_http()
# Unconditionally create a new /location/location topic named "Nowhere".
query = {"create":"unconditional","id":None,"name":"Nowhere","type":"/location/location"}
data = dict(query=json.dumps(query))
# The write is issued as a GET with an override header, with the query
# carried in the URL-encoded parameters.
headers = {
  'X-HTTP-Method-Override': 'GET',
  'Content-Type': 'application/x-www-form-urlencoded'
}
url = 'https://www.googleapis.com/freebase/v1sandbox/mqlwrite' + '?' + urlencode(data)
resp, content = http.request(url, "GET", headers=headers)
print content
| Python |
import json
import urllib
import os
api_key = open(os.environ['HOME'] + "/.api_key").read()
query = 'blue bottle'
service_url = 'https://www.googleapis.com/freebase/v1/search'
params = {
'query': query,
'key': api_key
}
url = service_url + '?' + urllib.urlencode(params)
response = json.loads(urllib.urlopen(url).read())
for result in response['result']:
print result['name'] + ' (' + str(result['score']) + ')'
| Python |
# Page through all /location/country results using MQL cursors.
import json
import urllib
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
query = [{'id': None, 'name': None, 'type': '/location/country'}]
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
params = {
  'query': json.dumps(query),
  'cursor': '',  # an empty cursor requests the first page
  'key': api_key
}
while params['cursor'] != False:
    url = service_url + '?' + urllib.urlencode(params)
    response = json.loads(urllib.urlopen(url).read())
    for result in response['result']:
        print result['name']
    # The service returns the next page's cursor, or JSON false (decoded
    # as Python False) once the result set is exhausted.
    params['cursor'] = response['cursor']
| Python |
# Fetch a topic's text description over plain HTTP and print it.
import sys
import json
import urllib
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
topic_id = '/en/bob_dylan'
service_url = 'https://www.googleapis.com/freebase/v1/text'
# The topic id is appended directly to the service path.
url = service_url + topic_id + '?key=' + api_key;
response = json.loads(urllib.urlopen(url).read())
print response['result']
| Python |
# Send two freebase.text.get calls in one JSON-RPC batch request.
import urllib
import urllib2
import json
import os
# NOTE(review): the key is read here but not included in the RPC params
# below (unlike the other RPC samples) — confirm whether that is intended.
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
url = 'https://www.googleapis.com/rpc'
# A JSON array of request objects is executed as a batch; ids are given
# as path segments.
requests = [{
  'method': 'freebase.text.get',
  'apiVersion': 'v1',
  'params': {
    'id': ['en','bob_dylan']
  }
},{
  'method': 'freebase.text.get',
  'apiVersion': 'v1',
  'params': {
    'id': ['en','blade_runner']
  }
}]
headers = { 'Content-Type': 'application/json' }
req = urllib2.Request(url, json.dumps(requests), headers)
response = urllib2.urlopen(req)
print response.read()
| Python |
# Call freebase.topic through the JSON-RPC endpoint and print the reply.
import urllib
import urllib2
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
url = 'https://www.googleapis.com/rpc'
# The topic id is given as path segments.
request = {
  'method': 'freebase.topic',
  'apiVersion': 'v1',
  'params': {
    'id': ['en','bob_dylan'],
    'key': api_key
  }
}
headers = { 'Content-Type': 'application/json' }
req = urllib2.Request(url, json.dumps(request), headers)
response = urllib2.urlopen(req)
print response.read()
| Python |
# Call freebase.mqlread through the JSON-RPC endpoint and print the reply.
import urllib
import urllib2
import json
import os
# NOTE(review): the key is read but not sent in the params below —
# confirm whether that is intended.
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
url = 'https://www.googleapis.com/rpc'
query = [{
  'id':'/en/blade_runner',
  'name':None
}]
# The MQL query is passed as a JSON-encoded string parameter.
request = {
  'method': 'freebase.mqlread',
  'apiVersion': 'v1',
  'params': {
    'query': json.dumps(query)
  }
}
headers = { 'Content-Type': 'application/json' }
req = urllib2.Request(url, json.dumps(request), headers)
response = urllib2.urlopen(req)
print response.read()
| Python |
# Call freebase.search through the JSON-RPC endpoint and print the reply.
import urllib
import urllib2
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
url = 'https://www.googleapis.com/rpc'
request = {
  'method': 'freebase.search',
  'apiVersion': 'v1',
  'params': {
    'query': 'nirvana',
    'key': api_key
  }
}
headers = { 'Content-Type': 'application/json' }
req = urllib2.Request(url, json.dumps(request), headers)
response = urllib2.urlopen(req)
print response.read()
| Python |
# Call freebase.text.get through the JSON-RPC endpoint and print the reply.
import urllib
import urllib2
import json
import os
api_key = open(os.environ['HOME'] + "/.freebase_api_key").read()
url = 'https://www.googleapis.com/rpc'
# The topic id is given as path segments.
request = {
  'method': 'freebase.text.get',
  'apiVersion': 'v1',
  'params': {
    'id': ['en','bob_dylan'],
    'key': api_key
  }
}
headers = { 'Content-Type': 'application/json' }
req = urllib2.Request(url, json.dumps(request), headers)
response = urllib2.urlopen(req)
print response.read()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
# Add the library location to the path
import sys
sys.path.insert(0, 'lib')
import os
import httplib2
import sessions
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from apiclient.http import MediaUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
'https://www.googleapis.com/auth/userinfo.email '
'https://www.googleapis.com/auth/userinfo.profile')
def SibPath(name):
    """Return the path of a file that sits next to this source file.

    Args:
        name: Name of sibling file.
    Returns:
        Path to sibling file.
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, name)
# Load the secret that is used for client side sessions
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session.secret
SESSION_SECRET = open(SibPath('session.secret')).read()
# Static single-page-app HTML served by MainPage.RenderTemplate().
INDEX_HTML = open(SibPath('index.html')).read()
class Credentials(db.Model):
    """Datastore entity for storing OAuth2.0 credentials.

    The CredentialsProperty is provided by the Google API Python Client, and is
    used by the Storage classes to store OAuth 2.0 credentials in the data store."""
    # Serialized OAuth 2.0 credentials; keyed by user id elsewhere via
    # StorageByKeyName(Credentials, userid, 'credentials').
    credentials = CredentialsProperty()
def CreateService(service, version, creds):
    """Build an authorized Google API service client.

    Args:
        service: Service name (e.g 'drive', 'oauth2').
        version: Service version (e.g 'v1').
        creds: Credentials used to authorize service.
    Returns:
        Authorized Google API service.
    """
    # Create an HTTP transport and sign it with the supplied credentials.
    transport = httplib2.Http()
    creds.authorize(transport)
    # Discover and build the requested service on the authorized transport.
    return build(service, version, http=transport)
class DriveState(object):
    """Store state provided by Drive."""

    def __init__(self, state):
        """Parse the JSON 'state' query parameter sent by the Drive UI.

        Args:
            state: State query parameter as a string (may be empty or None).
        """
        if not state:
            # No state supplied: this is the "create new file" flow.
            self.action = 'create'
            self.ids = []
        else:
            parsed = json.loads(state)
            self.action = parsed['action']
            self.ids = map(str, parsed.get('ids', []))

    @classmethod
    def FromRequest(cls, request):
        """Create a Drive State instance from an HTTP request.

        Args:
            cls: Type this class method is called against.
            request: HTTP request.
        """
        return DriveState(request.get('state'))
class BaseDriveHandler(webapp.RequestHandler):
    """Base request handler for drive applications.

    Adds Authorization support for Drive.
    """
    def CreateOAuthFlow(self):
        """Create OAuth2.0 flow controller.

        This controller can be used to perform all parts of the OAuth 2.0 dance
        including exchanging an Authorization code.

        Returns:
            OAuth2.0 Flow instance suitable for performing OAuth2.0.
        """
        flow = flow_from_clientsecrets('client_secrets.json', scope='')
        # Dynamically set the redirect_uri based on the request URL. This is
        # extremely convenient for debugging to an alternative host without
        # manually setting the redirect URI.
        flow.redirect_uri = self.request.url.split('?', 1)[0].rsplit('/', 1)[0]
        return flow
    def GetCodeCredentials(self):
        """Create OAuth 2.0 credentials by extracting a code and performing OAuth2.0.

        The authorization code is extracted from the URI parameters. If it is
        absent, None is returned immediately. Otherwise, if it is present, it
        is used to perform step 2 of the OAuth 2.0 web server flow.

        Once a token is received, the user information is fetched from the
        userinfo service and stored in the session. The token is saved in the
        datastore against the user ID received from the userinfo service.

        Returns:
            OAuth2.0 credentials suitable for authorizing clients or None if
            Authorization could not take place.
        """
        # Other frameworks use different API to get a query parameter.
        code = self.request.get('code')
        if not code:
            # returns None to indicate that no code was passed from Google Drive.
            return None
        # Auth flow is a controller that is loaded with the client information,
        # including client_id, client_secret, redirect_uri etc
        oauth_flow = self.CreateOAuthFlow()
        # Perform the exchange of the code. If there is a failure with exchanging
        # the code, return None.
        try:
            creds = oauth_flow.step2_exchange(code)
        except FlowExchangeError:
            return None
        # Create an API service that can use the userinfo API. Authorize it with
        # our credentials that we gained from the code exchange.
        users_service = CreateService('oauth2', 'v2', creds)
        # Make a call against the userinfo service to retrieve the user's
        # information. In this case we are interested in the user's "id" field.
        userid = users_service.userinfo().get().execute().get('id')
        # Store the user id in the user's cookie-based session.
        session = sessions.LilCookies(self, SESSION_SECRET)
        session.set_secure_cookie(name='userid', value=userid)
        # Store the credentials in the data store using the userid as the key.
        StorageByKeyName(Credentials, userid, 'credentials').put(creds)
        return creds
    def GetSessionCredentials(self):
        """Get OAuth 2.0 credentials for an HTTP session.

        If the user has a user id stored in their cookie session, extract that
        value and use it to load that user's credentials from the data store.

        Returns:
            OAuth2.0 credentials suitable for authorizing clients, or None if
            none could be loaded.
        """
        # Try to load the user id from the session
        session = sessions.LilCookies(self, SESSION_SECRET)
        userid = session.get_secure_cookie(name='userid')
        if not userid:
            # return None to indicate that no credentials could be loaded from
            # the session.
            return None
        # Load the credentials from the data store, using the userid as a key.
        creds = StorageByKeyName(Credentials, userid, 'credentials').get()
        # if the credentials are invalid, return None to indicate that the
        # credentials cannot be used.
        if creds and creds.invalid:
            return None
        return creds
    def RedirectAuth(self):
        """Redirect a handler to an authorization page.

        Used when a handler fails to fetch credentials suitable for making
        Drive API requests. The request is redirected to an OAuth 2.0
        authorization approval page and on approval, are returned to
        application.
        """
        flow = self.CreateOAuthFlow()
        # Manually add the required scopes. Since this redirect does not
        # originate from the Google Drive UI, which automatically sets the
        # scopes that are listed in the API Console.
        flow.scope = ALL_SCOPES
        # Create the redirect URI by performing step 1 of the OAuth 2.0 web
        # server flow.
        uri = flow.step1_get_authorize_url(flow.redirect_uri)
        # Perform the redirect.
        self.redirect(uri)
    def RespondJSON(self, data):
        """Generate a JSON response and return it to the client.

        Args:
            data: The data that will be converted to JSON to return.
        """
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))
    def CreateAuthorizedService(self, service, version):
        """Create an authorized service instance.

        The service can only ever retrieve the credentials from the session.

        Args:
            service: Service name (e.g 'drive', 'oauth2').
            version: Service version (e.g 'v1').
        Returns:
            Authorized service or redirect to authorization flow if no
            credentials.
        """
        # For the service, the session holds the credentials
        creds = self.GetSessionCredentials()
        if creds:
            # If the session contains credentials, use them to create a Drive
            # service instance.
            return CreateService(service, version, creds)
        else:
            # If no credentials could be loaded from the session, redirect the
            # user to the authorization page.
            self.RedirectAuth()
    def CreateDrive(self):
        """Create a drive client instance."""
        return self.CreateAuthorizedService('drive', 'v2')
    def CreateUserInfo(self):
        """Create a user info client instance."""
        return self.CreateAuthorizedService('oauth2', 'v2')
class MainPage(BaseDriveHandler):
    """Web handler for the main page.

    Handles requests and returns the user interface for Open With and Create
    cases. Responsible for parsing the state provided from the Drive UI and
    acting appropriately.
    """
    def get(self):
        """Handle GET for Create New and Open With.

        This creates an authorized client, and checks whether a resource id has
        been passed or not. If a resource ID has been passed, this is the Open
        With use-case, otherwise it is the Create New use-case.
        """
        # Generate a state instance for the request, this includes the action,
        # and the file id(s) that have been sent from the Drive user interface.
        drive_state = DriveState.FromRequest(self.request)
        if drive_state.action == 'open' and len(drive_state.ids) > 0:
            # Open With: send the browser straight to the client-side editor
            # for the first selected file, forwarding any OAuth code.
            code = self.request.get('code')
            if code:
                code = '?code=%s' % code
            self.redirect('/#edit/%s%s' % (drive_state.ids[0], code))
            return
        # Fetch the credentials by extracting an OAuth 2.0 authorization code
        # from the request URL. If the code is not present, redirect to the
        # OAuth 2.0 authorization URL.
        creds = self.GetCodeCredentials()
        if not creds:
            return self.RedirectAuth()
        # Extract the numerical portion of the client_id from the stored value
        # in the OAuth flow. You could also store this value as a separate
        # variable somewhere.
        # NOTE(review): client_id is computed but never used below — confirm
        # whether it was meant to be passed to the rendered page.
        client_id = self.CreateOAuthFlow().client_id.split('.')[0].split('-')[0]
        self.RenderTemplate()
    def RenderTemplate(self):
        """Render a named template in a context.

        Currently serves the static single-page-app HTML loaded at startup.
        """
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(INDEX_HTML)
class ServiceHandler(BaseDriveHandler):
  """Web handler for the service to read and write to Drive."""

  def post(self):
    """Called when HTTP POST requests are received by the web application.

    The POST body is JSON which is deserialized and used as values to create a
    new file in Drive. The authorization access token for this action is
    retrieved from the data store.
    """
    # Create a Drive service; None means the user was redirected to authorize.
    service = self.CreateDrive()
    if service is None:
      return
    # Load the data that has been posted as JSON.
    data = self.RequestJSON()
    # Create a new file data structure from the posted metadata.
    resource = {
        'title': data['title'],
        'description': data['description'],
        'mimeType': data['mimeType'],
    }
    try:
      # Make an insert request to create a new file. A MediaInMemoryUpload
      # instance is used to upload the file body.
      resource = service.files().insert(
          body=resource,
          media_body=MediaInMemoryUpload(
              data.get('content', ''),
              data['mimeType'],
              resumable=True)
      ).execute()
      # Respond with the new file id as JSON.
      self.RespondJSON(resource['id'])
    except AccessTokenRefreshError:
      # In cases where the access token has expired and cannot be refreshed
      # (e.g. manual token revoking) redirect the user to the authorization
      # page to authorize.
      self.RedirectAuth()

  def get(self):
    """Called when HTTP GET requests are received by the web application.

    Use the query parameter file_id to fetch the required file's metadata then
    content and return it as a JSON object.

    Since DrEdit deals with text files, it is safe to dump the content directly
    into JSON, but this is not the case with binary files, where something like
    Base64 encoding is more appropriate.
    """
    # Create a Drive service; None means the user was redirected to authorize.
    service = self.CreateDrive()
    if service is None:
      return
    try:
      # Requests are expected to pass the file_id query parameter.
      file_id = self.request.get('file_id')
      if file_id:
        # Fetch the file metadata by calling the service.files().get method of
        # the Drive API.
        f = service.files().get(fileId=file_id).execute()
        downloadUrl = f.get('downloadUrl')
        # If a download URL is provided in the file metadata, use it to make an
        # authorized request to fetch the file content. Set this content in the
        # data to return as the 'content' field. If there is no downloadUrl,
        # just set empty content.
        if downloadUrl:
          resp, f['content'] = service._http.request(downloadUrl)
        else:
          f['content'] = ''
      else:
        f = None
      # Generate a JSON response with the file data and return to the client.
      self.RespondJSON(f)
    except AccessTokenRefreshError:
      # Catch AccessTokenRefreshError which occurs when the API client library
      # fails to refresh a token. This occurs, for example, when a refresh
      # token is revoked. When this happens the user is redirected to the
      # Authorization URL.
      self.RedirectAuth()

  def put(self):
    """Called when HTTP PUT requests are received by the web application.

    The PUT body is JSON which is deserialized and used as values to update
    a file in Drive. The authorization access token for this action is
    retrieved from the data store.
    """
    # Create a Drive service; None means the user was redirected to authorize.
    service = self.CreateDrive()
    if service is None:
      return
    # Load the data that has been posted as JSON.
    data = self.RequestJSON()
    try:
      # Separate the content from the metadata: content is uploaded as the
      # media body, the remaining fields are sent as the file resource.
      content = data.get('content')
      if 'content' in data:
        data.pop('content')
      if content is not None:
        # Make an update request to update the file. A MediaInMemoryUpload
        # instance is used to upload the file body. Because of a limitation,
        # this request must be made in two parts, the first to update the
        # metadata, and the second to update the body.
        resource = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data,
            media_body=MediaInMemoryUpload(
                content, data['mimeType'], resumable=True)
        ).execute()
      else:
        # Only update the metadata; a patch request is preferred but not yet
        # supported on Google App Engine; see
        # http://code.google.com/p/googleappengine/issues/detail?id=6316.
        resource = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data).execute()
      # Respond with the new file id as JSON.
      self.RespondJSON(resource['id'])
    except AccessTokenRefreshError:
      # In cases where the access token has expired and cannot be refreshed
      # (e.g. manual token revoking) redirect the user to the authorization
      # page to authorize.
      self.RedirectAuth()

  def RequestJSON(self):
    """Load the request body as JSON.

    Returns:
      Request body loaded as JSON or None if there is no request body.
    """
    if self.request.body:
      return json.loads(self.request.body)
class UserHandler(BaseDriveHandler):
  """Serves the authorized user's profile information as JSON."""

  def get(self):
    """Handle HTTP GET: fetch userinfo and return it to the client as JSON."""
    userinfo_service = self.CreateUserInfo()
    if userinfo_service is None:
      # Service creation failed; the user was already sent to authorize.
      return
    try:
      profile = userinfo_service.userinfo().get().execute()
      self.RespondJSON(profile)
    except AccessTokenRefreshError:
      # The client library could not refresh the access token (for example
      # because the refresh token was revoked); send the user back through
      # the authorization flow.
      self.RedirectAuth()
class AboutHandler(BaseDriveHandler):
  """Serves the Drive 'about' resource for the authorized user as JSON."""

  def get(self):
    """Handle HTTP GET: fetch the Drive about() resource and return it."""
    drive_service = self.CreateDrive()
    if drive_service is None:
      # Service creation failed; the user was already sent to authorize.
      return
    try:
      about_info = drive_service.about().get().execute()
      self.RespondJSON(about_info)
    except AccessTokenRefreshError:
      # The client library could not refresh the access token (for example
      # because the refresh token was revoked); send the user back through
      # the authorization flow.
      self.RedirectAuth()
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload implementation backed by an in-memory byte string.

  Pass an instance as the media_body parameter of an API request to upload
  content that is already held in memory rather than on disk.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new in-memory upload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file, defaulting to
          'application/octet-stream'.
      chunksize: int, Size in bytes of each upload chunk; only consulted
          when resumable=True.
      resumable: bool, True for a resumable upload, False to upload in a
          single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def size(self):
    """Return the total size of the body in bytes."""
    return len(self._body)

  def mimetype(self):
    """Return the mime type of the body."""
    return self._mimetype

  def chunksize(self):
    """Return the chunk size used for resumable uploads, in bytes."""
    return self._chunksize

  def resumable(self):
    """Return True when this upload is resumable."""
    return self._resumable

  def getbytes(self, begin, length):
    """Return up to *length* bytes of the body starting at offset *begin*.

    The result may be shorter than *length* when the end of the body is
    reached first.
    """
    end = begin + length
    return self._body[begin:end]
# WSGI application routing table for App Engine: maps each URL path to its
# request handler class.
application = webapp.WSGIApplication(
    [('/', MainPage), ('/svc', ServiceHandler), ('/about', AboutHandler),
     ('/user', UserHandler)],
    # XXX Set to False in production: debug mode exposes stack traces to
    # clients when a handler raises.
    debug=True
)
def main():
  """Main entry point for executing a request with this handler."""
  # Dispatch the current request through the WSGI application defined above.
  run_wsgi_app(application)


if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
import os
import httplib2
import sessions
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build_from_document
from apiclient.http import MediaUpload
from oauth2client import client
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
# Base URL for Google API service endpoints.
APIS_BASE = 'https://www.googleapis.com'
# Space-separated OAuth 2.0 scopes the app requests: per-file Drive access
# plus the user's email and profile (the profile id keys stored credentials).
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')
# Query parameter names used by the OAuth 2.0 flow and the Drive UI.
CODE_PARAMETER = 'code'
STATE_PARAMETER = 'state'
# NOTE: these files are read once at import time; if any is missing the
# module fails to load.
SESSION_SECRET = open('session.secret').read()
DRIVE_DISCOVERY_DOC = open('drive.json').read()
USERS_DISCOVERY_DOC = open('users.json').read()
class Credentials(db.Model):
  """Datastore entity for storing OAuth2.0 credentials.

  Entities are keyed by the user's profile id (see GetCodeCredentials and
  GetSessionCredentials, which store/fetch via StorageByKeyName).
  """
  # Serialized oauth2client credentials for a single user.
  credentials = CredentialsProperty()
def CreateOAuthFlow(request):
  """Build an OAuth 2.0 flow controller for the given HTTP request.

  Args:
    request: HTTP request the flow is being created for.
  Returns:
    OAuth2.0 Flow instance suitable for performing OAuth 2.0.
  """
  # Scope is left empty here; it is filled in just before the authorization
  # redirect (see RedirectAuth).
  oauth_flow = client.flow_from_clientsecrets('client-debug.json', scope='')
  # Use the request URL, minus any query string and trailing slash, as the
  # OAuth redirect URI.
  base_url = request.url.split('?', 1)[0]
  oauth_flow.redirect_uri = base_url.rstrip('/')
  return oauth_flow
def GetCodeCredentials(request):
  """Exchange an authorization code (if present) for OAuth 2.0 credentials.

  Args:
    request: HTTP request used for extracting an authorization code.
  Returns:
    OAuth 2.0 credentials suitable for authorizing clients, or None when the
    request carries no authorization code.
  """
  auth_code = request.get(CODE_PARAMETER)
  if not auth_code:
    return
  # Trade the authorization code for credentials.
  oauth_flow = CreateOAuthFlow(request)
  creds = oauth_flow.step2_exchange(auth_code)
  # Determine the user's id so the credentials can be keyed to the user.
  users_service = CreateService(USERS_DISCOVERY_DOC, creds)
  userid = users_service.userinfo().get().execute().get('id')
  # Remember the user in a secure session cookie and persist the credentials
  # in the datastore under that id.
  request.session.set_secure_cookie(name='userid', value=userid)
  StorageByKeyName(Credentials, userid, 'credentials').put(creds)
  return creds
def GetSessionCredentials(request):
  """Fetch stored OAuth 2.0 credentials for the session's user, if valid.

  Args:
    request: HTTP request to use session from.
  Returns:
    OAuth 2.0 credentials suitable for authorizing clients, or None when the
    session names no user or the stored credentials are missing/invalid.
  """
  userid = request.session.get_secure_cookie(name='userid')
  if not userid:
    return None
  stored = StorageByKeyName(Credentials, userid, 'credentials').get()
  if not stored or stored.invalid:
    return None
  return stored
def CreateService(discovery_doc, creds):
  """Build an authorized Google API service from a discovery document.

  Args:
    discovery_doc: Discovery doc used to configure the service.
    creds: Credentials used to authorize the service.
  Returns:
    Authorized Google API service.
  """
  # Wrap an HTTP transport with the credentials so every request is signed.
  transport = httplib2.Http()
  creds.authorize(transport)
  return build_from_document(discovery_doc, APIS_BASE, http=transport)
def RedirectAuth(handler):
  """Send the user to the OAuth 2.0 authorization page.

  Args:
    handler: webapp.RequestHandler to redirect.
  """
  auth_flow = CreateOAuthFlow(handler.request)
  # Request every scope the app needs before generating the URL.
  auth_flow.scope = ALL_SCOPES
  authorize_url = auth_flow.step1_get_authorize_url(auth_flow.redirect_uri)
  handler.redirect(authorize_url)
def CreateDrive(handler):
  """Create a fully authorized Drive service for this handler.

  Args:
    handler: RequestHandler from which the Drive service is generated.
  Returns:
    Authorized Drive service, or None after redirecting the user to the
    authorization page when no credentials are available.
  """
  req = handler.request
  # Attach a secure-cookie session to the request before credential lookup.
  req.session = sessions.LilCookies(handler, SESSION_SECRET)
  # Prefer fresh code-exchange credentials, falling back to the session.
  creds = GetCodeCredentials(req)
  if not creds:
    creds = GetSessionCredentials(req)
  if not creds:
    RedirectAuth(handler)
    return None
  return CreateService(DRIVE_DISCOVERY_DOC, creds)
def ServiceEnabled(view):
  """Decorator that injects an authorized Drive service into an HTML handler.

  Args:
    view: HTTP request handler method taking (handler, service).
  Returns:
    Decorated handler that creates the service, invokes view, and writes the
    returned data to the response as text/html.
  """
  def ServiceDecoratedView(handler, view=view):
    drive = CreateDrive(handler)
    body = view(handler, drive)
    response = handler.response
    response.headers['Content-Type'] = 'text/html'
    response.out.write(body)
  return ServiceDecoratedView
def ServiceEnabledJson(view):
  """Decorator that injects an authorized Drive service into a JSON handler.

  Args:
    view: HTTP request handler method taking (handler, service, data).
  Returns:
    Decorated handler that parses the request body as JSON (None when the
    body is empty), invokes view, and writes its result as application/json.
  """
  def ServiceDecoratedView(handler, view=view):
    drive = CreateDrive(handler)
    raw_body = handler.request.body
    payload = json.loads(raw_body) if raw_body else None
    serialized = json.dumps(view(handler, drive, payload))
    handler.response.headers['Content-Type'] = 'application/json'
    handler.response.out.write(serialized)
  return ServiceDecoratedView
class DriveState(object):
  """Parsed representation of the 'state' parameter provided by Drive."""

  def __init__(self, state):
    """Parse the supplied state string, setting self.action and self.ids."""
    self.ParseState(state)

  @classmethod
  def FromRequest(cls, request):
    """Create a DriveState instance from an HTTP request.

    Args:
      cls: Type this class method is called against.
      request: HTTP request.
    """
    return DriveState(request.get(STATE_PARAMETER))

  def ParseState(self, state):
    """Dispatch to the JSON or plain parser based on the state's shape.

    Args:
      state: State parameter to parse.
    """
    if not state.startswith('{'):
      self.ParsePlainState(state)
    else:
      self.ParseJsonState(state)

  def ParseJsonState(self, state):
    """Parse a JSON state carrying an action and optional resource ids.

    Args:
      state: State parameter to parse.
    """
    decoded = json.loads(state)
    self.action = decoded['action']
    # Coerce every id to a plain string.
    self.ids = [str(resource_id) for resource_id in decoded.get('ids', [])]

  def ParsePlainState(self, state):
    """Parse a bare resource id ('open') or an empty state ('create').

    Args:
      state: State parameter to parse.
    """
    self.action = 'open' if state else 'create'
    self.ids = [state] if state else []
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes.

  Construct a MediaInMemoryUpload and pass it as the media_body parameter of
  an API request method to upload content already held in memory.

  NOTE(review): an identical class appears elsewhere in this codebase;
  consider consolidating into one shared definition.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, in bytes.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    return self._body[begin:begin + length]
def RenderTemplate(name, **context):
  """Render the named template file with the given context variables.

  Args:
    name: Template name.
    **context: Keyword arguments exposed to the template as variables.
  Returns:
    The rendered template output.
  """
  rendered = template.render(name, context)
  return rendered
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
# Defaults used by Request/Client when the caller does not specify a method
# or signature scheme.
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Generic base exception for OAuth failures."""

    def __init__(self, message='OAuth error occurred.'):
        self._message = message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message

    def __str__(self):
        return self.message
class MissingSignature(Error):
    """Error raised when an expected OAuth signature is missing."""
    pass
def build_authenticate_header(realm=''):
    """Build the optional WWW-Authenticate header for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    # Construct and HMAC-SHA1-sign a GET request for the given URL.
    request = Request.from_consumer_and_token(consumer, token,
        "GET", url)

    signing_method = SignatureMethod_HMAC_SHA1()
    request.sign_request(signing_method, consumer, token)

    # Serialize the signed OAuth parameters as comma-separated k="v" pairs.
    params = []
    for k, v in sorted(request.iteritems()):
        if v is not None:
            params.append('%s="%s"' % (k, escape(v)))

    return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
    """Convert *s* to unicode.

    Unicode objects pass through unchanged; byte strings are decoded as
    utf-8. Raises TypeError with an instructive message for any other type,
    or for byte strings that are not valid utf-8.
    """
    if not isinstance(s, unicode):
        if not isinstance(s, str):
            raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError, le:
            raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
    return s
def to_utf8(s):
    """Return *s* as a utf-8 encoded byte string (validated via to_unicode)."""
    return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
    """Convert byte/unicode strings to unicode; pass other values through."""
    if isinstance(s, basestring):
        return to_unicode(s)
    else:
        return s
def to_utf8_if_string(s):
    """Convert byte/unicode strings to utf-8 bytes; pass other values through."""
    if isinstance(s, basestring):
        return to_utf8(s)
    else:
        return s
def to_unicode_optional_iterator(x):
    """Convert a string, or every element of an iterable, to unicode.

    Non-iterable, non-string values are returned unchanged. Raises TypeError
    if x is a str containing non-utf8 bytes or if x is an iterable which
    contains such a str.
    """
    if isinstance(x, basestring):
        return to_unicode(x)

    try:
        l = list(x)
    except TypeError, e:
        # Not iterable either: hand the value back untouched.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
    """Convert a string, or each string element of an iterable, to utf-8.

    Non-string elements and non-iterable, non-string values are returned
    unchanged. Raises TypeError if x is a str containing non-utf8 bytes or
    an iterable containing such a str.
    """
    if isinstance(x, basestring):
        return to_utf8(x)

    try:
        l = list(x)
    except TypeError, e:
        # Not iterable either: hand the value back untouched.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_utf8_if_string(e) for e in l ]
def escape(s):
    """Percent-encode *s* for OAuth, escaping '/' as well; only '~' is safe."""
    return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
    """Return the current time as integer seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return a pseudorandom string of *length* decimal digits.

    NOTE(review): uses the non-cryptographic `random` module, matching the
    original behavior; nonces only need uniqueness, not unpredictability.
    """
    digits = (str(random.randint(0, 9)) for _ in range(length))
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom verifier string of *length* decimal digits.

    NOTE(review): uses the non-cryptographic `random` module, matching the
    original behavior.
    """
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class Consumer(object):
    """A consumer of OAuth-protected services.

    The OAuth consumer is the "third-party" application that wants to access
    protected resources from an OAuth service provider on behalf of an end
    user -- essentially the OAuth client.

    A consumer is normally registered with the service provider ahead of
    time, receiving a *key* and a *secret*. The key identifies the consumer
    on every request; the secret is used only when signing requests, to
    prove that the request really comes from that registered consumer.

    With these credentials the consumer can ask the service provider for a
    request token, which kicks off the OAuth authorization process.
    """
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        # Both halves of the credential are mandatory.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def __str__(self):
        return urllib.urlencode({'oauth_consumer_key': self.key,
                                 'oauth_consumer_secret': self.secret})
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.

    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.

    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        # Both halves of the credential are mandatory.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def set_callback(self, callback):
        # Recording the callback marks it confirmed (OAuth 1.0a
        # oauth_callback_confirmed=true).
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # Use the supplied verifier, or generate a pseudorandom one.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        """Return the callback URL, with oauth_verifier appended when both
        callback and verifier are set."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback

    def to_string(self):
        """Returns this token as a plain string, suitable for storage.

        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }

        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`."""

        if not len(s):
            raise ValueError("Invalid parameter string.")

        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")

        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")

        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                "OAuth request.")

        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Property factory: use *attr* as the setter of a same-named property.

    The generated property stores its value in the instance __dict__ under
    the wrapped function's name; the getter raises AttributeError when no
    value has been set, and the deleter removes the stored value.
    """
    name = attr.__name__

    def getter(self):
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(name)

    def deleter(self):
        del self.__dict__[name]

    return property(getter, attr, deleter)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.

    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.
    """
    # OAuth protocol version advertised as oauth_version.
    version = OAUTH_VERSION

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
                 body='', is_form_encoded=False):
        if url is not None:
            self.url = to_unicode(url)
        self.method = method
        if parameters is not None:
            # Normalize every parameter to unicode before storing it.
            for k, v in parameters.iteritems():
                k = to_unicode(k)
                v = to_unicode_optional_iterator(v)
                self[k] = v
        self.body = body
        self.is_form_encoded = is_form_encoded

    @setter
    def url(self, value):
        # Store the raw URL and derive normalized_url (scheme://host/path
        # with default ports stripped), as used in signature base strings.
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)

            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))

            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None

    @setter
    def method(self, value):
        # HTTP method is normalized to upper case.
        self.__dict__['method'] = value.upper()

    def _get_timestamp_nonce(self):
        return self['oauth_timestamp'], self['oauth_nonce']

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                    if not k.startswith('oauth_')])

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only the oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)

        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)

        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        d = {}
        for k, v in self.iteritems():
            d[k.encode('utf-8')] = to_utf8_optional_iterator(v)

        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(d, True).replace('+', '%20')

    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urlparse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        # Merge this request's parameters into the URL's existing query.
        for k, v in self.items():
            query.setdefault(k, []).append(v)

        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]

        url = (scheme, netloc, path, params,
               urllib.urlencode(query, True), fragment)
        return urlparse.urlunparse(url)

    def get_parameter(self, parameter):
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)

        return ret

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            # The signature itself is never part of the signed string.
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, basestring):
                items.append((to_utf8_if_string(key), to_utf8(value)))
            else:
                try:
                    value = list(value)
                except TypeError, e:
                    assert 'is not iterable' in str(e)
                    items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
                else:
                    items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)

        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]

        url_items = self._split_url_string(query).items()
        url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
        items.extend(url_items)

        items.sort()
        encoded_str = urllib.urlencode(items)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""

        if not self.is_form_encoded:
            # according to
            # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
            # section 4.1.1 "OAuth Consumers MUST NOT include an
            # oauth_body_hash parameter on requests with form-encoded
            # request bodies."
            self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())

        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key

        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key

        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)

    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))

    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))

    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}

        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')

        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return cls(http_method, http_url, parameters)

        return None

    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None,
            body='', is_form_encoded=False):
        if not parameters:
            parameters = {}

        # Caller-supplied parameters win over the generated defaults.
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }

        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier

        return Request(http_method, http_url, parameters, body=body,
                       is_form_encoded=is_form_encoded)

    @classmethod
    def from_token_and_callback(cls, token, callback=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = callback

        return cls(http_method, http_url, parameters)

    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
        for k, v in parameters.iteritems():
            # Keep only the first value for each parameter, unquoted.
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    An httplib2.Http subclass that signs every outgoing request with the
    configured consumer/token before sending it.
    """

    def __init__(self, consumer, token=None, cache=None, timeout=None,
            proxy_info=None):
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")

        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")

        self.consumer = consumer
        self.token = token
        # HMAC-SHA1 is the default; override via set_signature_method().
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)

    def set_signature_method(self, method):
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")

        self.method = method

    def request(self, uri, method="GET", body='', headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'

        if not isinstance(headers, dict):
            headers = {}

        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_POST_CONTENT_TYPE)

        is_form_encoded = \
            headers.get('Content-Type') == 'application/x-www-form-urlencoded'

        if is_form_encoded and body:
            parameters = parse_qs(body)
        else:
            parameters = None

        # Build and sign an OAuth request from the call's arguments.
        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters, body=body, is_form_encoded=is_form_encoded)

        req.sign_request(self.method, self.consumer, self.token)

        # Derive the realm (scheme://host) from the request URI.
        schema, rest = urllib.splittype(uri)
        if rest.startswith('//'):
            hierpart = '//'
        else:
            hierpart = ''
        host, rest = urllib.splithost(rest)

        realm = schema + ':' + hierpart + host

        # Place the OAuth parameters where the request style requires:
        # form-encoded POST body, GET query string, or Authorization header.
        if is_form_encoded:
            body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header(realm=realm))

        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = OAUTH_VERSION
    signature_methods = None

    def __init__(self, signature_methods=None):
        # Map of signature-method name -> SignatureMethod instance.
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Register signature_method under its declared name; returns the map."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters.

        Raises Error on version or signature problems; on success returns
        the request's non-OAuth parameters.
        """
        self._check_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _check_version(self, request):
        """Verify the correct version of the request for this server."""
        version = self._get_version(request)
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))

    def _get_version(self, request):
        """Return the version of the request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # NOTE(review): bare except — a request without an oauth_version
            # parameter falls back to the default protocol version.
            version = OAUTH_VERSION
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            # No method named in the request; assume the library default.
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_verifier(self, request):
        # Return the oauth_verifier parameter from the request.
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        """Validate the timestamp and signature of a request; raises on failure."""
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')
        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            # Include the base string in the error to help clients debug.
            key, base = signature_method.signing_base(request, consumer, token)
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                self.timestamp_threshold))
class SignatureMethod(object):
    """A pluggable strategy for signing OAuth requests.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests. Subclass this and implement signing_base() and sign()
    to provide a new signing scheme.
    """

    def signing_base(self, request, consumer, token):
        """Calculate the string that needs to be signed.

        Returns a 2-tuple (key, message): the starting key for the signing
        and the message to be signed. The message may be surfaced in error
        text to help clients debug.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Return the signature for the given request.

        Implementations should build the message via signing_base() so
        error reporting stays useful.
        """
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """Return whether *signature* matches what sign() would produce."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 request signing (OAuth 1.0 section 9.2)."""

    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Return the (key, base-string) pair used for HMAC signing."""
        if not hasattr(request, 'normalized_url') or request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")
        # Signature base string: HTTP method, URL and normalized parameters,
        # each percent-escaped, joined by '&'.
        raw = '&'.join((
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        ))
        # Key: consumer secret plus optional token secret, '&'-separated.
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        return key, raw

    def sign(self, request, consumer, token):
        """Compute the base64-encoded HMAC-SHA1 signature."""
        key, raw = self.signing_base(request, consumer, token)
        digest = hmac.new(key, raw, sha).digest()
        # b2a_base64 appends a trailing newline; strip it.
        return binascii.b2a_base64(digest)[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """PLAINTEXT signing: the signature is simply the escaped secrets."""

    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenate the consumer secret and token secret with '&'.

        Returns the pair (sig, sig) — for PLAINTEXT the key and the signed
        message are the same string.
        """
        secret_pair = '%s&' % escape(consumer.secret)
        if token:
            secret_pair = secret_pair + escape(token.secret)
        return secret_pair, secret_pair

    def sign(self, request, consumer, token):
        """The signature is the raw signing base itself."""
        key, raw = self.signing_base(request, consumer, token)
        return raw
| Python |
# This is the version of this source code.

# Hand-maintained major.minor plus an automatically bumped build number.
manual_verstr = "1.5"
auto_build_num = "211"
verstr = manual_verstr + "." + auto_build_num

# Prefer pyutil's rich Version object; fall back to distutils' LooseVersion
# when pyutil is absent or rejects the version string.
try:
    from pyutil.version_class import Version as pyutil_Version
    __version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
    # Maybe there is no pyutil installed.
    from distutils.version import LooseVersion as distutils_Version
    __version__ = distutils_Version(verstr)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate via XOAUTH using the given consumer and token."""
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")

        def xoauth_response(challenge):
            # imaplib passes in the server challenge; XOAUTH ignores it.
            return oauth2.build_xoauth_string(url, consumer, token)

        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth_response)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Issue an AUTH XOAUTH command built from the consumer and token."""
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        xoauth = oauth2.build_xoauth_string(url, consumer, token)
        self.docmd('AUTH', 'XOAUTH %s' % base64.b64encode(xoauth))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import socket
import sys
import webbrowser
from client import FlowExchangeError
from client import OOB_CALLBACK_URN
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Command-line flags controlling how the OAuth 2.0 dance is performed.
FLAGS = gflags.FLAGS

# When True, run a local HTTP server to catch the OAuth redirect; when
# False the out-of-band (copy/paste) flow is used instead.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))

gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))

# Candidate ports, tried in order until one can be bound.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # NOTE: class-level mutable default. The handler replaces it with a fresh
  # dict per request (s.server.query_params = query), so instances do not
  # share parsed results in practice.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """

  def do_GET(self):
    """Handle a GET request.

    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' is the query string.
    raw_query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(raw_query))
    page = ("<html><head><title>Authentication Status</title></head>"
            "<body><p>The authentication flow has completed.</p>"
            "</body></html>")
    self.wfile.write(page)

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage, http=None):
"""Core code for a command-line application.
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
if FLAGS.auth_local_webserver:
success = False
port_number = 0
for port in FLAGS.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
FLAGS.auth_local_webserver = success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if FLAGS.auth_local_webserver:
oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
else:
oauth_callback = OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url(oauth_callback)
if FLAGS.auth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if FLAGS.auth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
from locked_file import LockedFile
logger = logging.getLogger(__name__)

# A dict from 'filename'->_MultiStore instances
_multistores = {}
# Guards _multistores against concurrent mutation from multiple threads.
_multistores_lock = threading.Lock()
class Error(Exception):
  """Base error for this module."""
class NewerCredentialStoreError(Error):
  """The credential store is a newer version than supported."""
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.

  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or list of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  filename = os.path.realpath(os.path.expanduser(filename))
  _multistores_lock.acquire()
  try:
    # Only construct a _MultiStore when none is cached yet. The previous
    # setdefault() call evaluated its argument eagerly, so a throwaway
    # _MultiStore (which touches/creates the backing file) was built on
    # every call even when a cached instance already existed.
    multistore = _multistores.get(filename)
    if multistore is None:
      multistore = _multistores[filename] = _MultiStore(
          filename, warn_on_readonly)
  finally:
    _multistores_lock.release()
  # A list of scopes is stored as a single space-delimited string.
  if isinstance(scope, list):
    scope = ' '.join(scope)
  return multistore._get_storage(client_id, user_agent, scope)
class _MultiStore(object):
  """A file backed store for multiple credentials.

  All access is serialized through a process-wide threading.Lock plus an
  OS-level file lock (via LockedFile), so several threads and processes
  can share one credentials file.
  """

  def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.

    This will create the file if necessary.
    """
    # 'r+b' is the preferred read/write mode; 'rb' is the fallback used
    # when the file cannot be opened for writing.
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly
    self._create_file_if_needed()
    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # (client_id, user_agent, scope) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None

  class _Storage(BaseStorage):
    """A Storage object that knows how to read/write a single credential."""

    def __init__(self, multistore, client_id, user_agent, scope):
      self._multistore = multistore
      self._client_id = client_id
      self._user_agent = user_agent
      self._scope = scope

    def acquire_lock(self):
      """Acquires any lock necessary to access this Storage.

      This lock is not reentrant.
      """
      # Delegates to the shared multistore lock (thread + file lock).
      self._multistore._lock()

    def release_lock(self):
      """Release the Storage lock.

      Trying to release a lock that isn't held will result in a
      RuntimeError.
      """
      self._multistore._unlock()

    def locked_get(self):
      """Retrieve credential.

      The Storage lock must be held when this is called.

      Returns:
        oauth2client.client.Credentials
      """
      credential = self._multistore._get_credential(
          self._client_id, self._user_agent, self._scope)
      if credential:
        # Attach this Storage so refreshed tokens get written back here.
        credential.set_store(self)
      return credential

    def locked_put(self, credentials):
      """Write a credential.

      The Storage lock must be held when this is called.

      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._update_credential(credentials, self._scope)

    def locked_delete(self):
      """Delete a credential.

      The Storage lock must be held when this is called.
      """
      self._multistore._delete_credential(self._client_id, self._user_agent,
          self._scope)

  def _create_file_if_needed(self):
    """Create an empty file if necessary.

    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created.
    """
    if not os.path.exists(self._file.filename()):
      # umask 0177 means the file is created owner read/write only (0600),
      # keeping stored credentials private.
      old_umask = os.umask(0177)
      try:
        open(self._file.filename(), 'a+b').close()
      finally:
        os.umask(old_umask)

  def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
      # Could not obtain the OS-level file lock: continue in read-only
      # mode so cached credentials are still usable for this run.
      self._read_only = True
      if self._warn_on_readonly:
        logger.warn('The credentials file (%s) is not writable. Opening in '
                    'read-only mode. Any refreshed credentials will only be '
                    'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
      logger.debug('Initializing empty multistore file')
      # The multistore is empty so write out an empty file.
      self._data = {}
      self._write()
    elif not self._read_only or self._data is None:
      # Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
      # changing out from under us and that we only have to read it
      # once. This prevents us from whacking any new access keys that
      # we have cached in memory but were unable to write out.
      self._refresh_data_cache()

  def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()

  def _locked_json_read(self):
    """Get the raw content of the multistore file.

    The multistore must be locked when this is called.

    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())

  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.

    The multistore must be locked when this is called.

    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      # Writes are silently skipped when the file lock was unavailable.
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    # Drop any trailing bytes left over from a longer previous store.
    self._file.file_handle().truncate()

  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.

    The multistore must be locked when this is called.

    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return
    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)
    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass
    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)

  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.

    Args:
      cred_entry: A dict entry from the data member of our format

    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    client_id = raw_key['clientId']
    user_agent = raw_key['userAgent']
    scope = raw_key['scope']
    key = (client_id, user_agent, scope)
    credential = None
    # Round-trip through a JSON string so Credentials.new_from_json can
    # dispatch to the correct Credentials subclass.
    credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
    return (key, credential)

  def _write(self):
    """Write the cached data back out.

    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in self._data.items():
      raw_key = {
          'clientId': cred_key[0],
          'userAgent': cred_key[1],
          'scope': cred_key[2]
      }
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)

  def _get_credential(self, client_id, user_agent, scope):
    """Get a credential from the multistore.

    The multistore must be locked.

    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested

    Returns:
      The credential specified or None if not present
    """
    key = (client_id, user_agent, scope)
    return self._data.get(key, None)

  def _update_credential(self, cred, scope):
    """Update a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      cred: The OAuth2Credential to update/set
      scope: The scope(s) that this credential covers
    """
    key = (cred.client_id, cred.user_agent, scope)
    self._data[key] = cred
    self._write()

  def _delete_credential(self, client_id, user_agent, scope):
    """Delete a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: The scope(s) that this credential covers
    """
    key = (client_id, user_agent, scope)
    try:
      del self._data[key]
    except KeyError:
      # Deleting a credential that is not stored is a no-op.
      pass
    self._write()

  def _get_storage(self, client_id, user_agent, scope):
    """Get a Storage object to get/set a credential.

    This Storage is a 'view' into the multistore.

    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested

    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, client_id, user_agent, scope)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from anyjson import simplejson
HAS_OPENSSL = False
try:
from oauth2client.crypt import Signer
from oauth2client.crypt import make_signed_jwt
from oauth2client.crypt import verify_signed_jwt_with_certs
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)

# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# Which certs to use to validate id_tokens received.
# NOTE(review): the name is missing an 'I' ("VERIFICATON"), but it is part
# of the module's public surface, so it is left unchanged.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'

# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
class Error(Exception):
  """Base error for this module."""
class FlowExchangeError(Error):
  """Error trying to exchange an authorization grant for an access token."""
class AccessTokenRefreshError(Error):
  """Error trying to refresh an expired access token."""
class UnknownClientSecretsFlowError(Error):
  """The client secrets file called for an unknown type of OAuth 2.0 flow."""
class AccessTokenCredentialsError(Error):
  """Having only the access_token means no refresh is possible."""
class VerifyJwtTokenError(Error):
  """Could not retrieve certificates for validation."""
  pass
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
  """httplib2 Cache implementation backed by an in-process dict."""

  def __init__(self):
    # Mapping of cache key -> cached value, local to this process.
    self.cache = {}

  def get(self, key):
    """Return the cached value for key, or None when absent."""
    return self.cache.get(key, None)

  def set(self, key, value):
    """Store value under key, replacing any existing entry."""
    self.cache[key] = value

  def delete(self, key):
    """Remove key if present; missing keys are ignored."""
    self.cache.pop(key, None)
class Credentials(object):
  """Base class for all Credentials objects.

  Subclasses must define an authorize() method that applies the credentials to
  an HTTP transport.

  Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
  """

  # Instance members that must never be serialized by _to_json().
  NON_SERIALIZED_MEMBERS = ['store']

  def authorize(self, http):
    """Take an httplib2.Http instance (or equivalent) and
    authorizes it for the set of credentials, usually by
    replacing http.request() with a method that adds in
    the appropriate headers and then delegates to the original
    Http.request() method.
    """
    _abstract()

  def refresh(self, http):
    """Forces a refresh of the access_token.

    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    _abstract()

  def apply(self, headers):
    """Add the authorization to the headers.

    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    _abstract()

  def _to_json(self, strip):
    """Utility function for creating a JSON representation of an instance of Credentials.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    for member in strip:
      if member in d:
        del d[member]
    # datetime is not JSON-serializable; store expiry as an RFC3339 string.
    if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
      d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Creating a JSON representation of an instance of Credentials.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a Credentials subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of Credentials that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    try:
      m = __import__(module)
    except ImportError:
      # In case there's an object from the old package structure, update it
      module = module.replace('.apiclient', '')
      m = __import__(module)
    # Re-import with fromlist so a dotted module path resolves to the leaf
    # module rather than the top-level package.
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)

  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it.

    The JSON should have been produced by calling .to_json() on the object.

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of a Credentials subclass.
    """
    return Credentials()
class Flow(object):
  """Base class for all Flow objects."""
class Storage(object):
  """Base class for all Storage objects.

  Store and retrieve a single credential. This class supports locking
  such that multiple processes and threads can operate on a single
  store.
  """

  def get(self):
    """Retrieve credential.

    The Storage lock must *not* be held when this is called.

    Returns:
      oauth2client.client.Credentials
    """
    self.acquire_lock()
    try:
      return self.locked_get()
    finally:
      self.release_lock()

  def put(self, credentials):
    """Write a credential.

    The Storage lock must be held when this is called.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
      self.locked_put(credentials)
    finally:
      self.release_lock()

  def delete(self):
    """Delete credential.

    Frees any resources associated with storing the credential.
    The Storage lock must *not* be held when this is called.

    Returns:
      None
    """
    self.acquire_lock()
    try:
      return self.locked_delete()
    finally:
      self.release_lock()

  def acquire_lock(self):
    """Acquires any lock necessary to access this Storage.

    This lock is not reentrant. The default implementation is a no-op;
    subclasses override as needed.
    """
    pass

  def release_lock(self):
    """Release the Storage lock.

    Trying to release a lock that isn't held will result in a
    RuntimeError.
    """
    pass

  def locked_get(self):
    """Retrieve credential; subclasses must implement.

    The Storage lock must be held when this is called.

    Returns:
      oauth2client.client.Credentials
    """
    _abstract()

  def locked_put(self, credentials):
    """Write a credential; subclasses must implement.

    The Storage lock must be held when this is called.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()

  def locked_delete(self):
    """Delete a credential; subclasses must implement.

    The Storage lock must be held when this is called.
    """
    _abstract()
class OAuth2Credentials(Credentials):
  """Credentials object for OAuth 2.0.

  Credentials can be applied to an httplib2.Http object using the authorize()
  method, which then adds the OAuth 2.0 access token to each request.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, access_token, client_id, client_secret, refresh_token,
               token_expiry, token_uri, user_agent, id_token=None):
    """Create an instance of OAuth2Credentials.

    This constructor is not usually called by the user, instead
    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.

    Args:
      access_token: string, access token.
      client_id: string, client identifier.
      client_secret: string, client secret.
      refresh_token: string, refresh token.
      token_expiry: datetime, when the access_token expires.
      token_uri: string, URI of token endpoint.
      user_agent: string, The HTTP User-Agent to provide for this application.
      id_token: object, The identity of the resource owner.

    Notes:
      store: callable, A callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has expired and been refreshed.
    """
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.refresh_token = refresh_token
    # Assigned later via set_store(); kept out of the constructor so that
    # __getstate__ can drop it cleanly when pickling.
    self.store = None
    self.token_expiry = token_expiry
    self.token_uri = token_uri
    self.user_agent = user_agent
    self.id_token = id_token
    # True if the credentials have been revoked or expired and can't be
    # refreshed.
    self.invalid = False

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these credentials.

    The modified http.request method will add authentication headers to each
    request and will refresh access_tokens when a 401 is received on a
    request. In addition the http.request method has a credentials property,
    http.request.credentials, which is the Credentials object that authorized
    it.

    Args:
       http: An instance of httplib2.Http
         or something that acts like it.

    Returns:
       A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authenication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      # No token yet at all: try an eager refresh before the first request.
      if not self.access_token:
        logger.info('Attempting refresh to obtain initial access_token')
        self._refresh(request_orig)

      # Modify the request headers to add the appropriate
      # Authorization header.
      if headers is None:
        headers = {}
      self.apply(headers)

      if self.user_agent is not None:
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent

      resp, content = request_orig(uri, method, body, headers,
                                   redirections, connection_type)

      # On a 401, refresh the token once and retry the request a single
      # time with the fresh Authorization header.
      if resp.status == 401:
        logger.info('Refreshing due to a 401')
        self._refresh(request_orig)
        self.apply(headers)
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)
      else:
        return (resp, content)

    # Replace the request method with our own closure.
    http.request = new_request

    # Set credentials as a property of the request method.
    setattr(http.request, 'credentials', self)

    return http

  def refresh(self, http):
    """Forces a refresh of the access_token.

    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    self._refresh(http.request)

  def apply(self, headers):
    """Add the authorization to the headers.

    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    headers['Authorization'] = 'Bearer ' + self.access_token

  def to_json(self):
    """Serialize to JSON, excluding the non-serializable members (e.g. store)."""
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)

  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it. The JSON
    should have been produced by calling .to_json() on the object.

    Args:
      s: string, JSON produced by a prior .to_json() call.

    Returns:
      An instance of a Credentials subclass.
    """
    data = simplejson.loads(s)
    if 'token_expiry' in data and not isinstance(data['token_expiry'],
        datetime.datetime):
      try:
        data['token_expiry'] = datetime.datetime.strptime(
            data['token_expiry'], EXPIRY_FORMAT)
      except:
        # NOTE(review): bare except silently discards an unparseable expiry;
        # the token is then treated as non-expiring until a refresh occurs.
        data['token_expiry'] = None
    retval = OAuth2Credentials(
        data['access_token'],
        data['client_id'],
        data['client_secret'],
        data['refresh_token'],
        data['token_expiry'],
        data['token_uri'],
        data['user_agent'],
        data.get('id_token', None))
    # NOTE(review): assumes 'invalid' is always present in the JSON — true for
    # output of to_json(), raises KeyError for hand-built input.
    retval.invalid = data['invalid']
    return retval

  @property
  def access_token_expired(self):
    """True if the credential is expired or invalid.

    If the token_expiry isn't set, we assume the token doesn't expire.
    """
    if self.invalid:
      return True

    if not self.token_expiry:
      return False

    # Expiry times are compared in naive UTC.
    now = datetime.datetime.utcnow()
    if now >= self.token_expiry:
      logger.info('access_token is expired. Now: %s, token_expiry: %s',
                  now, self.token_expiry)
      return True
    return False

  def set_store(self, store):
    """Set the Storage for the credential.

    Args:
      store: Storage, an implementation of Storage object.
        This is needed to store the latest access_token if it
        has expired and been refreshed.  This implementation uses
        locking to check for updates before updating the
        access_token.
    """
    self.store = store

  def _updateFromCredential(self, other):
    """Update this Credential from another instance."""
    self.__dict__.update(other.__getstate__())

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store may hold locks/files/DB handles — never pickle it.
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    # A restored credential has no store until set_store() is called again.
    self.store = None

  def _generate_refresh_request_body(self):
    """Generate the body that will be used in the refresh request."""
    body = urllib.urlencode({
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token': self.refresh_token,
        })
    return body

  def _generate_refresh_request_headers(self):
    """Generate the headers that will be used in the refresh request."""
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }

    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent

    return headers

  def _refresh(self, http_request):
    """Refreshes the access_token.

    This method first checks by reading the Storage object if available.
    If a refresh is still needed, it holds the Storage lock until the
    refresh is completed.

    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.

    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    if not self.store:
      self._do_refresh_request(http_request)
    else:
      self.store.acquire_lock()
      try:
        # Another process/thread may already have refreshed and stored a
        # newer token — adopt it instead of issuing a redundant refresh.
        new_cred = self.store.locked_get()
        if (new_cred and not new_cred.invalid and
            new_cred.access_token != self.access_token):
          logger.info('Updated access_token read from Storage')
          self._updateFromCredential(new_cred)
        else:
          self._do_refresh_request(http_request)
      finally:
        self.store.release_lock()

  def _do_refresh_request(self, http_request):
    """Refresh the access_token using the refresh_token.

    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.

    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()

    logger.info('Refreshing access_token')
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if loads fails?
      d = simplejson.loads(content)
      self.access_token = d['access_token']
      # The server may or may not rotate the refresh token; keep the old
      # one when no new one is supplied.
      self.refresh_token = d.get('refresh_token', self.refresh_token)
      if 'expires_in' in d:
        self.token_expiry = datetime.timedelta(
            seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
      else:
        self.token_expiry = None
      # Write the refreshed token back so other holders of the store see it.
      if self.store:
        self.store.locked_put(self)
    else:
      # An {'error':...} response body means the token is expired or revoked,
      # so we flag the credentials as such.
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
          # Persist the invalid flag so the store stops returning this
          # credential as usable.
          self.invalid = True
          if self.store:
            self.store.locked_put(self)
      except:
        # NOTE(review): bare except — a non-JSON error body falls through to
        # the generic 'Invalid response' message below.
        pass
      raise AccessTokenRefreshError(error_msg)
class AccessTokenCredentials(OAuth2Credentials):
  """OAuth 2.0 credentials built from a bare access token.

  Apply these credentials to an httplib2.Http object with authorize(); every
  request from that object is then signed with the OAuth 2.0 access token.
  Use this class when the access_token was obtained somewhere else entirely
  (a JavaScript client, another web application) and you simply want to use
  it from Python. Because only the access_token is known the credentials
  cannot be refreshed and will eventually expire.

  AccessTokenCredentials objects may be safely pickled and unpickled.

  Usage:
    credentials = AccessTokenCredentials('<an access token>',
      'my-user-agent/1.0')
    http = httplib2.Http()
    http = credentials.authorize(http)

  Exceptions:
    AccessTokenCredentialsError: raised when the access_token expires or is
      revoked, since no refresh is possible.
  """

  def __init__(self, access_token, user_agent):
    """Create an instance of AccessTokenCredentials.

    This is one of the few Credentials types meant to be constructed
    directly; most Credentials objects are instantiated by a Flow.

    Args:
      access_token: string, access token.
      user_agent: string, The HTTP User-Agent to provide for this application.

    Notes:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
    """
    # Everything except the token and user agent is unknown, so the
    # client/refresh/expiry slots of the parent are left as None.
    super(AccessTokenCredentials, self).__init__(
        access_token, None, None, None, None, None, user_agent)

  @classmethod
  def from_json(cls, s):
    """Rebuild an instance from the JSON produced by to_json()."""
    data = simplejson.loads(s)
    return AccessTokenCredentials(data['access_token'], data['user_agent'])

  def _refresh(self, http_request):
    # There is no refresh_token, so any refresh attempt is a hard failure.
    raise AccessTokenCredentialsError(
        "The access_token is expired or invalid and can't be refreshed.")
class AssertionCredentials(OAuth2Credentials):
  """Abstract Credentials object used for OAuth 2.0 assertion grants.

  This is a two-legged flow: no user-facing authorization step is needed,
  so no Flow object is required. The instance already holds everything it
  needs to generate and refresh its own access tokens, except for the
  assertion string itself — subclasses must implement _generate_assertion().

  AssertionCredentials objects may be safely pickled and unpickled.
  """

  def __init__(self, assertion_type, user_agent,
               token_uri='https://accounts.google.com/o/oauth2/token',
               **unused_kwargs):
    """Constructor for AssertionFlowCredentials.

    Args:
      assertion_type: string, assertion type that will be declared to the
        auth server.
      user_agent: string, The HTTP User-Agent to provide for this application.
      token_uri: string, URI for token endpoint. For convenience defaults
        to Google's endpoints but any OAuth 2.0 provider can be used.
    """
    # access_token, client id/secret, refresh_token and expiry are all
    # unknown up front — an assertion grant mints tokens on demand.
    super(AssertionCredentials, self).__init__(
        None, None, None, None, None, token_uri, user_agent)
    self.assertion_type = assertion_type

  def _generate_refresh_request_body(self):
    """Build the urlencoded assertion-grant body for the token request."""
    assertion = self._generate_assertion()
    body = urllib.urlencode({
        'assertion_type': self.assertion_type,
        'assertion': assertion,
        'grant_type': 'assertion',
        })
    return body

  def _generate_assertion(self):
    """Return the assertion string for the access token request.

    Subclasses must override this.
    """
    _abstract()
if HAS_OPENSSL:
# PyOpenSSL is not a prerequisite for oauth2client, so if it is missing then
# don't create the SignedJwtAssertionCredentials or the verify_id_token()
# method.
class SignedJwtAssertionCredentials(AssertionCredentials):
"""Credentials object used for OAuth 2.0 Signed JWT assertion grants.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
"""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
def __init__(self,
service_account_name,
private_key,
scope,
private_key_password='notasecret',
user_agent=None,
token_uri='https://accounts.google.com/o/oauth2/token',
**kwargs):
"""Constructor for SignedJwtAssertionCredentials.
Args:
service_account_name: string, id for account, usually an email address.
private_key: string, private key in P12 format.
scope: string or list of strings, scope(s) of the credentials being
requested.
private_key_password: string, password for private_key.
user_agent: string, HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
kwargs: kwargs, Additional parameters to add to the JWT token, for
example prn=joe@xample.org."""
super(SignedJwtAssertionCredentials, self).__init__(
'http://oauth.net/grant_type/jwt/1.0/bearer',
user_agent,
token_uri=token_uri,
)
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
self.private_key = private_key
self.private_key_password = private_key_password
self.service_account_name = service_account_name
self.kwargs = kwargs
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = SignedJwtAssertionCredentials(
data['service_account_name'],
data['private_key'],
data['private_key_password'],
data['scope'],
data['user_agent'],
data['token_uri'],
data['kwargs']
)
retval.invalid = data['invalid']
return retval
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = long(time.time())
payload = {
'aud': self.token_uri,
'scope': self.scope,
'iat': now,
'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self.service_account_name
}
payload.update(self.kwargs)
logger.debug(str(payload))
return make_signed_jwt(
Signer.from_string(self.private_key, self.private_key_password),
payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs.
_cached_http = httplib2.Http(MemoryCache())
def verify_id_token(id_token, audience, http=None,
cert_uri=ID_TOKEN_VERIFICATON_CERTS):
"""Verifies a signed JWT id_token.
Args:
id_token: string, A Signed JWT.
audience: string, The audience 'aud' that the token should be for.
http: httplib2.Http, instance to use to make the HTTP request. Callers
should supply an instance that has caching enabled.
cert_uri: string, URI of the certificates in JSON format to
verify the JWT against.
Returns:
The deserialized JSON in the JWT.
Raises:
oauth2client.crypt.AppIdentityError if the JWT fails to verify.
"""
if http is None:
http = _cached_http
resp, content = http.request(cert_uri)
if resp.status == 200:
certs = simplejson.loads(content)
return verify_signed_jwt_with_certs(id_token, certs, audience)
else:
raise VerifyJwtTokenError('Status code: %d' % resp.status)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
  """Return the JSON payload of a JWT without verifying its signature.

  Args:
    id_token: string, OAuth 2.0 id_token.

  Returns:
    object, The deserialized JSON payload.

  Raises:
    VerifyJwtTokenError: if the token is not made of exactly three segments.
  """
  # A JWT is header.payload.signature; we only care about the payload here.
  parts = id_token.split('.')
  if len(parts) != 3:
    raise VerifyJwtTokenError(
      'Wrong number of segments in token: %s' % id_token)

  return simplejson.loads(_urlsafe_b64decode(parts[1]))
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri = 'postmessage',
                          http=None, user_agent=None,
                          token_uri='https://accounts.google.com/o/oauth2/token'):
  """Exchanges an authorization code for an OAuth2Credentials object.

  Args:
    client_id: string, client identifier.
    client_secret: string, client secret.
    scope: string or list of strings, scope(s) to request.
    code: string, An authorization code, most likely passed down from
      the client.
    redirect_uri: string, this is generally set to 'postmessage' to match the
      redirect_uri that the client specified.
    http: httplib2.Http, optional http instance to use to do the fetch.
    user_agent: string, optional HTTP User-Agent to provide for this
      application.
    token_uri: string, URI for token endpoint. For convenience
      defaults to Google's endpoints but any OAuth 2.0 provider can be used.

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError if the authorization code cannot be exchanged for an
      access token.
  """
  flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                             'https://accounts.google.com/o/oauth2/auth',
                             token_uri)

  # step1_get_authorize_url() is invoked only for its side effect of setting
  # flow.redirect_uri, which step2_exchange() sends to the token endpoint;
  # the authorization URL it returns is not needed here.
  flow.step1_get_authorize_url(redirect_uri)
  credentials = flow.step2_exchange(code, http)
  return credentials
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message = None,
                                            redirect_uri = 'postmessage',
                                            http=None):
  """Returns OAuth2Credentials from a clientsecrets file and an auth code.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of clientsecrets.
    scope: string or list of strings, scope(s) to request.
    code: string, An authorization code, most likely passed down from
      the client.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message in not
      provided then clientsecrets.InvalidClientSecretsError will be raised.
    redirect_uri: string, this is generally set to 'postmessage' to match the
      redirect_uri that the client specified.
    http: httplib2.Http, optional http instance to use to do the fetch.

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError if the authorization code cannot be exchanged for an
      access token.
    UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
  """
  flow = flow_from_clientsecrets(filename, scope, message)

  # Called only for its side effect of setting flow.redirect_uri, which
  # step2_exchange() sends to the token endpoint; the URL itself is unused.
  flow.step1_get_authorize_url(redirect_uri)
  credentials = flow.step2_exchange(code, http)
  return credentials
class OAuth2WebServerFlow(Flow):
  """Does the Web Server Flow for OAuth 2.0.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, client_id, client_secret, scope, user_agent=None,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               **kwargs):
    """Constructor for OAuth2WebServerFlow.

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      **kwargs: dict, The keyword arguments are all optional and required
                        parameters for the OAuth calls.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    # The authorization endpoint expects a single space-delimited scope
    # string.
    if type(scope) is list:
      scope = ' '.join(scope)
    self.scope = scope
    self.user_agent = user_agent
    self.auth_uri = auth_uri
    self.token_uri = token_uri
    # 'access_type': 'offline' requests a refresh_token by default; callers
    # may override it (or add other params) via **kwargs.
    self.params = {
        'access_type': 'offline',
        }
    self.params.update(kwargs)
    # Set by step1_get_authorize_url() and reused by step2_exchange().
    self.redirect_uri = None

  def step1_get_authorize_url(self, redirect_uri=OOB_CALLBACK_URN):
    """Returns a URI to redirect to the provider.

    Args:
      redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
          a non-web-based application, or a URI that handles the callback from
          the authorization server.

    If redirect_uri is 'urn:ietf:wg:oauth:2.0:oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    # Remember the redirect_uri: the token endpoint requires the same value
    # in step2_exchange.
    self.redirect_uri = redirect_uri
    query = {
        'response_type': 'code',
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'scope': self.scope,
        }
    query.update(self.params)
    # Merge our parameters into any query string already on auth_uri.
    parts = list(urlparse.urlparse(self.auth_uri))
    query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)

  def step2_exchange(self, code, http=None):
    """Exhanges a code for OAuth2Credentials.

    Args:
      code: string or dict, either the code as a string, or a dictionary
        of the query parameters to the redirect_uri, which contains
        the code.
      http: httplib2.Http, optional http instance to use to do the fetch

    Returns:
      An OAuth2Credentials object that can be used to authorize requests.

    Raises:
      FlowExchangeError if a problem occured exchanging the code for a
      refresh_token.
    """
    # Accept either the raw code string or the dict of callback query
    # parameters that contains it. (str/unicode check is Python 2 specific.)
    if not (isinstance(code, str) or isinstance(code, unicode)):
      if 'code' not in code:
        if 'error' in code:
          error_msg = code['error']
        else:
          error_msg = 'No code was supplied in the query parameters.'
        raise FlowExchangeError(error_msg)
      else:
        code = code['code']

    body = urllib.urlencode({
        'grant_type': 'authorization_code',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
        # Must match the redirect_uri sent in step 1.
        'redirect_uri': self.redirect_uri,
        'scope': self.scope,
        })
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }

    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent

    if http is None:
      http = httplib2.Http()

    resp, content = http.request(self.token_uri, method='POST', body=body,
                                 headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if simplejson.loads fails?
      d = simplejson.loads(content)
      access_token = d['access_token']
      refresh_token = d.get('refresh_token', None)
      token_expiry = None
      if 'expires_in' in d:
        token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=int(d['expires_in']))

      if 'id_token' in d:
        # Decode (without verifying) the identity payload for the caller.
        d['id_token'] = _extract_id_token(d['id_token'])

      # NOTE(review): this logs the full token response, including the
      # access token, at info level — consider redacting in production.
      logger.info('Successfully retrieved access token: %s' % content)
      return OAuth2Credentials(access_token, self.client_id,
                               self.client_secret, refresh_token, token_expiry,
                               self.token_uri, self.user_agent,
                               id_token=d.get('id_token', None))
    else:
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        # Prefer the server-supplied error message when the body is JSON.
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
      except:
        # NOTE(review): bare except — non-JSON error bodies fall back to the
        # generic 'Invalid response' message.
        pass

      raise FlowExchangeError(error_msg)
def flow_from_clientsecrets(filename, scope, message=None):
  """Create a Flow from a clientsecrets file.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) to request.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message in not
      provided then clientsecrets.InvalidClientSecretsError will be raised.

  Returns:
    A Flow object.

  Raises:
    UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
  """
  try:
    client_type, client_info = clientsecrets.loadfile(filename)
    if client_type in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
      return OAuth2WebServerFlow(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          None, # user_agent
          client_info['auth_uri'],
          client_info['token_uri'])
  except clientsecrets.InvalidClientSecretsError:
    if message:
      sys.exit(message)
    else:
      raise
  else:
    # BUG FIX: the original used '*' (sequence repetition) instead of the
    # '%' string-formatting operator, which raised a TypeError instead of
    # the intended UnknownClientSecretsFlowError message.
    raise UnknownClientSecretsFlowError(
        'This OAuth 2.0 flow is unsupported: "%s"' % client_type)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from a file."""
def __init__(self, filename):
self._filename = filename
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
try:
f = open(self._filename, 'rb')
content = f.read()
f.close()
except IOError:
return credentials
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
self._create_file_if_needed()
f = open(self._filename, 'wb')
f.write(credentials.to_json())
f.close()
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
os.unlink(self._filename)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
  """Django model field that stores an oauth2client Credentials object.

  The credential is persisted as a base64-encoded pickle in a text column.
  """

  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    return "TextField"

  def to_python(self, value):
    """Convert the DB value (base64-encoded pickle) back to Credentials."""
    if value is None or isinstance(value, oauth2client.client.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    """Serialize Credentials to a base64-encoded pickle for storage."""
    return None if value is None else base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
  """Django model field that stores an oauth2client Flow object.

  The flow is persisted as a base64-encoded pickle in a text column.
  """

  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    return "TextField"

  def to_python(self, value):
    """Convert the DB value (base64-encoded pickle) back to a Flow."""
    if value is None or isinstance(value, oauth2client.client.Flow):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    """Serialize the Flow to a base64-encoded pickle for storage."""
    return None if value is None else base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from
  the datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsField
  on a db model class.
  """

  def __init__(self, model_class, key_name, key_value, property_name):
    """Constructor for Storage.

    Args:
      model_class: db.Model, model class.
      key_name: string, key name for the entity that has the credentials.
      key_value: string, key value for the entity that has the credentials.
      property_name: string, name of the property that is a
        CredentialsProperty.
    """
    self.model_class = model_class
    self.key_name = key_name
    self.key_value = key_value
    self.property_name = property_name

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials, or None if no matching entity exists.
    """
    credential = None
    query = {self.key_name: self.key_value}
    entities = self.model_class.objects.filter(**query)
    if len(entities) > 0:
      credential = getattr(entities[0], self.property_name)
      if credential and hasattr(credential, 'set_store'):
        # Wire the credential back to this store so refreshed tokens are
        # written through automatically.
        credential.set_store(self)
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    NOTE(review): this always constructs a new model instance; whether it
    overwrites or duplicates an existing row depends on the model's key
    configuration — confirm against the model definition.

    Args:
      credentials: Credentials, the credentials to store.
    """
    args = {self.key_name: self.key_value}
    entity = self.model_class(**args)
    setattr(entity, self.property_name, credentials)
    entity.save()

  def locked_delete(self):
    """Delete Credentials from the datastore."""
    query = {self.key_name: self.key_value}
    # The unused binding of .delete()'s return value has been removed.
    self.model_class.objects.filter(**query).delete()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from anyjson import simplejson
from client import AccessTokenRefreshError
from client import AssertionCredentials
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import app_identity
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
class InvalidClientSecretsError(Exception):
  """Raised when the client_secrets.json file is malformed or is missing
  required fields."""
  pass
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for
the purpose of accessing data stored under an account assigned to the App
Engine application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being requested.
"""
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
None,
None,
None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching we can skip all the storage hoops and just to a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Allows easy storage and retrieval of an oauth2client.Flow by pickling
  it into a db.Blob.
  """

  # The user-facing value type of this property.
  data_type = Flow

  def get_value_for_datastore(self, model_instance):
    """Pickle the Flow into a Blob for writing to the datastore."""
    flow = super(FlowProperty, self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  def make_value_from_datastore(self, value):
    """Unpickle the stored Blob back into a Flow; None stays None."""
    return None if value is None else pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor Flow instances."""
    if value is not None and not isinstance(value, Flow):
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowProperty, self).validate(value)

  def empty(self, value):
    """Any falsy value counts as empty."""
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Allows easy storage and retrieval of oauth2client.Credentials by storing
  its JSON serialization in a db.Blob; an unset credential is stored as an
  empty Blob.
  """

  # The user-facing value type of this property.
  data_type = Credentials

  def get_value_for_datastore(self, model_instance):
    """Serialize the Credentials to JSON for writing to the datastore."""
    # Demoted from logging.info: this is developer tracing, not app events.
    logging.debug("get: Got type " + str(type(model_instance)))
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    if cred is None:
      cred = ''
    else:
      cred = cred.to_json()
    return db.Blob(cred)

  def make_value_from_datastore(self, value):
    """Deserialize stored JSON back into Credentials.

    Missing, empty, or unparseable values yield None rather than raising,
    so a corrupt entity degrades to "no credentials".
    """
    logging.debug("make: Got type " + str(type(value)))
    if not value:
      return None
    try:
      return Credentials.new_from_json(value)
    except ValueError:
      return None

  def validate(self, value):
    """Reject values that are neither None nor Credentials instances."""
    value = super(CredentialsProperty, self).validate(value)
    logging.debug("validate: Got type " + str(type(value)))
    if value is not None and not isinstance(value, Credentials):
      raise db.BadValueError('Property %s must be convertible '
                             'to a Credentials instance (%s)' %
                             (self.name, value))
    return value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from the datastore.

  Presumes the Credentials are stored as a CredentialsProperty on a
  datastore model class, and that entities are looked up by key_name.
  """

  def __init__(self, model, key_name, property_name, cache=None):
    """Constructor for Storage.

    Args:
      model: db.Model, model class.
      key_name: string, key name for the entity that has the credentials.
      property_name: string, name of the CredentialsProperty attribute.
      cache: memcache-like object, optional write-through cache to put in
        front of the datastore.
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name
    self._cache = cache

  def locked_get(self):
    """Retrieve the Credential from the cache or datastore.

    Returns:
      oauth2client.Credentials, or None when no entity exists.
    """
    if self._cache:
      serialized = self._cache.get(self._key_name)
      if serialized:
        return Credentials.new_from_json(serialized)

    credential = None
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      credential = getattr(entity, self._property_name)
      if credential and hasattr(credential, 'set_store'):
        # Let the credential write itself back through this store on refresh.
        credential.set_store(self)
        if self._cache:
          self._cache.set(self._key_name, credential.to_json())
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore (and cache, if configured).

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._model.get_or_insert(self._key_name)
    setattr(entity, self._property_name, credentials)
    entity.put()
    if self._cache:
      self._cache.set(self._key_name, credentials.to_json())

  def locked_delete(self):
    """Delete the Credential from the cache and datastore."""
    if self._cache:
      self._cache.delete(self._key_name)

    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      entity.delete()
class CredentialsModel(db.Model):
  """Datastore model for OAuth 2.0 Credentials, keyed by user.user_id()."""

  # JSON-serialized oauth2client.Credentials for one user.
  credentials = CredentialsProperty()
class OAuth2Decorator(object):
  """Utility for making OAuth 2.0 easier.

  Instantiate and then use with oauth_required or oauth_aware
  as decorators on webapp.RequestHandler methods.

  Example:

    decorator = OAuth2Decorator(
        client_id='837...ent.com',
        client_secret='Qh...wwI',
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, client_id, client_secret, scope,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               user_agent=None,
               message=None, **kwargs):
    """Constructor for OAuth2Decorator

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      user_agent: string, User agent of your application, default to None.
      message: Message to display if there are problems with the OAuth 2.0
        configuration. The message may contain HTML and will be presented on the
        web interface for any method that uses the decorator.
      **kwargs: dict, Keyword arguments are be passed along as kwargs to the
        OAuth2WebServerFlow constructor.
    """
    # NOTE(review): arguments are passed to OAuth2WebServerFlow positionally;
    # this assumes its signature orders user_agent before auth_uri/token_uri —
    # confirm against the client module.
    self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
        auth_uri, token_uri, **kwargs)
    self.credentials = None
    self._request_handler = None
    self._message = message
    # Set by subclasses (e.g. when the clientsecrets file is invalid) to make
    # decorated handlers render self._message instead of running.
    self._in_error = False

  def _display_error_message(self, request_handler):
    # Render the configured error message as a minimal HTML page.
    request_handler.response.out.write('<html><body>')
    request_handler.response.out.write(self._message)
    request_handler.response.out.write('</body></html>')

  def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """

    def check_oauth(request_handler, *args, **kwargs):
      # Misconfiguration short-circuit: show the error page, never run method.
      if self._in_error:
        self._display_error_message(request_handler)
        return

      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return
      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      # Load any previously stored credentials for this user from datastore.
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()

      if not self.has_credentials():
        return request_handler.redirect(self.authorize_url())
      try:
        method(request_handler, *args, **kwargs)
      except AccessTokenRefreshError:
        # Token could not be refreshed (e.g. revoked): restart the dance.
        return request_handler.redirect(self.authorize_url())

    return check_oauth

  def oauth_aware(self, method):
    """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

    Does all the setup for the OAuth dance, but doesn't initiate it.
    This decorator is useful if you want to create a page that knows
    whether or not the user has granted access to this application.
    From within a method decorated with @oauth_aware the has_credentials()
    and authorize_url() methods can be called.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """

    def setup_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return

      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return

      # 'state' carries the original request URL through the OAuth redirect.
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # Unlike oauth_required, always invoke the method; the handler decides
      # what to do based on has_credentials().
      method(request_handler, *args, **kwargs)
    return setup_oauth

  def has_credentials(self):
    """True if for the logged in user there are valid access Credentials.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    return self.credentials is not None and not self.credentials.invalid

  def authorize_url(self):
    """Returns the URL to start the OAuth dance.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    callback = self._request_handler.request.relative_url('/oauth2callback')
    url = self.flow.step1_get_authorize_url(callback)
    user = users.get_current_user()
    # Stash the flow in memcache so OAuth2Handler can retrieve it to finish
    # the token exchange when the provider redirects back.
    memcache.set(user.user_id(), pickle.dumps(self.flow),
                 namespace=OAUTH2CLIENT_NAMESPACE)
    return str(url)

  def http(self):
    """Returns an authorized http instance.

    Must only be called from within an @oauth_required decorated method, or
    from within an @oauth_aware decorated method where has_credentials()
    returns True.
    """
    return self.credentials.authorize(httplib2.Http())
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
  """An OAuth2Decorator that builds from a clientsecrets file.

  Uses a clientsecrets file as the source for all the information when
  constructing an OAuth2Decorator.

  Example:

    decorator = OAuth2DecoratorFromClientSecrets(
        os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, filename, scope, message=None):
    """Constructor

    Args:
      filename: string, File name of client secrets.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. The message may contain HTML
        and will be presented on the web interface for any method that uses the
        decorator.
    """
    try:
      client_type, client_info = clientsecrets.loadfile(filename)
      if client_type not in [
          clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
        raise InvalidClientSecretsError(
            'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
      # BUG FIX: auth_uri, token_uri and message are now passed by keyword.
      # The base constructor's signature is (client_id, client_secret, scope,
      # auth_uri, token_uri, user_agent, message), so passing `message` as the
      # sixth positional argument landed it in the user_agent slot and the
      # configured message was silently dropped.
      super(OAuth2DecoratorFromClientSecrets, self).__init__(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          auth_uri=client_info['auth_uri'],
          token_uri=client_info['token_uri'],
          message=message)
    except clientsecrets.InvalidClientSecretsError:
      # Remember the misconfiguration; decorated handlers will render
      # self._message instead of running.
      self._in_error = True
    if message is not None:
      self._message = message
    else:
      self._message = "Please configure your application for OAuth 2.0"
def oauth2decorator_from_clientsecrets(filename, scope, message=None):
  """Create an OAuth2Decorator populated from a clientsecrets file.

  Convenience wrapper around OAuth2DecoratorFromClientSecrets.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) of the credentials being
      requested.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. The message may contain HTML
      and will be presented on the web interface for any method that uses the
      decorator.

  Returns:
    An OAuth2Decorator.
  """
  return OAuth2DecoratorFromClientSecrets(filename, scope, message)
class OAuth2Handler(webapp.RequestHandler):
  """Handler for the redirect_uri of the OAuth 2.0 dance."""

  @login_required
  def get(self):
    # The provider reports denial/config problems via the 'error' parameter.
    error = self.request.get('error')
    if error:
      errormsg = self.request.get('error_description', error)
      self.response.out.write(
          'The authorization request failed: %s' % errormsg)
    else:
      user = users.get_current_user()
      # Recover the flow that OAuth2Decorator.authorize_url() stashed in
      # memcache before redirecting to the provider.
      flow = pickle.loads(memcache.get(user.user_id(),
                                       namespace=OAUTH2CLIENT_NAMESPACE))
      # This code should be amended with application specific error
      # handling. The following cases should be considered:
      # 1. What if the flow doesn't exist in memcache? Or is corrupt?
      # 2. What if the step2_exchange fails?
      if flow:
        # Trade the authorization code for credentials and persist them.
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            CredentialsModel, user.user_id(), 'credentials').put(credentials)
        # 'state' carries the URL the user originally requested.
        self.redirect(str(self.request.get('state')))
      else:
        # TODO Add error handling here.
        pass
# WSGI application that routes the OAuth 2.0 redirect URI to its handler.
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])


def main():
  # Entry point for the App Engine CGI environment.
  run_wsgi_app(application)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from OpenSSL import crypto
from anyjson import simplejson
# Timestamp tolerances (in seconds) used when validating JWT iat/exp fields.
CLOCK_SKEW_SECS = 300  # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300  # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400  # 1 day in seconds
class AppIdentityError(Exception):
  """Raised when a JWT fails a verification check (format, signature,
  timestamps, or audience)."""
class Verifier(object):
  """Verifies the signature on a message."""

  def __init__(self, pubkey):
    """Constructor.

    Args:
      pubkey: OpenSSL.crypto public key object (PKey or X509 cert) to
        verify with.
    """
    self._pubkey = pubkey

  def verify(self, message, signature):
    """Verifies a message against a signature.

    Args:
      message: string, The message to verify.
      signature: string, The signature on the message.

    Returns:
      True if the message was signed by the private key associated with the
      public key that this object was constructed with.
    """
    try:
      crypto.verify(self._pubkey, signature, message, 'sha256')
      return True
    except crypto.Error:
      # crypto.verify signals a bad signature by raising.  This used to be a
      # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit and
      # genuine programming errors.
      return False

  @staticmethod
  def from_string(key_pem, is_x509_cert):
    """Construct a Verifier instance from a string.

    Args:
      key_pem: string, public key in PEM format.
      is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
        expected to be an RSA key in PEM format.

    Returns:
      Verifier instance.

    Raises:
      OpenSSL.crypto.Error: if the key_pem can't be parsed.
    """
    if is_x509_cert:
      pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
    else:
      # NOTE(review): the non-cert branch loads via load_privatekey — assumes
      # the PEM contains a full RSA key; confirm that is the intended input.
      pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
    return Verifier(pubkey)
class Signer(object):
  """Signs messages with a private key."""

  def __init__(self, pkey):
    """Constructor.

    Args:
      pkey: OpenSSL.crypto.PKey, The private key to sign with.
    """
    self._key = pkey

  def sign(self, message):
    """Signs a message.

    Args:
      message: string, Message to be signed.

    Returns:
      string, The SHA-256 signature of the message for the given key.
    """
    return crypto.sign(self._key, message, 'sha256')

  @staticmethod
  def from_string(key, password='notasecret'):
    """Construct a Signer instance from a PKCS12 key string.

    Args:
      key: string, private key in P12 format.
      password: string, password for the private key file.

    Returns:
      Signer instance.

    Raises:
      OpenSSL.crypto.Error: if the key can't be parsed.
    """
    p12 = crypto.load_pkcs12(key, password)
    return Signer(p12.get_privatekey())
def _urlsafe_b64encode(raw_bytes):
  """URL-safe base64 encode with the trailing '=' padding stripped (JWT style)."""
  encoded = base64.urlsafe_b64encode(raw_bytes)
  return encoded.rstrip('=')
def _urlsafe_b64decode(b64string):
  """Decode unpadded URL-safe base64, restoring the stripped '=' padding."""
  # Guard against unicode strings, which base64 can't handle.
  b64string = b64string.encode('ascii')
  padding = '=' * (4 - len(b64string) % 4)
  return base64.urlsafe_b64decode(b64string + padding)
def _json_encode(data):
  """Serialize to compact JSON (no whitespace), as required for JWT segments."""
  return simplejson.dumps(data, separators=(',', ':'))
def make_signed_jwt(signer, payload):
  """Make a signed JWT.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    signer: crypt.Signer, Cryptographic signer.
    payload: dict, Dictionary of data to convert to JSON and then sign.

  Returns:
    string, The JWT for the payload.
  """
  header = {'typ': 'JWT', 'alg': 'RS256'}

  # header.payload, each segment base64url-encoded without padding.
  segments = [
      _urlsafe_b64encode(_json_encode(header)),
      _urlsafe_b64encode(_json_encode(payload)),
  ]

  # Sign the joined header+payload and append the signature as the third
  # segment.
  signature = signer.sign('.'.join(segments))
  segments.append(_urlsafe_b64encode(signature))

  logging.debug(str(segments))

  return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
  """Verify a JWT against public certs.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    jwt: string, A JWT.
    certs: dict, Dictionary where values of public keys in PEM format.
    audience: string, The audience, 'aud', that this JWT should contain. If
      None then the JWT's 'aud' parameter is not verified.

  Returns:
    dict, The deserialized JSON payload in the JWT.

  Raises:
    AppIdentityError: if any checks are failed.
  """
  segments = jwt.split('.')

  if len(segments) != 3:
    raise AppIdentityError(
        'Wrong number of segments in token: %s' % jwt)
  signed = '%s.%s' % (segments[0], segments[1])

  signature = _urlsafe_b64decode(segments[2])

  # Parse token.
  json_body = _urlsafe_b64decode(segments[1])
  try:
    parsed = simplejson.loads(json_body)
  except Exception:
    # Was a bare `except:`; Exception still converts any parse failure into
    # AppIdentityError without swallowing KeyboardInterrupt/SystemExit.
    raise AppIdentityError('Can\'t parse token: %s' % json_body)

  # Check signature: accept the token if any supplied cert verifies it.
  verified = False
  for pem in certs.values():
    verifier = Verifier.from_string(pem, True)
    if verifier.verify(signed, signature):
      verified = True
      break
  if not verified:
    raise AppIdentityError('Invalid token signature: %s' % jwt)

  # Check creation timestamp, allowing CLOCK_SKEW_SECS of clock drift.
  iat = parsed.get('iat')
  if iat is None:
    raise AppIdentityError('No iat field in token: %s' % json_body)
  earliest = iat - CLOCK_SKEW_SECS

  # Check expiration timestamp.
  now = long(time.time())
  exp = parsed.get('exp')
  if exp is None:
    raise AppIdentityError('No exp field in token: %s' % json_body)
  if exp >= now + MAX_TOKEN_LIFETIME_SECS:
    raise AppIdentityError(
        'exp field too far in future: %s' % json_body)
  latest = exp + CLOCK_SKEW_SECS

  if now < earliest:
    raise AppIdentityError('Token used too early, %d < %d: %s' %
                           (now, earliest, json_body))
  if now > latest:
    raise AppIdentityError('Token used too late, %d > %d: %s' %
                           (now, latest, json_body))

  # Check audience.
  if audience is not None:
    aud = parsed.get('aud')
    if aud is None:
      raise AppIdentityError('No aud field in token: %s' % json_body)
    if aud != audience:
      raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                             (aud, audience, json_body))

  return parsed
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
# Properties that make a client_secrets.json file valid.
# The two recognized client types.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'

# For each client type: 'required' lists properties that must be present;
# 'string' lists properties that must be filled in (not left as a '[['
# template placeholder).
VALID_CLIENT = {
    TYPE_WEB: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    },
    TYPE_INSTALLED: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    }
}
class Error(Exception):
  """Base error for this module."""
  pass
class InvalidClientSecretsError(Error):
  """Raised when the ClientSecrets file has an invalid format."""
  pass
def _validate_clientsecrets(obj):
  """Validate a parsed clientsecrets structure.

  Args:
    obj: dict, deserialized client_secrets.json content; must map exactly
      one known client type to its info dict.

  Returns:
    (client_type, client_info) tuple.

  Raises:
    InvalidClientSecretsError: if the structure is malformed.
  """
  if obj is None or len(obj) != 1:
    raise InvalidClientSecretsError('Invalid file format.')
  # Exactly one key is present; it names the client type.  list() keeps this
  # working on Python 3, where dict.keys() is a non-indexable view.
  client_type = list(obj.keys())[0]
  if client_type not in VALID_CLIENT:
    raise InvalidClientSecretsError('Unknown client type: %s.' % client_type)
  client_info = obj[client_type]
  for prop_name in VALID_CLIENT[client_type]['required']:
    if prop_name not in client_info:
      raise InvalidClientSecretsError(
          'Missing property "%s" in a client type of "%s".' % (prop_name,
                                                               client_type))
  for prop_name in VALID_CLIENT[client_type]['string']:
    # '[[' marks an unfilled placeholder from the clientsecrets template.
    if client_info[prop_name].startswith('[['):
      raise InvalidClientSecretsError(
          'Property "%s" is not configured.' % prop_name)
  return client_type, client_info
def load(fp):
  """Parse clientsecrets from an open file object and validate it."""
  return _validate_clientsecrets(simplejson.load(fp))
def loads(s):
  """Parse clientsecrets from a string and validate it."""
  return _validate_clientsecrets(simplejson.loads(s))
def loadfile(filename):
  """Parse clientsecrets from a named file and validate it.

  Args:
    filename: string, path to the client_secrets.json file.

  Returns:
    (client_type, client_info) tuple.

  Raises:
    InvalidClientSecretsError: if the file is missing or malformed.
  """
  try:
    # open() instead of the deprecated file() builtin (removed in Python 3).
    fp = open(filename, 'r')
    try:
      obj = simplejson.load(fp)
    finally:
      fp.close()
  except IOError:
    raise InvalidClientSecretsError('File not found: "%s"' % filename)
  return _validate_clientsecrets(obj)
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavialable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Aquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
logger = logging.getLogger(__name__)
class AlreadyLockedException(Exception):
  """Raised on an attempt to lock a file this LockedFile has already locked."""
  pass
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
  """Lock files using Posix advisory lock files."""

  def open_and_lock(self, timeout, delay):
    """Open the file and lock it.

    Tries to create a .lock file next to the file we're trying to open.

    Args:
      timeout: float, How long to try to lock for.
      delay: float, How long to wait between retries.

    Raises:
      AlreadyLockedException: if the lock is already acquired.
      IOError: if the open fails.
    """
    if self._locked:
      raise AlreadyLockedException('File %s is already locked' %
                                   self._filename)
    self._locked = False

    try:
      self._fh = open(self._filename, self._mode)
    except IOError, e:
      # If we can't access with _mode, try _fallback_mode and don't lock.
      if e.errno == errno.EACCES:
        self._fh = open(self._filename, self._fallback_mode)
        return

    lock_filename = self._posix_lockfile(self._filename)
    start_time = time.time()
    while True:
      try:
        # O_CREAT|O_EXCL makes lock-file creation atomic: exactly one
        # process can win the race to create it.
        self._lock_fd = os.open(lock_filename,
                                os.O_CREAT|os.O_EXCL|os.O_RDWR)
        self._locked = True
        break

      except OSError, e:
        if e.errno != errno.EEXIST:
          raise
        if (time.time() - start_time) >= timeout:
          logger.warn('Could not acquire lock %s in %s seconds' % (
              lock_filename, timeout))
          # Close the file and open in fallback_mode.
          if self._fh:
            self._fh.close()
          self._fh = open(self._filename, self._fallback_mode)
          return
        # Lock file exists; another process holds the lock. Retry.
        time.sleep(delay)

  def unlock_and_close(self):
    """Unlock a file by removing the .lock file, and close the handle."""
    if self._locked:
      lock_filename = self._posix_lockfile(self._filename)
      os.unlink(lock_filename)
      os.close(self._lock_fd)
      self._locked = False
      self._lock_fd = None
    if self._fh:
      self._fh.close()

  def _posix_lockfile(self, filename):
    """The name of the lock file to use for posix locking."""
    return '%s.lock' % filename
try:
  import fcntl

  class _FcntlOpener(_Opener):
    """Open, lock, and unlock a file using fcntl.lockf."""

    def open_and_lock(self, timeout, delay):
      """Open the file and lock it.

      Args:
        timeout: float, How long to try to lock for.
        delay: float, How long to wait between retries

      Raises:
        AlreadyLockedException: if the lock is already acquired.
        IOError: if the open fails.
      """
      if self._locked:
        raise AlreadyLockedException('File %s is already locked' %
                                     self._filename)
      start_time = time.time()

      try:
        self._fh = open(self._filename, self._mode)
      except IOError, e:
        # If we can't access with _mode, try _fallback_mode and don't lock.
        if e.errno == errno.EACCES:
          self._fh = open(self._filename, self._fallback_mode)
          return

      # We opened in _mode, try to lock the file.
      while True:
        try:
          fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
          self._locked = True
          return
        except IOError, e:
          # If not retrying, then just pass on the error.
          if timeout == 0:
            raise e
          if e.errno != errno.EACCES:
            raise e

          # We could not acquire the lock. Try again.
          if (time.time() - start_time) >= timeout:
            logger.warn('Could not lock %s in %s seconds' % (
                self._filename, timeout))
            if self._fh:
              self._fh.close()
            # Give up on locking; reopen read-only (fallback) instead.
            self._fh = open(self._filename, self._fallback_mode)
            return
          time.sleep(delay)

    def unlock_and_close(self):
      """Close and unlock the file using the fcntl.lockf primitive."""
      if self._locked:
        fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
      self._locked = False

      if self._fh:
        self._fh.close()
except ImportError:
  # fcntl is unavailable (e.g. Windows); LockedFile falls back to another
  # opener when this is None.
  _FcntlOpener = None
try:
  import pywintypes
  import win32con
  import win32file

  class _Win32Opener(_Opener):
    """Open, lock, and unlock a file using windows primitives."""

    # Error #33:
    #  'The process cannot access the file because another process'
    FILE_IN_USE_ERROR = 33

    # Error #158:
    #  'The segment is already unlocked.'
    FILE_ALREADY_UNLOCKED_ERROR = 158

    def open_and_lock(self, timeout, delay):
      """Open the file and lock it.

      Args:
        timeout: float, How long to try to lock for.
        delay: float, How long to wait between retries

      Raises:
        AlreadyLockedException: if the lock is already acquired.
        IOError: if the open fails.
      """
      if self._locked:
        raise AlreadyLockedException('File %s is already locked' %
                                     self._filename)
      start_time = time.time()

      try:
        self._fh = open(self._filename, self._mode)
      except IOError, e:
        # If we can't access with _mode, try _fallback_mode and don't lock.
        if e.errno == errno.EACCES:
          self._fh = open(self._filename, self._fallback_mode)
          return

      # We opened in _mode, try to lock the file.
      while True:
        try:
          hfile = win32file._get_osfhandle(self._fh.fileno())
          # NOTE(review): -0x10000 is passed as the byte-range length (the
          # win32 API treats it as unsigned, i.e. a very large range) —
          # confirm against the LockFileEx documentation.
          win32file.LockFileEx(
              hfile,
              (win32con.LOCKFILE_FAIL_IMMEDIATELY|
               win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
              pywintypes.OVERLAPPED())
          self._locked = True
          return
        except pywintypes.error, e:
          if timeout == 0:
            raise e

          # If the error is not that the file is already in use, raise.
          if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
            raise

          # We could not acquire the lock. Try again.
          if (time.time() - start_time) >= timeout:
            logger.warn('Could not lock %s in %s seconds' % (
                self._filename, timeout))
            if self._fh:
              self._fh.close()
            # Give up on locking; reopen in fallback mode instead.
            self._fh = open(self._filename, self._fallback_mode)
            return
          time.sleep(delay)

    def unlock_and_close(self):
      """Close and unlock the file using the win32 primitive."""
      if self._locked:
        try:
          hfile = win32file._get_osfhandle(self._fh.fileno())
          win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
        except pywintypes.error, e:
          # Already unlocked is fine; anything else is a real error.
          if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
            raise
      self._locked = False

      if self._fh:
        self._fh.close()
except ImportError:
  # pywin32 is unavailable; LockedFile falls back to another opener when
  # this is None.
  _Win32Opener = None
class LockedFile(object):
  """Represent a file that has exclusive access."""

  def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
    """Construct a LockedFile.

    Args:
      filename: string, The path of the file to open.
      mode: string, The mode to try to open the file with.
      fallback_mode: string, The mode to use if locking fails.
      use_native_locking: bool, Whether or not fcntl/win32 locking is used.
    """
    opener = None
    if not opener and use_native_locking:
      # Prefer a native primitive whose module imported successfully;
      # fcntl wins if both are somehow available.
      if _Win32Opener:
        opener = _Win32Opener(filename, mode, fallback_mode)
      if _FcntlOpener:
        opener = _FcntlOpener(filename, mode, fallback_mode)

    if not opener:
      # Portable fallback based on a sibling .lock file.
      opener = _PosixOpener(filename, mode, fallback_mode)

    self._opener = opener

  def filename(self):
    """Return the filename we were constructed with."""
    # Use the opener's public accessor rather than reaching into its
    # private _filename attribute (consistency with the other delegates).
    return self._opener.filename()

  def file_handle(self):
    """Return the file_handle to the opened file."""
    return self._opener.file_handle()

  def is_locked(self):
    """Return whether we successfully locked the file."""
    return self._opener.is_locked()

  def open_and_lock(self, timeout=0, delay=0.05):
    """Open the file, trying to lock it.

    Args:
      timeout: float, The number of seconds to try to acquire the lock.
      delay: float, The number of seconds to wait between retry attempts.

    Raises:
      AlreadyLockedException: if the lock is already acquired.
      IOError: if the open fails.
    """
    self._opener.open_and_lock(timeout, delay)

  def unlock_and_close(self):
    """Unlock and close a file."""
    self._opener.unlock_and_close()
| Python |
# Package version string; the "c" suffix marks a release candidate.
__version__ = "1.0c2"
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
| Python |
import Cookie
import datetime
import time
import email.utils
import calendar
import base64
import hashlib
import hmac
import re
import logging
# Ripped from the Tornado Framework's web.py
# http://github.com/facebook/tornado/commit/39ac6d169a36a54bb1f6b9bf1fdebb5c9da96e09
#
# Tornado is licensed under the Apache Licence, Version 2.0
# (http://www.apache.org/licenses/LICENSE-2.0.html).
#
# Example:
# from vendor.prayls.lilcookies import LilCookies
# cookieutil = LilCookies(self, application_settings['cookie_secret'])
# cookieutil.set_secure_cookie(name = 'mykey', value = 'myvalue', expires_days= 365*100)
# cookieutil.get_secure_cookie(name = 'mykey')
class LilCookies:
  """Helper for reading and writing cookies, including HMAC-signed cookies.

  Adapted from the Tornado Framework's web.py.  A "secure" cookie value has
  the form 'b64(value)|timestamp|hmac_sha1(secret, name, value, timestamp)',
  which lets get_secure_cookie() detect forged or expired cookies.

  Example:
    cookieutil = LilCookies(self, application_settings['cookie_secret'])
    cookieutil.set_secure_cookie(name='mykey', value='myvalue',
                                 expires_days=365*100)
    cookieutil.get_secure_cookie(name='mykey')
  """

  @staticmethod
  def _utf8(s):
    """Returns s as a UTF-8 encoded byte string; accepts unicode or str."""
    if isinstance(s, unicode):
      return s.encode("utf-8")
    assert isinstance(s, str)
    return s

  @staticmethod
  def _time_independent_equals(a, b):
    """Compares two strings in time independent of the mismatch position.

    Comparing signatures with == would leak timing information about where
    the first mismatch occurs; this always examines every character.
    """
    if len(a) != len(b):
      return False
    result = 0
    for x, y in zip(a, b):
      result |= ord(x) ^ ord(y)
    return result == 0

  @staticmethod
  def _signature_from_secret(cookie_secret, *parts):
    """Returns the hex HMAC-SHA1 over `parts`, keyed on cookie_secret."""
    # Renamed from 'hash' to avoid shadowing the builtin.
    digest = hmac.new(cookie_secret, digestmod=hashlib.sha1)
    for part in parts:
      digest.update(part)
    return digest.hexdigest()

  @staticmethod
  def _signed_cookie_value(cookie_secret, name, value):
    """Returns a signed value, 'b64value|timestamp|signature', for a cookie.

    This is a standalone helper so other code can re-use the signing scheme
    without a LilCookies instance.
    """
    timestamp = str(int(time.time()))
    value = base64.b64encode(value)
    signature = LilCookies._signature_from_secret(
        cookie_secret, name, value, timestamp)
    return "|".join([value, timestamp, signature])

  @staticmethod
  def _verified_cookie_value(cookie_secret, name, signed_value):
    """Returns the un-encrypted value given the signed value if it validates,
    or None."""
    value = signed_value
    if not value:
      return None
    parts = value.split("|")
    if len(parts) != 3:
      return None
    signature = LilCookies._signature_from_secret(
        cookie_secret, name, parts[0], parts[1])
    if not LilCookies._time_independent_equals(parts[2], signature):
      logging.warning("Invalid cookie signature %r", value)
      return None
    timestamp = int(parts[1])
    # Reject cookies signed more than 31 days ago, regardless of the
    # expiration date the client claims.
    if timestamp < time.time() - 31 * 86400:
      logging.warning("Expired cookie %r", value)
      return None
    try:
      return base64.b64decode(parts[0])
    except Exception:
      # Malformed base64 payload; treat as an invalid cookie.  (Was a bare
      # 'except:', which also swallowed SystemExit/KeyboardInterrupt.)
      return None

  def __init__(self, handler, cookie_secret):
    """You must specify the cookie_secret to use any of the secure methods.

    It should be a long, random sequence of bytes to be used as the HMAC
    secret for the signature.

    Args:
      handler: a request handler exposing .request and .response.
      cookie_secret: str of at least 45 characters.

    Raises:
      ValueError: if cookie_secret is too short.
    """
    if len(cookie_secret) < 45:
      raise ValueError("LilCookies cookie_secret should at least be 45 characters long, but got `%s`" % cookie_secret)
    self.handler = handler
    self.request = handler.request
    self.response = handler.response
    self.cookie_secret = cookie_secret

  def cookies(self):
    """A dictionary of Cookie.Morsel objects."""
    if not hasattr(self, "_cookies"):
      self._cookies = Cookie.BaseCookie()
      if "Cookie" in self.request.headers:
        try:
          self._cookies.load(self.request.headers["Cookie"])
        except Exception:
          # Unparseable Cookie header; expire everything the client sent so
          # it stops sending the bad header.  (Was a bare 'except:'.)
          self.clear_all_cookies()
    return self._cookies

  def get_cookie(self, name, default=None):
    """Gets the value of the cookie with the given name, else default."""
    if name in self.cookies():
      return self._cookies[name].value
    return default

  def set_cookie(self, name, value, domain=None, expires=None, path="/",
                 expires_days=None, **kwargs):
    """Sets the given cookie name/value with the given options.

    Additional keyword arguments are set on the Cookie.Morsel
    directly.
    See http://docs.python.org/library/cookie.html#morsel-objects
    for available attributes.
    """
    name = LilCookies._utf8(name)
    value = LilCookies._utf8(value)
    if re.search(r"[\x00-\x20]", name + value):
      # Don't let us accidentally inject bad stuff
      raise ValueError("Invalid cookie %r: %r" % (name, value))
    if not hasattr(self, "_new_cookies"):
      self._new_cookies = []
    new_cookie = Cookie.BaseCookie()
    self._new_cookies.append(new_cookie)
    new_cookie[name] = value
    if domain:
      new_cookie[name]["domain"] = domain
    if expires_days is not None and not expires:
      expires = datetime.datetime.utcnow() + datetime.timedelta(
          days=expires_days)
    if expires:
      timestamp = calendar.timegm(expires.utctimetuple())
      new_cookie[name]["expires"] = email.utils.formatdate(
          timestamp, localtime=False, usegmt=True)
    if path:
      new_cookie[name]["path"] = path
    for k, v in kwargs.iteritems():
      new_cookie[name][k] = v
    # The 2 lines below were not in Tornado.  Tornado instead outputs all its
    # cookies to the headers at once before a response flush.
    for vals in new_cookie.values():
      self.response.headers._headers.append(
          ('Set-Cookie', vals.OutputString(None)))

  def clear_cookie(self, name, path="/", domain=None):
    """Deletes the cookie with the given name."""
    # An expiration date in the past makes the browser discard the cookie.
    expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
    self.set_cookie(name, value="", path=path, expires=expires,
                    domain=domain)

  def clear_all_cookies(self):
    """Deletes all the cookies the user sent with this request."""
    for name in self.cookies().iterkeys():
      self.clear_cookie(name)

  def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
    """Signs and timestamps a cookie so it cannot be forged.

    To read a cookie set with this method, use get_secure_cookie().
    """
    value = LilCookies._signed_cookie_value(self.cookie_secret, name, value)
    self.set_cookie(name, value, expires_days=expires_days, **kwargs)

  def get_secure_cookie(self, name, value=None):
    """Returns the given signed cookie if it validates, or None."""
    if value is None:
      value = self.get_cookie(name)
    return LilCookies._verified_cookie_value(self.cookie_secret, name, value)

  def _cookie_signature(self, *parts):
    """Returns the HMAC signature of `parts` using this instance's secret."""
    # Bug fix: previously `parts` was dropped, so this signed nothing but
    # the secret itself and returned the same digest for every input.
    return LilCookies._signature_from_secret(self.cookie_secret, *parts)
| Python |
# Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
# Module metadata for this vendored copy of Joe Gregorio's mimeparse.
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parses a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

       ('application', 'xhtml', {'q': '0.5'})
    """
    pieces = mime_type.split(';')
    params = {}
    for piece in pieces[1:]:
        key, value = [s.strip() for s in piece.split('=', 1)]
        params[key] = value
    full_type = pieces[0].strip()
    # Java's URLConnection class sends an Accept header that includes a
    # single '*'.  Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    (main_type, sub_type) = full_type.split('/')
    return (main_type.strip(), sub_type.strip(), params)
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the media
    range.  For example, the media range 'application/*;q=0.5' would get
    parsed into:

       ('application', '*', {'q': '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.
    """
    (type, subtype, params) = parse_mime_type(range)
    # dict.has_key() was removed in Python 3; params.get()/'in' behaves
    # identically here and works on both 2.x and 3.x.
    q = params.get('q')
    # Missing, empty, zero, or out-of-range q values fall back to 1.
    if not q or not float(q) or float(q) > 1 or float(q) < 0:
        params['q'] = '1'
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range().  Returns a tuple of
    the fitness value and the value of the 'q' quality parameter of the best
    match, or (-1, 0) if no match was found.  Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.
    """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) =\
            parse_media_range(mime_type)
    for (type, subtype, params) in parsed_ranges:
        type_match = (type == target_type or\
                      type == '*' or\
                      target_type == '*')
        subtype_match = (subtype == target_subtype or\
                         subtype == '*' or\
                         target_subtype == '*')
        if type_match and subtype_match:
            # Count the non-'q' parameters the range shares with the target.
            # sum() over a generator replaces the Python-2-only combination
            # of the reduce() builtin, iteritems() and has_key().
            param_matches = sum(
                1 for key, value in target_params.items()
                if key != 'q' and key in params and params[key] == value)
            # Exact type match scores 100, exact subtype 10, plus one point
            # per matching parameter.
            fitness = 100 if type == target_type else 0
            fitness += 10 if subtype == target_subtype else 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Return the quality ('q') of a mime-type against pre-parsed ranges.

    Finds the best match for the given mime-type against a list of
    media-ranges that have already been run through parse_media_range(), and
    returns the 'q' quality parameter of that match, or 0 if nothing matched.
    Behaves the same as quality() except for the pre-parsed argument.
    """
    fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    Returns the quality 'q' of a mime-type when compared against the
    comma-separated media-ranges in 'ranges'.  For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
                  text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed_ranges = []
    for r in ranges.split(','):
        parsed_ranges.append(parse_media_range(r))
    return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
    """Return mime-type with the highest quality ('q') from list of candidates.

    Takes a list of supported mime-types and finds the best match for all the
    media-ranges listed in header.  The value of header must be a string that
    conforms to the format of the HTTP Accept: header.  The value of
    'supported' is a list of mime-types.  The list of supported mime-types
    should be sorted in order of increasing desirability, in case of a
    situation where there is a tie.

    >>> best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    split_header = _filter_blank(header.split(','))
    parsed_header = [parse_media_range(r) for r in split_header]
    weighted_matches = []
    # enumerate() replaces the manual 'pos' counter.  The position is kept in
    # the sort key so that ties on (fitness, q) are broken in favor of the
    # entry listed later in 'supported' (callers sort by desirability).
    for pos, mime_type in enumerate(supported):
        weighted_matches.append((fitness_and_quality_parsed(mime_type,
                                 parsed_header), pos, mime_type))
    weighted_matches.sort()
    # A best quality of 0 means nothing matched; return '' in that case.
    return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import mimeparse
import mimetypes
import os
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client.anyjson import simplejson
# Default chunk size (bytes) used for chunked/resumable media transfers.
DEFAULT_CHUNK_SIZE = 512*1024
class MediaUploadProgress(object):
  """Reports how far a resumable upload has progressed."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of upload completed, as a float.

    Returns:
      the fraction complete as a float, or 0.0 when the total size of the
      upload is unknown.
    """
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Reports how far a chunked download has progressed."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of download completed, as a float.

    Returns:
      the fraction downloaded as a float, or 0.0 when the total size of the
      download is unknown.
    """
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when upload a media object.  Keep chunks as large as possible so uploads
  stay efficient, but be aware of environments (such as certain classes of
  requests under Google App Engine) where an individual HTTP request has a
  hard time limit.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  def getbytes(self, begin, end):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      end: int, number of bytes to read, starting at begin.
        NOTE(review): despite the name, subclasses implement this parameter
        as a byte count ('length'), not an end offset — confirm before use.

    Returns:
      A string of bytes read.  May be shorter than the requested count if
      EOF was reached first.
    """
    raise NotImplementedError()

  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, names of members to leave out of the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    cls = type(self)
    state = copy.copy(self.__dict__)
    for member in (strip or []):
      del state[member]
    # Record the class and module so new_from_json() can find the right
    # from_json() to call.
    state['_class'] = cls.__name__
    state['_module'] = cls.__module__
    return simplejson.dumps(state)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
        to_json().
    """
    data = simplejson.loads(s)
    # Import the module that defined the instance, then delegate to that
    # class's own from_json() classmethod to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
class MediaFileUpload(MediaUpload):
  """A MediaUpload for a file.

  Construct a MediaFileUpload and pass it as the media_body parameter of the
  method.  For example, if we had a service that allowed uploading images:

    media = MediaFileUpload('cow.png', mimetype='image/png',
                            chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Constructor.

    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file.  If None then a mime-type will
        be guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload.  False means
        upload in a single request.
    """
    self._filename = filename
    self._size = os.path.getsize(filename)
    # The file handle is opened lazily, on the first getbytes() call.
    self._fd = None
    if mimetype is None:
      (mimetype, _encoding) = mimetypes.guess_type(filename)
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read.  May be shorter than length if EOF was reached
      first.
    """
    if self._fd is None:
      self._fd = open(self._filename, 'rb')
    self._fd.seek(begin)
    return self._fd.read(length)

  def to_json(self):
    """Creating a JSON representation of an instance of MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    # The open file handle cannot be serialized; drop it and reopen lazily
    # after from_json().
    return self._to_json(['_fd'])

  @staticmethod
  def from_json(s):
    """Restore a MediaFileUpload serialized with to_json()."""
    state = simplejson.loads(s)
    return MediaFileUpload(
        state['_filename'], state['_mimetype'], state['_chunksize'],
        state['_resumable'])
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload for a io.Base objects.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.

    fh = io.BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
                              chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, fh, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      fh: io.Base or file object, The source of the bytes to upload.  MUST be
        opened in blocking mode; do not use streams opened in non-blocking
        mode.
      mimetype: string, Mime-type of the file.  If None then a mime-type will
        be guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload.  False means
        upload in a single request.
    """
    self._fh = fh
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    self._size = None
    try:
      if hasattr(self._fh, 'fileno'):
        fileno = self._fh.fileno()
        # Pipes and such show up as 0 length files; leave _size as None
        # (unknown) in that case.
        size = os.fstat(fileno).st_size
        if size:
          self._size = size
    except IOError:
      pass

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read.  May be shorter than length if EOF was reached
      first.
    """
    self._fh.seek(begin)
    return self._fh.read(length)

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes.

  Construct a MediaInMemoryUpload and pass it as the media_body parameter of
  the method.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes.
        Only used if resumable=True.
      resumable: bool, True if this is a resumable upload.  False means
        upload in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read.  May be shorter than length if EOF was reached
      first.
    """
    return self._body[begin:begin + length]

  def to_json(self):
    """Create a JSON representation of a MediaInMemoryUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    cls = type(self)
    state = copy.copy(self.__dict__)
    del state['_body']
    state['_class'] = cls.__name__
    state['_module'] = cls.__module__
    # The raw body may not be valid JSON text, so ship it base64-encoded.
    state['_b64body'] = base64.b64encode(self._body)
    return simplejson.dumps(state)

  @staticmethod
  def from_json(s):
    """Restore a MediaInMemoryUpload serialized with to_json()."""
    state = simplejson.loads(s)
    return MediaInMemoryUpload(base64.b64decode(state['_b64body']),
                               state['_mimetype'], state['_chunksize'],
                               state['_resumable'])
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
def __init__(self, fh, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fh: io.Base or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self.fh_ = fh
self.request_ = request
self.uri_ = request.uri
self.chunksize_ = chunksize
self.progress_ = 0
self.total_size_ = None
self.done_ = False
def next_chunk(self):
"""Get the next chunk of the download.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.Error if a transport error has occured.
"""
headers = {
'range': 'bytes=%d-%d' % (
self.progress_, self.progress_ + self.chunksize_)
}
http = self.request_.http
http.follow_redirects = False
resp, content = http.request(self.uri_, headers=headers)
if resp.status in [301, 302, 303, 307, 308] and 'location' in resp:
self.uri_ = resp['location']
resp, content = http.request(self.uri_, headers=headers)
if resp.status in [200, 206]:
self.progress_ += len(content)
self.fh_.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self.total_size_ = int(length)
if self.progress_ == self.total_size_:
self.done_ = True
return MediaDownloadProgress(self.progress_, self.total_size_), self.done_
else:
raise HttpError(resp, content, self.uri_)
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
                it into a data object before returning, or raising an exception
                on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    # Set to True when a chunk upload fails, so the next next_chunk() call
    # first asks the server how many bytes it actually received.
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    # NOTE(review): major/minor/params appear unused within this class.
    major, minor, params = mimeparse.parse_mime_type(
        headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')

    # The resumable URI to send chunks to.
    self.resumable_uri = None

    # The bytes that have been uploaded.
    self.resumable_progress = 0

  def execute(self, http=None):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occurred.
    """
    if http is None:
      http = self.http
    if self.resumable:
      body = None
      # Upload one chunk at a time until next_chunk() hands back the
      # deserialized response body.
      while body is None:
        _, body = self.next_chunk(http)
      return body
    else:
      if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)
      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=self.headers)
      if resp.status >= 300:
        raise HttpError(resp, content, self.uri)
    return self.postproc(resp, content)

  def next_chunk(self, http=None):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occurred.
    """
    if http is None:
      http = self.http

    # '*' tells the server the total upload size is not yet known.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First call: start the resumable upload session.  The server responds
      # with a session URI to which all subsequent chunks are PUT.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=start_headers)
      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError("Failed to retrieve starting URI.")
    elif self._in_error_state:
      # If we are in an error state then query the server for current state
      # of the upload by sending an empty PUT and reading the 'range' header
      # in the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    data = self.resumable.getbytes(
        self.resumable_progress, self.resumable.chunksize())

    # A short read implies that we are at EOF, so finish the upload.
    if len(data) < self.resumable.chunksize():
      size = str(self.resumable_progress + len(data))

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, self.resumable_progress + len(data) - 1,
            size)
        }
    try:
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   body=data,
                                   headers=headers)
    except:
      # Remember the failure so the next call re-queries the server for how
      # much it actually received before resending.
      self._in_error_state = True
      raise

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      # The 'range' header looks like 'bytes=0-N'; the next byte to send
      # is N+1.
      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # The transport and postproc callables cannot be serialized.
    del d['http']
    del d['postproc']
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = simplejson.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
class BatchHttpRequest(object):
"""Batches multiple HttpRequest objects into a single HTTP request.
Example:
from apiclient.http import BatchHttpRequest
def list_animals(request_id, response):
\"\"\"Do something with the animals list response.\"\"\"
pass
def list_farmers(request_id, response):
\"\"\"Do something with the farmers list response.\"\"\"
pass
service = build('farm', 'v2')
batch = BatchHttpRequest()
batch.add(service.animals().list(), list_animals)
batch.add(service.farmers().list(), list_farmers)
batch.execute(http)
"""
  def __init__(self, callback=None, batch_uri=None):
    """Constructor for a BatchHttpRequest.

    Args:
      callback: callable, A callback to be called for each response, of the
        form callback(id, response). The first parameter is the request id, and
        the second is the deserialized response object.
      batch_uri: string, URI to send batch requests to.  Defaults to the
        Google APIs batch endpoint.
    """
    if batch_uri is None:
      batch_uri = 'https://www.googleapis.com/batch'
    self._batch_uri = batch_uri

    # Global callback to be called for each individual response in the batch.
    self._callback = callback

    # A map from id to request.
    self._requests = {}

    # A map from id to per-request callback (in addition to self._callback).
    self._callbacks = {}

    # List of request ids, in the order in which they were added.
    self._order = []

    # The last auto generated id.
    self._last_auto_id = 0

    # Unique ID on which to base the Content-ID headers.
    self._base_id = None

    # A map from request id to (headers, content) response pairs
    self._responses = {}

    # A map of id(Credentials) that have been refreshed, so each credentials
    # object is refreshed at most once per batch.
    self._refreshed_credentials = {}
  def _refresh_and_apply_credentials(self, request, http):
    """Refresh the credentials and apply to the request.

    Args:
      request: HttpRequest, the request.
      http: httplib2.Http, the global http object for the batch.
    """
    # Find the credentials to refresh, but only once per refresh_token.
    # If there is no http on the request then refresh the http passed in
    # via execute().
    creds = None
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      creds = request.http.request.credentials
    elif http is not None and hasattr(http.request, 'credentials'):
      creds = http.request.credentials
    if creds is not None:
      # Keyed by object identity: refresh each Credentials object at most
      # once per batch, no matter how many requests share it.
      if id(creds) not in self._refreshed_credentials:
        creds.refresh(http)
        self._refreshed_credentials[id(creds)] = 1

      # Only apply the credentials if we are using the http object passed in,
      # otherwise apply() will get called during _serialize_request().
      if request.http is None or not hasattr(request.http.request,
          'credentials'):
        creds.apply(request.headers)
def _id_to_header(self, id_):
"""Convert an id to a Content-ID header value.
Args:
id_: string, identifier of individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
if self._base_id is None:
self._base_id = uuid.uuid4()
return '<%s+%s>' % (self._base_id, urllib.quote(id_))
def _header_to_id(self, header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that _id_to_header()
returns.
Args:
header: string, Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
if header[0] != '<' or header[-1] != '>':
raise BatchError("Invalid value for Content-ID: %s" % header)
if '+' not in header:
raise BatchError("Invalid value for Content-ID: %s" % header)
base, id_ = header[1:-1].rsplit('+', 1)
return urllib.unquote(id_)
  def _serialize_request(self, request):
    """Convert an HttpRequest object into a string.

    Args:
      request: HttpRequest, the request to serialize.

    Returns:
      The request as a string in application/http format.
    """
    # Construct the request line: strip scheme/host so only the path,
    # params, and query are sent in the individual batch part.
    parsed = urlparse.urlparse(request.uri)
    request_line = urlparse.urlunparse(
        (None, None, parsed.path, parsed.params, parsed.query, None)
        )
    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
    major, minor = request.headers.get('content-type', 'application/json').split('/')
    msg = MIMENonMultipart(major, minor)
    headers = request.headers.copy()

    # If the request carries its own credentials, apply them to the copied
    # headers so the Authorization header is serialized into this part.
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      request.http.request.credentials.apply(headers)

    # MIMENonMultipart adds its own Content-Type header.
    if 'content-type' in headers:
      del headers['content-type']

    for key, value in headers.iteritems():
      msg[key] = value
    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
      msg.set_payload(request.body)
      msg['content-length'] = str(len(request.body))

    # Serialize the mime message.
    fp = StringIO.StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    g = Generator(fp, maxheaderlen=0)
    g.flatten(msg, unixfrom=False)
    body = fp.getvalue()

    # Strip off the \n\n that the MIME lib tacks onto the end of the payload.
    if request.body is None:
      body = body[:-2]

    return status_line.encode('utf-8') + body
  def _deserialize_response(self, payload):
    """Convert string into httplib2 response and content.

    Args:
      payload: string, headers and body as a string.

    Returns:
      A pair (resp, content) like would be returned from httplib2.request.
    """
    # Strip off the status line, e.g. 'HTTP/1.1 200 OK'.
    status_line, payload = payload.split('\n', 1)
    protocol, status, reason = status_line.split(' ', 2)

    # Parse the headers of the response using the email header parser.
    parser = FeedParser()
    parser.feed(payload)
    msg = parser.close()
    msg['status'] = status

    # Create httplib2.Response from the parsed headers.
    resp = httplib2.Response(msg)
    resp.reason = reason
    # e.g. 'HTTP/1.1' -> 11
    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

    # The body is everything after the first blank line.
    content = payload.split('\r\n\r\n', 1)[1]

    return resp, content
def _new_id(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
string, a new unique id.
"""
self._last_auto_id += 1
while str(self._last_auto_id) in self._requests:
self._last_auto_id += 1
return str(self._last_auto_id)
def add(self, request, callback=None, request_id=None):
"""Add a new request.
Every callback added will be paired with a unique id, the request_id. That
unique id will be passed back to the callback when the response comes back
from the server. The default behavior is to have the library generate it's
own unique id. If the caller passes in a request_id then they must ensure
uniqueness for each request_id, and if they are not an exception is
raised. Callers should either supply all request_ids or nevery supply a
request id, to avoid such an error.
Args:
request: HttpRequest, Request to add to the batch.
callback: callable, A callback to be called for this response, of the
form callback(id, response). The first parameter is the request id, and
the second is the deserialized response object.
request_id: string, A unique id for the request. The id will be passed to
the callback with the response.
Returns:
None
Raises:
BatchError if a media request is added to a batch.
KeyError is the request_id is not unique.
"""
if request_id is None:
request_id = self._new_id()
if request.resumable is not None:
raise BatchError("Media requests cannot be used in a batch request.")
if request_id in self._requests:
raise KeyError("A request with this ID already exists: %s" % request_id)
self._requests[request_id] = request
self._callbacks[request_id] = callback
self._order.append(request_id)
  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      requests: dict, mapping of request id to request object to send.

    Raises:
      httplib2.Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers; they are sent as the
    # headers of the enclosing HTTP POST instead.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests, one application/http part each,
    # correlated back to the request via the Content-ID header.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    body = message.as_string()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, 'POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, self._batch_uri)

    # Now break out the individual responses and store each one.
    boundary, _ = content.split(None, 1)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp,
          content)

    # Each part maps back to its originating request via its Content-ID.
    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      headers, content = self._deserialize_response(part.get_payload())
      self._responses[request_id] = (headers, content)
  def execute(self, http=None):
    """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with. If one isn't
        supplied then use an http object from the requests in this batch.

    Returns:
      None

    Raises:
      httplib2.Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    # If http is not supplied use the first valid one given in the requests.
    if http is None:
      for request_id in self._order:
        request = self._requests[request_id]
        if request is not None:
          http = request.http
          break

    if http is None:
      raise ValueError("Missing a valid http object.")

    self._execute(http, self._order, self._requests)

    # Loop over all the requests and check for 401s. For each 401 request the
    # credentials should be refreshed and then sent again in a separate batch.
    redo_requests = {}
    redo_order = []

    for request_id in self._order:
      headers, content = self._responses[request_id]
      if headers['status'] == '401':
        redo_order.append(request_id)
        request = self._requests[request_id]
        self._refresh_and_apply_credentials(request, http)
        redo_requests[request_id] = request

    if redo_requests:
      # Retried responses overwrite the 401 entries in self._responses.
      self._execute(http, redo_order, redo_requests)

    # Now process all callbacks that are erroring, and raise an exception for
    # ones that return a non-2xx response? Or add extra parameter to callback
    # that contains an HttpError?
    for request_id in self._order:
      headers, content = self._responses[request_id]

      request = self._requests[request_id]
      callback = self._callbacks[request_id]

      response = None
      exception = None
      try:
        # Run the raw response through the request's postproc so callbacks
        # receive a deserialized object; postproc raises HttpError for
        # non-2xx statuses.
        r = httplib2.Response(headers)
        response = request.postproc(r, content)
      except HttpError, e:
        exception = e

      # Invoke the per-request callback first, then the batch-level one.
      if callback is not None:
        callback(request_id, response, exception)
      if self._callback is not None:
        self._callback(request_id, response, exception)
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
        the model class. See model.JsonModel.response() as an example.
    """
    self.content = content
    self.postproc = postproc
    # Fabricate a plain 200 response when none was supplied.
    if resp is None:
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    # Mirror the 'reason' entry onto the attribute httplib2 callers expect.
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content, or raise an exception
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    Args:
      responses: dict, maps methodIds to tuples of (httplib2.Response,
        content). The methodId comes from the 'rpcName' field in the
        discovery document.
      check_unexpected: boolean, whether UnexpectedMethodError should be
        raised for a method with no supplied response.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    # Unknown method: either complain loudly or return an empty JSON body.
    if methodId not in self.responses:
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId)
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    response = self.responses[methodId]
    resp, content = response[:2]
    if len(response) > 2:
      # Test the body against the supplied expected_body.
      expected_body = response[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one,
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      # Compare as deserialized JSON so formatting differences don't matter.
      if isinstance(expected_body, str):
        expected_body = simplejson.loads(expected_body)
      body = simplejson.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
  """Mock of httplib2.Http that replays a canned response read from a file."""

  def __init__(self, filename, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response
    """
    if headers is None:
      headers = {'status': '200 OK'}
    # Use open() instead of the Python-2-only file() builtin, and make sure
    # the handle is closed even if read() raises.
    f = open(filename, 'r')
    try:
      self.data = f.read()
    finally:
      f.close()
    self.headers = headers

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Return the canned (headers, body) pair regardless of the request."""
    return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
  """Mock of httplib2.Http

  Mocks a sequence of calls to request returning different responses for each
  call. Create an instance initialized with the desired response headers
  and content and then use as if an httplib2.Http instance.

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behavours that are helpful in testing.

  'echo_request_headers' means return the request headers in the response body
  'echo_request_headers_as_json' means return the request headers in
    the response body
  'echo_request_body' means return the request body in the response body
  'echo_request_uri' means return the request uri in the response body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable
    self.follow_redirects = True

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    # Consume the next canned response in order.
    resp, content = self._iterable.pop(0)
    # Magic content markers echo pieces of the request back; lambdas keep
    # simplejson from being invoked unless actually requested.
    echoes = {
        'echo_request_headers': lambda: headers,
        'echo_request_headers_as_json': lambda: simplejson.dumps(headers),
        'echo_request_body': lambda: body,
        'echo_request_uri': lambda: uri,
    }
    if content in echoes:
      content = echoes[content]()
    return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
    http: an instance of httplib2.Http, or something that acts like it.
    user_agent: string, the value for the user-agent header.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the
  rare cases where you are accessing an unauthenticated endpoint.
  """
  request_orig = http.request

  # Closure that wraps the original request method, injecting user_agent.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Prepend user_agent to any existing user-agent header, then delegate."""
    if headers is None:
      headers = {}
    if 'user-agent' in headers:
      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
    else:
      headers['user-agent'] = user_agent
    return request_orig(uri, method, body, headers,
                        redirections, connection_type)

  http.request = new_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
    http - An instance of httplib2.Http
        or something that acts like it.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Rewrite a PATCH request into a POST with X-HTTP-Method-Override."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        # The OAuth 1.0 signature covers the HTTP method, so it was computed
        # for PATCH and no longer matches after the rewrite.
        logging.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    resp, content = request_orig(uri, method, body, headers,
                                 redirections, connection_type)
    return resp, content

  http.request = new_request
  return http
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Base error for this module.

  All other exceptions defined in this module derive from this class.
  """
  pass
class RequestError(Error):
  """Error occurred during request.

  Raised when an OAuth endpoint returns a non-200 response.
  """
  pass
class MissingParameter(Error):
  """Raised when a required OAuth parameter is missing or not yet set."""
  pass
class CredentialsInvalidError(Error):
  """Raised when the credentials are no longer valid, e.g. revoked."""
  pass
def _abstract():
  """Raise NotImplementedError; placeholder body for abstract methods."""
  raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
  """Look up an OAuth endpoint URI in the discovery document and append
  the query parameters it accepts.

  Args:
    name: string, the name of the OAuth URI to look up; one of 'request',
      'access', or 'authorize'.
    discovery: dict, the portion of the discovery document that describes
      the OAuth endpoints.
    params: dict, used to fill in the query parameters for the URI.

  Returns:
    The endpoint URL with the accepted parameters urlencoded onto it.

  Raises:
    KeyError: if name is not one of the known endpoint names.
  """
  if name not in ['request', 'access', 'authorize']:
    raise KeyError(name)
  endpoint = discovery[name]
  # Only pass along the parameters this endpoint declares in the
  # discovery document.
  query = dict((key, params[key])
               for key in endpoint['parameters'].keys() if key in params)
  return endpoint['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
  """Base class for all Credentials objects.

  Subclasses must define an authorize() method
  that applies the credentials to an HTTP transport.
  """

  def authorize(self, http):
    """Take an httplib2.Http instance (or equivalent) and
    authorizes it for the set of credentials, usually by
    replacing http.request() with a method that adds in
    the appropriate headers and then delegates to the original
    Http.request() method.
    """
    # Abstract; concrete subclasses (e.g. OAuthCredentials) override this.
    _abstract()
class Flow(object):
  """Base class for all Flow objects.

  A Flow models a multi-step authorization process; see FlowThreeLegged
  below for a concrete implementation.
  """
  pass
class Storage(object):
  """Base class for all Storage objects.

  Store and retrieve a single credential.
  """

  def get(self):
    """Retrieve credential.

    Returns:
      apiclient.oauth.Credentials
    """
    # Abstract; subclasses must override.
    _abstract()

  def put(self, credentials):
    """Write a credential.

    Args:
      credentials: Credentials, the credentials to store.
    """
    # Abstract; subclasses must override.
    _abstract()
class OAuthCredentials(Credentials):
  """Credentials object for OAuth 1.0a
  """

  def __init__(self, consumer, token, user_agent):
    """
    Args:
      consumer: An instance of oauth.Consumer.
      token: An instance of oauth.Token constructed with
        the access token and secret.
      user_agent: The HTTP User-Agent to provide for this application.
    """
    self.consumer = consumer
    self.token = token
    self.user_agent = user_agent
    # Optional callable used to persist this credential when it changes;
    # set via set_store().
    self.store = None

    # True if the credentials have been revoked.
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr guards against instances unpickled from older versions that
    # never had an _invalid attribute.
    return getattr(self, "_invalid", False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store callable is not picklable; drop it.
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth
    subclass of httplib2.Authentication because
    it never gets passed the absolute URI, which is
    needed for signing. So instead we have to overload
    'request' with a closure that adds in the
    Authorization header and then calls the original version
    of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      response_code = 302
      # Redirects are followed manually so that each redirected URI can be
      # re-signed; the OAuth signature covers the absolute URI.
      http.follow_redirects = False
      while response_code in [301, 302]:
        req = oauth.Request.from_consumer_and_token(
            self.consumer, self.token, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, self.token)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      # Update the stored credential if it becomes invalid.
      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        self._invalid = True
        if self.store is not None:
          self.store(self)
        raise CredentialsInvalidError("Credentials are no longer valid.")

      return resp, content

    http.request = new_request
    return http
class TwoLeggedOAuthCredentials(Credentials):
  """Two Legged Credentials object for OAuth 1.0a.

  The Two Legged object is created directly, not from a flow. Once you
  authorize an httplib2.Http instance you can change the requestor and that
  change will propagate to the authorized httplib2.Http instance. For example:

    http = httplib2.Http()
    http = credentials.authorize(http)

    credentials.requestor = 'foo@example.info'
    http.request(...)
    credentials.requestor = 'bar@example.info'
    http.request(...)
  """

  def __init__(self, consumer_key, consumer_secret, user_agent):
    """
    Args:
      consumer_key: string, An OAuth 1.0 consumer key
      consumer_secret: string, An OAuth 1.0 consumer secret
      user_agent: string, The HTTP User-Agent to provide for this application.
    """
    self.consumer = oauth.Consumer(consumer_key, consumer_secret)
    self.user_agent = user_agent
    # Optional callable used to persist this credential; set via set_store().
    self.store = None

    # Email address of the user to act on the behalf of. Must be set via the
    # requestor property before the authorized http object is used.
    self._requestor = None

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked.

    Always returns False for Two Legged Credentials.
    """
    return False

  def getrequestor(self):
    # Accessor for the requestor property defined below.
    return self._requestor

  def setrequestor(self, email):
    # Mutator for the requestor property defined below.
    self._requestor = email

  requestor = property(getrequestor, setrequestor, None,
      'The email address of the user to act on behalf of')

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store callable is not picklable; drop it.
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth
    subclass of httplib2.Authentication because
    it never gets passed the absolute URI, which is
    needed for signing. So instead we have to overload
    'request' with a closure that adds in the
    Authorization header and then calls the original version
    of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      response_code = 302
      # Redirects are followed manually so each redirected URI is re-signed.
      http.follow_redirects = False
      while response_code in [301, 302]:
        # Add in xoauth_requestor_id=self._requestor to the uri.
        if self._requestor is None:
          raise MissingParameter(
              'Requestor must be set before using TwoLeggedOAuthCredentials')
        parsed = list(urlparse.urlparse(uri))
        q = parse_qsl(parsed[4])
        q.append(('xoauth_requestor_id', self._requestor))
        parsed[4] = urllib.urlencode(q)
        uri = urlparse.urlunparse(parsed)

        req = oauth.Request.from_consumer_and_token(
            self.consumer, None, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, None)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        # Do not store the invalid state of the Credentials because
        # being 2LO they could be reinstated in the future.
        raise CredentialsInvalidError("Credentials are invalid.")

      return resp, content

    http.request = new_request
    return http
class FlowThreeLegged(Flow):
  """Does the Three Legged Dance for OAuth 1.0a.
  """

  def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
               **kwargs):
    """
    Args:
      discovery - Section of the API discovery document that describes
          the OAuth endpoints.
      consumer_key - OAuth consumer key
      consumer_secret - OAuth consumer secret
      user_agent - The HTTP User-Agent that identifies the application.
      **kwargs - The keyword arguments are all optional and required
          parameters for the OAuth calls.

    Raises:
      MissingParameter: if a parameter the discovery document marks as
          required was not supplied in **kwargs.
    """
    self.discovery = discovery
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.user_agent = user_agent
    self.params = kwargs
    # Populated by step1_get_authorize_url(), consumed by step2_exchange().
    self.request_token = {}
    required = {}
    # Collect every required non-oauth parameter across all endpoints and
    # verify the caller supplied each one.
    for uriinfo in discovery.itervalues():
      for name, value in uriinfo['parameters'].iteritems():
        if value['required'] and not name.startswith('oauth_'):
          required[name] = 1
    for key in required.iterkeys():
      if key not in self.params:
        raise MissingParameter('Required parameter %s not supplied' % key)

  def step1_get_authorize_url(self, oauth_callback='oob'):
    """Returns a URI to redirect to the provider.

    oauth_callback - Either the string 'oob' for a non-web-based application,
        or a URI that handles the callback from the authorization
        server.

    If oauth_callback is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.

    Raises:
      RequestError: if the request-token request does not return a 200.
    """
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    body = urllib.urlencode({'oauth_callback': oauth_callback})
    uri = _oauth_uri('request', self.discovery, self.params)

    resp, content = client.request(uri, 'POST', headers=headers,
                                   body=body)
    if resp['status'] != '200':
      logging.error('Failed to retrieve temporary authorization: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    # The temporary request token comes back urlencoded in the body.
    self.request_token = dict(parse_qsl(content))

    auth_params = copy.copy(self.params)
    auth_params['oauth_token'] = self.request_token['oauth_token']

    return _oauth_uri('authorize', self.discovery, auth_params)

  def step2_exchange(self, verifier):
    """Exchanges an authorized request token
    for OAuthCredentials.

    Args:
      verifier: string, dict - either the verifier token, or a dictionary
        of the query parameters to the callback, which contains
        the oauth_verifier.

    Returns:
      The Credentials object.

    Raises:
      RequestError: if the access-token request does not return a 200.
    """
    if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
      verifier = verifier['oauth_verifier']

    token = oauth.Token(
        self.request_token['oauth_token'],
        self.request_token['oauth_token_secret'])
    token.set_verifier(verifier)
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer, token)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }

    uri = _oauth_uri('access', self.discovery, self.params)
    resp, content = client.request(uri, 'POST', headers=headers)
    if resp['status'] != '200':
      logging.error('Failed to retrieve access token: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    # The long-lived access token and secret come back urlencoded.
    oauth_params = dict(parse_qsl(content))
    token = oauth.Token(
        oauth_params['oauth_token'],
        oauth_params['oauth_token_secret'])

    return OAuthCredentials(consumer, token, self.user_agent)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from errors import HttpError
from oauth2client.anyjson import simplejson
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
)
def _abstract():
  """Raise NotImplementedError; placeholder body for abstract methods."""
  raise NotImplementedError('You need to override this function')
class Model(object):
  """Model base class.

  All Model classes should implement this interface.
  The Model serializes and de-serializes between a wire
  format such as JSON and a Python object representation.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
                  serializable.
    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized in the desired wire format.
    """
    # Abstract; concrete subclasses (e.g. BaseModel) override this.
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    # Abstract; concrete subclasses override this.
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """

  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
                  serializable.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized in the desired wire format.
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    # Append our client identifier to any caller-supplied user-agent.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'

    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    if self.alt_param is not None:
      params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if type(value) == type([]):
        for x in value:
          # Fix: only encode members that are actually strings. Previously
          # every list member was unconditionally .encode('utf-8')d, which
          # raised AttributeError for non-string members (ints, bools, ...),
          # unlike the scalar branch below which guards the call.
          if getattr(x, 'encode', False) and callable(x.encode):
            x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request and response bodies.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    # Wrap the payload as {'data': ...} when requested, but never
    # double-wrap a dict that already carries a 'data' key.
    needs_wrapper = (self._data_wrapper and
                     isinstance(body_value, dict) and
                     'data' not in body_value)
    if needs_wrapper:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    # Unwrap the {'data': ...} envelope when present.
    parsed = simplejson.loads(content)
    if isinstance(parsed, dict) and 'data' in parsed:
      return parsed['data']
    return parsed

  @property
  def no_content_response(self):
    # A 204 "No Content" response deserializes to an empty object.
    return {}
class RawModel(JsonModel):
  """Model class for requests that don't return JSON.

  Requests are still serialized as JSON (inherited from JsonModel), but the
  response body is handed back to the caller untouched, as raw bytes.
  """
  accept = '*/*'
  content_type = 'application/json'
  alt_param = None

  def deserialize(self, content):
    # No parsing: the caller receives the body exactly as received.
    return content

  @property
  def no_content_response(self):
    # A 204 "No Content" response has no body; represent it as ''.
    return ''
class MediaModel(JsonModel):
  """Model class for requests that return Media.

  Requests are still serialized as JSON (inherited from JsonModel), but the
  response body is handed back to the caller untouched, as raw bytes; the
  'alt=media' query parameter asks the server for the raw media content.
  """
  accept = '*/*'
  content_type = 'application/json'
  alt_param = 'media'

  def deserialize(self, content):
    # No parsing: the caller receives the media bytes exactly as received.
    return content

  @property
  def no_content_response(self):
    # A 204 "No Content" response has no body; represent it as ''.
    return ''
class ProtocolBufferModel(BaseModel):
  """Model class for protocol buffers.

  Serializes and de-serializes the binary protocol buffer sent in the HTTP
  request and response bodies.
  """
  accept = 'application/x-protobuf'
  content_type = 'application/x-protobuf'
  alt_param = 'proto'

  def __init__(self, protocol_buffer):
    """Constructs a ProtocolBufferModel.

    The serialized protocol buffer returned in an HTTP response will be
    de-serialized using the given protocol buffer class.

    Args:
      protocol_buffer: The protocol buffer class used to de-serialize a
          response from the API.
    """
    self._protocol_buffer = protocol_buffer

  def serialize(self, body_value):
    # Delegate to the message's own binary encoder.
    return body_value.SerializeToString()

  def deserialize(self, content):
    # Parse the binary wire format into a new message instance.
    return self._protocol_buffer.FromString(content)

  @property
  def no_content_response(self):
    # An empty message of the configured type stands in for "no content".
    return self._protocol_buffer()
def makepatch(original, modified):
  """Create a patch object.

  Some methods support PATCH, an efficient way to send updates to a resource.
  This method allows the easy construction of patch bodies by looking at the
  differences between a resource before and after it was modified.

  Args:
    original: object, the original deserialized resource
    modified: object, the modified deserialized resource

  Returns:
    An object that contains only the changes from original to modified, in a
    form suitable to pass to a PATCH method.

  Example usage:
    item = service.activities().get(postid=postid, userid=userid).execute()
    original = copy.deepcopy(item)
    item['object']['content'] = 'This is updated.'
    service.activities.patch(postid=postid, userid=userid,
      body=makepatch(original, item)).execute()
  """
  patch = {}
  # .items() (not .iteritems()) keeps this working on both Python 2 and 3.
  for key, original_value in original.items():
    modified_value = modified.get(key, None)
    if modified_value is None:
      # Use None to signal that the element is deleted.  NOTE: this means a
      # key whose new value is literally None is indistinguishable from a
      # deleted key.
      patch[key] = None
    elif original_value != modified_value:
      if isinstance(original_value, dict) and isinstance(modified_value, dict):
        # Recursively descend objects.  Fix: only recurse when BOTH sides are
        # dicts; previously a dict value replaced by a scalar/list crashed
        # with AttributeError inside the recursive call.
        patch[key] = makepatch(original_value, modified_value)
      else:
        # In the case of simple types or arrays we just replace.
        patch[key] = modified_value
    # Keys with unchanged values contribute nothing to the patch.

  # Anything newly added in 'modified' is copied into the patch verbatim.
  for key in modified:
    if key not in original:
      patch[key] = modified[key]

  return patch
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# NOTE: 'build_from_document' previously lacked a trailing comma, so implicit
# string concatenation fused it with 'fix_method_name' into the single bogus
# name 'build_from_documentfix_method_name', dropping both real names from
# the declared public API.
__all__ = [
    'build',
    'build_from_document',
    'fix_method_name',
    'key2param'
    ]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownFileType
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)

# Matches a complete {name} expression within a URI Template.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a variable name inside a URI Template expression.
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template pointing at a service's discovery document.
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods lacking a 'description'.
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
# Python reserved words.
# NOTE(review): 'with' and 'yield' (keywords since Python 2.6) are absent
# from this list -- confirm whether discovery method names can collide with
# them before relying on fix_method_name to sanitize those.
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
                  'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
                  'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
                  'pass', 'print', 'raise', 'return', 'try', 'while' ]
def fix_method_name(name):
  """Fix method names to avoid reserved word conflicts.

  Args:
    name: string, method name.

  Returns:
    The name with a '_' appended if the name is a reserved word.
  """
  return name + '_' if name in RESERVED_WORDS else name
def _add_query_parameter(url, name, value):
  """Adds a query parameter to a url.

  Replaces the current value if it already exists in the URL.

  Args:
    url: string, url to add the query parameter to.
    name: string, query parameter name.
    value: string, query parameter value.

  Returns:
    The updated url, or the url unchanged if value is None.
  """
  if value is None:
    return url
  parts = list(urlparse.urlparse(url))
  # parts[4] is the query component; round-trip it through a dict so an
  # existing parameter of the same name is replaced rather than duplicated.
  query = dict(parse_qsl(parts[4]))
  query[name] = value
  parts[4] = urllib.urlencode(query)
  return urlparse.urlunparse(parts)
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results"

  Args:
    key: string, the method key name.

  Returns:
    A safe method name based on the key name.
  """
  # Every non-alphanumeric character becomes '_'; a leading non-letter gets
  # an 'x' prefix so the result is a valid Python identifier.
  safe = ''.join(c if c.isalnum() else '_' for c in key)
  if key[0].isalpha():
    return safe
  return 'x' + safe
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with an API. The serviceName and
  version are the names from the Discovery service.

  Args:
    serviceName: string, name of the service.
    version: string, the version of the service.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to the location of
      the discovery service. It should have two parameters {api} and
      {apiVersion} that when filled in produce an absolute URI to the discovery
      document for that service.
    developerKey: string, key obtained from
      https://code.google.com/apis/console.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
      request.

  Returns:
    A Resource object with methods for interacting with the service.

  Raises:
    apiclient.errors.UnknownApiNameOrVersion: if the discovery service
      returns 404 for the requested name/version.
    apiclient.errors.HttpError: on any other non-success discovery response.
    apiclient.errors.InvalidJsonError: if the discovery document is not
      valid JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }
  if http is None:
    http = httplib2.Http()
  requested_url = uritemplate.expand(discoveryServiceUrl, params)
  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logger.info('URL being requested: %s' % requested_url)
  resp, content = http.request(requested_url)
  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)
  # Parse here purely to validate that the document is well-formed JSON; the
  # raw string 'content' (not the parsed dict) is what build_from_document
  # actually consumes below.
  try:
    service = simplejson.loads(content)
  except ValueError, e:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()
  return build_from_document(content, discoveryServiceUrl, http=http,
      developerKey=developerKey, model=model, requestBuilder=requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that is it given, as opposed to retrieving one over HTTP.

  Args:
    service: string, discovery document.
    base: string, base URI for all HTTP requests, usually the discovery URI.
    future: string, discovery document with future capabilities (deprecated).
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and de-serializes requests and
      responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with the service.
  """
  # The 'future' argument is accepted for backwards compatibility only; it is
  # ignored and reset unconditionally.
  future = {}

  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  schema = Schemas(service)

  if model is None:
    # The 'dataWrapper' feature means request/response bodies are nested
    # under a 'data' key; let JsonModel handle the (un)wrapping.
    features = service.get('features', [])
    model = JsonModel('dataWrapper' in features)

  # The top-level discovery document doubles as both the resource
  # description and the root description.
  return _createResource(http, base, model, requestBuilder, developerKey,
                         service, service, schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
# Unit suffix -> byte multiplier, used by _media_size_to_long().
MULTIPLIERS = {
  "KB": 2 ** 10,
  "MB": 2 ** 20,
  "GB": 2 ** 30,
  "TB": 2 ** 40,
  }
def _media_size_to_long(maxSize):
  """Convert a string media size, such as 10GB or 3TB into an integer.

  Args:
    maxSize: string, size as a string, such as 2MB or 7GB.

  Returns:
    The size as an integer value; 0 means no usable size was given
    (including strings shorter than two characters).
  """
  if len(maxSize) < 2:
    # Too short to carry a unit suffix; treated as "no limit".
    return 0
  suffix = maxSize[-2:].upper()
  multiplier = MULTIPLIERS.get(suffix, 0)
  if not multiplier:
    # No recognized suffix: the whole string is a plain byte count.
    return int(maxSize)
  return int(maxSize[:-2]) * multiplier
def _createResource(http, baseUrl, model, requestBuilder,
                    developerKey, resourceDesc, rootDesc, schema):
  """Build a Resource from the API description.

  Args:
    http: httplib2.Http, Object to make http requests with.
    baseUrl: string, base URL for the API. All requests are relative to this
        URI.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: class or callable that instantiates an
        apiclient.HttpRequest object.
    developerKey: string, key obtained from
        https://code.google.com/apis/console
    resourceDesc: object, section of deserialized discovery document that
        describes a resource. Note that the top level discovery document
        is considered a resource.
    rootDesc: object, the entire deserialized discovery document.
    schema: object, mapping of schema names to schema descriptions.

  Returns:
    An instance of Resource with all the methods attached for interacting with
    that resource.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates a method for attaching to a Resource.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
          describes the method.
      rootDesc: object, the entire deserialized discovery document.
    """
    methodName = fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      # TODO(jcgregorio) Use URLs from discovery once it is updated.
      parsed = list(urlparse.urlparse(baseUrl))
      basePath = parsed[2]
      mediaPathUrl = '/upload' + basePath + pathUrl
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    # Add in the parameters common to all methods.
    for name, desc in rootDesc.get('parameters', {}).iteritems():
      methodDesc['parameters'][name] = desc
    # Add in undocumented query parameters.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }

    if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      # The guard above already established that 'request' is present, so
      # merge its schema into the synthetic 'body' parameter directly.  (A
      # redundant inner "if 'request' in methodDesc" with an unreachable
      # else branch used to live here.)
      methodDesc['parameters']['body'].update(methodDesc['request'])
    if 'mediaUpload' in methodDesc:
      methodDesc['parameters']['media_body'] = {
          'description': 'The filename of the media request body.',
          'type': 'string',
          'required': False,
          }
      # With a media upload available, a JSON body becomes optional.
      if 'body' in methodDesc['parameters']:
        methodDesc['parameters']['body']['required'] = False

    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {}  # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Any variable mentioned in the path template is a path parameter, even
    # if discovery also listed it under 'query'.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Don't bother with doc string, it will be over-written by createMethod.

      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      # Remove args that have a value of None.
      keys = kwargs.keys()
      for name in keys:
        if kwargs[name] is None:
          del kwargs[name]

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          # We need to handle the case of a repeated enum
          # name differently, since we want to handle both
          # arg='value' and arg=['value1', 'value2']
          if (name in repeated_params and
              not isinstance(kwargs[name], basestring)):
            values = kwargs[name]
          else:
            values = [kwargs[name]]
          for value in values:
            if value not in enums:
              raise TypeError(
                  'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                  (name, value, str(enums)))

      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      model = self._model
      # If there is no schema for the response then presume a binary blob.
      if methodName.endswith('_media'):
        model = MediaModel()
      elif 'response' not in methodDesc:
        model = RawModel()

      headers = {}
      headers, params, query, body = model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            # NOTE(review): UnknownFileType must be imported from
            # apiclient.errors at the top of this module; without that
            # import this line raised NameError instead of the intended
            # exception.
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
          url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
          # This is all we need to do for resumable, if the body exists it gets
          # sent in the first request, otherwise an empty body is sent.
          resumable = media_upload
        else:
          # A non-resumable upload
          if body is None:
            # This is a simple media upload
            headers['content-type'] = media_upload.mimetype()
            body = media_upload.getbytes(0, media_upload.size())
            url = _add_query_parameter(url, 'uploadType', 'media')
          else:
            # This is a multipart/related upload.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out it's own headers
            setattr(msgRoot, '_write_headers', lambda self: None)

            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)

            # attach the media as the second part
            msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)
            body = msgRoot.as_string()

            multipart_boundary = msgRoot.get_boundary()
            headers['content-type'] = ('multipart/related; '
                                       'boundary="%s"') % multipart_boundary
            url = _add_query_parameter(url, 'uploadType', 'multipart')

      logger.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')

    # Skip undocumented params and params common to all methods.
    skip_parameters = rootDesc.get('parameters', {}).keys()
    # Fix: this used to be skip_parameters.append(STACK_QUERY_PARAMETERS),
    # which appended the whole list as a single element and therefore never
    # actually skipped any of the stack query parameters in the docs.
    skip_parameters.extend(STACK_QUERY_PARAMETERS)

    for arg in argmap.iterkeys():
      if arg in skip_parameters:
        continue

      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      if '$ref' in paramdesc:
        docs.append(
            ('  %s: object, %s%s%s\n    The object takes the'
            ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
              schema.prettyPrintByName(paramdesc['$ref'])))
      else:
        paramtype = paramdesc.get('type', 'string')
        docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                            repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append('    Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append('      %s - %s\n' % (name, desc))
    if 'response' in methodDesc:
      if methodName.endswith('_media'):
        docs.append('\nReturns:\n  The media object as a string.\n\n    ')
      else:
        docs.append('\nReturns:\n  An object of the form:\n\n    ')
        docs.append(schema.prettyPrintSchema(methodDesc['response']))

    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates any _next methods for attaching to a Resource.

    The _next methods allow for easy iteration through list() responses.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
          describes the method.
      rootDesc: object, the entire deserialized discovery document.
    """
    methodName = fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.
      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logger.info('URL being requested: %s' % uri)

      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      createMethod(Resource, methodName, methodDesc, rootDesc)
      # Add in _media methods. The functionality of the attached method will
      # change when it sees that the method name ends in _media.
      if methodDesc.get('supportsMediaDownload', False):
        createMethod(Resource, methodName + '_media', methodDesc, rootDesc)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
      """Create a method on the Resource to access a nested Resource.

      Args:
        theclass: type, the class to attach methods to.
        methodName: string, name of the method to use.
        methodDesc: object, fragment of deserialized discovery document that
            describes the method.
        rootDesc: object, the entire deserialized discovery document.
      """
      methodName = fix_method_name(methodName)

      def methodResource(self):
        return _createResource(self._http, self._baseUrl, self._model,
                               self._requestBuilder, self._developerKey,
                               methodDesc, rootDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      createResourceMethod(Resource, methodName, methodDesc, rootDesc)

  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema.get(responseSchema['$ref'])
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          # Fix: the fourth argument is createNextMethod's rootDesc
          # parameter; previously methodName was passed here (harmless only
          # because rootDesc is currently unused in createNextMethod).
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           rootDesc)

  return Resource()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    # Path of the pickle file holding the credential.
    self._filename = filename
    # Serializes access from multiple threads within this process.
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials, or None if the file is missing or
      unreadable (best-effort read).
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'r')
      credentials = pickle.loads(f.read())
      f.close()
      # Make the credential write itself back here whenever it is refreshed.
      credentials.set_store(self.put)
    except:
      # Deliberate best-effort: any failure (missing file, corrupt pickle,
      # bad credential) yields None rather than propagating.
      credentials = None
    finally:
      # Release in finally so no code path can leave the lock held.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'w')
      f.write(pickle.dumps(credentials))
      f.close()
    finally:
      # Fix: an exception here (e.g. unwritable file) previously left the
      # lock held forever, deadlocking every later get()/put().
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  """Django model field storing an apiclient.oauth.Credentials.

  The credential is pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    # Stored as an opaque base64 text blob.
    return 'VARCHAR'

  def to_python(self, value):
    """Convert a stored string (or passthrough values) into Credentials."""
    if value is None:
      return None
    # Django may hand us an already-deserialized object; pass it through.
    if isinstance(value, apiclient.oauth.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Serialize the credential for storage in the database."""
    return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
    """Django model field that stores an apiclient.oauth.FlowThreeLegged.

    The flow object is pickled and base64-encoded into a VARCHAR column,
    mirroring OAuthCredentialsField.
    """

    # SubfieldBase causes to_python() to run whenever the attribute is
    # assigned, so model code always sees a deserialized object.
    __metaclass__ = models.SubfieldBase

    def db_type(self):
        # The pickled payload is stored as base64 text.
        return 'VARCHAR'

    def to_python(self, value):
        """Convert the stored column value back into a FlowThreeLegged.

        Args:
          value: None, FlowThreeLegged, or string (base64-encoded pickle).

        Returns:
          apiclient.oauth.FlowThreeLegged or None.
        """
        # Bug fix: removed leftover debug statement
        # (print "In to_python", value) that wrote to stdout on every read.
        if value is None:
            return None
        if isinstance(value, apiclient.oauth.FlowThreeLegged):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value):
        """Serialize the flow into a base64-encoded pickle string."""
        return base64.b64encode(pickle.dumps(value))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use the
Google API Client for Python on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from apiclient.oauth import OAuthCredentials
from apiclient.oauth import FlowThreeLegged
class FlowThreeLeggedProperty(db.Property):
    """Utility property that allows easy
    storage and retrieval of an
    apiclient.oauth.FlowThreeLegged"""

    # Tell what the user type is.
    data_type = FlowThreeLegged

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        """Pickle the flow into a Blob for storage."""
        flow = super(FlowThreeLeggedProperty,
                     self).get_value_for_datastore(model_instance)
        return db.Blob(pickle.dumps(flow))

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        """Unpickle a stored Blob back into a FlowThreeLegged."""
        if value is None:
            return None
        return pickle.loads(value)

    def validate(self, value):
        """Reject values that are not FlowThreeLegged instances.

        Raises:
          db.BadValueError: if value is neither None nor a FlowThreeLegged.
        """
        if value is not None and not isinstance(value, FlowThreeLegged):
            # Bug fix: only the db module is imported, so the bare
            # 'BadValueError' name the original raised was itself a
            # NameError; the exception lives at db.BadValueError.
            raise db.BadValueError('Property %s must be convertible '
                                   'to a FlowThreeLegged instance (%s)' %
                                   (self.name, value))
        return super(FlowThreeLeggedProperty, self).validate(value)

    def empty(self, value):
        """Any falsy value counts as empty."""
        return not value
class OAuthCredentialsProperty(db.Property):
    """Utility property that allows easy
    storage and retrieval of
    apiclient.oauth.OAuthCredentials
    """

    # Tell what the user type is.
    data_type = OAuthCredentials

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        """Pickle the credentials into a Blob for storage."""
        cred = super(OAuthCredentialsProperty,
                     self).get_value_for_datastore(model_instance)
        return db.Blob(pickle.dumps(cred))

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        """Unpickle a stored Blob back into an OAuthCredentials."""
        if value is None:
            return None
        return pickle.loads(value)

    def validate(self, value):
        """Reject values that are not OAuthCredentials instances.

        Raises:
          db.BadValueError: if value is neither None nor an OAuthCredentials.
        """
        if value is not None and not isinstance(value, OAuthCredentials):
            # Bug fix: only the db module is imported, so the bare
            # 'BadValueError' name the original raised was itself a
            # NameError; the exception lives at db.BadValueError.
            raise db.BadValueError('Property %s must be convertible '
                                   'to an OAuthCredentials instance (%s)' %
                                   (self.name, value))
        return super(OAuthCredentialsProperty, self).validate(value)

    def empty(self, value):
        """Any falsy value counts as empty."""
        return not value
class StorageByKeyName(object):
    """Store and retrieve a single credential to and from
    the App Engine datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredenialsProperty
    on a datastore model class, and that entities
    are stored by key_name.
    """

    def __init__(self, model, key_name, property_name):
        """Constructor for Storage.

        Args:
          model: db.Model, model class
          key_name: string, key name for the entity that has the credentials
          property_name: string, name of the property that is a CredentialsProperty
        """
        self.model = model
        self.key_name = key_name
        self.property_name = property_name

    def get(self):
        """Retrieve Credential from datastore.

        Returns:
          Credentials
        """
        entity = self.model.get_or_insert(self.key_name)
        credential = getattr(entity, self.property_name)
        wants_store_hook = bool(credential) and hasattr(credential, 'set_store')
        if wants_store_hook:
            # Future refreshes of the credential get written back through us.
            credential.set_store(self.put)
        return credential

    def put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
          credentials: Credentials, the credentials to store.
        """
        entity = self.model.get_or_insert(self.key_name)
        setattr(entity, self.property_name, credentials)
        entity.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 1.0
Do the OAuth 1.0 Three Legged Dance for
a command line application. Stores the generated
credentials in a common file that is used by
other example apps in the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ["run"]
import BaseHTTPServer
import gflags
import logging
import socket
import sys
from optparse import OptionParser
from apiclient.oauth import RequestError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS

# Pass --noauth_local_webserver to force the out-of-band (copy/paste) flow.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))
# Candidate ports; run() tries each in order until one can be bound.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 1.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Class-level default; the handler overwrites this with the parsed query
    # string of the single redirect request it serves.
    query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 1.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """

    def do_GET(self):
        """Handle a GET request.

        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # Everything after the first '?' is the redirect's query string.
        raw_query = self.path.split('?', 1)[-1]
        self.server.query_params = dict(parse_qsl(raw_query))
        self.wfile.write("<html><head><title>Authentication Status</title></head>")
        self.wfile.write("<body><p>The authentication flow has completed.</p>")
        self.wfile.write("</body></html>")

    def log_message(self, format, *args):
        """Do not log messages to stdout while running as command line program."""
        pass
def run(flow, storage):
    """Core code for a command-line application.

    Args:
      flow: Flow, an OAuth 1.0 Flow to step through.
      storage: Storage, a Storage to store the credential in.

    Returns:
      Credentials, the obtained credential.

    Exceptions:
      RequestError: if step2 of the flow fails.
    """
    if FLAGS.auth_local_webserver:
        success = False
        port_number = 0
        # Try each candidate port in turn; the first that binds wins.
        # NOTE(review): a plain HTTPServer is used here rather than
        # ClientRedirectServer; query_params is set dynamically by the
        # handler, so this works, but the class above goes unused.
        for port in FLAGS.auth_host_port:
            port_number = port
            try:
                httpd = BaseHTTPServer.HTTPServer((FLAGS.auth_host_name, port),
                                                  ClientRedirectHandler)
            except socket.error, e:
                pass
            else:
                success = True
                break
        # Fall back to the out-of-band flow if no port could be bound.
        FLAGS.auth_local_webserver = success

    if FLAGS.auth_local_webserver:
        oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
    else:
        oauth_callback = 'oob'
    authorize_url = flow.step1_get_authorize_url(oauth_callback)

    print 'Go to the following link in your browser:'
    print authorize_url
    print

    if FLAGS.auth_local_webserver:
        print 'If your browser is on a different machine then exit and re-run this'
        print 'application with the command-line parameter --noauth_local_webserver.'
        print

    if FLAGS.auth_local_webserver:
        # Block until the browser redirects back to our local server.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'oauth_verifier' in httpd.query_params:
            code = httpd.query_params['oauth_verifier']
        # NOTE(review): if the redirect carried neither 'error' nor
        # 'oauth_verifier', 'code' is left unbound and step2_exchange
        # below raises NameError -- confirm whether that path can occur.
    else:
        accepted = 'n'
        while accepted.lower() == 'n':
            accepted = raw_input('Have you authorized me? (y/n) ')
        code = raw_input('What is the verification code? ').strip()

    try:
        credentials = flow.step2_exchange(code)
    except RequestError:
        sys.exit('The authentication has failed.')

    # Persist now, and arrange for future token refreshes to be saved too.
    storage.put(credentials)
    credentials.set_store(storage.put)

    print "You have successfully authenticated."

    return credentials
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client.anyjson import simplejson
class Error(Exception):
    """Base error for this module."""
class HttpError(Error):
    """HTTP data was invalid or unexpected."""

    def __init__(self, resp, content, uri=None):
        """Constructor.

        Args:
          resp: object, the HTTP response; read via .get() for headers and
            the .status and .reason attributes.
          content: string, the body of the HTTP response.
          uri: string, the URI that was requested, if known.
        """
        self.resp = resp
        self.content = content
        self.uri = uri

    def _get_reason(self):
        """Calculate the reason for the error from the response content."""
        if self.resp.get('content-type', '').startswith('application/json'):
            try:
                data = simplejson.loads(self.content)
                reason = data['error']['message']
            except (ValueError, KeyError, TypeError):
                # Bug fix: TypeError added. Valid JSON may decode to a list
                # or scalar, making data['error'] a TypeError rather than a
                # KeyError; fall back to the raw body in that case too.
                reason = self.content
        else:
            reason = self.resp.reason
        return reason

    def __repr__(self):
        if self.uri:
            return '<HttpError %s when requesting %s returned "%s">' % (
                self.resp.status, self.uri, self._get_reason())
        else:
            return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())

    __str__ = __repr__
class InvalidJsonError(Error):
    """The JSON returned could not be parsed."""


class UnknownLinkType(Error):
    """Link type unknown or unexpected."""


class UnknownApiNameOrVersion(Error):
    """No API with that name and version exists."""


class UnacceptableMimeTypeError(Error):
    """That is an unacceptable mimetype for this operation."""


class MediaUploadSizeError(Error):
    """Media is larger than the method can accept."""


class ResumableUploadError(Error):
    """Error occured during resumable upload."""
class BatchError(HttpError):
    """Error occurred during batch operations."""

    def __init__(self, reason, resp=None, content=None):
        """Constructor.

        Args:
          reason: string, short description of the error.
          resp: object, the HTTP response, if any (may be None).
          content: string, the response body, if any.
        """
        self.resp = resp
        self.content = content
        self.reason = reason

    def __repr__(self):
        # Bug fix: resp defaults to None, but the original unconditionally
        # read self.resp.status, so repr() of a BatchError constructed
        # without a response raised AttributeError.
        if getattr(self.resp, 'status', None) is None:
            return '<BatchError "%s">' % (self.reason)
        else:
            return '<BatchError %s "%s">' % (self.resp.status, self.reason)

    __str__ = __repr__
class UnexpectedMethodError(Error):
    """Exception raised by RequestMockBuilder on unexpected calls."""

    def __init__(self, methodId=None):
        """Build the error message from the offending method id."""
        message = 'Received unexpected call %s' % methodId
        super(UnexpectedMethodError, self).__init__(message)
class UnexpectedBodyError(Error):
    """Exception raised by RequestMockBuilder on unexpected bodies."""

    def __init__(self, expected, provided):
        """Constructor for an UnexpectedBodyError.

        Args:
          expected: object, the body the mock expected.
          provided: object, the body the call actually supplied.
        """
        super(UnexpectedBodyError, self).__init__(
            'Expected: [%s] - Provided: [%s]' % (expected, provided))
| Python |
# Package version string.
__version__ = "1.0c2"
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client.anyjson import simplejson
class Schemas(object):
    """Schemas for an API."""

    def __init__(self, discovery):
        """Constructor.

        Args:
          discovery: object, Deserialized discovery document from which we pull
            out the named schema.
        """
        self.schemas = discovery.get('schemas', {})

        # Cache of pretty printed schemas.
        self.pretty = {}

    def _prettyPrintByName(self, name, seen=None, dent=0):
        """Get pretty printed object prototype from the schema name.

        Args:
          name: string, Name of schema in the discovery document.
          seen: list of string, Names of schema already seen. Used to handle
            recursive definitions.
          dent: int, Initial indentation depth.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        if name in seen:
            # Do not fall into an infinite loop over recursive definitions.
            return '# Object with schema name: %s' % name
        seen.append(name)

        if name not in self.pretty:
            # NOTE(review): the cache is keyed on name only and ignores
            # 'dent', so the depth used on the first request for a schema is
            # the one cached -- confirm callers always use a consistent depth.
            self.pretty[name] = _SchemaToStruct(self.schemas[name],
                seen, dent).to_str(self._prettyPrintByName)

        seen.pop()

        return self.pretty[name]

    def prettyPrintByName(self, name):
        """Get pretty printed object prototype from the schema name.

        Args:
          name: string, Name of schema in the discovery document.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

    def _prettyPrintSchema(self, schema, seen=None, dent=0):
        """Get pretty printed object prototype of schema.

        Args:
          schema: object, Parsed JSON schema.
          seen: list of string, Names of schema already seen. Used to handle
            recursive definitions.
          dent: int, Initial indentation depth.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        return _SchemaToStruct(schema, seen, dent).to_str(self._prettyPrintByName)

    def prettyPrintSchema(self, schema):
        """Get pretty printed object prototype of schema.

        Args:
          schema: object, Parsed JSON schema.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintSchema(schema, dent=1)[:-2]

    def get(self, name):
        """Get deserialized JSON schema from the schema name.

        Args:
          name: string, Schema name.

        Returns:
          object, the deserialized schema; raises KeyError for unknown names.
        """
        return self.schemas[name]
class _SchemaToStruct(object):
    """Convert schema to a prototype object."""

    def __init__(self, schema, seen, dent=0):
        """Constructor.

        Args:
          schema: object, Parsed JSON schema.
          seen: list, List of names of schema already seen while parsing. Used to
            handle recursive definitions.
          dent: int, Initial indentation depth.
        """
        # The result of this parsing kept as list of strings.
        self.value = []

        # The final value of the parsing.
        self.string = None

        # The parsed JSON schema.
        self.schema = schema

        # Indentation level.
        self.dent = dent

        # Method that when called returns a prototype object for the schema with
        # the given name.
        self.from_cache = None

        # List of names of schema already seen while parsing.
        self.seen = seen

    def emit(self, text):
        """Add text as a line to the output.

        Args:
          text: string, Text to output.
        """
        self.value.extend([" " * self.dent, text, '\n'])

    def emitBegin(self, text):
        """Add text to the output, but with no line terminator.

        Args:
          text: string, Text to output.
        """
        self.value.extend([" " * self.dent, text])

    def emitEnd(self, text, comment):
        """Add text and comment to the output with line terminator.

        Args:
          text: string, Text to output.
          comment: string, Python comment.
        """
        if comment:
            # Re-indent continuation lines of a multi-line comment so the
            # '#' markers line up under the first one.
            divider = '\n' + ' ' * (self.dent + 2) + '# '
            lines = comment.splitlines()
            lines = [x.rstrip() for x in lines]
            comment = divider.join(lines)
            self.value.extend([text, ' # ', comment, '\n'])
        else:
            self.value.extend([text, '\n'])

    def indent(self):
        """Increase indentation level."""
        self.dent += 1

    def undent(self):
        """Decrease indentation level."""
        self.dent -= 1

    def _to_str_impl(self, schema):
        """Prototype object based on the schema, in Python code with comments.

        Args:
          schema: object, Parsed JSON schema file.

        Returns:
          Prototype object based on the schema, in Python code with comments.
        """
        stype = schema.get('type')
        if stype == 'object':
            self.emitEnd('{', schema.get('description', ''))
            self.indent()
            for pname, pschema in schema.get('properties', {}).iteritems():
                self.emitBegin('"%s": ' % pname)
                self._to_str_impl(pschema)
            self.undent()
            self.emit('},')
        elif '$ref' in schema:
            # Reference to a named schema: splice in its (cached) rendering.
            schemaName = schema['$ref']
            description = schema.get('description', '')
            s = self.from_cache(schemaName, self.seen)
            parts = s.splitlines()
            self.emitEnd(parts[0], description)
            for line in parts[1:]:
                self.emit(line.rstrip())
        elif stype == 'boolean':
            value = schema.get('default', 'True or False')
            self.emitEnd('%s,' % str(value), schema.get('description', ''))
        elif stype == 'string':
            value = schema.get('default', 'A String')
            self.emitEnd('"%s",' % str(value), schema.get('description', ''))
        elif stype == 'integer':
            value = schema.get('default', '42')
            self.emitEnd('%s,' % str(value), schema.get('description', ''))
        elif stype == 'number':
            value = schema.get('default', '3.14')
            self.emitEnd('%s,' % str(value), schema.get('description', ''))
        elif stype == 'null':
            self.emitEnd('None,', schema.get('description', ''))
        elif stype == 'any':
            self.emitEnd('"",', schema.get('description', ''))
        elif stype == 'array':
            self.emitEnd('[', schema.get('description'))
            self.indent()
            self.emitBegin('')
            self._to_str_impl(schema['items'])
            self.undent()
            self.emit('],')
        else:
            self.emit('Unknown type! %s' % stype)
            self.emitEnd('', '')

        self.string = ''.join(self.value)
        return self.string

    def to_str(self, from_cache):
        """Prototype object based on the schema, in Python code with comments.

        Args:
          from_cache: callable(name, seen), Callable that retrieves an object
            prototype for a schema with the given name. Seen is a list of schema
            names already seen as we recursively descend the schema definition.

        Returns:
          Prototype object based on the schema, in Python code with comments.
            The lines of the code will all be properly indented.
        """
        self.from_cache = from_cache
        return self._to_str_impl(self.schema)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
| Python |
# Early, and incomplete implementation of -04.
#
import re
import urllib
# Characters with reserved meaning in URIs; kept unencoded only under the
# '+' operator (see expand()).
RESERVED = ":/?#[]@!$&'()*+,;="
# Expression operators recognized by this (draft-04) implementation.
OPERATOR = "+./;?|!@"
# Modifiers that explode list/dict values into multiple items.
EXPLODE = "*+"
# Modifiers for partial (prefix/suffix) substitution.
MODIFIER = ":^"
# Matches one {operator?varlist} expression in a template.
TEMPLATE = re.compile(r"{(?P<operator>[\+\./;\?|!@])?(?P<varlist>[^}]+)}", re.UNICODE)
# Splits a single varspec into name, explode/partial modifier and default.
VAR = re.compile(r"^(?P<varname>[^=\+\*:\^]+)((?P<explode>[\+\*])|(?P<partial>[:\^]-?[0-9]+))?(=(?P<default>.*))?$", re.UNICODE)
def _tostring(varname, value, explode, operator, safe=""):
    """Render a variable for default ('' or '+') expansion.

    Args:
      varname: string, name of the variable being expanded.
      value: string, list of strings, or dict of strings.
      explode: string or None, '+' prefixes each item with the variable name.
      operator: string, the expression operator (unused here).
      safe: string, characters exempt from percent-encoding.

    Returns:
      string, the comma-joined, percent-encoded expansion of value.
    """
    # isinstance instead of type(...) == type([]) comparisons: the Python
    # idiom, and it also accepts list/dict subclasses.
    if isinstance(value, list):
        if explode == "+":
            return ",".join([varname + "." + urllib.quote(x, safe) for x in value])
        else:
            return ",".join([urllib.quote(x, safe) for x in value])
    if isinstance(value, dict):
        # Sort keys for a deterministic expansion order.
        keys = sorted(value)
        if explode == "+":
            return ",".join([varname + "." + urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
        else:
            return ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        return urllib.quote(value, safe)
def _tostring_path(varname, value, explode, operator, safe=""):
    """Render a variable for path-style ('/' or '.') expansion.

    Args:
      varname: string, name of the variable being expanded.
      value: string, list of strings, or dict of strings.
      explode: string or None, '+' prefixes items with the variable name,
        '*' joins bare items with the operator character.
      operator: string, the expression operator; also used as the joiner.
      safe: string, characters exempt from percent-encoding.

    Returns:
      string, the percent-encoded expansion of value.
    """
    joiner = operator
    # isinstance instead of type(...) == type([]) comparisons: the Python
    # idiom, and it also accepts list/dict subclasses.
    if isinstance(value, list):
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(x, safe) for x in value])
        elif explode == "*":
            return joiner.join([urllib.quote(x, safe) for x in value])
        else:
            return ",".join([urllib.quote(x, safe) for x in value])
    elif isinstance(value, dict):
        # Sort keys for a deterministic expansion order.
        keys = sorted(value)
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(key, safe) + joiner + urllib.quote(value[key], safe) for key in keys])
        elif explode == "*":
            return joiner.join([urllib.quote(key, safe) + joiner + urllib.quote(value[key], safe) for key in keys])
        else:
            return ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        if value:
            return urllib.quote(value, safe)
        else:
            return ""
def _tostring_query(varname, value, explode, operator, safe=""):
    """Render a variable for query-style (';' or '?') expansion.

    Args:
      varname: string, name of the variable being expanded.
      value: string, list of strings, or dict of strings.
      explode: string or None, '+' emits name=value pairs, '*' emits bare
        items joined by the operator's separator.
      operator: string, the expression operator; '?' switches the joiner
        to '&' and prefixes the variable name.
      safe: string, characters exempt from percent-encoding.

    Returns:
      string, the percent-encoded expansion of value; empty collections
      expand to the empty string.
    """
    joiner = operator
    varprefix = ""
    if operator == "?":
        joiner = "&"
        varprefix = varname + "="
    # isinstance instead of type(...) == type([]) comparisons: the Python
    # idiom, and it also accepts list/dict subclasses.
    if isinstance(value, list):
        if not value:
            return ""
        if explode == "+":
            return joiner.join([varname + "=" + urllib.quote(x, safe) for x in value])
        elif explode == "*":
            return joiner.join([urllib.quote(x, safe) for x in value])
        else:
            return varprefix + ",".join([urllib.quote(x, safe) for x in value])
    elif isinstance(value, dict):
        if not value:
            return ""
        # Sort keys for a deterministic expansion order.
        keys = sorted(value)
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(key, safe) + "=" + urllib.quote(value[key], safe) for key in keys])
        elif explode == "*":
            return joiner.join([urllib.quote(key, safe) + "=" + urllib.quote(value[key], safe) for key in keys])
        else:
            return varprefix + ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        if value:
            return varname + "=" + urllib.quote(value, safe)
        else:
            return varname
# Dispatch table: maps each expression operator to the renderer used for
# values expanded under it.
TOSTRING = {
    "" : _tostring,
    "+": _tostring,
    ";": _tostring_query,
    "?": _tostring_query,
    "/": _tostring_path,
    ".": _tostring_path,
    }
def expand(template, vars):
    """Expand a URI Template (draft 04, incomplete) with the given variables.

    Args:
      template: string, URI Template containing {...} expressions.
      vars: dict, maps variable names to string, list or dict values.

    Returns:
      string, the template with every expression replaced by its expansion.
    """
    def _sub(match):
        # Expand a single {operator?varlist} expression.
        groupdict = match.groupdict()
        operator = groupdict.get('operator')
        if operator is None:
            operator = ''
        varlist = groupdict.get('varlist')

        # Reserved characters survive quoting only under the '+' operator.
        safe = "@"
        if operator == '+':
            safe = RESERVED

        # Parse each varspec: name, explode/partial modifiers, optional
        # '=default' value.
        varspecs = varlist.split(",")
        varnames = []
        defaults = {}
        for varspec in varspecs:
            m = VAR.search(varspec)
            groupdict = m.groupdict()
            varname = groupdict.get('varname')
            explode = groupdict.get('explode')
            partial = groupdict.get('partial')
            default = groupdict.get('default')
            if default:
                defaults[varname] = default
            varnames.append((varname, explode, partial))

        retval = []
        joiner = operator
        prefix = operator
        if operator == "+":
            prefix = ""
            joiner = ","
        if operator == "?":
            joiner = "&"
        if operator == "":
            joiner = ","
        for varname, explode, partial in varnames:
            if varname in vars:
                value = vars[varname]
                # NOTE(review): empty strings bypass the default here while
                # empty lists/dicts fall back to it -- confirm that
                # asymmetry is intended.
                if not value and value != "" and varname in defaults:
                    value = defaults[varname]
            elif varname in defaults:
                value = defaults[varname]
            else:
                # Undefined variable with no default contributes nothing.
                continue
            retval.append(TOSTRING[operator](varname, value, explode, operator,
                safe=safe))
        if "".join(retval):
            return prefix + joiner.join(retval)
        else:
            return ""

    return TEMPLATE.sub(_sub, template)
| Python |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in gflags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
    """Thrown if a validator constraint is not satisfied."""
class Validator(object):
    """Base class for flags validators.

    Users should NOT overload these classes, and use gflags.Register...
    methods instead.
    """

    # Class-wide counter; gives every validator a unique insertion_index.
    validators_count = 0

    def __init__(self, checker, message):
        """Constructor to create all validators.

        Args:
          checker: function to verify the constraint. Its input varies by
            subclass; see SimpleValidator and DictionaryValidator.
          message: string, error message shown to the user on failure.
        """
        self.checker = checker
        self.message = message
        Validator.validators_count += 1
        # Validators are asserted in the order they were registered
        # (CL/18694236).
        self.insertion_index = Validator.validators_count

    def Verify(self, flag_values):
        """Verify that constraint is satisfied.

        flags library calls this method to verify Validator's constraint.

        Args:
          flag_values: gflags.FlagValues, containing all flags

        Raises:
          Error: if constraint is not satisfied.
        """
        checker_input = self._GetInputToCheckerFunction(flag_values)
        if self.checker(checker_input):
            return
        raise Error(self.message)

    def GetFlagsNames(self):
        """Return the names of the flags checked by this validator.

        Returns:
          [string], names of the flags
        """
        raise NotImplementedError('This method should be overloaded')

    def PrintFlagsWithValues(self, flag_values):
        """Return a human-readable rendering of the checked flags."""
        raise NotImplementedError('This method should be overloaded')

    def _GetInputToCheckerFunction(self, flag_values):
        """Given flag values, construct the input to be given to checker.

        Args:
          flag_values: gflags.FlagValues, containing all flags.

        Returns:
          Return type depends on the specific validator.
        """
        raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
    """Validator behind RegisterValidator() method.

    Validates that a single flag passes its checker function. The checker
    receives the flag's value and returns True when the value is acceptable;
    otherwise it either returns False or raises an exception.
    """

    def __init__(self, flag_name, checker, message):
        """Constructor.

        Args:
          flag_name: string, name of the flag.
          checker: function taking the flag value (string, boolean, etc) and
            returning a boolean; may also raise Error for invalid values.
          message: string, error message shown when the constraint fails.
        """
        super(SimpleValidator, self).__init__(checker, message)
        self.flag_name = flag_name

    def GetFlagsNames(self):
        """This validator checks exactly one flag."""
        return [self.flag_name]

    def PrintFlagsWithValues(self, flag_values):
        """Render the flag and its current value for error reporting."""
        current = flag_values[self.flag_name].value
        return 'flag --%s=%s' % (self.flag_name, current)

    def _GetInputToCheckerFunction(self, flag_values):
        """The checker of a single-flag validator receives just that value.

        Args:
          flag_values: gflags.FlagValues

        Returns:
          value of the corresponding flag.
        """
        return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
    """Validator behind RegisterDictionaryValidator method.

    Validates that a group of flags passes a common checker function. The
    checker receives a dict of the flags' values and returns True when they
    are acceptable; otherwise it either returns False or raises an exception.
    """

    def __init__(self, flag_names, checker, message):
        """Constructor.

        Args:
          flag_names: [string], containing names of the flags used by checker.
          checker: function taking a dict {flag_name: value} and returning a
            boolean; may also raise Error for invalid values.
          message: string, error message shown when the constraint fails.
        """
        super(DictionaryValidator, self).__init__(checker, message)
        self.flag_names = flag_names

    def _GetInputToCheckerFunction(self, flag_values):
        """Build the {flag_name: value} dict handed to the checker.

        Args:
          flag_values: gflags.FlagValues

        Returns:
          dictionary mapping each name in self.flag_names to the value of the
          corresponding flag (string, boolean, etc).
        """
        values = {}
        for name in self.flag_names:
            values[name] = flag_values[name].value
        return values

    def PrintFlagsWithValues(self, flag_values):
        """Render every checked flag and its current value."""
        rendered = ['%s=%s' % (name, flag_values[name].value)
                    for name in self.flag_names]
        return 'flags ' + ', '.join(rendered)

    def GetFlagsNames(self):
        """All flags participating in this constraint."""
        return self.flag_names
| Python |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
# Refuse to load when another module has already replaced socket.socket;
# wrapping would then capture the wrong implementation.
if getattr(socket, 'socket', None) is None:
    raise ImportError('socket.socket missing, proxy support unusable')

# Proxy protocol identifiers accepted by setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4

# Module-wide default proxy settings, set via setdefaultproxy().
_defaultproxy = None
# The original socket class, kept so socksocket can call through to it.
_orgsocket = socket.socket

# Exception hierarchy: every proxy failure derives from ProxyError.
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass

# Human-readable messages, indexed by the error codes raised below.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

# SOCKS5 reply codes (RFC 1928 section 6), indexed by the server's REP byte.
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

# Username/password sub-negotiation outcomes (RFC 1929).
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

# SOCKS4 reply codes, indexed by (code - 90).
_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])

    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
def wrapmodule(module):
    """wrapmodule(module)

    Attempts to replace a module's socket library with a SOCKS socket.
    A default proxy must have been configured first with setdefaultproxy(...).
    This will only work on modules that import socket directly into the
    namespace; most of the Python Standard Library falls into this category.
    """
    # Guard clause: refuse to wrap anything until a default proxy exists.
    if _defaultproxy is None:
        raise GeneralProxyError((4, "no proxy specified"))
    module.socket.socket = socksocket
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Inherit the module-wide default proxy settings, if any were set.
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
        # True while requests are tunneled verbatim; connect() sets this to
        # False for PROXY_TYPE_HTTP_NO_TUNNEL so sendall() rewrites headers.
        self.__httptunnel = True

    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count - len(data))
            if not d:
                raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data

    def sendall(self, content, *args):
        """Override socket.socket.sendall to rewrite the request headers
        for non-tunneling proxies if needed.
        """
        if not self.__httptunnel:
            content = self.__rewriteproxy(content)
        return super(socksocket, self).sendall(content, *args)

    def __rewriteproxy(self, header):
        """Rewrite HTTP request headers to support non-tunneling proxies
        (i.e. those which do not support the CONNECT method).
        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
        """
        host, endpt = None, None
        hdrs = header.split("\r\n")
        for hdr in hdrs:
            if hdr.lower().startswith("host:"):
                host = hdr
            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
                endpt = hdr
        if host and endpt:
            hdrs.remove(host)
            hdrs.remove(endpt)
            host = host.split(" ")[1]
            endpt = endpt.split(" ")
            if (self.__proxy[4] != None and self.__proxy[5] != None):
                hdrs.insert(0, self.__getauthheader())
            hdrs.insert(0, "Host: %s" % host)
            # A non-tunneling proxy needs the absolute URI in the request line.
            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
        return "\r\n".join(hdrs)

    def __getauthheader(self):
        # Build a Basic Proxy-Authorization header from the stored credentials.
        auth = self.__proxy[4] + ":" + self.__proxy[5]
        return "Proxy-Authorization: Basic " + base64.b64encode(auth)

    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -    The address of the server (IP or DNS).
        port -    The port of the server. Defaults to 1080 for SOCKS
                servers and 8080 for HTTP proxy servers.
        rdns -    Should DNS queries be performed on the remote side
                (rather than the local side). The default is True.
                Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
                The default is no authentication.
        password - Password to authenticate with to the server.
                Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)

    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected.
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method.
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required.
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response.
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed.
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded.
        else:
            # Reaching here is always bad.
            self.close()
            # BUGFIX: compare a one-byte slice as everywhere else in this
            # method; the original indexed chosenauth[1], which is a 1-char
            # str on Python 2 (and .encode() on '\xff' raised there) and an
            # int on Python 3, so the comparison could never succeed.
            if chosenauth[1:2] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection.
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely.
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally.
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response.
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed.
            self.close()
            if ord(resp[1:2]) <= 8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port.
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # Domain-name address type: one length byte precedes the name.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy).
        """
        return self.__proxypeername

    def __negotiatesocks4(self, destaddr, destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address.
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4a: send the placeholder 0.0.0.1 and let the proxy
                # resolve the name.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet.
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4.
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required.
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server.
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data.
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error.  (The original closed the socket a
            # second time inside the 'in (91, 92, 93)' branch; once suffices.)
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port.
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # BUGFIX: the original tested 'rmtrslv != None', which is always true
        # for a bool, so the peer name was set to the 0.0.0.1 placeholder
        # whenever SOCKS4a remote resolving was used.  Mirror the SOCKS5
        # logic instead: report the DNS name when the proxy resolved it.
        if rmtrslv:
            self.__proxypeername = (destaddr, destport)
        else:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)

    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now.
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n".
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            d = self.recv(1)
            if not d:
                # BUGFIX: the original looped forever if the proxy closed
                # the connection before completing the response headers.
                raise GeneralProxyError((0, "connection closed unexpectedly"))
            resp = resp + d
        # We just need the first line to check if the connection
        # was successful.
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first.
        if (not type(destpair) in (list, tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            if destpair[1] == 443:
                # HTTPS must be tunneled even through a "no tunnel" proxy.
                self.__negotiatehttp(destpair[0], destpair[1])
            else:
                # Plain HTTP: defer to sendall()'s header rewriting.
                self.__httptunnel = False
        elif self.__proxy[0] == None:
            # No proxy configured: connect directly.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| Python |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Codepoint ranges (inclusive) whose characters must be %-escaped when
# converting an IRI to a URI: the 'ucschar' and 'iprivate' sets of RFC 3987.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]

def encode(c):
    """Return *c* unchanged, or its %-escaped UTF-8 encoding when the
    character falls inside one of the RFC 3987 escape ranges.

    Args:
      c: a single-character string.
    Returns:
      Either *c* itself or an escape sequence such as '%E2%98%84'.
    """
    retval = c
    i = ord(c)
    for low, high in escape_range:
        # The ranges are sorted, so stop as soon as one starts past i.
        if i < low:
            break
        if low <= i <= high:
            # Escape each octet of the UTF-8 encoding as %XX.  '%02X'
            # (the original used '%2X', which space-pads octets < 0x10)
            # guarantees zero padding, and octets are normalized to ints
            # so this works whether bytes iterate as ints (Python 3) or
            # as 1-char strings (Python 2).
            octets = c.encode('utf-8')
            retval = "".join(["%%%02X" % (o if isinstance(o, int) else ord(o))
                              for o in octets])
            break
    return retval
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if not isinstance(uri, unicode):
        # Byte strings are assumed to already be URIs; pass them through.
        return uri
    (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
    # Internationalized domain names use IDNA encoding, not %-escaping.
    authority = authority.encode('idna')
    # For each character in 'ucschar' or 'iprivate':
    #   1. encode it as utf-8
    #   2. then %-encode each octet of that utf-8
    rebuilt = urlparse.urlunsplit((scheme, authority, path, query, fragment))
    return "".join(encode(ch) for ch in rebuilt)
# Self-test: run this module directly to exercise iri2uri().
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            # Host uses IDNA; path/query/fragment use %-escaped UTF-8.
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| Python |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.2"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# remove deprecated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        # Wrap *sock* using the stdlib ssl module, optionally verifying the
        # peer certificate against the *ca_certs* bundle.
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                               cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Pre-2.6 fallback: socket.ssl cannot validate certificates at all,
    # so validation must be explicitly disabled by the caller.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        if not disable_validation:
            raise CertificateValidationUnsupported(
                "SSL certificate validation is not supported without "
                "the ssl module installed. To avoid this error, install "
                "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    # Pre-2.3 interpreters lack the unicode support iri2uri needs;
    # degrade to an identity function.
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is an actual timeout value rather than
    None or the socket module's 'use the global default' sentinel."""
    if timeout is None:
        return False
    if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return timeout is not socket._GLOBAL_DEFAULT_TIMEOUT
    return True
# Public API of this module.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
    'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
    'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
    'debuglevel', 'ProxiesUnavailableError']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    def sorted(seq):
        # Fallback for interpreters without the sorted() builtin: sorts in
        # place and returns the same list rather than a copy.
        seq.sort()
        return seq

# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

if not hasattr(httplib.HTTPResponse, 'getheaders'):
    # Monkey-patch getheaders() onto HTTPResponse where it is missing.
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # Keep the offending response and body so callers may choose to
        # treat the error as an ordinary (response, content) pair.
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass

class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        # The hostname we tried to validate and the certificate presented.
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
# Upper bound on automatically-followed redirects (see the notes above).
DEFAULT_MAX_REDIRECTS = 5

# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Split *uri* into (scheme, authority, path, query, fragment).

    Uses the regex given in Appendix B of RFC 3986; components that are
    absent come back as None.
    """
    pieces = URI.match(uri).groups()
    return (pieces[1], pieces[3], pieces[4], pieces[6], pieces[8])
def urlnorm(uri):
    """Normalize *uri* into (scheme, authority, request_uri, defrag_uri).

    Lower-cases the scheme and authority, defaults an empty path to "/",
    and rebuilds a defragmented URI suitable for use as a cache key.

    Raises:
      RelativeURIError: if the URI lacks a scheme or an authority.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and authority are case-insensitive per RFC 3986.  (The original
    # lower-cased the scheme twice.)
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.
    Strips dangerous and common characters to create a filename we
    can use to store the cache in.

    The md5 of the full input is appended so that two URLs which collapse
    to the same sanitized prefix (or are truncated) still get distinct
    cache files.
    """
    try:
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                # NOTE(review): Python-2 semantics — byte strings are decoded
                # as UTF-8 before IDNA-encoding the whole URL string.
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        # Non-IDNA-encodable input is hashed/sanitized as-is.
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of:
      "FRESH"       -- the cached entry may be served as-is.
      "STALE"       -- the cached entry must be revalidated.
      "TRANSPARENT" -- bypass the cache entirely.

    We don't handle the following:
    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:
        no-cache
        only-if-cached
        max-age
        min-fresh
    """
    # 'in' is used instead of the Python-2-only dict.has_key(), matching the
    # 'not in' test this function already contained.
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif 'no-cache' in cc:
        retval = "TRANSPARENT"
    elif 'no-cache' in cc_response:
        retval = "STALE"
    elif 'only-if-cached' in cc:
        retval = "FRESH"
    elif 'date' in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if 'max-age' in cc_response:
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif 'expires' in response_headers:
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides the response's lifetime.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh effectively ages the entry before the freshness test.
        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way the won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store a response under cachekey, unless 'no-store' forbids it.

    The cached entry is a single string: a 'status:' line, the response
    headers serialized as an RFC-822 style message with CRLF line endings,
    then the raw body.  Does nothing when cachekey is falsy.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            # Either side said 'no-store': also evict any existing entry.
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                # Transport-level headers must not be replayed from cache.
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value
            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    # Copy each varied request header into a '-varied-*'
                    # annotation so a later request can verify it matches.
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass
            # NOTE(review): response_headers is expected to be a Response
            # object here (it has a .status attribute), not a plain dict.
            status = response_headers.status
            if status == 304:
                # A 304 means the previously cached 200 entity is still
                # valid, so store the entry under its original status.
                status = 200
            status_header = 'status: %d\r\n' % status
            header_str = info.as_string()
            # Normalize any bare CR or LF in the serialized headers to CRLF.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])
            cache.set(cachekey, text)
def _cnonce():
    """Generate a random 16-character client nonce for Digest/HMACDigest auth.

    The nonce hashes the current time together with a random digit string,
    so it is effectively unique per request but carries no meaning itself.
    """
    # random.choice draws uniformly from all ten digits; the previous
    # randrange(0, 9) upper bound silently excluded the digit '9'.
    digits = [random.choice("0123456789") for i in range(20)]
    dig = _md5("%s:%s" % (time.ctime(), digits)).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: Base64(SHA1(cnonce + created + password))."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things. First,
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication.
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for one authentication scheme scoped to a host and path prefix.

    Subclasses override request() to decorate outgoing requests and
    response() to react to authentication-related response headers.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        parts = parse_uri(request_uri)
        self.path = parts[2]
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Return how many path segments below this auth's scope the URI lies."""
        parse_uri(request_uri)  # retained from the original; result unused
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """True when (host, request_uri) falls inside this auth's scope."""
        # XXX Should we normalize the request_uri?
        path = parse_uri(request_uri)[2]
        return host == self.host and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Add the appropriate Authorization header.  Override in subclasses."""
        pass

    def response(self, response, content):
        """Inspect an authorized response, e.g. for fresh nonces.

        Return True if the request should be retried -- for example
        Digest may return stale=true.  Override in subclasses if necessary.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic authentication: a static base64 'user:password' header."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the 'authorization' request header for Basic auth."""
        token = base64.b64encode("%s:%s" % self.credentials).strip()
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """RFC 2617 Digest authentication.

    Only do qop='auth' and MD5, since that is all Apache currently
    implements."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        # Default qop to 'auth'; reject challenges that don't offer it.
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        # Only MD5 is supported (matches Apache's implementation).
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 per RFC 2617: "username:realm:password".
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # Nonce-use count; incremented after every request.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Add the Digest 'authorization' header for this request.

        cnonce may be supplied for testing; otherwise a fresh one is made.
        """
        # H and KD as defined by RFC 2617 for the MD5 algorithm.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        self.challenge['nc'] += 1

    def response(self, response, content):
        """Track nonce updates; return True when the request must be retried."""
        if not response.has_key('authentication-info'):
            # No Authentication-Info header: check for a stale nonce,
            # adopt the new one and signal a retry.
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # The server may hand us the next nonce to use proactively.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """HMACDigest authentication scheme.

    Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        # Normalize 'reason'; anything unrecognized falls back to 'unauthorized'.
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        # A server nonce is mandatory for this scheme.
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derive the HMAC key: hash of "username:H(password+salt):realm".
        # NOTE(review): assumes _md5/_sha expose the old md5/sha module
        # interface with a .new() constructor -- confirm against the
        # compatibility shims earlier in this file.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Add the HMACDigest 'authorization' header for this request."""
        # The digest covers the method, URI, client/server nonces and the
        # concatenated values of all end-to-end headers.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest  = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        """Return True (retry) when the server reports an integrity or stale failure."""
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication (X-WSSE header).

    This is thinly tested and should not be relied upon: at this time
    there isn't any third party server to test against.  Blogger and
    TypePad implemented this algorithm at one point, but Blogger has
    since switched to Basic over HTTPS, and TypePad implemented it
    wrong, by never issuing a 401 challenge -- your client must already
    know the endpoint expects WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the 'authorization' and 'X-WSSE' headers for this request."""
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        digest = _wsse_username_token(nonce, created, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                digest,
                nonce,
                created)
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin ('GoogleLogin') authentication.

    Note: the constructor performs a network round trip to the
    ClientLogin endpoint to obtain the Auth token.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge.
        # For the rest we guess based on the URI.
        if service == 'xapi' and  request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"
        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # ClientLogin responds with 'key=value' lines (e.g. SID, LSID, Auth).
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: keep an empty token rather than failing here.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Maps the (lower-cased) scheme name from a WWW-Authenticate challenge to
# the Authentication subclass that implements it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Preference order when a server offers several schemes: tried first to last.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        # cache: directory path for entries; created if it doesn't exist.
        # safe: maps a cache key to a filesystem-safe file name.
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for key, or None if absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the deprecated file() builtin; try/finally
            # guarantees the handle is closed even if read() raises.
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store value under key, overwriting any existing entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove the entry for key, if present."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A registry of (name, password) pairs, optionally scoped to a domain."""
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a credential; an empty domain matches every host."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs usable for domain, in insertion order."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """A registry of SSL client certificates.

    Identical to Credentials except that the (name, password) pair is
    interpreted as (key file, cert file)."""
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass

    def astuple(self):
        """Return all settings as a tuple, suitable for socks socket setproxy(*...)."""
        return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
                self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when enough information (host and port) is present to use the proxy."""
        # 'is not None' rather than '!= None': identity is the correct
        # way to test for None (PEP 8).
        return (self.proxy_host is not None) and (self.proxy_port is not None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        # Try each resolved address until one connects; keep the last
        # socket.error in msg to re-raise if they all fail.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route the connection through the configured proxy.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                    # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """
    This class allows communication via SSL.

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info
        # Fall back to the bundled CA certificate file when none is given.
        if ca_certs is None:
          ca_certs = CA_CERTS
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation

    # The following two methods were adapted from https_wrapper.py, released
    # with the Google Appengine SDK at
    # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
    # under the following license:
    #
    # Copyright 2007 Google Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    #

    def _GetValidHostsForCert(self, cert):
        """Returns a list of valid host globs for an SSL certificate.

        Args:
          cert: A dictionary representing an SSL certificate.
        Returns:
          list: A list of valid host globs.
        """
        if 'subjectAltName' in cert:
            # dNSName entries take precedence over the subject commonName.
            return [x[1] for x in cert['subjectAltName']
                    if x[0].lower() == 'dns']
        else:
            return [x[0][1] for x in cert['subject']
                    if x[0][0].lower() == 'commonname']

    def _ValidateCertificateHostname(self, cert, hostname):
        """Validates that a given hostname is valid for an SSL certificate.

        Args:
          cert: A dictionary representing an SSL certificate.
          hostname: The hostname to test.
        Returns:
          bool: Whether or not the hostname is valid for this certificate.
        """
        hosts = self._GetValidHostsForCert(cert)
        for host in hosts:
            # Translate the certificate glob into a regex: '*' matches one
            # label (no dots), literal dots are escaped.
            host_re = host.replace('.', '\.').replace('*', '[^.]*')
            if re.search('^%s$' % (host_re,), hostname, re.I):
                return True
        return False

    def connect(self):
        "Connect to a host on a given (SSL) port."

        msg = "getaddrinfo returns an empty list"
        # Try each resolved address until one connects.
        for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
            self.host, self.port, 0, socket.SOCK_STREAM):
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    sock = socks.socksocket(family, socktype, proto)
                    sock.setproxy(*self.proxy_info.astuple())
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                # Wrap the raw socket in SSL, optionally validating the
                # server certificate against ca_certs.
                self.sock =_ssl_wrap_socket(
                    sock, self.key_file, self.cert_file,
                    self.disable_ssl_certificate_validation, self.ca_certs)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                if not self.disable_ssl_certificate_validation:
                    cert = self.sock.getpeercert()
                    # NOTE(review): split(':', 0) performs NO split
                    # (maxsplit=0), so a ':port' suffix would never be
                    # stripped -- harmless only because httplib normally
                    # stores host without the port.  Confirm intent.
                    hostname = self.host.split(':', 0)[0]
                    if not self._ValidateCertificateHostname(cert, hostname):
                        raise CertificateHostnameMismatch(
                            'Server presented certificate that does not match '
                            'host %s: %s' % (hostname, cert), hostname, cert)
            except ssl_SSLError, e:
                if sock:
                    sock.close()
                if self.sock:
                    self.sock.close()
                self.sock = None
                # Unfortunately the ssl module doesn't seem to provide any way
                # to get at more detailed error information, in particular
                # whether the error is due to certificate validation or
                # something else (such as SSL protocol mismatch).
                if e.errno == ssl.SSL_ERROR_SSL:
                    raise SSLHandshakeError(e)
                else:
                    raise
            except (socket.timeout, socket.gaierror):
                raise
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
# Default connection class per URI scheme; replaced by the App Engine
# specific classes when running under Google App Engine (see below).
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
    from google.appengine.api import apiproxy_stub_map
    if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
        raise ImportError  # Bail out; we're not actually running on App Engine.
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError

    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httplib.HTTPResponse()."""
        def read(self):
            # The real body accessor is patched on per-instance in
            # AppEngineHttpConnection.request() via setattr.
            pass

    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch offers no client certs, proxies or custom CA bundles;
            # reject those options outright.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_certificate_validation
            # Pretend a socket already exists so callers skip connect().
            self.sock = True

        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = str(response.status_code)
                self.response.status = response.status_code
                setattr(self.response, 'read', lambda : response.content)

            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()

        def getresponse(self):
            if self.response:
                return self.response
            else:
                # request() either wasn't called or raised before storing one.
                raise httplib.HTTPException()

        def set_debuglevel(self, level):
            pass

        def connect(self):
            # urlfetch connects per request; nothing to do here.
            pass

        def close(self):
            pass

    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None):
            # NOTE(review): unlike the parent, this signature accepts no
            # ca_certs / disable_certificate_validation -- confirm intended.
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info)
            self.scheme = 'https'

    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
"""
The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
# all redirecs are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
if conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if conn.sock is None:
if i == 0:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i == 0:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
    """ Performs a single HTTP request.

    The 'uri' is the URI of the HTTP resource and can begin
    with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

    The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
    There is no restriction on the methods allowed.

    The 'body' is the entity body to be sent with the request. It is a string
    object.

    Any extra headers that are to be sent with the request should be provided in the
    'headers' dictionary.

    The maximum number of redirects to follow before raising an
    exception is 'redirections'. The default is 5.

    The return value is a tuple of (response, content), the first
    being an instance of the 'Response' class, the second being
    a string that contains the response entity body.
    """
    try:
        if headers is None:
            headers = {}
        else:
            # Work on a normalized copy so lookups below see canonical names.
            headers = self._normalize_headers(headers)

        if not headers.has_key('user-agent'):
            headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__

        uri = iri2uri(uri)

        (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
        domain_port = authority.split(":")[0:2]
        # An 'http' URI aimed at port 443 is really https; rewrite it so the
        # correct connection class is selected below.
        if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
            scheme = 'https'
            authority = domain_port[0]

        # One pooled connection per scheme+authority pair.
        conn_key = scheme+":"+authority
        if conn_key in self.connections:
            conn = self.connections[conn_key]
        else:
            if not connection_type:
                connection_type = SCHEME_TO_CONNECTION[scheme]
            certs = list(self.certificates.iter(authority))
            # Only HTTPS connection classes accept certificate/SSL kwargs.
            if issubclass(connection_type, HTTPSConnectionWithTimeout):
                if certs:
                    conn = self.connections[conn_key] = connection_type(
                            authority, key_file=certs[0][0],
                            cert_file=certs[0][1], timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=
                                    self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=
                                    self.disable_ssl_certificate_validation)
            else:
                conn = self.connections[conn_key] = connection_type(
                        authority, timeout=self.timeout,
                        proxy_info=self.proxy_info)
            conn.set_debuglevel(debuglevel)

        # Don't advertise compression for range requests: a compressed
        # partial body could not be decoded.
        if 'range' not in headers and 'accept-encoding' not in headers:
            headers['accept-encoding'] = 'gzip, deflate'

        info = email.Message.Message()
        cached_value = None
        if self.cache:
            cachekey = defrag_uri
            cached_value = self.cache.get(cachekey)
            if cached_value:
                # info = email.message_from_string(cached_value)
                #
                # Need to replace the line above with the kludge below
                # to fix the non-existent bug not fixed in this
                # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                try:
                    # Cached entries are stored as "<headers>\r\n\r\n<body>".
                    info, content = cached_value.split('\r\n\r\n', 1)
                    feedparser = email.FeedParser.FeedParser()
                    feedparser.feed(info)
                    info = feedparser.close()
                    feedparser._parse = None
                except IndexError:
                    # Corrupt cache entry: discard it and proceed uncached.
                    self.cache.delete(cachekey)
                    cachekey = None
                    cached_value = None
        else:
            cachekey = None

        if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
            # http://www.w3.org/1999/04/Editing/
            headers['if-match'] = info['etag']

        if method not in ["GET", "HEAD"] and self.cache and cachekey:
            # RFC 2616 Section 13.10
            self.cache.delete(cachekey)

        # Check the vary header in the cache to see if this request
        # matches what varies in the cache.
        if method in ['GET', 'HEAD'] and 'vary' in info:
            vary = info['vary']
            vary_headers = vary.lower().replace(' ', '').split(',')
            for header in vary_headers:
                key = '-varied-%s' % header
                value = info[key]
                if headers.get(header, None) != value:
                    # A varied request header differs from the one recorded
                    # with the cached entry, so the entry is unusable.
                    cached_value = None
                    break

        if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
            if info.has_key('-x-permanent-redirect-url'):
                # Should cached permanent redirects be counted in our redirection count? For now, yes.
                if redirections <= 0:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                response.previous = Response(info)
                response.previous.fromcache = True
            else:
                # Determine our course of action:
                #   Is the cached entry fresh or stale?
                #   Has the client requested a non-cached response?
                #
                # There seems to be three possible answers:
                # 1. [FRESH] Return the cache entry w/o doing a GET
                # 2. [STALE] Do the GET (but add in cache validators if available)
                # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                entry_disposition = _entry_disposition(info, headers)

                if entry_disposition == "FRESH":
                    if not cached_value:
                        # Cache entry disappeared between lookups: behave
                        # like an unreachable upstream (gateway timeout).
                        info['status'] = '504'
                        content = ""
                    response = Response(info)
                    if cached_value:
                        response.fromcache = True
                    return (response, content)

                if entry_disposition == "STALE":
                    # Revalidate with whatever validators the entry carries.
                    if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                        headers['if-none-match'] = info['etag']
                    if info.has_key('last-modified') and not 'last-modified' in headers:
                        headers['if-modified-since'] = info['last-modified']
                elif entry_disposition == "TRANSPARENT":
                    pass

                (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)

                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    # Present the revalidated cached body as a normal 200.
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    # Upstream error: the cached entry is no longer trustworthy.
                    self.cache.delete(cachekey)
                    content = new_content
        else:
            cc = _parse_cache_control(headers)
            if cc.has_key('only-if-cached'):
                # Client insisted on a cached answer but we have none: 504
                # per RFC 2616 Cache-Control: only-if-cached semantics.
                info['status'] = '504'
                response = Response(info)
                content = ""
            else:
                (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
    except Exception, e:
        if self.force_exception_to_status_code:
            # Convert the failure into a synthetic Response instead of
            # letting the exception propagate to the caller.
            if isinstance(e, HttpLib2ErrorWithResponse):
                response = e.response
                content = e.content
                response.status = 500
                response.reason = str(e)
            elif isinstance(e, socket.timeout):
                content = "Request Timeout"
                response = Response( {
                        "content-type": "text/plain",
                        "status": "408",
                        "content-length": len(content)
                        })
                response.reason = "Request Timeout"
            else:
                content = str(e)
                response = Response( {
                        "content-type": "text/plain",
                        "status": "400",
                        "content-length": len(content)
                        })
                response.reason = "Bad Request"
        else:
            raise

    return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""

    # Is this response from our local cache?
    fromcache = False
    # HTTP protocol version used by server: 10 for HTTP/1.0, 11 for HTTP/1.1.
    version = 11
    # Status code returned by server.
    status = 200
    # Reason phrase returned by server.
    reason = "Ok"
    # Previous Response in a redirect chain, if any.
    previous = None

    def __init__(self, info):
        """Populates the dict from an httplib.HTTPResponse, an
        email.Message, or a plain mapping of header values."""
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key] = value
            self.status = int(self['status'])
        else:
            for key, value in info.iteritems():
                self[key] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Allow code written against httplib-style objects to say resp.dict.
        if name == 'dict':
            return self
        raise AttributeError(name)
| Python |
#!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
import cgi
import getopt
import os
import re
import string
import struct
import sys
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
  """Returns the module that's calling into this module.

  We generally use this function to get the name of the module calling a
  DEFINE_foo... function.
  """
  # Walk up the stack until we reach a frame whose globals are not this
  # module's, i.e. the first frame belonging to a caller.
  for depth in range(1, sys.getrecursionlimit()):
    frame_globals = sys._getframe(depth).f_globals
    if frame_globals is globals():
      continue
    module, module_name = _GetModuleObjectAndName(frame_globals)
    if module_name is not None:
      return module, module_name
  raise AssertionError("No module was found")
def _GetCallingModule():
  """Returns the name of the module that's calling into this module."""
  _, module_name = _GetCallingModuleObjectAndName()
  return module_name
def _GetThisModuleObjectAndName():
  """Returns: (module object, module name) for this module."""
  this_module_globals = globals()
  return _GetModuleObjectAndName(this_module_globals)
# module exceptions:
class FlagsError(Exception):
  """The base class for all flags errors."""
class DuplicateFlag(FlagsError):
  """Raised if there is a flag naming conflict."""
class CantOpenFlagFileError(FlagsError):
  """Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
  """Special case of DuplicateFlag -- SWIG flag value can't be set to None.

  This can be raised when a duplicate flag is created. Even if allow_override is
  True, we still abort if the new value is None, because it's currently
  impossible to pass None default value back to SWIG. See FlagValues.SetDefault
  for details.
  """
class DuplicateFlagError(DuplicateFlag):
  """A DuplicateFlag whose message cites the conflicting definitions.

  A DuplicateFlagError conveys more information than a DuplicateFlag,
  namely the modules where the conflicting definitions occur. This
  class was created to avoid breaking external modules which depend on
  the existing DuplicateFlags interface.
  """

  def __init__(self, flagname, flag_values, other_flag_values=None):
    """Create a DuplicateFlagError.

    Args:
      flagname: Name of the flag being redefined.
      flag_values: FlagValues object containing the first definition of
          flagname.
      other_flag_values: If this argument is not None, it should be the
          FlagValues object where the second definition of flagname occurs.
          If it is None, we assume that we're being called when attempting
          to create the flag a second time, and we use the module calling
          this one as the source of the second definition.
    """
    self.flagname = flagname
    first_module = flag_values.FindModuleDefiningFlag(
        flagname, default='<unknown>')
    if other_flag_values is not None:
      second_module = other_flag_values.FindModuleDefiningFlag(
          flagname, default='<unknown>')
    else:
      # Attribute the redefinition to whichever module is calling us.
      second_module = _GetCallingModule()
    msg = ("The flag '%s' is defined twice. First from %s,"
           " Second from %s" % (self.flagname, first_module, second_module))
    DuplicateFlag.__init__(self, msg)
class IllegalFlagValue(FlagsError):
  """The flag command line argument is illegal."""
class UnrecognizedFlag(FlagsError):
  """Raised if a flag is unrecognized."""
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
  """An UnrecognizedFlag that also records the offending name and value."""

  def __init__(self, flagname, flagvalue=''):
    # flagvalue stays '' when the error comes from a reference to an
    # undefined flag rather than from an unparsed command-line argument.
    self.flagname = flagname
    self.flagvalue = flagvalue
    UnrecognizedFlag.__init__(
        self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
  """Returns: an integer, the width of help lines that is used in TextWrap."""
  # Without a tty (or without the unix-only termios/fcntl modules) we
  # cannot query the terminal, so use the module default.
  if not sys.stdout.isatty() or termios is None or fcntl is None:
    return _help_width
  try:
    # TIOCGWINSZ reports the window size; the second short is the columns.
    winsize = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
    columns = struct.unpack('hh', winsize)[1]
    # Emacs mode returns 0; treat any value below 40 as unreasonable.
    if columns >= 40:
      return columns
    # Fall back to $COLUMNS, or the module default when unset.
    # Returning an int as default is fine, int(int) just returns the int.
    return int(os.getenv('COLUMNS', _help_width))
  except (TypeError, IOError, struct.error):
    return _help_width
def CutCommonSpacePrefix(text):
  """Removes a common space prefix from the lines of a multiline text.

  If the first line does not start with a space it is kept untouched and
  the common prefix is computed over the remaining lines only; this makes
  the function suitable for doc strings regardless of whether the text
  starts right after the opening quotes or on its own line. Trailing empty
  lines are dropped, and empty interior lines are ignored while measuring
  the common leading whitespace.

  Args:
    text: text to work on

  Returns:
    the resulting text
  """
  lines = text.splitlines()
  # Discard trailing empty lines.
  while lines and not lines[-1]:
    lines.pop()
  if not lines:
    return ''
  # Keep an unindented first line out of the prefix computation.
  first = []
  if not (lines[0] and lines[0][0].isspace()):
    first = [lines.pop(0)]
  # Measure the shared leading whitespace over non-empty lines only.
  prefix = os.path.commonprefix([line for line in lines if line])
  strip_len = len(prefix) - len(prefix.lstrip())
  # If there is a common space prefix, drop it from every non-empty line.
  if strip_len:
    lines = [line[strip_len:] if line else line for line in lines]
  return '\n'.join(first + lines)
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs='    '):
  """Wraps a given text to a maximum line length and returns it.

  We turn lines that only contain whitespace into empty lines.  We keep
  new lines and tabs (e.g., we do not treat tabs as spaces).

  Args:
    text:             text to wrap
    length:           maximum length of a line, includes indentation
                      if this is None then use GetHelpWidth()
    indent:           indent for all but first line
    firstline_indent: indent for first line; if None, fall back to indent
    tabs:             replacement for tabs

  Returns:
    wrapped text

  Raises:
    FlagsError: if indent not shorter than length
    FlagsError: if firstline_indent not shorter than length
  """
  # Get defaults where callee used None
  if length is None:
    length = GetHelpWidth()
  if indent is None:
    indent = ''
  if len(indent) >= length:
    raise FlagsError('Indent must be shorter than length')
  # In line we will be holding the current line which is to be started
  # with indent (or firstline_indent if available) and then appended
  # with words.
  if firstline_indent is None:
    firstline_indent = ''
    line = indent
  else:
    line = firstline_indent
    if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')

  # If the callee does not care about tabs we simply convert them to
  # spaces.  If callee wanted tabs to be single space then we do that
  # already here.
  if not tabs or tabs == ' ':
    text = text.replace('\t', ' ')
  else:
    tabs_are_whitespace = not tabs.strip()

  line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)

  # Split the text into lines and the lines with the regex above. The
  # resulting lines are collected in result[]. For each split we get the
  # spaces, the tabs and the next non white space (e.g. next word).
  result = []
  for text_line in text.splitlines():
    # Store result length so we can find out whether processing the next
    # line gave any new content
    old_result_len = len(result)
    # Process next line with line_regex. For optimization we do an rstrip().
    # - process tabs (changes either line or word, see below)
    # - process word (first try to squeeze on line, then wrap or force wrap)
    # Spaces found on the line are ignored, they get added while wrapping as
    # needed.
    for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
      # If tabs weren't converted to spaces, handle them now
      if current_tabs:
        # If the last thing we added was a space anyway then drop
        # it. But let's not get rid of the indentation.
        if (((result and line != indent) or
             (not result and line != firstline_indent)) and line[-1] == ' '):
          line = line[:-1]
        # Add the tabs, if that means adding whitespace, just add it at
        # the line; the rstrip() code will shorten the line down if
        # necessary
        if tabs_are_whitespace:
          line += tabs * len(current_tabs)
        else:
          # if not all tab replacement is whitespace we prepend it to the word
          word = tabs * len(current_tabs) + word
      # Handle the case where word cannot be squeezed onto current last line
      if len(line) + len(word) > length and len(indent) + len(word) <= length:
        result.append(line.rstrip())
        line = indent + word
        word = ''
        # No space left on line or can we append a space?
        if len(line) + 1 >= length:
          result.append(line.rstrip())
          line = indent
        else:
          line += ' '
      # Add word and shorten it up to allowed line length. Restart next
      # line with indent and repeat, or add a space if we're done (word
      # finished).  This deals with words that cannot fit on one line
      # (e.g. indent + word longer than allowed line length).
      while len(line) + len(word) >= length:
        line += word
        result.append(line[:length])
        word = line[length:]
        line = indent
      # Default case, simply append the word and a space
      if word:
        line += word + ' '
    # End of input line. If we have content we finish the line. If the
    # current line is just the indent but we had content in during this
    # original line then we need to add an empty line.
    if (result and line != indent) or (not result and line != firstline_indent):
      result.append(line.rstrip())
    elif len(result) == old_result_len:
      result.append('')
    line = indent
  return '\n'.join(result)
def DocToHelp(doc):
  """Takes a __doc__ string and reformats it as help.

  Args:
    doc: A string, typically a module or flag docstring.

  Returns:
    A string: the docstring with surrounding whitespace trimmed, common
    leading indentation removed, and single newlines between non-blank
    text joined into spaces so paragraphs re-flow; blank-line paragraph
    breaks are preserved.
  """
  # Get rid of starting and ending white space. Using lstrip() or even
  # strip() could drop more than maximum of first line and right space
  # of last line.
  doc = doc.strip()
  # Get rid of all empty lines
  whitespace_only_line = re.compile('^[ \t]+$', re.M)
  doc = whitespace_only_line.sub('', doc)
  # Cut out common space at line beginnings
  doc = CutCommonSpacePrefix(doc)
  # Just like this module's comment, comments tend to be aligned somehow.
  # In other words they all start with the same amount of white space
  # 1) keep double new lines
  # 2) keep ws after new lines if not empty line
  # 3) all other new lines shall be changed to a space
  # Solution: Match new lines between non white space and replace with space.
  # NOTE: the fourth positional argument of re.sub is 'count', not 'flags'.
  # The previous code passed re.M there, which silently capped the number
  # of substitutions at 8 (the integer value of re.M).  No flag is needed
  # here at all, since the pattern contains no '^'/'$' anchors.
  doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
  return doc
def _GetModuleObjectAndName(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A pair consisting of (1) module object and (2) module name (a
string). Returns (None, None) if the module could not be
identified.
"""
# The use of .items() (instead of .iteritems()) is NOT a mistake: if
# a parallel thread imports a module while we iterate over
# .iteritems() (not nice, but possible), we get a RuntimeError ...
# Hence, we use the slightly slower but safer .items().
for name, module in sys.modules.items():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
# Pick a more informative name for the main module.
name = sys.argv[0]
return (module, name)
return (None, None)
def _GetMainModule():
  """Returns: string, name of the module from which execution started."""
  # First, try to use the same logic used by _GetCallingModuleObjectAndName(),
  # i.e., call _GetModuleObjectAndName().  For that we first need to
  # find the dictionary that the main module uses to store the
  # globals.
  #
  # That's (normally) the same dictionary object that the deepest
  # (oldest) stack frame is using for globals.
  frame = sys._getframe(0)
  while frame.f_back is not None:
    frame = frame.f_back
  main_globals = frame.f_globals
  main_module_name = _GetModuleObjectAndName(main_globals)[1]
  # The above strategy fails in some cases (e.g., tools that compute
  # code coverage by redefining, among other things, the main module).
  # If so, just use sys.argv[0].  We can probably always do this, but
  # it's safest to try to use the same logic as
  # _GetCallingModuleObjectAndName().
  if main_module_name is None:
    main_module_name = sys.argv[0]
  return main_module_name
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
use_gnu_getopt: wether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
"""Return the ID of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module_id, flags in self.FlagsByModuleIdDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module_id
return default
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
# Each flags with shortname appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
try:
self[flag_name] = flag
except DuplicateFlagError:
raise DuplicateFlagError(flag_name, self,
other_flag_values=flag_values)
def RemoveFlagValues(self, flag_values):
"""Remove flags that were previously appended from another FlagValues.
Args:
flag_values: registry containing flags to remove.
"""
for flag_name in flag_values.FlagDict():
self.__delattr__(flag_name)
  def __setitem__(self, name, flag):
    """Registers a new flag variable.

    The flag is registered under name and, if it has one, under its
    short name as well (both map to the same Flag object).

    Args:
      name: A string, the flag's long name.
      flag: A Flag object.

    Raises:
      IllegalFlagValue: if flag is not a Flag instance.
      FlagsError: if name is not a non-empty string.
      DuplicateFlagError: if name (or the short name) is already taken
        by a flag that does not allow overriding.
    """
    fl = self.FlagDict()
    if not isinstance(flag, Flag):
      raise IllegalFlagValue(flag)
    if not isinstance(name, type("")):
      raise FlagsError("Flag name must be a string")
    if len(name) == 0:
      raise FlagsError("Flag name cannot be empty")
    # If running under pychecker, duplicate keys are likely to be
    # defined.  Disable check for duplicate keys when pycheck'ing.
    if (name in fl and not flag.allow_override and
        not fl[name].allow_override and not _RUNNING_PYCHECKER):
      module, module_name = _GetCallingModuleObjectAndName()
      if (self.FindModuleDefiningFlag(name) == module_name and
          id(module) != self.FindModuleIdDefiningFlag(name)):
        # If the flag has already been defined by a module with the same name,
        # but a different ID, we can stop here because it indicates that the
        # module is simply being imported a subsequent time.
        return
      raise DuplicateFlagError(name, self)
    short_name = flag.short_name
    if short_name is not None:
      # The short name gets the same duplicate checking, then maps to
      # the same Flag object as the long name.
      if (short_name in fl and not flag.allow_override and
          not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
        raise DuplicateFlagError(short_name, self)
      fl[short_name] = flag
    fl[name] = flag
    global _exported_flags
    _exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
self._AssertValidators(fl[name].validators)
return value
def _AssertAllValidators(self):
all_validators = set()
for flag in self.FlagDict().itervalues():
for validator in flag.validators:
all_validators.add(validator)
self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(gflags_validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValue: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.Verify(self)
except gflags_validators.Error, e:
message = validator.PrintFlagsWithValues(self)
raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
Note: this is non trivial: in addition to its normal name, a flag
may have a short name too. In self.FlagDict(), both the normal and
the short name are mapped to the same flag object. E.g., calling
only "del FLAGS.short_name" is not unregistering the corresponding
Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
gflags.DEFINE_integer('foo', 1, 'Integer flag.')
del gflags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
# we delete the occurrences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
# while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
fl[name].SetDefault(value)
self._AssertValidators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return iter(self.FlagDict())
  def __call__(self, argv):
    """Parses flags from argv; stores parsed flags into this FlagValues object.

    All unparsed arguments are returned.  Flags are parsed using the GNU
    Program Argument Syntax Conventions, using getopt:

    http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt

    Args:
      argv: argument list. Can be of any type that may be converted to a list.

    Returns:
      The list of arguments not parsed as options, including argv[0]

    Raises:
      FlagsError: on any parsing error
    """
    # Support any sequence type that can be converted to a list
    argv = list(argv)
    shortopts = ""
    longopts = []
    fl = self.FlagDict()
    # This pre parses the argv list for --flagfile=<> options.
    argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
    # Correct the argv to support the google style of passing boolean
    # parameters.  Boolean parameters may be passed by using --mybool,
    # --nomybool, --mybool=(true|false|1|0).  getopt does not support
    # having options that may or may not have a parameter.  We replace
    # instances of the short form --mybool and --nomybool with their
    # full forms: --mybool=(true|false).
    original_argv = list(argv)  # list() makes a copy
    shortest_matches = None
    for name, flag in fl.items():
      if not flag.boolean:
        continue
      if shortest_matches is None:
        # Determine the smallest allowable prefix for all flag names
        # (computed lazily, only if at least one boolean flag exists).
        shortest_matches = self.ShortestUniquePrefixes(fl)
      no_name = 'no' + name
      prefix = shortest_matches[name]
      no_prefix = shortest_matches[no_name]
      # Replace all occurrences of this boolean with extended forms
      for arg_idx in range(1, len(argv)):
        arg = argv[arg_idx]
        if arg.find('=') >= 0: continue
        # An arg matches if it starts with the unique prefix and is
        # itself a prefix of the full flag spelling.
        if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
          argv[arg_idx] = ('--%s=true' % name)
        elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
          argv[arg_idx] = ('--%s=false' % name)
    # Loop over all of the flags, building up the lists of short options
    # and long options that will be passed to getopt.  Short options are
    # specified as a string of letters, each letter followed by a colon
    # if it takes an argument.  Long options are stored in an array of
    # strings.  Each string ends with an '=' if it takes an argument.
    for name, flag in fl.items():
      longopts.append(name + "=")
      if len(name) == 1:  # one-letter option: allow short flag type also
        shortopts += name
        if not flag.boolean:
          shortopts += ":"
    longopts.append('undefok=')
    undefok_flags = []
    # In case --undefok is specified, loop to pick up unrecognized
    # options one by one.
    unrecognized_opts = []
    args = argv[1:]
    while True:
      try:
        if self.__dict__['__use_gnu_getopt']:
          optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
        else:
          optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
        break
      except getopt.GetoptError, e:
        if not e.opt or e.opt in fl:
          # Not an unrecognized option, re-raise the exception as a FlagsError
          raise FlagsError(e)
        # Remove offender from args and try again
        for arg_index in range(len(args)):
          if ((args[arg_index] == '--' + e.opt) or
              (args[arg_index] == '-' + e.opt) or
              (args[arg_index].startswith('--' + e.opt + '='))):
            unrecognized_opts.append((e.opt, args[arg_index]))
            args = args[0:arg_index] + args[arg_index+1:]
            break
        else:
          # We should have found the option, so we don't expect to get
          # here.  We could assert, but raising the original exception
          # might work better.
          raise FlagsError(e)
    for name, arg in optlist:
      if name == '--undefok':
        flag_names = arg.split(',')
        undefok_flags.extend(flag_names)
        # For boolean flags, if --undefok=boolflag is specified, then we should
        # also accept --noboolflag, in addition to --boolflag.
        # Since we don't know the type of the undefok'd flag, this will affect
        # non-boolean flags as well.
        # NOTE: You shouldn't use --undefok=noboolflag, because then we will
        # accept --nonoboolflag here.  We are choosing not to do the conversion
        # from noboolflag -> boolflag because of the ambiguity that flag names
        # can start with 'no'.
        undefok_flags.extend('no' + name for name in flag_names)
        continue
      if name.startswith('--'):
        # long option
        name = name[2:]
        short_option = 0
      else:
        # short option
        name = name[1:]
        short_option = 1
      if name in fl:
        flag = fl[name]
        # A boolean short option has no '=value' part; treat presence as true.
        if flag.boolean and short_option: arg = 1
        flag.Parse(arg)
    # If there were unrecognized options, raise an exception unless
    # the options were named via --undefok.
    for opt, value in unrecognized_opts:
      if opt not in undefok_flags:
        raise UnrecognizedFlagError(opt, value)
    if unparsed_args:
      if self.__dict__['__use_gnu_getopt']:
        # if using gnu_getopt just return the program name + remainder of argv.
        ret_val = argv[:1] + unparsed_args
      else:
        # unparsed_args becomes the first non-flag detected by getopt to
        # the end of argv.  Because argv may have been modified above,
        # return original_argv for this region.
        ret_val = argv[:1] + original_argv[-len(unparsed_args):]
    else:
      ret_val = argv[:1]
    self._AssertAllValidators()
    return ret_val
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return list(self.FlagDict())
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
  def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
    """Appends wrapped help text for each flag in flaglist to output_lines.

    Args:
      flaglist: A list of Flag objects to describe.
      output_lines: A list of strings; one formatted entry per flag is
        appended to it.
      prefix: A string prepended to each generated line (indentation).
    """
    fl = self.FlagDict()
    special_fl = _SPECIAL_FLAGS.FlagDict()
    # Sort by flag name for stable, alphabetical output.
    flaglist = [(flag.name, flag) for flag in flaglist]
    flaglist.sort()
    flagset = {}
    for (name, flag) in flaglist:
      # It's possible this flag got deleted or overridden since being
      # registered in the per-module flaglist.  Check now against the
      # canonical source of current flag information, the FlagDict.
      if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
        # a different flag is using this name now
        continue
      # only print help once
      if flag in flagset: continue
      flagset[flag] = 1
      flaghelp = ""
      # Build "-s,--[no]name:  help" with the short name (if any) first.
      if flag.short_name: flaghelp += "-%s," % flag.short_name
      if flag.boolean:
        flaghelp += "--[no]%s" % flag.name + ":"
      else:
        flaghelp += "--%s" % flag.name + ":"
      flaghelp += " "
      if flag.help:
        flaghelp += flag.help
      flaghelp = TextWrap(flaghelp, indent=prefix+" ",
                          firstline_indent=prefix)
      # Default value and syntactic help each go on their own wrapped line.
      if flag.default_as_str:
        flaghelp += "\n"
        flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
                             indent=prefix+" ")
      if flag.parser.syntactic_help:
        flaghelp += "\n"
        flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
                             indent=prefix+" ")
      output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
  def ShortestUniquePrefixes(self, fl):
    """Returns: dictionary; maps flag names to their shortest unique prefix.

    Args:
      fl: A dictionary mapping flag names to Flag objects (as returned
        by FlagDict()).  Boolean flags also contribute their negated
        'no<name>' spelling, since both forms may appear on a command
        line.
    """
    # Sort the list of flag names
    sorted_flags = []
    for name, flag in fl.items():
      sorted_flags.append(name)
      if flag.boolean:
        sorted_flags.append('no%s' % name)
    sorted_flags.sort()
    # For each name in the sorted list, determine the shortest unique
    # prefix by comparing itself to the next name and to the previous
    # name (the latter check uses cached info from the previous loop).
    shortest_matches = {}
    prev_idx = 0
    for flag_idx in range(len(sorted_flags)):
      curr = sorted_flags[flag_idx]
      if flag_idx == (len(sorted_flags) - 1):
        next = None
      else:
        next = sorted_flags[flag_idx+1]
        next_len = len(next)
      for curr_idx in range(len(curr)):
        if (next is None
            or curr_idx >= next_len
            or curr[curr_idx] != next[curr_idx]):
          # curr longer than next or no more chars in common
          shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
          prev_idx = curr_idx
          break
      else:
        # curr shorter than (or equal to) next
        shortest_matches[curr] = curr
        prev_idx = curr_idx + 1  # next will need at least one more char
    return shortest_matches
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
  def __GetFlagFileLines(self, filename, parsed_file_list):
    """Returns the useful (!=comments, etc) lines from a file with flags.

    Args:
      filename: A string, the name of the flag file.
      parsed_file_list: A list of the names of the files we have
        already read.  MUTATED BY THIS FUNCTION.

    Returns:
      List of strings. See the note below.

    NOTE(springer): This function checks for a nested --flagfile=<foo>
    tag and handles the lower file recursively. It returns a list of
    all the lines that _could_ contain command flags. This is
    EVERYTHING except whitespace lines and comments (lines starting
    with '#' or '//').

    Raises:
      CantOpenFlagFileError: if the file cannot be opened.
    """
    line_list = []  # All line from flagfile.
    flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
    try:
      file_obj = open(filename, 'r')
    except IOError, e_msg:
      raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
    line_list = file_obj.readlines()
    file_obj.close()
    # Record this file before recursing so a nested --flagfile pointing
    # back at it is detected as circular instead of recursing forever.
    parsed_file_list.append(filename)
    # This is where we check each line in the file we just read.
    for line in line_list:
      if line.isspace():
        pass
      # Checks for comment (a line that starts with '#').
      elif line.startswith('#') or line.startswith('//'):
        pass
      # Checks for a nested "--flagfile=<bar>" flag in the current file.
      # If we find one, recursively parse down into that file.
      elif self.__IsFlagFileDirective(line):
        sub_filename = self.ExtractFilename(line)
        # We do a little safety check for reparsing a file we've already done.
        if not sub_filename in parsed_file_list:
          included_flags = self.__GetFlagFileLines(sub_filename,
                                                  parsed_file_list)
          flag_line_list.extend(included_flags)
        else:  # Case of hitting a circularly included file.
          sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
                           (sub_filename,))
      else:
        # Any line that's not a comment or a nested flagfile should get
        # copied into 2nd position.  This leaves earlier arguments
        # further back in the list, thus giving them higher priority.
        flag_line_list.append(line.strip())
    return flag_line_list
  def ReadFlagsFromFiles(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.

    Args:
      argv: A list of strings, usually sys.argv[1:], which may contain one or
        more flagfile directives of the form --flagfile="./filename".
        Note that the name of the program (sys.argv[0]) should be omitted.
      force_gnu: If False, --flagfile parsing obeys normal flag semantics.
        If True, --flagfile parsing instead follows gnu_getopt semantics.
        *** WARNING *** force_gnu=False may become the future default!

    Returns:
      A new list which has the original list combined with what we read
      from any flagfile(s).

    References: Global gflags.FLAG class instance.

    This function should be called before the normal FLAGS(argv) call.
    This function scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list between the
    first item of the list and any subsequent items in the list.

    Note that your application's flags are still defined the usual way
    using gflags DEFINE_flag() type functions.

    Notes (assuming we're getting a commandline of some sort as our input):
    --> Flags from the command line argv _should_ always take precedence!
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be processed after the parent flag file is done.
    --> For duplicate flags, first one we hit should "win".
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    parsed_file_list = []
    rest_of_args = argv
    new_argv = []
    # Consume argv one argument at a time, expanding flagfile directives
    # in place as they are encountered.
    while rest_of_args:
      current_arg = rest_of_args[0]
      rest_of_args = rest_of_args[1:]
      if self.__IsFlagFileDirective(current_arg):
        # This handles the case of -(-)flagfile foo.  In this case the
        # next arg really is part of this one.
        if current_arg == '--flagfile' or current_arg == '-flagfile':
          if not rest_of_args:
            raise IllegalFlagValue('--flagfile with no argument')
          flag_filename = os.path.expanduser(rest_of_args[0])
          rest_of_args = rest_of_args[1:]
        else:
          # This handles the case of (-)-flagfile=foo.
          flag_filename = self.ExtractFilename(current_arg)
        new_argv.extend(
            self.__GetFlagFileLines(flag_filename, parsed_file_list))
      else:
        new_argv.append(current_arg)
        # Stop parsing after '--', like getopt and gnu_getopt.
        if current_arg == '--':
          break
        # Stop parsing after a non-flag, like getopt.
        if not current_arg.startswith('-'):
          if not force_gnu and not self.__dict__['__use_gnu_getopt']:
            break
    # Whatever remained after we stopped scanning is passed through
    # unmodified (flagfile directives in it are NOT expanded).
    if rest_of_args:
      new_argv.extend(rest_of_args)
    return new_argv
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
from http://code.google.com/p/google-gflags
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
  def WriteHelpInXMLFormat(self, outfile=None):
    """Outputs flag documentation in XML format.

    NOTE: We use element names that are consistent with those used by
    the C++ command-line flag library, from
    http://code.google.com/p/google-gflags
    We also use a few new elements (e.g., <key>), but we do not
    interfere / overlap with existing XML elements used by the C++
    library.  Please maintain this consistency.

    Args:
      outfile: File object we write to.  Default None means sys.stdout.
    """
    outfile = outfile or sys.stdout
    outfile.write('<?xml version=\"1.0\"?>\n')
    outfile.write('<AllFlags>\n')
    indent = '  '
    # <program> holds just the basename of the running script.
    _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                           indent)
    # Use the main module's docstring as the usage text, falling back to a
    # generic "USAGE" line; '%s' placeholders in the docstring are filled
    # with the program name.
    usage_doc = sys.modules['__main__'].__doc__
    if not usage_doc:
      usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    else:
      usage_doc = usage_doc.replace('%s', sys.argv[0])
    _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
    # Get list of key flags for the main module.
    key_flags = self._GetKeyFlagsForModule(_GetMainModule())
    # Sort flags by declaring module name and next by flag name.
    flags_by_module = self.FlagsByModuleDict()
    all_module_names = list(flags_by_module.keys())
    all_module_names.sort()
    for module_name in all_module_names:
      # Decorate-sort by flag name so output order is deterministic.
      flag_list = [(f.name, f) for f in flags_by_module[module_name]]
      flag_list.sort()
      for unused_flag_name, flag in flag_list:
        # Mark flags that are key for the main module with <key>yes</key>.
        is_key = flag in key_flags
        flag.WriteInfoInXMLFormat(outfile, module_name,
                                  is_key=is_key, indent=indent)
    outfile.write('</AllFlags>\n')
    outfile.flush()
def AddValidator(self, validator):
"""Register new flags validator to be checked.
Args:
validator: gflags_validators.Validator
Raises:
AttributeError: if validators work with a non-existing flag.
"""
for flag_name in validator.GetFlagsNames():
flag = self.FlagDict()[flag_name]
flag.validators.append(validator)
# end of FlagValues definition
# The global FlagValues instance; shared by every module that imports
# this library and used as the default for all DEFINE_* functions.
FLAGS = FlagValues()
def _StrOrUnicode(value):
  """Converts value to a python string or, if necessary, unicode-string.

  Args:
    value: Any Python object.

  Returns:
    str(value), or unicode(value) if the str() conversion raises
    UnicodeEncodeError (Python 2 non-ASCII data).
  """
  try:
    return str(value)
  except UnicodeEncodeError:
    # Python 2 only: str() on a unicode object containing non-ASCII
    # characters raises UnicodeEncodeError; keep the unicode result.
    return unicode(value)
def _MakeXMLSafe(s):
  """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
  # NOTE(review): cgi.escape was removed in Python 3.8; html.escape is the
  # modern equivalent if this file is ever ported.
  s = cgi.escape(s)  # Escape <, >, and &
  # Remove characters that cannot appear in an XML 1.0 document
  # (http://www.w3.org/TR/REC-xml/#charsets).
  #
  # NOTE: if there are problems with current solution, one may move to
  # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
  s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
  # Convert non-ascii characters to entities.  Note: requires python >=2.3
  # e.g. u'\u0388' -> '&#904;' (a byte string on Python 2).
  s = s.encode('ascii', 'xmlcharrefreplace')
  return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
  """Writes a simple XML element.

  Args:
    outfile: File object we write the XML element to.
    name: A string, the name of XML element.
    value: A Python object, whose string representation will be used
      as the value of the XML element.  Booleans are rendered in the
      C++ flag library's style (lowercase 'true'/'false').
    indent: A string, prepended to each line of generated output.
  """
  text = _StrOrUnicode(value)
  if isinstance(value, bool):
    # Display boolean values as the C++ flag library does: no caps.
    text = text.lower()
  outfile.write('%s<%s>%s</%s>\n' % (indent, name, _MakeXMLSafe(text), name))
class Flag:
  """Information about a command-line flag.

  'Flag' objects define the following fields:
    .name - the name for this flag
    .default - the default value for this flag
    .default_as_str - default value as repr'd string, e.g., "'true'" (or None)
    .value - the most recent parsed value of this flag; set by Parse()
    .help - a help string or None if no help is available
    .short_name - the single letter alias for this flag (or None)
    .boolean - if 'true', this flag does not accept arguments
    .present - true if this flag was parsed from command line flags.
    .parser - an ArgumentParser object
    .serializer - an ArgumentSerializer object
    .allow_override - the flag may be redefined without raising an error

  The only public method of a 'Flag' object is Parse(), but it is
  typically only called by a 'FlagValues' object.  The Parse() method is
  a thin wrapper around the 'ArgumentParser' Parse() method.  The parsed
  value is saved in .value, and the .present attribute is updated.  If
  this flag was already present, a FlagsError is raised.

  Parse() is also called during __init__ to parse the default value and
  initialize the .value attribute.  This enables other python modules to
  safely use flags even if the __main__ module neglects to parse the
  command line arguments.  The .present attribute is cleared after
  __init__ parsing.  If the default value is set to None, then the
  __init__ parsing step is skipped and the .value attribute is
  initialized to None.

  Note: The default value is also presented to the user in the help
  string, so it is important that it be a legal value for this flag.
  """

  def __init__(self, parser, serializer, name, default, help_string,
               short_name=None, boolean=0, allow_override=0):
    self.name = name
    if not help_string:
      # Always display something in help output.
      help_string = '(no help available)'
    self.help = help_string
    self.short_name = short_name
    self.boolean = boolean
    self.present = 0
    self.parser = parser
    self.serializer = serializer
    self.allow_override = allow_override
    self.value = None
    self.validators = []
    # SetDefault() parses the default (unless it is None), populating
    # .value and .default_as_str, and clears .present via Unparse().
    self.SetDefault(default)

  def __hash__(self):
    # Identity-based hash, paired with the identity-based __eq__ below.
    return hash(id(self))

  def __eq__(self, other):
    return self is other

  def __lt__(self, other):
    # Arbitrary but stable ordering (by id) so lists of Flags can be sorted.
    if isinstance(other, Flag):
      return id(self) < id(other)
    return NotImplemented

  def __GetParsedValueAsString(self, value):
    # Renders a parsed value, repr'd, the way it would look on a command
    # line; used to build .default_as_str for help output.
    if value is None:
      return None
    if self.serializer:
      return repr(self.serializer.Serialize(value))
    if self.boolean:
      if value:
        return repr('true')
      else:
        return repr('false')
    return repr(_StrOrUnicode(value))

  def Parse(self, argument):
    # Delegate to the installed parser; .present counts how many times the
    # flag was parsed.
    try:
      self.value = self.parser.Parse(argument)
    except ValueError, e:  # recast ValueError as IllegalFlagValue
      raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
    self.present += 1

  def Unparse(self):
    # Restore the flag to its unparsed state: value reset to the (parsed)
    # default, .present cleared.
    if self.default is None:
      self.value = None
    else:
      self.Parse(self.default)
    self.present = 0

  def Serialize(self):
    # Returns the command-line form of this flag ('--name=value', or
    # '--name' / '--noname' for booleans); '' when the value is None.
    if self.value is None:
      return ''
    if self.boolean:
      if self.value:
        return "--%s" % self.name
      else:
        return "--no%s" % self.name
    else:
      if not self.serializer:
        raise FlagsError("Serializer not present for flag %s" % self.name)
      return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))

  def SetDefault(self, value):
    """Changes the default value (and current value too) for this Flag."""
    # We can't allow a None override because it may end up not being
    # passed to C++ code when we're overriding C++ flags.  So we
    # cowardly bail out until someone fixes the semantics of trying to
    # pass None to a C++ flag.  See swig_flags.Init() for details on
    # this behavior.
    # TODO(olexiy): Users can directly call this method, bypassing all flags
    # validators (we don't have FlagValues here, so we can not check
    # validators).
    # The simplest solution I see is to make this method private.
    # Another approach would be to store reference to the corresponding
    # FlagValues with each flag, but this seems to be an overkill.
    if value is None and self.allow_override:
      raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
    self.default = value
    self.Unparse()
    self.default_as_str = self.__GetParsedValueAsString(self.value)

  def Type(self):
    """Returns: a string that describes the type of this Flag."""
    # NOTE: we use strings, and not the types.*Type constants because
    # our flags can have more exotic types, e.g., 'comma separated list
    # of strings', 'whitespace separated list of strings', etc.
    return self.parser.Type()

  def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
    """Writes common info about this flag, in XML format.

    This is information that is relevant to all flags (e.g., name,
    meaning, etc.).  If you defined a flag that has some other pieces of
    info, then please override _WriteCustomInfoInXMLFormat.

    Please do NOT override this method.

    Args:
      outfile: File object we write to.
      module_name: A string, the name of the module that defines this flag.
      is_key: A boolean, True iff this flag is key for main module.
      indent: A string that is prepended to each generated line.
    """
    outfile.write(indent + '<flag>\n')
    inner_indent = indent + '  '
    if is_key:
      _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
    _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
    # Print flag features that are relevant for all flags.
    _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
    if self.short_name:
      _WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
                             inner_indent)
    if self.help:
      _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
    # The default flag value can either be represented as a string like on the
    # command line, or as a Python object.  We serialize this value in the
    # latter case in order to remain consistent.
    if self.serializer and not isinstance(self.default, str):
      default_serialized = self.serializer.Serialize(self.default)
    else:
      default_serialized = self.default
    _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
    _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
    _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
    # Print extra flag features this flag may have.
    self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
    outfile.write(indent + '</flag>\n')

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra info about this flag, in XML format.

    "Extra" means "not already printed by WriteInfoInXMLFormat above."

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # Usually, the parser knows the extra details about the flag, so
    # we just forward the call to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
class ArgumentParser(object):
  """Base class used to parse and convert arguments.

  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.

  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.

  Argument parser classes must be stateless, since instances are cached
  and shared between flags.  Initializer arguments are allowed, but all
  member variables must be derived from initializer arguments only.
  """
  # Python 2 metaclass hook: enables instance caching via
  # _ArgumentParserCache (ignored on Python 3).
  __metaclass__ = _ArgumentParserCache

  syntactic_help = ""

  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument

  def Type(self):
    # Human-readable type name shown in help and XML output.
    return 'string'

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Base parsers have no extra XML info; subclasses override as needed.
    pass
class ArgumentSerializer:
  """Base class for generating string representations of a flag value."""

  def Serialize(self, value):
    # Delegate to the module helper so non-ASCII unicode values survive
    # on Python 2.
    return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
  """Serializes a list flag value by joining its items with a separator."""

  def __init__(self, list_sep):
    # Separator placed between serialized list items (e.g. ',' or ' ').
    self.list_sep = list_sep

  def Serialize(self, value):
    serialized_items = [_StrOrUnicode(item) for item in value]
    return self.list_sep.join(serialized_items)
# Flags validators
def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
  """Adds a constraint, which will be enforced during program execution.

  The constraint is validated when flags are initially parsed, and after each
  change of the corresponding flag's value.

  Args:
    flag_name: string, name of the flag to be checked.
    checker: method to validate the flag.
      input - value of the corresponding flag (string, boolean, etc.
        This value will be passed to checker by the library).  See file's
        docstring for examples.
      output - Boolean.
        Must return True if validator constraint is satisfied.
        If constraint is not satisfied, it should either return False or
        raise gflags_validators.Error(desired_error_message).
    message: error text to be shown to the user if checker returns False.
      If checker raises gflags_validators.Error, message from the raised
      Error will be shown.
    flag_values: FlagValues object the flag is registered with.

  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  validator = gflags_validators.SimpleValidator(flag_name, checker, message)
  flag_values.AddValidator(validator)
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
  """Ensure that flag is not None during program execution.

  Registers a flag validator, which will follow usual validator
  rules.

  Args:
    flag_name: string, name of the flag
    flag_values: FlagValues

  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  def _NotNone(value):
    # The flag passes validation iff it was given some value.
    return value is not None
  RegisterValidator(flag_name,
                    _NotNone,
                    message='Flag --%s must be specified.' % flag_name,
                    flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
  """Enforce lower and upper bounds for numeric flags.

  Args:
    parser: NumericParser (either FloatParser or IntegerParser).  Provides
      lower and upper bounds, and help text to display.
    name: string, name of the flag
    flag_values: FlagValues
  """
  if parser.lower_bound is None and parser.upper_bound is None:
    # Unbounded flag: nothing to validate.
    return

  def Checker(value):
    # None (flag unset) and in-bounds values pass; anything else raises.
    if value is None or not parser.IsOutsideBounds(value):
      return True
    raise gflags_validators.Error('%s is not %s' % (value,
                                                    parser.syntactic_help))

  RegisterValidator(name, Checker, flag_values=flag_values)
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
  """Registers a generic Flag object.

  NOTE: in the docstrings of all DEFINE* functions, "registers" is short
  for "creates a new flag and registers it".

  Auxiliary function: clients should use the specialized DEFINE_<type>
  function instead.

  Args:
    parser: ArgumentParser that is used to parse the flag arguments.
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
      Flag __init__.
  """
  flag = Flag(parser, serializer, name, default, help, **args)
  DEFINE_flag(flag, flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
  """Registers a 'Flag' object with a 'FlagValues' object.

  By default, the global FLAGS 'FlagValue' object is used.

  Typical users will use one of the more specialized DEFINE_xxx
  functions, such as DEFINE_string or DEFINE_integer.  But developers
  who need to create Flag objects themselves should use this function
  to register their flags.

  Args:
    flag: A Flag object, the flag to register.
    flag_values: FlagValues object the flag is registered with.
  """
  # copying the reference to flag_values prevents pychecker warnings
  fv = flag_values
  fv[flag.name] = flag
  # Tell flag_values who's defining the flag.
  if isinstance(flag_values, FlagValues):
    # Regarding the above isinstance test: some users pass funny
    # values of flag_values (e.g., {}) in order to avoid the flag
    # registration (in the past, there used to be a flag_values ==
    # FLAGS test here) and redefine flags with the same name (e.g.,
    # debug).  To avoid breaking their code, we perform the
    # registration only if flag_values is a real FlagValues object.
    module, module_name = _GetCallingModuleObjectAndName()
    flag_values._RegisterFlagByModule(module_name, flag)
    flag_values._RegisterFlagByModuleId(id(module), flag)
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.

  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.

  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in
      flag_names have registered with (the value of the flag_values
      argument from the DEFINE_* calls that defined those flags).
      This should almost never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many
      other things) keeps track of the key flags for each module.
      Default None means "same as flag_values".  This should almost
      never need to be overridden.

  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not
      defined yet.
  """
  key_flag_values = key_flag_values or flag_values
  # Key flags are recorded against the module that (indirectly) called us.
  declaring_module = _GetCallingModule()
  for name in flag_names:
    if name not in flag_values:
      raise UnrecognizedFlagError(name)
    key_flag_values._RegisterKeyFlagForModule(
        declaring_module, flag_values.FlagDict()[name])
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
  """Declares one flag as key to the current module.

  Key flags are flags that are deemed really important for a module.
  They are important when listing help messages; e.g., if the
  --helpshort command-line flag is used, then only the key flags of the
  main module are listed (instead of all flags, as in the case of
  --help).

  Sample usage:

    gflags.DECLARE_key_flag('flag_1')

  Args:
    flag_name: A string, the name of an already declared flag.
      (Redeclaring flags as key, including flags implicitly key
      because they were declared in this module, is a no-op.)
    flag_values: A FlagValues object.  This should almost never
      need to be overridden.
  """
  if flag_name in _SPECIAL_FLAGS:
    # Take care of the special flags, e.g., --flagfile, --undefok.
    # These flags are defined in _SPECIAL_FLAGS, and are treated
    # specially during flag parsing, taking precedence over the
    # user-defined flags.
    _InternalDeclareKeyFlags([flag_name],
                             flag_values=_SPECIAL_FLAGS,
                             key_flag_values=flag_values)
    return
  _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
  """Declares that all flags key to a module are key to the current module.

  Args:
    module: A module object.
    flag_values: A FlagValues object.  This should almost never need
      to be overridden.

  Raises:
    FlagsError: When given an argument that is a module name (a
      string), instead of a module object.
  """
  # NOTE(salcianu): an even better test would be if not
  # isinstance(module, types.ModuleType) but I didn't want to import
  # types for such a tiny use.
  if isinstance(module, str):
    raise FlagsError('Received module name %s; expected a module object.'
                     % module)
  # Re-declare, for the caller, every flag already key to `module`.
  _InternalDeclareKeyFlags(
      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
      flag_values=flag_values)
  # If module is this flag module, take _SPECIAL_FLAGS into account.
  if module == _GetThisModuleObjectAndName()[0]:
    _InternalDeclareKeyFlags(
        # As we associate flags with _GetCallingModuleObjectAndName(), the
        # special flags defined in this module are incorrectly registered with
        # a different module.  So, we can't use _GetKeyFlagsForModule.
        # Instead, we take all flags from _SPECIAL_FLAGS (a private
        # FlagValues, where no other module should register flags).
        [f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
        flag_values=_SPECIAL_FLAGS,
        key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be any string."""
  # A plain ArgumentParser accepts any string unmodified.
  DEFINE(ArgumentParser(), name, default, help, flag_values,
         ArgumentSerializer(), **args)
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def Convert(self, argument):
    """Converts the argument to a boolean; raise ValueError on errors.

    Args:
      argument: A string ('true'/'t'/'1' or 'false'/'f'/'0', any case),
        or an actual boolean / 0 / 1.

    Returns:
      The corresponding bool value.

    Raises:
      ValueError: if the argument is neither a recognized boolean string
        nor a genuine boolean value.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # str subclasses; .lower() is computed once instead of per branch.
    if isinstance(argument, str):
      lowered = argument.lower()
      if lowered in ['true', 't', '1']:
        return True
      elif lowered in ['false', 'f', '0']:
        return False
    bool_argument = bool(argument)
    if argument == bool_argument:
      # The argument is a valid boolean (True, False, 0, or 1), and not just
      # something that always converts to bool (list, string, int, etc.).
      return bool_argument
    raise ValueError('Non-boolean argument to boolean flag', argument)

  def Parse(self, argument):
    return self.Convert(argument)

  def Type(self):
    return 'bool'
class BooleanFlag(Flag):
  """Basic boolean flag.

  Boolean flags do not take any arguments, and their value is either
  True (1) or False (0).  The false value is specified on the command
  line by prepending the word 'no' to either the long or the short flag
  name.

  For example, if a Boolean flag was created whose long name was
  'update' and whose short name was 'x', then this flag could be
  explicitly unset through either --noupdate or --nox.
  """

  def __init__(self, name, default, help, short_name=None, **args):
    # boolean=1 tells the Flag base class this flag takes no argument.
    Flag.__init__(self, BooleanParser(), None, name, default, help,
                  short_name, 1, **args)
    if not self.help:
      self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
  """Registers a boolean flag.

  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag

  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.
  """
  flag = BooleanFlag(name, default, help, **args)
  DEFINE_flag(flag, flag_values)

# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
  """
  HelpFlag is a special boolean flag that prints usage information and
  raises a SystemExit exception if it is ever found in the command
  line arguments.  Note this is called with allow_override=1, so other
  apps can define their own --help flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)
  def Parse(self, arg):
    # Only act when the flag was actually supplied on the command line.
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      # Exit after printing help, mirroring the C++ gflags behavior.
      sys.exit(1)
class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format."""

  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)

  def Parse(self, arg):
    # Guard clause: do nothing unless the flag was actually set.
    if not arg:
      return
    FLAGS.WriteHelpInXMLFormat(sys.stdout)
    sys.exit(1)
class HelpshortFlag(BooleanFlag):
  """
  HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
  if it is ever found in the command line arguments.  Note this is
  called with allow_override=1, so other apps can define their own
  --helpshort flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "helpshort", 0,
                         "show usage only for this module", allow_override=1)
  def Parse(self, arg):
    # Only act when the flag was actually supplied on the command line.
    if arg:
      doc = sys.modules["__main__"].__doc__
      # Unlike --help, only the main module's flags are listed.
      flags = FLAGS.MainModuleHelp()
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      # Exit after printing help, mirroring the C++ gflags behavior.
      sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
  """Parser of numeric values.

  Parsed value may be bounded to a given upper and lower bound.
  """

  def IsOutsideBounds(self, val):
    # A value is out of bounds when it falls below an existing lower
    # bound or above an existing upper bound.
    if self.lower_bound is not None and val < self.lower_bound:
      return True
    if self.upper_bound is not None and val > self.upper_bound:
      return True
    return False

  def Parse(self, argument):
    value = self.Convert(argument)
    if self.IsOutsideBounds(value):
      raise ValueError("%s is not %s" % (value, self.syntactic_help))
    return value

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Emit one element per bound that is actually set.
    for tag, bound in (('lower_bound', self.lower_bound),
                       ('upper_bound', self.upper_bound)):
      if bound is not None:
        _WriteSimpleXMLElement(outfile, tag, bound, indent)

  def Convert(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
  """Parser of floating point values.

  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "a"
  number_name = "number"
  syntactic_help = " ".join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    super(FloatParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Build a human-readable description of the accepted range.  The order
    # of the elif branches matters: the "== 0" special cases must be tested
    # before the generic "is not None" cases.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh

  def Convert(self, argument):
    """Converts argument to a float; raises ValueError on errors."""
    return float(argument)

  def Type(self):
    return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
                 flag_values=FLAGS, **args):
  """Registers a flag whose value must be a float.

  If lower_bound or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = FloatParser(lower_bound, upper_bound)
  DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
         **args)
  # Bounded floats also get a validator enforcing the range at runtime.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
  """Parser of an integer value.

  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "an"
  number_name = "integer"
  syntactic_help = " ".join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    super(IntegerParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Build a human-readable description of the accepted range.  The order
    # of the elif branches matters: the specific constants (1, -1, 0) must
    # be tested before the generic "is not None" cases.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 1:
      sh = "a positive %s" % self.number_name
    elif upper_bound == -1:
      sh = "a negative %s" % self.number_name
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh

  def Convert(self, argument):
    __pychecker__ = 'no-returnvalues'
    if type(argument) == str:
      base = 10
      # Recognize hexadecimal literals of the form "0x...".  NOTE(review):
      # a signed hex string like "-0x10" is not recognized here and will be
      # parsed (and rejected) in base 10 — presumably intentional; confirm.
      if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
        base = 16
      return int(argument, base)
    else:
      return int(argument)

  def Type(self):
    return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
                   flag_values=FLAGS, **args):
  """Registers a flag whose value must be an integer.

  If lower_bound, or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = IntegerParser(lower_bound, upper_bound)
  DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
         **args)
  # Bounded integers also get a validator enforcing the range at runtime.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
  """Parser of a string enum value (a string value from a given set).

  If enum_values (see below) is not specified, any string is allowed.
  """

  def __init__(self, enum_values=None):
    super(EnumParser, self).__init__()
    self.enum_values = enum_values

  def Parse(self, argument):
    # An empty/None enum_values set means every string is acceptable.
    if self.enum_values:
      if argument not in self.enum_values:
        raise ValueError("value should be one of <%s>" %
                         "|".join(self.enum_values))
    return argument

  def Type(self):
    return 'string enum'
class EnumFlag(Flag):
  """Basic enum flag; its value can be any string from list of enum_values."""

  def __init__(self, name, default, help, enum_values=None,
               short_name=None, **args):
    enum_values = enum_values or []
    Flag.__init__(self, EnumParser(enum_values), ArgumentSerializer(),
                  name, default, help, short_name, **args)
    if not self.help:
      self.help = "an enum string"
    # Prefix the help text with the set of legal values.
    self.help = "<%s>: %s" % ("|".join(enum_values), self.help)

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    # One <enum_value> element per legal value.
    for enum_value in self.parser.enum_values:
      _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
                **args):
  """Registers a flag whose value can be any string from enum_values."""
  flag = EnumFlag(name, default, help, enum_values, **args)
  DEFINE_flag(flag, flag_values)
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
  """Base class for a parser of lists of strings.

  To extend, inherit from this class; from the subclass __init__, call

    BaseListParser.__init__(self, token, name)

  where token is a character used to tokenize, and name is a description
  of the separator.
  """

  def __init__(self, token=None, name=None):
    assert name
    super(BaseListParser, self).__init__()
    self._token = token
    self._name = name
    self.syntactic_help = "a %s separated list" % self._name

  def Parse(self, argument):
    # Lists pass through untouched; '' means an empty list; any other
    # string is split on the token and each piece is stripped.
    if isinstance(argument, list):
      return argument
    if argument == '':
      return []
    return [piece.strip() for piece in argument.split(self._token)]

  def Type(self):
    return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
  """Parser for a comma-separated list of strings."""

  def __init__(self):
    BaseListParser.__init__(self, ',', 'comma')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    # Record the literal separator so tools can reconstruct list values.
    _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
  """Parser for a whitespace-separated list of strings."""

  def __init__(self):
    # token=None makes str.split() split on any run of whitespace.
    BaseListParser.__init__(self, None, 'whitespace')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes one <list_separator> element per whitespace character.

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    separators = list(string.whitespace)
    separators.sort()
    # Fix: the sorted list was previously built and then ignored — the loop
    # iterated string.whitespace directly, so the sort was dead code.
    for ws_char in separators:
      _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a comma-separated list of strings."""
  DEFINE(ListParser(), name, default, help, flag_values,
         ListSerializer(','), **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a whitespace-separated list of strings.

  Any whitespace can be used as a separator.
  """
  DEFINE(WhitespaceSeparatedListParser(), name, default, help, flag_values,
         ListSerializer(' '), **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
  """A flag that can appear multiple time on the command-line.

  The value of such a flag is a list that contains the individual values
  from all the appearances of that flag on the command-line.

  See the __doc__ for Flag for most behavior of this class.  Only
  differences in behavior are described here:

    * The default value may be either a single value or a list of values.
      A single value is interpreted as the [value] singleton list.

    * The value of the flag is always a list, even if the option was
      only supplied once, and even if the default value is a single
      value
  """

  def __init__(self, *args, **kwargs):
    Flag.__init__(self, *args, **kwargs)
    self.help += ';\n    repeat this option to specify a list of values'

  def Parse(self, arguments):
    """Parses one or more arguments with the installed parser.

    Args:
      arguments: a single argument or a list of arguments (typically a
        list of default values); a single argument is converted
        internally into a list containing one item.
    """
    if not isinstance(arguments, list):
      # Default value may be a list of values.  Most other arguments
      # will not be, so convert them into a single-item list to make
      # processing simpler below.
      arguments = [arguments]
    if self.present:
      # keep a backup reference to list of previously supplied option values
      values = self.value
    else:
      # "erase" the defaults with an empty list
      values = []
    for item in arguments:
      # have Flag superclass parse argument, overwriting self.value reference
      Flag.Parse(self, item)  # also increments self.present
      values.append(self.value)
    # put list of option values back in the 'value' attribute
    self.value = values

  def Serialize(self):
    if not self.serializer:
      raise FlagsError("Serializer not present for flag %s" % self.name)
    if self.value is None:
      return ''
    s = ''
    multi_value = self.value
    # Deliberate trick: temporarily install each element as self.value so
    # the base-class Serialize() renders it; the full list is restored
    # immediately after the loop.
    for self.value in multi_value:
      if s: s += ' '
      s += Flag.Serialize(self)
    self.value = multi_value
    return s

  def Type(self):
    return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
                 **args):
    """Registers a generic MultiFlag that parses its args with a given parser.

    Auxiliary function.  Normal users should NOT use it directly.

    Developers who need to create their own 'Parser' classes for options
    which can appear multiple times can call this module function to
    register their flags.
    """
    multi_flag = MultiFlag(parser, serializer, name, default, help, **args)
    DEFINE_flag(multi_flag, flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of any strings.

    Use the flag on the command line multiple times to place multiple
    string values into the list.  The 'default' may be a single string
    (which will be converted into a single-element list) or a list of
    strings.
    """
    # Plain argument parser/serializer: values are kept verbatim.
    DEFINE_multi(ArgumentParser(), ArgumentSerializer(),
                 name, default, help, flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
                     flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of arbitrary integers.

    Use the flag on the command line multiple times to place multiple
    integer values into the list.  The 'default' may be a single integer
    (which will be converted into a single-element list) or a list of
    integers.
    """
    # Each individual occurrence is range-checked by the integer parser.
    DEFINE_multi(IntegerParser(lower_bound, upper_bound), ArgumentSerializer(),
                 name, default, help, flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
                       flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of arbitrary floats.

    Use the flag on the command line multiple times to place multiple
    float values into the list.  The 'default' may be a single float
    (which will be converted into a single-element list) or a list of
    floats.
    """
    # Each individual occurrence is range-checked by the float parser.
    DEFINE_multi(FloatParser(lower_bound, upper_bound), ArgumentSerializer(),
                 name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())

# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
# These are registered in their own FlagValues instance, separate from FLAGS.
_SPECIAL_FLAGS = FlagValues()

DEFINE_string(
    'flagfile', "",
    "Insert flag definitions from the given file into the command line.",
    _SPECIAL_FLAGS)
DEFINE_string(
    'undefok', "",
    "comma-separated list of flag names that it is okay to specify "
    "on the command line even if the program does not define a flag "
    "with that name. IMPORTANT: flags in this list that have "
    "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactive translator stub for a single target language.

    TODO: implement — currently Translate() only prints the names of the
    strings that need translating.
    '''

    def __init__(self, language):
        '''
        Constructor.

        @param language: the language code this translator will handle
        '''
        self._language = language

    def Translate(self, string_names):
        # Placeholder implementation: just echo the names to be translated.
        print string_names
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
# Matches one line of `hg annotate -c` output: a 12-hex-digit short changeset
# hash, then ':' and the annotated line content.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: if `hg annotate` emits a line we cannot parse
    '''
    # Take output of hg annotate to get the revision of each line.
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index; line numbers are
    # 1-based, so index 0 holds a dummy entry).
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # Bug fix: the original raised a plain string, which is a
            # TypeError on Python >= 2.6; raise a real exception instead.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        line_revs.append(rev_match.group('hash'))
    # Renamed loop variable from `str` to avoid shadowing the builtin.
    for string_info in parsed_file.itervalues():
        # Get the lines that correspond to this string.
        start_line = string_info['startLine']
        end_line = string_info['endLine']
        # Get the revisions that touched those lines.
        revs = [line_revs[line_number]
                for line_number in range(start_line, end_line + 1)]
        # Merge with any revisions that were already there
        # (for explicit revision specification).
        if 'revs' in string_info:
            revs += string_info['revs']
        # Assign the (deduplicated) revisions to the string.
        string_info['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision supersedes another.

    This essentially means that the older revision is an ancestor of the
    newer one.  This also returns True if the two revisions are the same.

    @param filename: the file whose history is consulted
    @param rev1: the revision that may be superseding the other
    @param rev2: the revision that may be superseded
    @return: True if rev1 supersedes rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Ask hg for every ancestor of rev1, one short hash per line, and check
    # whether rev2 appears among them.
    ancestor_lines = _GetOutputLines(
        ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
         '--template', '{node|short}\n', filename])
    return rev2 in ancestor_lines
def NewestRevision(filename, rev1, rev2):
    '''
    Returns which of two revisions is closest to the head of the repository.

    If neither is an ancestor of the other, either one may be returned.

    @param filename: the file whose history is consulted
    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single
    file at a time, only.
    '''

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and
        explicit revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
          <!-- KEEP_PARENT name="bla" -->
          <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting from the
        master file (and the optional revision says that this decision is
        compatible with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        file_obj = open(file)
        self._xml_parser.ParseFile(file_obj)
        file_obj.close()
        return self._all_strings

    def _Reset(self):
        # Clears all per-file parsing state so this instance can be reused.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Expat callback for opening tags; only <string name="..."> matters.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        # Nested <string> elements are not expected in a strings file.
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine' : self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the revision list for this string.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        # Expat callback for closing tags; finalizes and stores the string.
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Character data may arrive in multiple chunks; accumulate them.
        if not self._currentString:
            return
        self._currentStringValue += data

    # Recognizes the KEEP_PARENT comment annotation described in Parse().
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
        r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
        r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
        re.MULTILINE | re.DOTALL)

    def _CommentHandler(self, data):
        # Expat callback for XML comments; looks for KEEP_PARENT markers.
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent' : True,
            'startLine' : self._xml_parser.CurrentLineNumber,
            'endLine' : self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    '''
    Prints command-line usage information and exits with status 1.
    '''
    print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
    print 'Commands are:'
    print ' cleanup'
    print ' translate'
    print ' validate'
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or outdated strings
    from the files for the given languages.

    @param languages: the languages to translate
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # Bug fix: the validator only populates entries for languages that
        # actually have problems, so direct indexing raised KeyError for clean
        # languages; and missing_in_lang values are frozensets while outdated
        # values are lists, so `missing[lang] + outdated[lang]` raised
        # TypeError.  Use .get() with a default and normalize both to lists.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if len(untranslated) == 0:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of errors found (0 when all files are valid)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if (validator.valid()):
        print 'All files OK'
    else:
        # Report each category of problem and tally the totals.
        for lang, missing in validator.missing_in_master().iteritems():
            print 'Missing in master, present in %s: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, missing in validator.missing_in_lang().iteritems():
            print 'Missing in %s, present in master: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, outdated in validator.outdated_in_lang().iteritems():
            print 'Outdated in %s: %s:' % (lang, str(outdated))
            error_count = error_count + len(outdated)
    return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which compares language files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    '''
    Validates per-language string files against the master ('en') file.
    '''

    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Parses strings.xml for every language up front and annotates every
        string with the Mercurial revisions that touched it.

        @param languages: a dictionary mapping each language to its
                          corresponding directory; must include 'en', which is
                          treated as the master file
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                # 'en' is the master everything else is compared against.
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is
        valid.  Results are exposed through valid(), missing_in_master(),
        missing_in_lang() and outdated_in_lang().
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True when the last Validate() run found no problems of any kind.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        # Map of language -> keys present in that language but not in master.
        return self._missing_in_master

    def missing_in_lang(self):
        # Map of language -> keys present in master but not in that language.
        return self._missing_in_lang

    def outdated_in_lang(self):
        # Map of language -> keys whose translation is older than the master's.
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            # Only record languages that actually have problems.
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation to
        the master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for
                # this string.
                # NOTE(review): this raises KeyError when `key` is absent from
                # the master file — the case _ValidateMissingKeys detects but
                # does not prevent here; confirm intended behavior.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which superceeds all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version.
                # NOTE(review): DoesRevisionSuperceed returns True when the two
                # revisions are equal, so an equally-recent translation is also
                # flagged as outdated — confirm this is intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Resource directory of the MyTracks Android project, relative to its root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Name of the master (English) values directory under res/.
ANDROID_MASTER_VALUES = 'values'
# Glob pattern matching the per-language values directories under res/.
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upwards from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.

    @raise ValueError: when no enclosing My Tracks project directory exists
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        # Go up one level.  At the filesystem root os.path.split() returns the
        # same path, which means we have run out of ancestors.  (Bug fix: the
        # original compared against '/', which never matches on Windows and
        # would loop forever, and raised a plain string, which is a TypeError
        # on Python >= 2.6.)
        parent = os.path.split(path)[0]
        if parent == path:
            raise ValueError('Not in My Tracks project')
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master 'values' directory is reported under the 'en' language code.

    @raise ValueError: if no language directories or no master directory exist
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    # Bug fix: the original raised plain strings, which is a TypeError on
    # Python >= 2.6; raise real exceptions instead.
    if len(language_dirs) == 0:
        raise ValueError('No languages found!')
    if not os.path.isdir(master_dir):
        raise ValueError("Couldn't find master file")
    # Extract the language code from each 'values-<code>' directory name.
    # (Loop variable renamed from `dir` to avoid shadowing the builtin.)
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
#!/usr/bin/env python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools to bootstrap when none (or an older one) is installed.
DEFAULT_VERSION = "0.6c11"
# Download base; the egg name depends on the running Python's major.minor.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]

# Known-good MD5 digests for each downloadable setuptools egg, used to detect
# corrupted or tampered downloads (see _validate_md5 / update_md5).
md5_data = {
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Checks data against the known MD5 for egg_name; exits 2 on mismatch.

    Eggs not listed in md5_data are accepted without verification.  Returns
    the data unchanged on success.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, prepend it to sys.path, and tell setuptools to
        # install itself from that egg.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: bootstrap from a download.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is installed.
        if was_imported:
            # Too late to swap it out in this process; tell the user to
            # upgrade manually and abort.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
    except pkg_resources.DistributionNotFound:
        pass
    # Not imported yet: drop the stale pkg_resources module and download a
    # fresh egg in its place.
    del pkg_resources, sys.modules['pkg_resources'] # reload ok
    return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to abort (e.g. to open firewall
                # access) before any network traffic starts.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both ends regardless of whether the transfer succeeded.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # Bootstrap case: no setuptools at all.  Download an egg and run the
        # easy_install inside it to install setuptools itself.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            # Remove the temporary egg regardless of how installation went.
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # '0.0.1' identifies an obsolete stub that cannot be upgraded
            # in place.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # The installed version is too old; upgrade it with easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    for name in filenames:
        # Hash each named egg file and record it under its basename.
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Rebuild the literal dict source, sorted for stable output.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    # Splice the regenerated table back into this script's own source file.
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # --md5update regenerates the embedded md5_data table from the named egg
    # files; any other invocation installs or upgrades setuptools.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration."""
import platform
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup # pylint:disable-msg=C6204
# Configure the required packages and scripts to install, depending on
# Python version and OS.
# Third-party packages the bq tool needs at runtime.
REQUIRED_PACKAGES = [
    'google-apputils',
    'python-gflags',
    'google-api-python-client==1.0',
    'oauth2client==1.0',
    'httplib2',
]
# Console entry point: installs a `bq` command dispatching to bq.run_main.
CONSOLE_SCRIPTS = [
    'bq = bq:run_main',
]
if platform.system() == 'Windows':
    # Windows lacks the readline module; pyreadline provides a compatible shim.
    REQUIRED_PACKAGES.append('pyreadline')
# Enforce a supported interpreter (2.6.5 <= version < 3).
# Bug fix: the original compared version strings lexicographically, so e.g.
# '2.10.0' would have compared as less than '2.6.5'.  Compare numeric
# component tuples instead; non-digit suffixes in the patchlevel (e.g. '15+')
# are stripped before conversion.
py_version = tuple(int(''.join(ch for ch in part if ch.isdigit()) or '0')
                   for part in platform.python_version_tuple())
if py_version < (2, 6, 5) or py_version >= (3, 0, 0):
    raise ValueError('BigQuery requires Python >= 2.6.5.')
# Version of the bq command-line tool being packaged.
_BQ_VERSION = '2.0.13'

setup(name='bigquery',
      version=_BQ_VERSION,
      description='BigQuery command-line tool',
      url='http://code.google.com/p/google-bigquery-tools/',
      author='Google Inc.',
      author_email='bigquery-team@google.com',
      # Contained modules and scripts.
      py_modules=[
          'bq',
          'bigquery_client',
          'table_formatter',
      ],
      entry_points={
          'console_scripts': CONSOLE_SCRIPTS,
      },
      install_requires=REQUIRED_PACKAGES,
      provides=[
          'bigquery (%s)' % (_BQ_VERSION,),
      ],
      # Information for packaging of the discovery document.
      include_package_data=True,
      packages=['discovery'],
      package_data={
          'discovery': ['*'],
      },
      # PyPI package information.
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: End Users/Desktop',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2.7',
          'Topic :: Database :: Front-Ends',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      license='Apache 2.0',
      keywords='google bigquery library',
      )
| Python |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Table formatting library.
We define a TableFormatter interface, and create subclasses for
several different print formats, including formats intended for both
human and machine consumption:
Human Consumption
-----------------
PrettyFormatter: This prints ASCII-art bordered tables. Inspired
by the prettytable python library. Example:
+-----+---------------+
| foo | longer header |
+-----+---------------+
| a | 3 |
| ... |
| abc | 123 |
+-----+---------------+
SparsePrettyFormatter: This is a PrettyFormatter which simply
doesn't print most of the border. Example:
foo longer header
----- ---------------
a 3
...
abc 123
PrettyJsonFormatter: Prints JSON output in a format easily
read by a human. Example:
[
{
"foo": "a",
"longer header": 3
},
...
{
"foo": "abc",
"longer header": 123
}
]
Machine Consumption
-------------------
CsvFormatter: Prints output in CSV form, with minimal
quoting, '\n' separation between lines, and including
a header line. Example:
foo,longer header
a,3
...
abc,123
JsonFormatter: Prints JSON output in the most compact
form possible. Example:
[{"foo":"a","longer header":3},...,{"foo":"abc","longer header":123}]
Additional formatters can be added by subclassing TableFormatter and
overriding the following methods:
__len__, __unicode__, AddRow, column_names, AddColumn
"""
import cStringIO
import csv
import itertools
import json
import sys
class FormatterException(Exception):
    """Raised when a table formatter is used incorrectly."""
class TableFormatter(object):
    """Interface for table formatters."""

    def __init__(self, **kwds):
        """Initializes the base class.

        Keyword arguments:
          skip_header_when_empty: If true, does not print the table's header
            if there are zero rows. This argument has no effect on
            PrettyJsonFormatter.
        """
        # Guard against direct instantiation of this abstract interface.
        if self.__class__ == TableFormatter:
            raise NotImplementedError(
                'Cannot instantiate abstract class TableFormatter')
        self.skip_header_when_empty = kwds.get('skip_header_when_empty', False)

    def __nonzero__(self):
        # A formatter is truthy iff it holds at least one row.
        return bool(len(self))

    def __len__(self):
        raise NotImplementedError('__len__ must be implemented by subclass')

    def __str__(self):
        # Encode the unicode rendering with backslash escapes so __str__
        # never raises on non-ASCII content.
        return unicode(self).encode(sys.getdefaultencoding(), 'backslashreplace')

    def __unicode__(self):
        raise NotImplementedError('__unicode__ must be implemented by subclass')

    def Print(self):
        # Prints the rendered table; an empty table prints nothing unless
        # configured otherwise by the subclass's __unicode__.
        if self:
            # TODO(user): Make encoding a customizable attribute on
            # the TableFormatter.
            encoding = sys.stdout.encoding or 'utf8'
            print unicode(self).encode(encoding, 'backslashreplace')

    def AddRow(self, row):
        """Add a new row (an iterable) to this formatter."""
        raise NotImplementedError('AddRow must be implemented by subclass')

    def AddRows(self, rows):
        """Add all rows to this table."""
        for row in rows:
            self.AddRow(row)

    def AddField(self, field):
        """Add a field as a new column to this formatter."""
        # TODO(user): Excise this bigquery-specific method.
        # Left-align string columns, right-align everything else.
        align = 'l' if field.get('type', []) == 'STRING' else 'r'
        self.AddColumn(field['name'], align=align)

    def AddFields(self, fields):
        """Convenience method to add a list of fields."""
        for field in fields:
            self.AddField(field)

    def AddDict(self, d):
        """Add a dict as a row by using column names as keys."""
        # Missing keys render as empty strings.
        self.AddRow([d.get(name, '') for name in self.column_names])

    @property
    def column_names(self):
        """Return the ordered list of column names in self."""
        raise NotImplementedError('column_names must be implemented by subclass')

    def AddColumn(self, column_name, align='r', **kwds):
        """Add a new column to this formatter."""
        raise NotImplementedError('AddColumn must be implemented by subclass')

    def AddColumns(self, column_names, kwdss=None):
        """Add a series of columns to this formatter."""
        # One kwargs dict per column; default to empty kwargs for each.
        kwdss = kwdss or [{}] * len(column_names)
        for column_name, kwds in zip(column_names, kwdss):
            self.AddColumn(column_name, **kwds)
class PrettyFormatter(TableFormatter):
  """Formats output as an ASCII-art table with borders."""

  def __init__(self, **kwds):
    """Initialize a new PrettyFormatter.

    Keyword arguments:
      junction_char: (default: +) Character to use for table junctions.
      horizontal_char: (default: -) Character to use for horizontal lines.
      vertical_char: (default: |) Character to use for vertical lines.
    """
    super(PrettyFormatter, self).__init__(**kwds)
    self.junction_char = kwds.get('junction_char', '+')
    self.horizontal_char = kwds.get('horizontal_char', '-')
    self.vertical_char = kwds.get('vertical_char', '|')
    # Rows and per-row printed heights are kept in parallel lists.
    self.rows = []
    self.row_heights = []
    self._column_names = []
    self.column_widths = []
    self.column_alignments = []
    self.header_height = 1

  def __len__(self):
    return len(self.rows)

  def __unicode__(self):
    if self or not self.skip_header_when_empty:
      lines = itertools.chain(
          self.FormatHeader(), self.FormatRows(), self.FormatHrule())
    else:
      lines = []
    return '\n'.join(lines)

  @staticmethod
  def CenteredPadding(interval, size, left_justify=True):
    """Compute information for centering a string in a fixed space.

    Given two integers interval and size, with size <= interval, this
    function computes two integers left_padding and right_padding with
      left_padding + right_padding + size = interval
    and
      |left_padding - right_padding| <= 1.

    In the case that interval and size have different parity, the
    extra space goes on the right iff left_justify is True. (That is,
    iff the string should be left justified in the "center" space.)

    Args:
      interval: Size of the fixed space.
      size: Size of the string to center in that space.
      left_justify: (optional, default: True) Whether the string
        should be left-justified in the center space.

    Returns:
      left_padding, right_padding: The size of the left and right
        margins for centering the string.

    Raises:
      FormatterException: If size > interval.
    """
    if size > interval:
      raise FormatterException('Illegal state in table formatting')
    same_parity = (interval % 2) == (size % 2)
    # Floor division keeps the result an int even under future division.
    padding = (interval - size) // 2
    if same_parity:
      return padding, padding
    elif left_justify:
      return padding, padding + 1
    else:
      return padding + 1, padding

  @staticmethod
  def Abbreviate(s, width):
    """Abbreviate a string to at most width characters."""
    suffix = '.' * min(width, 3)
    return s if len(s) <= width else s[:width - len(suffix)] + suffix

  @staticmethod
  def FormatCell(entry, cell_width, cell_height=1, align='c', valign='t'):
    """Format an entry into a list of strings for a fixed cell size.

    Given a (possibly multi-line) entry and a cell height and width,
    we split the entry into a list of lines and format each one into
    the given width and alignment. We then pad the list with
    additional blank lines of the appropriate width so that the
    resulting list has exactly cell_height entries. Each entry
    is also padded with one space on either side.

    We abbreviate strings for width, but we require that the
    number of lines in entry is at most cell_height.

    Args:
      entry: String to format, which may have newlines.
      cell_width: Maximum width for lines in the cell.
      cell_height: Number of lines in the cell.
      align: Alignment to use for lines of text.
      valign: Vertical alignment in the cell. One of 't',
        'c', or 'b' (top, center, and bottom, respectively).

    Returns:
      An iterator yielding exactly cell_height lines, each of
      exact width cell_width + 2, corresponding to this cell.

    Raises:
      FormatterException: If there are too many lines in entry.
      ValueError: If the valign is invalid.
    """
    entry_lines = [PrettyFormatter.Abbreviate(line, cell_width)
                   for line in entry.split('\n')]
    if len(entry_lines) > cell_height:
      raise FormatterException('Too many lines (%s) for a cell of size %s' % (
          len(entry_lines), cell_height))
    if valign == 't':
      top_lines = []
      bottom_lines = itertools.repeat(' ' * (cell_width + 2),
                                      cell_height - len(entry_lines))
    elif valign == 'c':
      top_padding, bottom_padding = PrettyFormatter.CenteredPadding(
          cell_height, len(entry_lines))
      top_lines = itertools.repeat(' ' * (cell_width + 2), top_padding)
      bottom_lines = itertools.repeat(' ' * (cell_width + 2), bottom_padding)
    elif valign == 'b':
      bottom_lines = []
      top_lines = itertools.repeat(' ' * (cell_width + 2),
                                   cell_height - len(entry_lines))
    else:
      raise ValueError('Unknown value for valign: %s' % (valign,))
    content_lines = []
    for line in entry_lines:
      if align == 'c':
        left_padding, right_padding = PrettyFormatter.CenteredPadding(
            cell_width, len(line))
        content_lines.append(' %s%s%s ' % (
            ' ' * left_padding, line, ' ' * right_padding))
      elif align in ('l', 'r'):
        fmt = ' %*s ' if align == 'r' else ' %-*s '
        content_lines.append(fmt % (cell_width, line))
      else:
        raise FormatterException('Unknown alignment: %s' % (align,))
    return itertools.chain(top_lines, content_lines, bottom_lines)

  def FormatRow(self, entries, row_height,
                column_alignments=None, column_widths=None):
    """Format a row into a list of strings.

    Given a list of entries, which must be the same length as the
    number of columns in this table, and a desired row height, we
    generate a list of strings corresponding to the printed
    representation of that row.

    Args:
      entries: List of entries to format.
      row_height: Number of printed lines corresponding to this row.
      column_alignments: (optional, default self.column_alignments)
        The alignment to use for each column.
      column_widths: (optional, default self.column_widths) The widths
        of each column.

    Returns:
      An iterator over the strings in the printed representation
      of this row.
    """
    column_alignments = column_alignments or self.column_alignments
    column_widths = column_widths or self.column_widths
    # pylint:disable-msg=C6402
    curried_format = lambda entry, width, align: self.__class__.FormatCell(
        unicode(entry), width, cell_height=row_height, align=align)
    # izip(*imap(...)) transposes the per-cell line lists into per-line
    # cell tuples, one tuple per printed output line.
    printed_rows = itertools.izip(*itertools.imap(
        curried_format, entries, column_widths, column_alignments))
    return (self.vertical_char.join(itertools.chain([''], cells, ['']))
            for cells in printed_rows)

  def HeaderLines(self):
    """Return an iterator over the row(s) for the column names."""
    # Headers are always centered, regardless of column alignment.
    aligns = itertools.repeat('c')
    return self.FormatRow(self.column_names, self.header_height,
                          column_alignments=aligns)

  def FormatHrule(self):
    """Return a list containing an hrule for this table."""
    # Use the configured horizontal_char rather than a hard-coded '-'
    # so the horizontal_char passed to __init__ actually takes effect.
    entries = (''.join(itertools.repeat(self.horizontal_char, width + 2))
               for width in self.column_widths)
    return [self.junction_char.join(itertools.chain([''], entries, ['']))]

  def FormatHeader(self):
    """Return an iterator over the lines for the header of this table."""
    return itertools.chain(
        self.FormatHrule(), self.HeaderLines(), self.FormatHrule())

  def FormatRows(self):
    """Return an iterator over all the rows in this table."""
    return itertools.chain(*itertools.imap(
        self.FormatRow, self.rows, self.row_heights))

  def AddRow(self, row):
    """Add a row to this table.

    Args:
      row: A list of length equal to the number of columns in this table.

    Raises:
      FormatterException: If the row length is invalid.
    """
    if len(row) != len(self.column_names):
      raise FormatterException('Invalid row length: %s' % (len(row),))
    split_rows = [unicode(entry).split('\n') for entry in row]
    self.row_heights.append(max(len(lines) for lines in split_rows))
    # Grow each column width to fit the widest line in this row's entries.
    column_widths = (max(len(line) for line in entry) for entry in split_rows)
    self.column_widths = [max(width, current) for width, current
                          in itertools.izip(column_widths, self.column_widths)]
    self.rows.append(row)

  def AddColumn(self, column_name, align='l', **kwds):
    """Add a column to this table.

    Args:
      column_name: Name for the new column.
      align: (optional, default: 'l') Alignment for the new column entries.

    Raises:
      FormatterException: If the table already has any rows, or if the
        provided alignment is invalid.
    """
    if self:
      raise FormatterException(
          'Cannot add a new column to an initialized table')
    if align not in ('l', 'c', 'r'):
      raise FormatterException('Invalid column alignment: %s' % (align,))
    lines = column_name.split('\n')
    self.column_widths.append(max(len(line) for line in lines))
    self.column_alignments.append(align)
    self.column_names.append(column_name)
    self.header_height = max(len(lines), self.header_height)

  @property
  def column_names(self):
    return self._column_names
class SparsePrettyFormatter(PrettyFormatter):
  """Formats output as a table with a header and separator line."""

  def __init__(self, **kwds):
    """Initialize a new SparsePrettyFormatter."""
    # Blank out the junction and vertical characters unless the caller
    # explicitly overrides them; the horizontal rule remains.
    defaults = {'junction_char': ' ',
                'vertical_char': ' '}
    defaults.update(kwds)
    super(SparsePrettyFormatter, self).__init__(**defaults)

  def __unicode__(self):
    lines = []
    if self or not self.skip_header_when_empty:
      lines = itertools.chain(self.FormatHeader(), self.FormatRows())
    return '\n'.join(lines)

  def FormatHeader(self):
    """Return an iterator over the header lines for this table."""
    return itertools.chain(self.HeaderLines(), self.FormatHrule())
class CsvFormatter(TableFormatter):
  """Formats output as CSV with header lines.

  The resulting CSV file includes a header line, uses Unix-style
  newlines, and only quotes those entries which require it, namely
  those that contain quotes, newlines, or commas.
  """

  def __init__(self, **kwds):
    super(CsvFormatter, self).__init__(**kwds)
    # Rows are serialized immediately into this byte buffer by the csv
    # writer; the header is kept separately so that columns can still be
    # added until the first row is written.
    self._buffer = cStringIO.StringIO()
    self._header = []
    self._table = csv.writer(
        self._buffer, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')

  def __nonzero__(self):
    # True once any row bytes have been written to the buffer.
    return bool(self._buffer.tell())

  def __len__(self):
    return len(unicode(self).splitlines())

  def __unicode__(self):
    if self or not self.skip_header_when_empty:
      lines = [','.join(self._header), self._buffer.getvalue()]
    else:
      lines = []
    # Note that we need to explicitly decode here to work around
    # the fact that the CSV module does not work with unicode.
    return '\n'.join(line.decode('utf8') for line in lines).rstrip()

  @property
  def column_names(self):
    # Return a copy so callers cannot mutate the header in place.
    return self._header[:]

  def AddColumn(self, column_name, **kwds):
    if self:
      raise FormatterException(
          'Cannot add a new column to an initialized table')
    self._header.append(column_name)

  def AddRow(self, row):
    # Encode to UTF-8 before handing entries to the byte-oriented
    # Python 2 csv module; __unicode__ decodes on the way back out.
    self._table.writerow([unicode(entry).encode('utf8', 'backslashreplace')
                          for entry in row])
class JsonFormatter(TableFormatter):
  """Formats output in maximally compact JSON."""

  def __init__(self, **kwargs):
    super(JsonFormatter, self).__init__(**kwargs)
    # Each added row becomes one dict, keyed by field name.
    self._field_names = []
    self._table = []

  def __len__(self):
    return len(self._table)

  def __unicode__(self):
    return json.dumps(self._table, separators=(',', ':'), ensure_ascii=False)

  @property
  def column_names(self):
    """Return a copy of the field names added so far."""
    return self._field_names[:]

  def AddColumn(self, column_name, **kwargs):
    """Add a field name; disallowed once any row exists."""
    if self:
      raise FormatterException(
          'Cannot add a new column to an initialized table')
    self._field_names.append(column_name)

  def AddRow(self, row):
    """Add a row, which must match the number of fields."""
    if len(row) != len(self._field_names):
      raise FormatterException('Invalid row: %s' % (row,))
    self._table.append(dict(zip(self._field_names, row)))
class PrettyJsonFormatter(JsonFormatter):
  """Formats output in human-legible JSON."""

  def __unicode__(self):
    # Sorted keys and indentation make the output stable and readable.
    return json.dumps(self._table, sort_keys=True, indent=2,
                      ensure_ascii=False)
class NullFormatter(TableFormatter):
  """Formatter that prints no output at all."""

  def __init__(self, **kwargs):
    super(NullFormatter, self).__init__(**kwargs)
    # State is recorded so len()/bool() behave normally, but never printed.
    self._column_names = []
    self._rows = []

  def __nonzero__(self):
    return bool(self._rows)

  def __len__(self):
    return len(self._rows)

  def __unicode__(self):
    return ''

  def AddRow(self, row):
    """Record a row without producing any output."""
    self._rows.append(row)

  def AddRows(self, rows):
    """Record each row in rows without producing any output."""
    for r in rows:
      self.AddRow(r)

  @property
  def column_names(self):
    """Return a copy of the recorded column names."""
    return list(self._column_names)

  def AddColumn(self, column_name, **kwargs):
    """Record a column name; all formatting options are ignored."""
    self._column_names.append(column_name)
def GetFormatter(table_format):
  """Map a format name to a TableFormatter object."""
  factories = {
      'csv': CsvFormatter,
      'pretty': PrettyFormatter,
      'json': JsonFormatter,
      'prettyjson': PrettyJsonFormatter,
      'sparse': SparsePrettyFormatter,
      'none': NullFormatter,
  }
  if table_format not in factories:
    raise FormatterException('Unknown format: %s' % table_format)
  return factories[table_format]()
| Python |
#!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Python script for interacting with BigQuery."""
import cmd
import codecs
import datetime
import httplib
import json
import os
import pdb
import pipes
import platform
import shlex
import sys
import time
import traceback
import types
import apiclient
import httplib2
import oauth2client
import oauth2client.client
import oauth2client.file
import oauth2client.tools
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
import table_formatter
import bigquery_client
# Global flags shared by every bq command.
flags.DEFINE_string(
    'apilog', None,
    'Turn on logging of all server requests and responses. If no string is '
    'provided, log to stdout; if a string is provided, instead log to that '
    'file.')
flags.DEFINE_string(
    'api',
    'https://www.googleapis.com',
    'API endpoint to talk to.')
flags.DEFINE_string(
    'api_version', 'v2',
    'API version to use.')
flags.DEFINE_boolean(
    'debug_mode', False,
    'Show tracebacks on Python exceptions.')
flags.DEFINE_string(
    'trace', None,
    'A tracing token of the form "trace:<traceid>" '
    'to include in api requests.')
flags.DEFINE_string(
    'bigqueryrc', os.path.join(os.path.expanduser('~'), '.bigqueryrc'),
    'Path to configuration file. The configuration file specifies '
    'new defaults for any flags, and can be overrridden by specifying the '
    'flag on the command line. If the --bigqueryrc flag is not specified, the '
    'BIGQUERYRC environment variable is used. If that is not specified, the '
    'path "~/.bigqueryrc" is used.')
flags.DEFINE_string(
    'credential_file', os.path.join(os.path.expanduser('~'),
                                    '.bigquery.v2.token'),
    'Filename used for storing the BigQuery OAuth token.')
flags.DEFINE_string(
    'discovery_file', '',
    'Filename for JSON document to read for discovery.')
flags.DEFINE_boolean(
    'synchronous_mode', True,
    'If True, wait for command completion before returning, and use the '
    'job completion status for error codes. If False, simply create the '
    'job, and use the success of job creation as the error code.',
    short_name='sync')
flags.DEFINE_string(
    'project_id', '',
    'Default project to use for requests.')
flags.DEFINE_string(
    'dataset_id', '',
    'Default dataset to use for requests. (Ignored when not applicable.)')
# This flag is "hidden" at the global scope to avoid polluting help
# text on individual commands for rarely used functionality.
flags.DEFINE_string(
    'job_id', None,
    'A unique job_id to use for the request. If None, the server will create '
    'a unique job_id. Applies only to commands that launch jobs, such as cp, '
    'extract, link, load, and query. ')
flags.DEFINE_boolean(
    'fingerprint_job_id', False,
    'Whether to use a job id that is derived from a fingerprint of the job '
    'configuration. This will prevent the same job from running multiple times '
    'accidentally.')
flags.DEFINE_boolean(
    'quiet', False,
    'If True, ignore status updates while jobs are running.',
    short_name='q')
flags.DEFINE_boolean(
    'headless',
    False,
    'Whether this bq session is running without user interaction. This '
    'affects behavior that expects user interaction, like whether '
    'debug_mode will break into the debugger and lowers the frequency '
    'of informational printing.')
flags.DEFINE_enum(
    'format', None,
    ['none', 'json', 'prettyjson', 'csv', 'sparse', 'pretty'],
    'Format for command output. Options include:'
    '\n pretty: formatted table output'
    '\n sparse: simpler table output'
    '\n prettyjson: easy-to-read JSON format'
    '\n json: maximally compact JSON'
    '\n csv: csv format with header'
    '\nThe first three are intended to be human-readable, and the latter '
    'three are for passing to another program. If no format is selected, '
    'one will be chosen based on the command run.')
flags.DEFINE_multistring(
    'job_property', None,
    'Additional key-value pairs to include in the properties field of '
    'the job configuration')  # No period: Multistring adds flagspec suffix.
# Service-account authorization flags; see
# _GetServiceAccountCredentialsFromFlags for how they are consumed.
flags.DEFINE_string(
    'service_account', '',
    'Use this service account email address for authorization. '
    'For example, 1234567890@developer.gserviceaccount.com.'
    )
flags.DEFINE_string(
    'service_account_private_key_file', '',
    'Filename that contains the service account private key. '
    'Required if --service_account is specified.')
flags.DEFINE_string(
    'service_account_private_key_password', 'notasecret',
    'Password for private key. This password must match the password '
    'you set on the key when you created it in the Google APIs Console. '
    'Defaults to the default Google APIs Console private key password.')
flags.DEFINE_string(
    'service_account_credential_file', None,
    'File to be used as a credential store for service accounts. '
    'Must be set if using a service account.')
FLAGS = flags.FLAGS
# These are long names.
# pylint:disable-msg=C6409
JobReference = bigquery_client.ApiClientHelper.JobReference
ProjectReference = bigquery_client.ApiClientHelper.ProjectReference
DatasetReference = bigquery_client.ApiClientHelper.DatasetReference
TableReference = bigquery_client.ApiClientHelper.TableReference
BigqueryClient = bigquery_client.BigqueryClient
JobIdGeneratorIncrementing = bigquery_client.JobIdGeneratorIncrementing
JobIdGeneratorRandom = bigquery_client.JobIdGeneratorRandom
JobIdGeneratorFingerprint = bigquery_client.JobIdGeneratorFingerprint
# pylint:enable-msg=C6409
# OAuth client registration constants used when building the web-server
# authorization flow in _GetCredentialsFromOAuthFlow.
_CLIENT_USER_AGENT = 'bq/2.0'
_CLIENT_SCOPE = [
    'https://www.googleapis.com/auth/bigquery',
]
_CLIENT_ID = '977385342095.apps.googleusercontent.com'
_CLIENT_INFO = {
    'client_id': _CLIENT_ID,
    'client_secret': 'wbER7576mc_1YOII0dGk7jEE',
    'scope': _CLIENT_SCOPE,
    'user_agent': _CLIENT_USER_AGENT,
}
_BIGQUERY_TOS_MESSAGE = (
    'In order to get started, please visit the Google APIs Console to '
    'create a project and agree to our Terms of Service:\n'
    '\thttp://code.google.com/apis/console\n\n'
    'For detailed sign-up instructions, please see our Getting Started '
    'Guide:\n'
    '\thttps://developers.google.com/bigquery/docs/getting-started\n\n'
    'Once you have completed the sign-up process, please try your command '
    'again.')
# Spellings of the field delimiter accepted on the command line; see
# _NormalizeFieldDelimiter.
_DELIMITER_MAP = {
    'tab': '\t',
    '\\t': '\t',
}
# These aren't relevant for user-facing docstrings:
# pylint:disable-msg=C6112
# pylint:disable-msg=C6113
# TODO(user): Write some explanation of the structure of this file.
####################
# flags processing
####################
def _GetBigqueryRcFilename():
  """Return the name of the bigqueryrc file to use.

  In order, we look for a flag the user specified, an environment
  variable, and finally the default value for the flag.

  Returns:
    bigqueryrc filename as a string.
  """
  if FLAGS['bigqueryrc'].present and FLAGS.bigqueryrc:
    return FLAGS.bigqueryrc
  return os.environ.get('BIGQUERYRC') or FLAGS.bigqueryrc
def _ProcessBigqueryrc():
  """Updates FLAGS with values found in the bigqueryrc file.

  Each non-comment line of the rc file is "flagname=value" (or a bare
  flag name, treated as boolean true). Flags already set on the command
  line are not overridden; multistring flags are appended to instead.

  Raises:
    app.UsageError: If the rc file names a flag that does not exist.
  """
  bigqueryrc = _GetBigqueryRcFilename()
  if not os.path.exists(bigqueryrc):
    return
  with open(bigqueryrc) as rcfile:
    for line in rcfile:
      # Skip comments and blank lines.
      if line.lstrip().startswith('#') or not line.strip():
        continue
      elif line.lstrip().startswith('['):
        # TODO(user): Support command-specific flag sections.
        continue
      flag, equalsign, value = line.partition('=')
      # if no value given, assume stringified boolean true
      if not equalsign:
        value = 'true'
      flag = flag.strip()
      value = value.strip()
      # Accept optional leading dashes, e.g. "--project_id=foo".
      while flag.startswith('-'):
        flag = flag[1:]
      # We want flags specified at the command line to override
      # those in the flagfile.
      if flag not in FLAGS:
        raise app.UsageError(
            'Unknown flag %s found in bigqueryrc file' % (flag,))
      if not FLAGS[flag].present:
        FLAGS[flag].Parse(value)
      elif FLAGS[flag].Type().startswith('multi'):
        # Multistring flags accumulate: command-line values come first,
        # then the rc file's values are appended.
        old_value = getattr(FLAGS, flag)
        FLAGS[flag].Parse(value)
        setattr(FLAGS, flag, old_value + getattr(FLAGS, flag))
def _ResolveApiInfoFromFlags():
  """Determine an api and api_version."""
  return {'api': FLAGS.api, 'api_version': FLAGS.api_version}
def _GetServiceAccountCredentialsFromFlags(storage):  # pylint:disable-msg=W0613
  """Build service account credentials from the --service_account* flags.

  Args:
    storage: Unused; accepted for interface compatibility with
      _GetCredentialsFromOAuthFlow.

  Returns:
    An oauth2client SignedJwtAssertionCredentials object.

  Raises:
    app.UsageError: If OpenSSL support is unavailable, the private key
      file flag is unset, or the key file cannot be read.
  """
  if not oauth2client.client.HAS_OPENSSL:
    raise app.UsageError(
        'BigQuery requires OpenSSL to be installed in order to use '
        'service account credentials. Please install OpenSSL '
        'and the Python OpenSSL package.')
  if not FLAGS.service_account_private_key_file:
    raise app.UsageError(
        'Service account authorization requires the '
        'service_account_private_key_file flag to be set.')
  try:
    # Use open() rather than the deprecated file() builtin; binary mode
    # because the key file contains raw key data, not text.
    with open(FLAGS.service_account_private_key_file, 'rb') as f:
      key = f.read()
  except IOError as e:
    raise app.UsageError(
        'Service account specified, but private key in file "%s" '
        'cannot be read:\n%s' % (FLAGS.service_account_private_key_file, e))
  return oauth2client.client.SignedJwtAssertionCredentials(
      FLAGS.service_account, key, _CLIENT_SCOPE,
      private_key_password=FLAGS.service_account_private_key_password,
      user_agent=_CLIENT_USER_AGENT)
def _GetCredentialsFromOAuthFlow(storage):
  """Run the interactive OAuth2 web-server flow and return credentials.

  Args:
    storage: An oauth2client storage object in which the obtained
      credentials are saved.

  Returns:
    The credentials obtained from the flow.
  """
  print
  print '******************************************************************'
  print '** No OAuth2 credentials found, beginning authorization process **'
  print '******************************************************************'
  print
  if FLAGS.headless:
    # There is no user to interact with the browser flow.
    print 'Running in headless mode, exiting.'
    sys.exit(1)
  while True:
    # If authorization fails, we want to retry, rather than let this
    # cascade up and get caught elsewhere. If users want out of the
    # retry loop, they can ^C.
    try:
      flow = oauth2client.client.OAuth2WebServerFlow(**_CLIENT_INFO)
      credentials = oauth2client.tools.run(flow, storage)
      break
    except (oauth2client.client.FlowExchangeError, SystemExit), e:
      # Here SystemExit is "no credential at all", and the
      # FlowExchangeError is "invalid" -- usually because you reused
      # a token.
      print 'Invalid authorization: %s' % (e,)
      print
    except httplib2.HttpLib2Error as e:
      # Network failures are not retried; bail out with the error.
      print 'Error communicating with server. Please check your internet '
      print 'connection and try again.'
      print
      print 'Error is: %s' % (e,)
      sys.exit(1)
  print
  print '************************************************'
  print '** Continuing execution of BigQuery operation **'
  print '************************************************'
  print
  return credentials
def _GetCredentialsFromFlags():
  """Return valid credentials, from storage or via the configured flow.

  Chooses between service-account and interactive OAuth credentials
  based on --service_account, loading from (and saving to) the
  corresponding credential file.

  Returns:
    A valid oauth2client credentials object with a storage attached.
  """
  if FLAGS.service_account:
    credentials_getter = _GetServiceAccountCredentialsFromFlags
    credential_file = FLAGS.service_account_credential_file
    if not credential_file:
      raise app.UsageError(
          'The flag --service_account_credential_file must be specified '
          'if --service_account is used.')
  else:
    credentials_getter = _GetCredentialsFromOAuthFlow
    credential_file = FLAGS.credential_file
  try:
    # Note that oauth2client.file ensures the file is created with
    # the correct permissions.
    storage = oauth2client.file.Storage(credential_file)
  except OSError, e:
    raise bigquery_client.BigqueryError(
        'Cannot create credential file %s: %s' % (FLAGS.credential_file, e))
  try:
    credentials = storage.get()
  except BaseException, e:
    # A corrupt credential store is unrecoverable here; tell the user
    # how to reset it and exit.
    BigqueryCmd.ProcessError(
        e, name='GetCredentialsFromFlags',
        message_prefix=(
            'Credentials appear corrupt. Please delete the credential file '
            'and try your command again. You can delete your credential '
            'file using "bq init --delete_credentials".\n\nIf that does '
            'not work, you may have encountered a bug in the BigQuery CLI.'))
    sys.exit(1)
  if credentials is None or credentials.invalid:
    # Nothing usable on disk: run the chosen acquisition path.
    credentials = credentials_getter(storage)
  credentials.set_store(storage)
  return credentials
def _GetFormatterFromFlags(secondary_format='sparse'):
  """Return a formatter for --format, falling back to secondary_format."""
  chosen = FLAGS.format if FLAGS['format'].present else secondary_format
  return table_formatter.GetFormatter(chosen)
def _ExpandForPrinting(fields, rows, formatter):
  """Expand entries that require special bq-specific formatting."""

  def _FormatTimestamp(entry):
    # Timestamp values arrive as (possibly fractional) epoch seconds.
    try:
      date = datetime.datetime.utcfromtimestamp(float(entry))
      return date.strftime('%Y-%m-%d %H:%M:%S')
    except ValueError:
      return '<date out of range for display>'

  # Map column index -> normalizer for columns needing special handling.
  column_normalizers = {}
  for index, field in enumerate(fields):
    if field['type'].upper() == 'TIMESTAMP':
      column_normalizers[index] = _FormatTimestamp

  # The representation of NULL depends on the output format.
  if isinstance(formatter, table_formatter.JsonFormatter):
    null_value = None
  elif isinstance(formatter, table_formatter.CsvFormatter):
    null_value = ''
  else:
    null_value = 'NULL'

  def _Normalize(index, entry):
    if entry is None:
      return null_value
    if index in column_normalizers:
      return column_normalizers[index](entry)
    return entry

  return ([_Normalize(i, e) for i, e in enumerate(row)] for row in rows)
def _PrintDryRunInfo(job):
  """Print the bytes-processed estimate from a dry-run query job.

  Args:
    job: A job resource dict containing statistics.query.totalBytesProcessed.
  """
  num_bytes = job['statistics']['query']['totalBytesProcessed']
  if FLAGS.format in ['prettyjson', 'json']:
    # Machine-readable formats get the whole job resource.
    _PrintFormattedJsonObject(job)
  elif FLAGS.format == 'csv':
    print num_bytes
  else:
    print (
        'Query successfully validated. Assuming the tables are not modified, '
        'running this query will process %s bytes of data.' % (num_bytes,))
def _PrintFormattedJsonObject(obj):
if FLAGS.format == 'prettyjson':
print json.dumps(obj, sort_keys=True, indent=2)
else:
print json.dumps(obj, separators=(',', ':'))
def _GetJobIdFromFlags():
  """Returns the job id or job generator from the flags."""
  if FLAGS.fingerprint_job_id and FLAGS.job_id:
    raise app.UsageError(
        'The fingerprint_job_id flag cannot be specified with the job_id '
        'flag.')
  if FLAGS.fingerprint_job_id:
    return JobIdGeneratorFingerprint()
  if FLAGS.job_id is None:
    # No job id given at all: generate one client-side.
    return JobIdGeneratorIncrementing(JobIdGeneratorRandom())
  # An explicitly empty job id means "let the server pick"; any other
  # value is used verbatim.
  return FLAGS.job_id or None
def _GetWaitPrinterFactoryFromFlags():
  """Returns the default wait_printer_factory to use while waiting for jobs."""
  if FLAGS.quiet:
    # --quiet suppresses all progress output.
    return BigqueryClient.QuietWaitPrinter
  elif FLAGS.headless:
    # Headless sessions only report state transitions.
    return BigqueryClient.TransitionWaitPrinter
  else:
    return BigqueryClient.VerboseWaitPrinter
def _PromptWithDefault(message):
"""Prompts user with message, return key pressed or '' on enter."""
if FLAGS.headless:
print 'Running --headless, accepting default for prompt: %s' % (message,)
return ''
return raw_input(message).lower()
def _PromptYN(message):
  """Prompts user with message, returning the key 'y', 'n', or '' on enter."""
  response = _PromptWithDefault(message)
  while response not in ('y', 'n', ''):
    response = _PromptWithDefault(message)
  return response
def _NormalizeFieldDelimiter(field_delimiter):
  """Validates and returns the correct field_delimiter.

  Args:
    field_delimiter: The delimiter string from the flag, or None.

  Returns:
    None if no delimiter was given; otherwise the delimiter with the
    spellings in _DELIMITER_MAP (e.g. 'tab', '\\t') replaced by the
    actual character.

  Raises:
    app.UsageError: If the delimiter is not decodable in the current
      locale.
  """
  # The only non-string delimiter we allow is None, which represents
  # no field delimiter specified by the user.
  if field_delimiter is None:
    return field_delimiter
  try:
    # We check the field delimiter flag specifically, since a
    # mis-entered Thorn character generates a difficult to
    # understand error during request serialization time.
    _ = field_delimiter.decode(sys.stdin.encoding or 'utf8')
  except UnicodeDecodeError:
    raise app.UsageError(
        'The field delimiter flag is not valid. Flags must be '
        'specified in your default locale. For example, '
        'the Latin 1 representation of Thorn is byte code FE, '
        'which in the UTF-8 locale would be expressed as C3 BE.')
  # Allow TAB and \\t substitution.
  key = field_delimiter.lower()
  return _DELIMITER_MAP.get(key, field_delimiter)
class TablePrinter(object):
  """Base class for printing a table, with a default implementation."""

  def __init__(self, **kwds):
    super(TablePrinter, self).__init__()
    # Most extended classes will require state.
    for key in kwds:
      setattr(self, key, kwds[key])

  def PrintTable(self, fields, rows):
    """Format fields/rows with the flag-selected formatter and print."""
    formatter = _GetFormatterFromFlags(secondary_format='pretty')
    formatter.AddFields(fields)
    formatter.AddRows(_ExpandForPrinting(fields, rows, formatter))
    formatter.Print()
class Factory(object):
  """Class encapsulating factory creation of BigqueryClient."""
  _BIGQUERY_CLIENT_FACTORY = None

  class ClientTablePrinter(object):
    """Holder for the TablePrinter singleton used when printing tables."""
    _TABLE_PRINTER = None

    @classmethod
    def GetTablePrinter(cls):
      """Return the TablePrinter singleton, creating it on first use."""
      if cls._TABLE_PRINTER is None:
        cls._TABLE_PRINTER = TablePrinter()
      return cls._TABLE_PRINTER

    @classmethod
    def SetTablePrinter(cls, printer):
      """Install printer as the TablePrinter singleton."""
      if not isinstance(printer, TablePrinter):
        raise TypeError('Printer must be an instance of TablePrinter.')
      cls._TABLE_PRINTER = printer

  @classmethod
  def GetBigqueryClientFactory(cls):
    """Return the client factory, defaulting to BigqueryClient."""
    if cls._BIGQUERY_CLIENT_FACTORY is None:
      cls._BIGQUERY_CLIENT_FACTORY = bigquery_client.BigqueryClient
    return cls._BIGQUERY_CLIENT_FACTORY

  @classmethod
  def SetBigqueryClientFactory(cls, factory):
    """Install factory as the BigqueryClient factory."""
    if not issubclass(factory, bigquery_client.BigqueryClient):
      raise TypeError('Factory must be subclass of BigqueryClient.')
    cls._BIGQUERY_CLIENT_FACTORY = factory
def _PrintTable(client, table_dict, **extra_args):
  """Read the schema and rows of table_dict via client and print them."""
  fields, rows = client.ReadSchemaAndRows(table_dict, **extra_args)
  Factory.ClientTablePrinter.GetTablePrinter().PrintTable(fields, rows)
class Client(object):
  """Class wrapping a singleton bigquery_client.BigqueryClient."""
  # The process-wide client instance; created lazily by Get().
  client = None

  @staticmethod
  def Create(**kwds):
    """Build a new BigqueryClient configured from kwds and FLAGS.

    Args:
      **kwds: Constructor arguments that override the corresponding
        global flag values.

    Returns:
      A configured instance from the registered client factory.
    """
    def KwdsOrFlags(name):
      # Explicit keyword arguments take priority over flag values.
      return kwds[name] if name in kwds else getattr(FLAGS, name)
    # Note that we need to handle possible initialization tasks
    # for the case of being loaded as a library.
    _ProcessBigqueryrc()
    bigquery_client.ConfigurePythonLogger(FLAGS.apilog)
    credentials = _GetCredentialsFromFlags()
    assert credentials is not None
    client_args = {}
    global_args = ('credential_file', 'job_property',
                   'project_id', 'dataset_id', 'trace', 'sync',
                   'api', 'api_version')
    for name in global_args:
      client_args[name] = KwdsOrFlags(name)
    client_args['wait_printer_factory'] = _GetWaitPrinterFactoryFromFlags()
    if FLAGS.discovery_file:
      with open(FLAGS.discovery_file) as f:
        client_args['discovery_document'] = f.read()
    bigquery_client_factory = Factory.GetBigqueryClientFactory()
    return bigquery_client_factory(credentials=credentials, **client_args)

  @classmethod
  def Get(cls):
    """Return a BigqueryClient initialized from flags."""
    if cls.client is None:
      try:
        cls.client = Client.Create()
      except ValueError, e:
        # Convert constructor parameter errors into flag usage errors.
        raise app.UsageError(e)
    return cls.client

  @classmethod
  def Delete(cls):
    """Delete the existing client.

    This is needed when flags have changed, and we need to force
    client recreation to reflect new flag values.
    """
    cls.client = None
def _Typecheck(obj, types, message=None): # pylint:disable-msg=W0621
"""Raises a user error if obj is not an instance of types."""
if not isinstance(obj, types):
message = message or 'Type of %s is not one of %s' % (obj, types)
raise app.UsageError(message)
# TODO(user): This code uses more than the average amount of
# Python magic. Explain what the heck is going on throughout.
class NewCmd(appcommands.Cmd):
"""Featureful extension of appcommands.Cmd."""
def __init__(self, name, flag_values):
super(NewCmd, self).__init__(name, flag_values)
run_with_args = getattr(self, 'RunWithArgs', None)
self._new_style = isinstance(run_with_args, types.MethodType)
if self._new_style:
func = run_with_args.im_func
code = func.func_code # pylint: disable=W0621
self._full_arg_list = list(code.co_varnames[:code.co_argcount])
# TODO(user): There might be some corner case where this
# is *not* the right way to determine bound vs. unbound method.
if isinstance(run_with_args.im_self, run_with_args.im_class):
self._full_arg_list.pop(0)
self._max_args = len(self._full_arg_list)
self._min_args = self._max_args - len(func.func_defaults or [])
self._star_args = bool(code.co_flags & 0x04)
self._star_kwds = bool(code.co_flags & 0x08)
if self._star_args:
self._max_args = sys.maxint
self._debug_mode = FLAGS.debug_mode
self.surface_in_shell = True
self.__doc__ = self.RunWithArgs.__doc__
elif self.Run.im_func is NewCmd.Run.im_func:
raise appcommands.AppCommandsError(
'Subclasses of NewCmd must override Run or RunWithArgs')
def __getattr__(self, name):
if name in self._command_flags:
return self._command_flags[name].value
return super(NewCmd, self).__getattribute__(name)
def _GetFlag(self, flagname):
if flagname in self._command_flags:
return self._command_flags[flagname]
else:
return None
def Run(self, argv):
"""Run this command.
If self is a new-style command, we set up arguments and call
self.RunWithArgs, gracefully handling exceptions. If not, we
simply call self.Run(argv).
Args:
argv: List of arguments as strings.
Returns:
0 on success, nonzero on failure.
"""
if not self._new_style:
return super(NewCmd, self).Run(argv)
original_values = self._command_flags.FlagValuesDict()
try:
args = self._command_flags(argv)[1:]
for flag, value in self._command_flags.FlagValuesDict().iteritems():
setattr(self, flag, value)
if value == original_values[flag]:
original_values.pop(flag)
new_args = []
for argname in self._full_arg_list[:self._min_args]:
flag = self._GetFlag(argname)
if flag is not None and flag.present:
new_args.append(flag.value)
elif args:
new_args.append(args.pop(0))
else:
print 'Not enough positional args, still looking for %s' % (argname,)
if self.usage:
print 'Usage: %s' % (self.usage,)
return 1
new_kwds = {}
for argname in self._full_arg_list[self._min_args:]:
flag = self._GetFlag(argname)
if flag is not None and flag.present:
new_kwds[argname] = flag.value
elif args:
new_kwds[argname] = args.pop(0)
if args and not self._star_args:
print 'Too many positional args, still have %s' % (args,)
return 1
new_args.extend(args)
if self._debug_mode:
return self.RunDebug(new_args, new_kwds)
else:
return self.RunSafely(new_args, new_kwds)
finally:
for flag, value in original_values.iteritems():
setattr(self, flag, value)
self._command_flags[flag].Parse(value)
  def RunCmdLoop(self, argv):
    """Hook for use in cmd.Cmd-based command shells."""
    try:
      # shlex.split honors shell-style quoting in the interactive line.
      args = shlex.split(argv)
    except ValueError, e:
      # Unbalanced quotes and similar; surface as a SyntaxError so the
      # shell reports a parse failure instead of crashing.
      raise SyntaxError(BigqueryCmd.EncodeForPrinting(e))
    return self.Run([self._command_name] + args)
  def _HandleError(self, e):
    """Print a one-line description of exception e; return exit code 1."""
    message = str(e)
    if isinstance(e, bigquery_client.BigqueryClientConfigurationError):
      # Configuration problems are usually fixable by re-initializing.
      message += ' Try running "bq init".'
    print 'Exception raised in %s operation: %s' % (self._command_name, message)
    return 1
  def RunDebug(self, args, kwds):
    """Run this command in debug mode."""
    try:
      return_value = self.RunWithArgs(*args, **kwds)
    except BaseException, e:
      # Don't break into the debugger for expected exceptions.
      if isinstance(e, app.UsageError) or (
          isinstance(e, bigquery_client.BigqueryError) and
          not isinstance(e, bigquery_client.BigqueryInterfaceError)):
        return self._HandleError(e)
      print
      print '****************************************************'
      print '**  Unexpected Exception raised in bq execution!  **'
      if FLAGS.headless:
        print '**  --headless mode enabled, exiting.             **'
        print '**  See STDERR for traceback.                     **'
      else:
        print '**  --debug_mode enabled, starting pdb.           **'
      print '****************************************************'
      print
      traceback.print_exc()
      print
      # In headless mode there is no interactive terminal, so skip the
      # post-mortem debugger and just report failure.
      if not FLAGS.headless:
        pdb.post_mortem()
      return 1
    return return_value
  def RunSafely(self, args, kwds):
    """Run this command, turning exceptions into print statements."""
    try:
      return_value = self.RunWithArgs(*args, **kwds)
    except BaseException, e:
      # Any failure becomes a printed message plus a nonzero exit code.
      return self._HandleError(e)
    return return_value
class BigqueryCmd(NewCmd):
  """Bigquery-specific NewCmd wrapper."""
  def RunSafely(self, args, kwds):
    """Run this command, printing information about any exceptions raised."""
    try:
      return_value = self.RunWithArgs(*args, **kwds)
    except BaseException, e:
      # Delegate error classification and printing to ProcessError.
      return BigqueryCmd.ProcessError(e, name=self._command_name)
    return return_value
  @staticmethod
  def EncodeForPrinting(s):
    """Safely encode a string as the encoding for sys.stdout."""
    # sys.stdout.encoding can be None (e.g. when output is piped).
    encoding = sys.stdout.encoding or 'ascii'
    # backslashreplace guarantees the encode itself cannot raise.
    return unicode(s).encode(encoding, 'backslashreplace')
  @staticmethod
  def ProcessError(
      e, name='unknown',
      message_prefix='You have encountered a bug in the BigQuery CLI.'):
    """Translate an error message into some printing and a return code."""
    response = []
    retcode = 1
    contact_us_msg = (
        'Google engineers monitor and answer questions on Stack Overflow, with '
        'the tag google-bigquery:\n'
        '  http://stackoverflow.com/questions/ask?tags=google-bigquery\n'
        'Please include a brief description of the steps that led to this '
        'issue, as well as the following information: \n\n')
    error_details = (
        '========================================\n'
        '== Platform ==\n'
        '  %s\n'
        '== bq version ==\n'
        '  %s\n'
        '== Command line ==\n'
        '  %s\n'
        '== UTC timestamp ==\n'
        '  %s\n'
        '== Error trace ==\n'
        '%s'
        '========================================\n') % (
            ':'.join([
                platform.python_implementation(),
                platform.python_version(),
                platform.platform()]),
            _Version.VersionNumber(),
            sys.argv,
            time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()),
            ''.join(traceback.format_tb(sys.exc_info()[2]))
        )
    # Remap the 'strict' codec error handler to behave like 'replace' so
    # that printing the error text below can never itself raise a
    # UnicodeError. NOTE(review): this changes process-global codec state.
    codecs.register_error('strict', codecs.replace_errors)
    message = BigqueryCmd.EncodeForPrinting(e)
    if isinstance(e, (bigquery_client.BigqueryNotFoundError,
                      bigquery_client.BigqueryDuplicateError)):
      response.append('BigQuery error in %s operation: %s' % (name, message))
      # Distinct exit code for not-found/duplicate so scripts can detect it.
      retcode = 2
    elif isinstance(e, bigquery_client.BigqueryTermsOfServiceError):
      response.append(str(e) + '\n')
      response.append(_BIGQUERY_TOS_MESSAGE)
    elif isinstance(e, bigquery_client.BigqueryInvalidQueryError):
      response.append('Error in query string: %s' % (message,))
    elif (isinstance(e, bigquery_client.BigqueryError)
          and not isinstance(e, bigquery_client.BigqueryInterfaceError)):
      response.append('BigQuery error in %s operation: %s' % (name, message))
    elif isinstance(e, (app.UsageError, TypeError)):
      response.append(message)
    elif (isinstance(e, SyntaxError) or
          isinstance(e, bigquery_client.BigquerySchemaError)):
      response.append('Invalid input: %s' % (message,))
    elif isinstance(e, flags.FlagsError):
      response.append('Error parsing command: %s' % (message,))
    elif isinstance(e, KeyboardInterrupt):
      response.append('')
    else:  # pylint:disable-msg=W0703
      # Errors with traceback information are printed here.
      # The traceback module has nicely formatted the error trace
      # for us, so we don't want to undo that via TextWrap.
      if isinstance(e, bigquery_client.BigqueryInterfaceError):
        message_prefix = (
            'Bigquery service returned an invalid reply in %s operation: %s.'
            '\n\n'
            'Please make sure you are using the latest version '
            'of the bq tool and try again. '
            'If this problem persists, you may have encountered a bug in the '
            'bigquery client.' % (name, message))
      elif isinstance(e, oauth2client.client.Error):
        message_prefix = (
            'Authorization error. This may be a network connection problem, '
            'so please try again. If this problem persists, the credentials '
            'may be corrupt. Try deleting and re-creating your credentials. '
            'You can delete your credentials using '
            '"bq init --delete_credentials".'
            '\n\n'
            'If this problem still occurs, you may have encountered a bug '
            'in the bigquery client.')
      elif (isinstance(e, httplib.HTTPException)
            or isinstance(e, apiclient.errors.Error)
            or isinstance(e, httplib2.HttpLib2Error)):
        message_prefix = (
            'Network connection problem encountered, please try again.'
            '\n\n'
            'If this problem persists, you may have encountered a bug in the '
            'bigquery client.')
      print flags.TextWrap(message_prefix + ' ' + contact_us_msg)
      print error_details
      response.append('Unexpected exception in %s operation: %s' % (
          name, message))
    print flags.TextWrap('\n'.join(response))
    return retcode
  def PrintJobStartInfo(self, job):
    """Print a simple status line."""
    reference = BigqueryClient.ConstructObjectReference(job)
    print 'Successfully started %s %s' % (self._command_name, reference)
class _Load(BigqueryCmd):
  usage = """load <destination_table> <source> <schema>"""
  def __init__(self, name, fv):
    """Register the flags specific to the load command on fv."""
    super(_Load, self).__init__(name, fv)
    flags.DEFINE_string(
        'field_delimiter', None,
        'The character that indicates the boundary between columns in the '
        'input file. "\\t" and "tab" are accepted names for tab.',
        short_name='F', flag_values=fv)
    flags.DEFINE_enum(
        'encoding', None,
        ['UTF-8', 'ISO-8859-1'],
        'The character encoding used by the input file. Options include:'
        '\n ISO-8859-1 (also known as Latin-1)'
        '\n UTF-8',
        short_name='E', flag_values=fv)
    flags.DEFINE_integer(
        'skip_leading_rows', None,
        'The number of rows at the beginning of the source file to skip.',
        flag_values=fv)
    flags.DEFINE_string(
        'schema', None,
        'Either a filename or a comma-separated list of fields in the form '
        'name[:type].',
        flag_values=fv)
    flags.DEFINE_boolean(
        'replace', False,
        'If true erase existing contents before loading new data.',
        flag_values=fv)
    flags.DEFINE_string(
        'quote', None,
        'Quote character to use to enclose records. Default is ". '
        'To indicate no quote character at all, use an empty string.',
        flag_values=fv)
    flags.DEFINE_integer(
        'max_bad_records', 0,
        'Maximum number of bad records allowed before the entire job fails.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'allow_quoted_newlines', None,
        'Whether to allow quoted newlines in CSV import data.',
        flag_values=fv)
    flags.DEFINE_enum(
        'source_format', None,
        ['CSV',
         'NEWLINE_DELIMITED_JSON',
         'DATASTORE_BACKUP'],
        'Format of source data. Options include:'
        '\n CSV'
        '\n NEWLINE_DELIMITED_JSON'
        '\n DATASTORE_BACKUP',
        flag_values=fv)
  def RunWithArgs(self, destination_table, source, schema=None):
    """Perform a load operation of source into destination_table.
    Usage:
      load <destination_table> <source> [<schema>]
    The <destination_table> is the fully-qualified table name of table to
    create, or append to if the table already exists.
    The <source> argument can be a path to a single local file, or a
    comma-separated list of URIs.
    The <schema> argument should be either the name of a JSON file or a text
    schema. This schema should be omitted if the table already has one.
    In the case that the schema is provided in text form, it should be a
    comma-separated list of entries of the form name[:type], where type will
    default to string if not specified.
    In the case that <schema> is a filename, it should contain a
    single array object, each entry of which should be an object with
    properties 'name', 'type', and (optionally) 'mode'. See the online
    documentation for more detail:
      https://code.google.com/apis/bigquery/docs/uploading.html#createtable
    Note: the case of a single-entry schema with no type specified is
    ambiguous; one can use name:string to force interpretation as a
    text schema.
    Examples:
      bq load ds.new_tbl ./info.csv ./info_schema.json
      bq load ds.new_tbl gs://mybucket/info.csv ./info_schema.json
      bq load ds.small gs://mybucket/small.csv name:integer,value:string
      bq load ds.small gs://mybucket/small.csv field1,field2,field3
    Arguments:
      destination_table: Destination table name.
      source: Name of local file to import, or a comma-separated list of
        URI paths to data to import.
      schema: Either a text schema or JSON file, as above.
    """
    client = Client.Get()
    table_reference = client.GetTableReference(destination_table)
    # Options forwarded directly to the load job configuration.
    opts = {
        'encoding': self.encoding,
        'skip_leading_rows': self.skip_leading_rows,
        'max_bad_records': self.max_bad_records,
        'allow_quoted_newlines': self.allow_quoted_newlines,
        'job_id': _GetJobIdFromFlags(),
        'source_format': self.source_format,
        }
    if self.replace:
      opts['write_disposition'] = 'WRITE_TRUNCATE'
    if self.field_delimiter:
      opts['field_delimiter'] = _NormalizeFieldDelimiter(self.field_delimiter)
    # Note: an explicit None check so an empty string (meaning "no quote
    # character") is still passed through.
    if self.quote is not None:
      opts['quote'] = _NormalizeFieldDelimiter(self.quote)
    job = client.Load(table_reference, source, schema=schema, **opts)
    # In async mode just report that the job started.
    if not FLAGS.sync:
      self.PrintJobStartInfo(job)
class _Query(BigqueryCmd):
  usage = """query <sql>"""
  def __init__(self, name, fv):
    """Register the flags specific to the query command on fv."""
    super(_Query, self).__init__(name, fv)
    flags.DEFINE_string(
        'destination_table', '',
        'Name of destination table for query results.',
        flag_values=fv)
    flags.DEFINE_integer(
        'max_rows', 100,
        'How many rows to return in the result.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'batch', False,
        'Whether to run the query in batch mode.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'append_table', False,
        'When a destination table is specified, whether or not to append.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'replace', False,
        'If true erase existing contents before loading new data.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'allow_large_results', False,
        'Whether to materialize large results in the destination table.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'dry_run', None,
        'Whether the query should be validated without executing.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'require_cache', None,
        'Whether to only run the query if it is already cached.',
        flag_values=fv)
    flags.DEFINE_boolean(
        'use_cache', None,
        'Whether to use the query cache to avoid rerunning cached queries.',
        flag_values=fv)
  def RunWithArgs(self, *args):
    """Execute a query.
    Examples:
      bq query 'select count(*) from publicdata:samples.shakespeare'
    Usage:
      query <sql_query>
    """
    kwds = {
        'destination_table': self.destination_table,
        'job_id': _GetJobIdFromFlags(),
        'preserve_nulls': True,
        'allow_large_results': self.allow_large_results,
        'dry_run': self.dry_run,
        'use_cache': self.use_cache,
        }
    # When both --append_table and --replace are given with a destination
    # table, the later assignment means --replace wins.
    if self.destination_table and self.append_table:
      kwds['write_disposition'] = 'WRITE_APPEND'
    if self.destination_table and self.replace:
      kwds['write_disposition'] = 'WRITE_TRUNCATE'
    if self.require_cache:
      kwds['create_disposition'] = 'CREATE_NEVER'
    if self.batch:
      kwds['priority'] = 'BATCH'
    client = Client.Get()
    # Positional args are joined into a single SQL string.
    job = client.Query(' '.join(args), **kwds)
    if self.dry_run:
      _PrintDryRunInfo(job)
    elif not FLAGS.sync:
      self.PrintJobStartInfo(job)
    else:
      # Synchronous mode: print rows from the result table.
      _PrintTable(client, job['configuration']['query']['destinationTable'],
                  max_rows=self.max_rows)
class _Extract(BigqueryCmd):
  usage = """extract <source_table> <destination_uri>"""
  def __init__(self, name, fv):
    """Register the flags specific to the extract command on fv."""
    super(_Extract, self).__init__(name, fv)
    flags.DEFINE_string(
        'field_delimiter', None,
        'The character that indicates the boundary between columns in the '
        'output file. "\\t" and "tab" are accepted names for tab.',
        short_name='F', flag_values=fv)
    flags.DEFINE_enum(
        'destination_format', None,
        ['CSV', 'NEWLINE_DELIMITED_JSON'],
        'The format with which to write the extracted data. Tables with '
        'nested or repeated fields cannot be extracted to CSV.',
        flag_values=fv)
  def RunWithArgs(self, source_table, destination_uri):
    """Perform an extract operation of source_table into destination_uri.
    Usage:
      extract <source_table> <destination_uri>
    Examples:
      bq extract ds.summary gs://mybucket/summary.csv
    Arguments:
      source_table: Source table to extract.
      destination_uri: Google Storage uri.
    """
    client = Client.Get()
    kwds = {
        'job_id': _GetJobIdFromFlags(),
        }
    table_reference = client.GetTableReference(source_table)
    job = client.Extract(
        table_reference, destination_uri,
        field_delimiter=_NormalizeFieldDelimiter(self.field_delimiter),
        destination_format=self.destination_format, **kwds)
    # In async mode just report that the job started.
    if not FLAGS.sync:
      self.PrintJobStartInfo(job)
class _List(BigqueryCmd):
  usage = """ls [(-j|-p|-d)] [-a] [-n <number>] [<identifier>]"""
  def __init__(self, name, fv):
    """Register the flags specific to the ls command on fv."""
    super(_List, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'all_jobs', None,
        'Show results from all users in this project (for listing jobs only).',
        short_name='a', flag_values=fv)
    flags.DEFINE_boolean(
        'jobs', False,
        'Show jobs described by this identifier.',
        short_name='j', flag_values=fv)
    flags.DEFINE_integer(
        'max_results', None,
        'Maximum number to list.',
        short_name='n', flag_values=fv)
    flags.DEFINE_boolean(
        'projects', False,
        'Show all projects.',
        short_name='p', flag_values=fv)
    flags.DEFINE_boolean(
        'datasets', False,
        'Show datasets described by this identifier.',
        short_name='d', flag_values=fv)
  def RunWithArgs(self, identifier=''):
    """List the objects contained in the named collection.
    List the objects in the named project or dataset. A trailing : or
    . can be used to signify a project or dataset.
     * With -j, show the jobs in the named project.
     * With -p, show all projects.
    Examples:
      bq ls
      bq ls -j proj
      bq ls -p -n 1000
      bq ls mydataset
    """
    # pylint:disable-msg=C6115
    # Flag combinations are validated before any network calls are made.
    if self.j and self.p:
      raise app.UsageError(
          'Cannot specify more than one of -j and -p.')
    if self.p and identifier:
      raise app.UsageError('Cannot specify an identifier with -p')
    if self.a and not self.j:
      raise app.UsageError('-a can only be specified with -j')
    client = Client.Get()
    formatter = _GetFormatterFromFlags()
    if identifier:
      reference = client.GetReference(identifier)
    else:
      try:
        reference = client.GetReference(identifier)
      except bigquery_client.BigqueryError:
        # We want to let through the case of no identifier, which
        # will fall through to the second case below.
        reference = None
    # If we got a TableReference, we might be able to make sense
    # of it as a DatasetReference, as in 'ls foo' with dataset_id
    # set.
    if isinstance(reference, TableReference):
      try:
        reference = client.GetDatasetReference(identifier)
      except bigquery_client.BigqueryError:
        pass
    _Typecheck(reference, (types.NoneType, ProjectReference, DatasetReference),
               ('Invalid identifier "%s" for ls, cannot call list on object '
                'of type %s') % (identifier, type(reference).__name__))
    # With -d, a dataset identifier means "list the datasets in its
    # project", so promote the reference to the enclosing project.
    if self.d and isinstance(reference, DatasetReference):
      reference = reference.GetProjectReference()
    if self.j:
      reference = client.GetProjectReference(identifier)
      _Typecheck(reference, ProjectReference,
                 'Cannot determine job(s) associated with "%s"' % (identifier,))
      project_reference = client.GetProjectReference(identifier)
      BigqueryClient.ConfigureFormatter(formatter, JobReference)
      results = map(  # pylint:disable-msg=C6402
          client.FormatJobInfo,
          client.ListJobs(reference=project_reference,
                          max_results=self.max_results,
                          all_users=self.a))
    elif self.p or reference is None:
      BigqueryClient.ConfigureFormatter(formatter, ProjectReference)
      results = map(  # pylint:disable-msg=C6402
          client.FormatProjectInfo,
          client.ListProjects(max_results=self.max_results))
    elif isinstance(reference, ProjectReference):
      BigqueryClient.ConfigureFormatter(formatter, DatasetReference)
      results = map(  # pylint:disable-msg=C6402
          client.FormatDatasetInfo,
          client.ListDatasets(reference, max_results=self.max_results))
    else:  # isinstance(reference, DatasetReference):
      BigqueryClient.ConfigureFormatter(formatter, TableReference)
      results = map(  # pylint:disable-msg=C6402
          client.FormatTableInfo,
          client.ListTables(reference, max_results=self.max_results))
    for result in results:
      formatter.AddDict(result)
    formatter.Print()
class _Delete(BigqueryCmd):
  usage = """rm [-f] [-r] [(-d|-t)] <identifier>"""
  def __init__(self, name, fv):
    """Register the flags specific to the rm command on fv."""
    super(_Delete, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Remove dataset described by this identifier.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Remove table described by this identifier.',
        short_name='t', flag_values=fv)
    flags.DEFINE_boolean(
        'force', False,
        "Ignore existing tables and datasets, don't prompt.",
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'recursive', False,
        'Remove dataset and any tables it may contain.',
        short_name='r', flag_values=fv)
  def RunWithArgs(self, identifier):
    """Delete the dataset or table described by identifier.
    Always requires an identifier, unlike the show and ls commands.
    By default, also requires confirmation before deleting. Supports
    the -d and -t flags to signify that the identifier is a dataset
    or table.
     * With -f, don't ask for confirmation before deleting.
     * With -r, remove all tables in the named dataset.
    Examples:
      bq rm ds.table
      bq rm -r -f old_dataset
    """
    client = Client.Get()
    # pylint:disable-msg=C6115
    if self.d and self.t:
      raise app.UsageError('Cannot specify more than one of -d and -t.')
    if not identifier:
      raise app.UsageError('Must provide an identifier for rm.')
    if self.t:
      reference = client.GetTableReference(identifier)
    elif self.d:
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
    _Typecheck(reference, (DatasetReference, TableReference),
               'Invalid identifier "%s" for rm.' % (identifier,))
    # -r only makes sense for datasets; reject it for tables.
    if isinstance(reference, TableReference) and self.r:
      raise app.UsageError(
          'Cannot specify -r with %r' % (reference,))
    if not self.force:
      # Only prompt when the object actually exists; deleting a missing
      # object is handled by ignore_not_found below.
      if ((isinstance(reference, DatasetReference) and
           client.DatasetExists(reference)) or
          (isinstance(reference, TableReference)
           and client.TableExists(reference))):
        if 'y' != _PromptYN('rm: remove %r? (y/N) ' % (reference,)):
          print 'NOT deleting %r, exiting.' % (reference,)
          return 0
    if isinstance(reference, DatasetReference):
      client.DeleteDataset(reference,
                           ignore_not_found=self.force,
                           delete_contents=self.recursive)
    elif isinstance(reference, TableReference):
      client.DeleteTable(reference,
                         ignore_not_found=self.force)
class _Copy(BigqueryCmd):
  usage = """cp [-n] <source_table> <dest_table>"""
  def __init__(self, name, fv):
    """Register the flags specific to the cp command on fv."""
    super(_Copy, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'no_clobber', False,
        'Do not overwrite an existing table.',
        short_name='n', flag_values=fv)
    flags.DEFINE_boolean(
        'force', False,
        "Ignore existing destination tables, don't prompt.",
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'append_table', False,
        'Append to an existing table.',
        short_name='a', flag_values=fv)
  def RunWithArgs(self, source_table, dest_table):
    """Copies one table to another.
    Examples:
      bq cp dataset.old_table dataset2.new_table
    """
    client = Client.Get()
    source_reference = client.GetTableReference(source_table)
    dest_reference = client.GetTableReference(dest_table)
    # Map the mutually-styled flags onto a job write disposition.
    # -a takes precedence over -n when both are given.
    if self.append_table:
      write_disposition = 'WRITE_APPEND'
      ignore_already_exists = True
    elif self.no_clobber:
      write_disposition = 'WRITE_EMPTY'
      ignore_already_exists = True
    else:
      write_disposition = 'WRITE_TRUNCATE'
      ignore_already_exists = False
    if not self.force:
      if client.TableExists(dest_reference):
        if 'y' != _PromptYN('cp: replace %r? (y/N) ' % (dest_reference,)):
          print 'NOT copying %r, exiting.' % (source_reference,)
          return 0
    kwds = {
        'write_disposition': write_disposition,
        'ignore_already_exists': ignore_already_exists,
        'job_id': _GetJobIdFromFlags(),
        }
    job = client.CopyTable(source_reference, dest_reference, **kwds)
    if job is None:
      # CopyTable returns None when the copy was skipped because the
      # destination already exists and that is being ignored.
      print "Table '%s' already exists, skipping" % (dest_reference,)
    elif not FLAGS.sync:
      self.PrintJobStartInfo(job)
    else:
      print "Table '%s' successfully copied to '%s'" % (
          source_reference, dest_reference)
class _Make(BigqueryCmd):
  usage = """mk [-d] <identifier>  OR  mk [-t] <identifier> [<schema>]"""
  def __init__(self, name, fv):
    """Register the flags specific to the mk command on fv."""
    super(_Make, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'force', False,
        'Ignore errors reporting that the object already exists.',
        short_name='f', flag_values=fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Create dataset with this name.',
        short_name='d', flag_values=fv)
    flags.DEFINE_boolean(
        'table', False,
        'Create table with this name.',
        short_name='t', flag_values=fv)
    flags.DEFINE_string(
        'schema', '',
        'Either a filename or a comma-separated list of fields in the form '
        'name[:type].',
        flag_values=fv)
    flags.DEFINE_string(
        'description', None,
        'Description of the dataset or table.',
        flag_values=fv)
    flags.DEFINE_integer(
        'expiration', None,
        'Expiration time, in seconds from now, of a table.',
        flag_values=fv)
  def RunWithArgs(self, identifier='', schema=''):
    # pylint:disable-msg=C6115
    """Create a dataset or table with this name.
    See 'bq help load' for more information on specifying the schema.
    Examples:
      bq mk new_dataset
      bq mk new_dataset.new_table
      bq --dataset_id=new_dataset mk table
      bq mk -t new_dataset.newtable name:integer,value:string
    """
    client = Client.Get()
    if self.d and self.t:
      raise app.UsageError('Cannot specify both -d and -t.')
    if self.t:
      reference = client.GetTableReference(identifier)
    elif self.d or not identifier:
      # With -d, or with no identifier at all, treat it as a dataset.
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
    _Typecheck(reference, (DatasetReference, TableReference),
               "Invalid identifier '%s' for mk." % (identifier,))
    if isinstance(reference, DatasetReference):
      if self.schema:
        raise app.UsageError('Cannot specify schema with a dataset.')
      if self.expiration:
        raise app.UsageError('Cannot specify an expiration for a dataset.')
      if client.DatasetExists(reference):
        message = "Dataset '%s' already exists." % (reference,)
        # With -f an existing dataset is reported but not an error.
        if not self.f:
          raise bigquery_client.BigqueryError(message)
        else:
          print message
          return
      client.CreateDataset(reference, ignore_existing=True,
                           description=self.description)
      print "Dataset '%s' successfully created." % (reference,)
    elif isinstance(reference, TableReference):
      if client.TableExists(reference):
        message = "Table '%s' already exists." % (reference,)
        if not self.f:
          raise bigquery_client.BigqueryError(message)
        else:
          print message
          return
      if schema:
        schema = bigquery_client.BigqueryClient.ReadSchema(schema)
      else:
        schema = None
      expiration = None
      if self.expiration:
        # The flag is seconds-from-now; the server wants an absolute
        # time in milliseconds since the epoch.
        expiration = int(self.expiration + time.time()) * 1000
      client.CreateTable(reference, ignore_existing=True, schema=schema,
                         description=self.description,
                         expiration=expiration)
      print "Table '%s' successfully created." % (reference,)
class _Update(BigqueryCmd):
usage = """update [-d] [-t] <identifier> [<schema>]"""
def __init__(self, name, fv):
super(_Update, self).__init__(name, fv)
flags.DEFINE_boolean(
'dataset', False,
'Updates a dataset with this name.',
short_name='d', flag_values=fv)
flags.DEFINE_boolean(
'table', False,
'Updates a table with this name.',
short_name='t', flag_values=fv)
flags.DEFINE_string(
'schema', '',
'Either a filename or a comma-separated list of fields in the form '
'name[:type].',
flag_values=fv)
flags.DEFINE_string(
'description', None,
'Description of the dataset or table.',
flag_values=fv)
flags.DEFINE_integer(
'expiration', None,
'Expiration time, in seconds from now, of a table.',
flag_values=fv)
def RunWithArgs(self, identifier='', schema=''):
# pylint:disable-msg=C6115
"""Updates a dataset or table with this name.
See 'bq help load' for more information on specifying the schema.
Examples:
bq update --description "Dataset description" existing_dataset
bq update --description "My table" dataset.table
bq update -t new_dataset.newtable name:integer,value:string
"""
client = Client.Get()
if self.d and self.t:
raise app.UsageError('Cannot specify both -d and -t.')
if self.t:
reference = client.GetTableReference(identifier)
elif self.d or not identifier:
reference = client.GetDatasetReference(identifier)
else:
reference = client.GetReference(identifier)
_Typecheck(reference, (DatasetReference, TableReference),
"Invalid identifier '%s' for mk." % (identifier,))
if isinstance(reference, DatasetReference):
if self.schema:
raise app.UsageError('Cannot specify schema with a dataset.')
if self.expiration:
raise app.UsageError('Cannot specify an expiration for a dataset.')
client.UpdateDataset(reference, description=self.description)
print "Dataset '%s' successfully updated." % (reference,)
elif isinstance(reference, TableReference):
if schema:
schema = bigquery_client.BigqueryClient.ReadSchema(schema)
else:
schema = None
expiration = None
if self.expiration:
expiration = int(self.expiration + time.time()) * 1000
client.UpdateTable(reference, schema=schema,
description=self.description,
expiration=expiration)
print "Table '%s' successfully updated." % (reference,)
class _Show(BigqueryCmd):
  usage = """show [<identifier>]"""
  def __init__(self, name, fv):
    """Register the flags specific to the show command on fv."""
    super(_Show, self).__init__(name, fv)
    flags.DEFINE_boolean(
        'job', False,
        'If true, interpret this identifier as a job id.',
        short_name='j', flag_values=fv)
    flags.DEFINE_boolean(
        'dataset', False,
        'Show dataset with this name.',
        short_name='d', flag_values=fv)
  def RunWithArgs(self, identifier=''):
    """Show all information about an object.
    Examples:
      bq show -j <job_id>
      bq show dataset
      bq show dataset.table
    """
    # pylint:disable-msg=C6115
    client = Client.Get()
    if self.j:
      reference = client.GetJobReference(identifier)
    elif self.d:
      reference = client.GetDatasetReference(identifier)
    else:
      reference = client.GetReference(identifier)
    if reference is None:
      raise app.UsageError('Must provide an identifier for show.')
    object_info = client.GetObjectInfo(reference)
    # The JSON formats are handled separately so that they don't print
    # the record as a list of one record.
    if FLAGS.format in ['prettyjson', 'json']:
      _PrintFormattedJsonObject(object_info)
    elif FLAGS.format in [None, 'sparse', 'pretty']:
      formatter = _GetFormatterFromFlags()
      BigqueryClient.ConfigureFormatter(
          formatter, type(reference), print_format='show')
      object_info = BigqueryClient.FormatInfoByKind(object_info)
      formatter.AddDict(object_info)
      print '%s %s\n' % (reference.typename.capitalize(), reference)
      formatter.Print()
      print
      # For failed jobs, additionally surface the server-side errors.
      if (isinstance(reference, JobReference) and
          object_info['State'] == 'FAILURE'):
        error_result = object_info['status']['errorResult']
        error_ls = object_info['status'].get('errors', [])
        error = bigquery_client.BigqueryError.Create(
            error_result, error_result, error_ls)
        print 'Errors encountered during job execution. %s\n' % (error,)
    else:
      # Any other format: dump every key the object reports.
      formatter = _GetFormatterFromFlags()
      formatter.AddColumns(object_info.keys())
      formatter.AddDict(object_info)
      formatter.Print()
class _Head(BigqueryCmd):
  usage = """head [-n <max rows>] <table identifier>"""
  def __init__(self, name, fv):
    """Register the flags specific to the head command on fv."""
    super(_Head, self).__init__(name, fv)
    flags.DEFINE_integer(
        'max_rows', 100,
        'The number of rows to print when showing table data.',
        short_name='n', flag_values=fv)
  def RunWithArgs(self, identifier=''):
    """Displays rows in a table.
    Examples:
      bq head dataset.table
      bq head -n 10 dataset.table
    """
    client = Client.Get()
    reference = client.GetReference(identifier)
    _Typecheck(reference, (TableReference,),
               'Must provide a table identifier for head.')
    # _PrintTable expects a plain dict describing the table reference.
    _PrintTable(client, dict(reference), max_rows=self.max_rows)
class _Wait(BigqueryCmd):
  usage = """wait [<job_id>] [<secs>]"""
  def RunWithArgs(self, job_id='', secs=sys.maxint):
    # pylint:disable-msg=C6115
    """Wait some number of seconds for a job to finish.
    Poll job_id until either (1) the job is DONE or (2) the
    specified number of seconds have elapsed. Waits forever
    if unspecified. If no job_id is specified, and there is
    only one running job, we poll that job.
    Examples:
      bq wait # Waits forever for the currently running job.
      bq wait job_id  # Waits forever
      bq wait job_id 100  # Waits 100 seconds
      bq wait job_id 0  # See if a job is done.
    Arguments:
      job_id: Job ID to wait on.
      secs: Number of seconds to wait (must be >= 0).
    """
    try:
      secs = BigqueryClient.NormalizeWait(secs)
    except ValueError:
      raise app.UsageError('Invalid wait time: %s' % (secs,))
    client = Client.Get()
    if not job_id:
      # No job given: there must be exactly one pending/running job
      # for the wait target to be unambiguous.
      running_jobs = client.ListJobRefs(state_filter=['PENDING', 'RUNNING'])
      if len(running_jobs) != 1:
        raise bigquery_client.BigqueryError(
            'No job_id provided, found %d running jobs' % (len(running_jobs),))
      job_reference = running_jobs.pop()
    else:
      job_reference = client.GetJobReference(job_id)
    client.WaitJob(job_reference=job_reference, wait=secs)
# pylint:disable-msg=C6409
class CommandLoop(cmd.Cmd):
"""Instance of cmd.Cmd built to work with NewCmd."""
class TerminateSignal(Exception):
"""Exception type used for signaling loop completion."""
pass
def __init__(self, commands, prompt=None):
cmd.Cmd.__init__(self)
self._commands = {'help': commands['help']}
self._special_command_names = ['help', 'repl', 'EOF']
for name, command in commands.iteritems():
if (name not in self._special_command_names and
isinstance(command, NewCmd) and
command.surface_in_shell):
self._commands[name] = command
setattr(self, 'do_%s' % (name,), command.RunCmdLoop)
self._default_prompt = prompt or 'BigQuery> '
self._set_prompt()
self._last_return_code = 0
@property
def last_return_code(self):
return self._last_return_code
def _set_prompt(self):
client = Client().Get()
if client.project_id:
path = str(client.GetReference())
self.prompt = '%s> ' % (path,)
else:
self.prompt = self._default_prompt
def do_EOF(self, *unused_args):
"""Terminate the running command loop.
This function raises an exception to avoid the need to do
potentially-error-prone string parsing inside onecmd.
Returns:
Never returns.
Raises:
CommandLoop.TerminateSignal: always.
"""
raise CommandLoop.TerminateSignal()
def postloop(self):
print 'Goodbye.'
def completedefault(self, unused_text, line, unused_begidx, unused_endidx):
if not line:
return []
else:
command_name = line.partition(' ')[0].lower()
usage = ''
if command_name in self._commands:
usage = self._commands[command_name].usage
elif command_name == 'set':
usage = 'set (project_id|dataset_id) <name>'
elif command_name == 'unset':
usage = 'unset (project_id|dataset_id)'
if usage:
print
print usage
print '%s%s' % (self.prompt, line),
return []
def emptyline(self):
print 'Available commands:',
print ' '.join(list(self._commands))
def precmd(self, line):
"""Preprocess the shell input."""
if line == 'EOF':
return line
if line.startswith('exit') or line.startswith('quit'):
return 'EOF'
words = line.strip().split()
if len(words) > 1 and words[0].lower() == 'select':
return 'query %s' % (pipes.quote(line),)
if len(words) == 1 and words[0] not in ['help', 'ls', 'version']:
return 'help %s' % (line.strip(),)
return line
def onecmd(self, line):
"""Process a single command.
Runs a single command, and stores the return code in
self._last_return_code. Always returns False unless the command
was EOF.
Args:
line: (str) Command line to process.
Returns:
A bool signaling whether or not the command loop should terminate.
"""
try:
self._last_return_code = cmd.Cmd.onecmd(self, line)
except CommandLoop.TerminateSignal:
return True
except BaseException, e:
name = line.split(' ')[0]
BigqueryCmd.ProcessError(e, name=name)
self._last_return_code = 1
return False
def get_names(self):
names = dir(self)
commands = (name for name in self._commands
if name not in self._special_command_names)
names.extend('do_%s' % (name,) for name in commands)
names.append('do_select')
names.remove('do_EOF')
return names
def do_set(self, line):
"""Set the value of the project_id or dataset_id flag."""
client = Client().Get()
name, value = (line.split(' ') + ['', ''])[:2]
if (name not in ('project_id', 'dataset_id') or
not 1 <= len(line.split(' ')) <= 2):
print 'set (project_id|dataset_id) <name>'
elif name == 'dataset_id' and not client.project_id:
print 'Cannot set dataset_id with project_id unset'
else:
setattr(client, name, value)
self._set_prompt()
return 0
def do_unset(self, line):
"""Unset the value of the project_id or dataset_id flag."""
name = line.strip()
client = Client.Get()
if name not in ('project_id', 'dataset_id'):
print 'unset (project_id|dataset_id)'
else:
setattr(client, name, '')
if name == 'project_id':
client.dataset_id = ''
self._set_prompt()
return 0
def do_help(self, command_name):
  """Print the help for command_name (if present) or general help."""

  # TODO(user): Add command-specific flags.
  def FormatOneCmd(name, command, command_names):
    # Width of the name column; shared so the multi-command list aligns.
    indent_size = appcommands.GetMaxCommandLength() + 3
    if len(command_names) > 1:
      # Listing many commands: "name:   <first summary line>" followed by
      # the rest of the help indented under the name column.
      indent = ' ' * indent_size
      command_help = flags.TextWrap(
          command.CommandGetHelp('', cmd_names=command_names),
          indent=indent,
          firstline_indent='')
      first_help_line, _, rest = command_help.partition('\n')
      first_line = '%-*s%s' % (indent_size, name + ':', first_help_line)
      return '\n'.join((first_line, rest))
    else:
      # Single command: the whole help text uniformly indented.
      default_indent = '   '
      return '\n' + flags.TextWrap(
          command.CommandGetHelp('', cmd_names=command_names),
          indent=default_indent,
          firstline_indent=default_indent) + '\n'

  if not command_name:
    print '\nHelp for Bigquery commands:\n'
    command_names = list(self._commands)
    print '\n\n'.join(
        FormatOneCmd(name, command, command_names)
        for name, command in self._commands.iteritems()
        if name not in self._special_command_names)
    print
  elif command_name in self._commands:
    print FormatOneCmd(command_name, self._commands[command_name],
                       command_names=[command_name])
  return 0
def postcmd(self, stop, line):
  """Decide whether the loop should terminate after the last command."""
  if line == 'EOF':
    return True
  return bool(stop)
# pylint:enable-msg=C6409
class _Repl(BigqueryCmd):
"""Start an interactive bq session."""
def __init__(self, name, fv):
super(_Repl, self).__init__(name, fv)
self.surface_in_shell = False
flags.DEFINE_string(
'prompt', '',
'Prompt to use for BigQuery shell.',
flag_values=fv)
def RunWithArgs(self):
"""Start an interactive bq session."""
repl = CommandLoop(appcommands.GetCommandList(), prompt=self.prompt)
print 'Welcome to BigQuery! (Type help for more information.)'
while True:
try:
repl.cmdloop()
break
except KeyboardInterrupt:
print
return repl.last_return_code
class _Init(BigqueryCmd):
  """Create a .bigqueryrc file and set up OAuth credentials."""

  def __init__(self, name, fv):
    super(_Init, self).__init__(name, fv)
    # 'init' is not useful from inside the interactive shell.
    self.surface_in_shell = False
    flags.DEFINE_boolean(
        'delete_credentials', None,
        'If specified, the credentials file associated with this .bigqueryrc '
        'file is deleted.',
        flag_values=fv)

  def DeleteCredentials(self):
    """Deletes this user's credential file.

    Returns:
      0 if nothing was deleted or the user declined, 1 on OSError.
      NOTE(review): the successful-removal path falls off the end and
      returns None -- appcommands appears to treat that as success;
      confirm before relying on the return code.
    """
    _ProcessBigqueryrc()
    filename = FLAGS.service_account_credential_file or FLAGS.credential_file
    if not os.path.exists(filename):
      print 'Credential file %s does not exist.' % (filename,)
      return 0
    try:
      if 'y' != _PromptYN('Delete credential file %s? (y/N) ' % (filename,)):
        print 'NOT deleting %s, exiting.' % (filename,)
        return 0
      os.remove(filename)
    except OSError, e:
      print 'Error removing %s: %s' % (filename, e)
      return 1

  def RunWithArgs(self):
    """Authenticate and create a default .bigqueryrc file."""
    _ProcessBigqueryrc()
    bigquery_client.ConfigurePythonLogger(FLAGS.apilog)
    if self.delete_credentials:
      return self.DeleteCredentials()
    bigqueryrc = _GetBigqueryRcFilename()
    # Delete the old one, if it exists.
    print
    print 'Welcome to BigQuery! This script will walk you through the '
    print 'process of initializing your .bigqueryrc configuration file.'
    print
    if os.path.exists(bigqueryrc):
      print ' **** NOTE! ****'
      print 'An existing .bigqueryrc file was found at %s.' % (bigqueryrc,)
      print 'Are you sure you want to continue and overwrite your existing '
      print 'configuration?'
      print
      if 'y' != _PromptYN('Overwrite %s? (y/N) ' % (bigqueryrc,)):
        print 'NOT overwriting %s, exiting.' % (bigqueryrc,)
        return 0
      print
      try:
        os.remove(bigqueryrc)
      except OSError, e:
        print 'Error removing %s: %s' % (bigqueryrc, e)
        return 1
    print 'First, we need to set up your credentials if they do not '
    print 'already exist.'
    print
    # Client.Get() triggers the OAuth flow if no credentials exist yet.
    client = Client.Get()
    # entries maps flag names to values written into .bigqueryrc below.
    entries = {'credential_file': FLAGS.credential_file}
    projects = client.ListProjects()
    print 'Credential creation complete. Now we will select a default project.'
    print
    if not projects:
      print 'No projects found for this user. Please go to '
      print '  https://code.google.com/apis/console'
      print 'and create a project.'
      print
    else:
      print 'List of projects:'
      formatter = _GetFormatterFromFlags()
      formatter.AddColumn('#')
      BigqueryClient.ConfigureFormatter(formatter, ProjectReference)
      for index, project in enumerate(projects):
        result = BigqueryClient.FormatProjectInfo(project)
        # '#' is the 1-based ordinal the user types to select a project.
        result.update({'#': index + 1})
        formatter.AddDict(result)
      formatter.Print()
      if len(projects) == 1:
        project_reference = BigqueryClient.ConstructObjectReference(
            projects[0])
        print 'Found only one project, setting %s as the default.' % (
            project_reference,)
        print
        entries['project_id'] = project_reference.projectId
      else:
        print 'Found multiple projects. Please enter a selection for '
        print 'which should be the default, or leave blank to not '
        print 'set a default.'
        print
        response = None
        # Loop until response becomes an int; blank input maps to 0,
        # which means "no default project".
        while not isinstance(response, int):
          response = _PromptWithDefault(
              'Enter a selection (1 - %s): ' % (len(projects),))
          try:
            if not response or 1 <= int(response) <= len(projects):
              response = int(response or 0)
          except ValueError:
            pass
        print
        if response:
          project_reference = BigqueryClient.ConstructObjectReference(
              projects[response - 1])
          entries['project_id'] = project_reference.projectId
    try:
      with open(bigqueryrc, 'w') as rcfile:
        for flag, value in entries.iteritems():
          print >>rcfile, '%s = %s' % (flag, value)
    except IOError, e:
      print 'Error writing %s: %s' % (bigqueryrc, e)
      return 1
    print 'BigQuery configuration complete! Type "bq" to get started.'
    print
    _ProcessBigqueryrc()
    # Destroy the client we created, so that any new client will
    # pick up new flag values.
    Client.Delete()
    return 0
class _Version(BigqueryCmd):
usage = """version"""
@staticmethod
def VersionNumber():
"""Return the version of bq."""
try:
import pkg_resources # pylint:disable-msg=C6204
version = pkg_resources.get_distribution('bigquery').version
return 'v%s' % (version,)
except ImportError:
return '<unknown>'
def RunWithArgs(self):
"""Return the version of bq."""
version = type(self).VersionNumber()
print 'This is BigQuery CLI %s' % (version,)
def main(argv):
  """Register bq commands with appcommands; auto-run 'init' if unconfigured."""
  try:
    # The local-webserver OAuth flow is not appropriate for a CLI tool.
    FLAGS.auth_local_webserver = False
    # Dispatch table: command name -> appcommands command class.
    bq_commands = {
        'load': _Load,
        'query': _Query,
        'extract': _Extract,
        'ls': _List,
        'rm': _Delete,
        'mk': _Make,
        'show': _Show,
        'head': _Head,
        'wait': _Wait,
        'cp': _Copy,
        'update': _Update,
        'version': _Version,
        'shell': _Repl,
        'init': _Init,
    }
    for command, function in bq_commands.iteritems():
      if command not in appcommands.GetCommandList():
        appcommands.AddCmd(command, function)
    # Run 'init' automatically before a real command when neither a
    # .bigqueryrc nor cached credentials exist (but never before
    # init/help/version themselves).
    if (not argv or
        (len(argv) > 1 and
         argv[1] not in ['init', 'help', 'version'] and
         argv[1] in appcommands.GetCommandList())):
      # Service Accounts don't use cached oauth credentials and
      # all bigqueryrc defaults are technically optional.
      if not FLAGS.service_account:
        if not (os.path.exists(_GetBigqueryRcFilename()) or
                os.path.exists(FLAGS.credential_file)):
          appcommands.GetCommandByName('init').Run([])
  except KeyboardInterrupt, e:
    print 'Control-C pressed, exiting.'
    sys.exit(1)
  except BaseException, e:  # pylint:disable-msg=W0703
    print 'Error initializing bq client: %s' % (e,)
    # Show the traceback in debug/headless mode; drop into pdb only
    # when a human is attached.
    if FLAGS.debug_mode or FLAGS.headless:
      traceback.print_exc()
    if not FLAGS.headless:
      pdb.post_mortem()
    sys.exit(1)
# pylint: disable-msg=C6409
def run_main():
  """Function to be used as setuptools script entry point.

  Appcommands assumes that it always runs as __main__, but launching
  via a setuptools-generated entry_point breaks this rule. We do some
  trickery here to make sure that appcommands and flags find their
  state where they expect to by faking ourselves as __main__.
  """
  # Put the flags for this module somewhere the flags module will look
  # for them.
  # pylint: disable-msg=W0212
  new_name = flags._GetMainModule()
  sys.modules[new_name] = sys.modules['__main__']
  for flag in FLAGS.FlagsByModuleDict().get(__name__, []):
    FLAGS._RegisterFlagByModule(new_name, flag)
  for key_flag in FLAGS.KeyFlagsByModuleDict().get(__name__, []):
    FLAGS._RegisterKeyFlagForModule(new_name, key_flag)
  # pylint: enable-msg=W0212
  # Now set __main__ appropriately so that appcommands will be
  # happy.
  sys.modules['__main__'] = sys.modules[__name__]
  appcommands.Run()
  # Restore the real __main__ once appcommands has finished.
  sys.modules['__main__'] = sys.modules.pop(new_name)
if __name__ == '__main__':
  # Direct execution: no entry-point shim needed, run appcommands as-is
  # (run_main() exists only for the setuptools entry point case).
  appcommands.Run()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for table_formatter.py."""
from google.apputils import googletest
import table_formatter
class TableFormatterTest(googletest.TestCase):
  """Shared tests run against every concrete formatter subclass.

  The base class itself builds no formatter; subclasses assign
  self.format_class before calling super().setUp().
  """

  def setUp(self):
    super(TableFormatterTest, self).setUp()
    if type(self) != TableFormatterTest:
      self.failUnless(hasattr(self, 'format_class'),
                      'Subclasses must provide self.format_class')
      self.formatter = self.format_class()
      # NOTE(review): 'kwdss' is assumed to be table_formatter.AddColumns'
      # per-column kwargs-list parameter -- verify spelling against that API.
      self.formatter.AddColumns(('foo', 'longer header'),
                                kwdss=[{}, {'align': 'r'}])
      self.formatter.AddRow(['a', 3])
      self.formatter.AddRow(['abc', 123])

  def testStr(self):
    # Subclasses must override this; the base has no formatter fixture.
    self.failIf(hasattr(self, 'format_class'),
                'Subclasses must override testStr')

  def testUnicodeRow(self):
    row = [11, 'chinese', u'你不能教老狗新把戏']
    if type(self) != TableFormatterTest:
      formatter = self.format_class()
      formatter.AddColumns(('count', 'language', 'message'))
      formatter.AddRow(row)
      # Note that we don't need any asserts here: the act of calling
      # Print will throw if unicode isn't being handled correctly.
      formatter.Print()
      formatter = self.format_class()
      formatter.AddColumns(('message',))
      formatter.AddRow(row[2:])
      formatter.Print()
      # str() output must be pure ASCII; unicode() must keep CJK chars.
      self.assertTrue(all(ord(c) <= 127 for c in str(formatter)))
      self.assertTrue(any(ord(c) > 127 for c in unicode(formatter)))
class PrettyFormatterTest(TableFormatterTest):
  """Tests for table_formatter.PrettyFormatter (boxed ASCII tables)."""

  def setUp(self):
    # Static method names are too long without abbreviations.
    self.PF = table_formatter.PrettyFormatter  # pylint:disable-msg=C6409
    self.format_class = self.PF
    super(PrettyFormatterTest, self).setUp()

  def testStr(self):
    # Golden rendering of the two-row fixture built in the base setUp.
    table_repr = '\n'.join((
        '+-----+---------------+',
        '| foo | longer header |',
        '+-----+---------------+',
        '| a | 3 |',
        '| abc | 123 |',
        '+-----+---------------+'))
    self.assertEquals(table_repr, str(self.formatter))

  def testCenteredPadding(self):
    self.assertEquals((1, 1), self.PF.CenteredPadding(8, 6))
    self.assertEquals((2, 1), self.PF.CenteredPadding(8, 5, left_justify=False))
    self.assertEquals((1, 2), self.PF.CenteredPadding(8, 5))
    # Interior wider than the enclosing width is an error.
    self.assertRaises(table_formatter.FormatterException,
                      self.PF.CenteredPadding, 1, 5)

  def testAbbreviate(self):
    self.assertEquals('', self.PF.Abbreviate('abc', 0))
    self.assertEquals('.', self.PF.Abbreviate('abc', 1))
    self.assertEquals('ab...', self.PF.Abbreviate('abcdef', 5))
    self.assertEquals('abcdef', self.PF.Abbreviate('abcdef', 6))
    self.assertEquals('abcdef', self.PF.Abbreviate('abcdef', 7))

  def testFormatCell(self):
    entry = 'abc'
    self.assertEquals(
        [' abc '], list(self.PF.FormatCell(entry, 3)))
    self.assertEquals(
        [' abc '], list(self.PF.FormatCell(entry, 5, align='l')))
    self.assertEquals(
        [' abc '], list(self.PF.FormatCell(entry, 5)))
    self.assertEquals(
        [' abc '], list(self.PF.FormatCell(entry, 5, align='r')))
    self.assertEquals(
        [' abc '], list(self.PF.FormatCell(entry, 6)))
    # Taller cells pad with blank lines below (default valign is top).
    lines = [
        ' abc ',
        ' ',
        ' ',
        ]
    self.assertEquals(lines, list(self.PF.FormatCell(entry, 6, cell_height=3)))
    lines.append(lines[-1])
    self.assertEquals(lines, list(self.PF.FormatCell(entry, 6, cell_height=4)))
    # valign='c' centers the two content lines within the four-line cell.
    lines = [
        ' ',
        ' abc... ',
        ' ab ',
        ' ',
        ]
    self.assertEquals(lines, list(self.PF.FormatCell(
        'abcdefghi\nab', 6, cell_height=4, align='l', valign='c')))
    lines = [
        ' abc... ',
        ' ab ',
        ' ',
        ' ',
        ]
    self.assertEquals(lines, list(self.PF.FormatCell(
        'abcdefghi\nab', 6, cell_height=4, align='l')))
    # valign='b' pushes content to the bottom of the cell.
    lines = [
        ' ',
        ' ',
        ' abc... ',
        ' ab ',
        ]
    self.assertEquals(lines, list(self.PF.FormatCell(
        'abcdefghi\nab', 6, cell_height=4, align='l', valign='b')))
    # Two content lines cannot fit in a height-1 cell.
    self.assertRaises(table_formatter.FormatterException,
                      self.PF.FormatCell, 'ab\na', 5)

  def testFormatRow(self):
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumns(('one', 'two'))
    formatter.AddRow(['a', 'b'])
    self.assertEquals(
        ['| a | b |'],
        list(formatter.FormatRow(formatter.rows[0], 1)))
    # Multiline entries span multiple output lines.
    formatter.AddRow(['a', 'b\nc'])
    self.assertEquals(
        ['| a | b |',
         '| | c |',
        ],
        list(formatter.FormatRow(formatter.rows[1], 2)))
    self.assertRaises(table_formatter.FormatterException,
                      formatter.FormatRow, formatter.rows[1], 1)
    formatter.AddRow(['a', '\nbbbbbb\nc'])
    self.assertEquals(
        ['| a | |',
         '| | bbbbbb |',
         '| | c |',
        ],
        list(formatter.FormatRow(formatter.rows[2], 3)))
    # Explicit column_widths force abbreviation of long entries.
    self.assertEquals(
        ['| a | |',
         '| | b... |',
         '| | c |',
        ],
        list(formatter.FormatRow(formatter.rows[2], 3, column_widths=[3, 4])))

  def testHeaderLines(self):
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumns(('a', 'b'))
    formatter.AddRow(['really long string', ''])
    self.assertEquals(
        ['| a | b |'],
        list(formatter.HeaderLines()))

  def testFormatHeader(self):
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumns(('a', 'bcd\nefgh'))
    formatter.AddRow(['really long string', ''])
    self.assertEquals(
        ['+--------------------+------+',
         '| a | bcd |',
         '| | efgh |',
         '+--------------------+------+'],
        list(formatter.FormatHeader()))

  def testAddRow(self):
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumns(('a', 'b'))
    formatter.AddRow(['foo', 'x'])
    self.assertEquals(1, len(formatter))
    self.assertEquals([3, 1], formatter.column_widths)
    self.assertEquals([1], formatter.row_heights)
    # Multiline entries grow row height; wide entries grow column width.
    formatter.AddRow(['foo\nbar', 'xxxxxxx'])
    self.assertEquals(2, len(formatter))
    self.assertEquals([3, 7], formatter.column_widths)
    self.assertEquals([1, 2], formatter.row_heights)
    # Check that we can add non-string entries.
    formatter.AddRow([3, {'a': 5}])

  def testAddColumn(self):
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumn('abc\ndef', align='r')
    self.assertEquals([3], formatter.column_widths)
    self.assertEquals(2, formatter.header_height)
    # Unknown alignment specifier.
    self.assertRaises(table_formatter.FormatterException,
                      formatter.AddColumn, 'bad', align='d')
    # Columns cannot be added once rows exist.
    formatter.AddRow([3])
    self.assertRaises(table_formatter.FormatterException,
                      formatter.AddColumn, 'anything')

  def testPrintEmptyTable(self):
    formatter = table_formatter.PrettyFormatter(skip_header_when_empty=False)
    formatter.AddColumns(('a', 'b'))
    table_repr = '\n'.join((
        '+---+---+',
        '| a | b |',
        '+---+---+',
        '+---+---+'))
    self.assertEquals(table_repr, str(formatter))
    # Default behavior matches skip_header_when_empty=False.
    formatter = table_formatter.PrettyFormatter()
    formatter.AddColumns(('a', 'b'))
    self.assertEquals(table_repr, str(formatter))
    formatter = table_formatter.PrettyFormatter(skip_header_when_empty=True)
    formatter.AddColumns(('a', 'b'))
    self.assertEquals('', str(formatter))
class SparsePrettyFormatterTest(TableFormatterTest):
  """Tests for table_formatter.SparsePrettyFormatter (no box borders)."""

  def setUp(self):
    self.format_class = table_formatter.SparsePrettyFormatter
    super(SparsePrettyFormatterTest, self).setUp()

  def testStr(self):
    # Golden rendering of the base-class fixture.
    table_repr = '\n'.join((
        ' foo longer header ',
        ' ----- --------------- ',
        ' a 3 ',
        ' abc 123 '))
    self.assertEquals(table_repr, str(self.formatter))

  def testFormatHeader(self):
    formatter = table_formatter.SparsePrettyFormatter()
    formatter.AddColumns(('a', 'bcd\nefgh'))
    formatter.AddRow(['really long string', ''])
    self.assertEquals(
        [' a bcd ',
         ' efgh ',
         ' -------------------- ------ '],
        list(formatter.FormatHeader()))

  def testPrintEmptyTable(self):
    formatter = table_formatter.SparsePrettyFormatter(
        skip_header_when_empty=False)
    formatter.AddColumns(('a', 'b'))
    table_repr = '\n'.join((
        ' a b ',
        ' --- --- '))
    self.assertEquals(table_repr, str(formatter))
    # Default behavior matches skip_header_when_empty=False.
    formatter = table_formatter.SparsePrettyFormatter()
    formatter.AddColumns(('a', 'b'))
    self.assertEquals(table_repr, str(formatter))
    formatter = table_formatter.SparsePrettyFormatter(
        skip_header_when_empty=True)
    formatter.AddColumns(('a', 'b'))
    self.assertEquals('', str(formatter))
class PrettyJsonFormatterTest(TableFormatterTest):
  """Tests for table_formatter.PrettyJsonFormatter (indented JSON)."""

  def setUp(self):
    self.format_class = table_formatter.PrettyJsonFormatter
    super(PrettyJsonFormatterTest, self).setUp()

  def testStr(self):
    # Golden indented-JSON rendering of the base-class fixture.
    table_repr = '\n'.join((
        '[',
        ' {',
        ' "foo": "a", ',
        ' "longer header": 3',
        ' }, ',
        ' {',
        ' "foo": "abc", ',
        ' "longer header": 123',
        ' }',
        ']'))
    self.assertEquals(table_repr, str(self.formatter))
class JsonFormatterTest(TableFormatterTest):
  """Tests for table_formatter.JsonFormatter (compact JSON)."""

  def setUp(self):
    self.format_class = table_formatter.JsonFormatter
    super(JsonFormatterTest, self).setUp()

  def testStr(self):
    # Compact rendering: no whitespace between tokens.
    table_repr = ('[{"longer header":3,"foo":"a"},'
                  '{"longer header":123,"foo":"abc"}]')
    self.assertEquals(table_repr, str(self.formatter))
class CsvFormatterTest(TableFormatterTest):
  """Tests for table_formatter.CsvFormatter."""

  def setUp(self):
    self.format_class = table_formatter.CsvFormatter
    super(CsvFormatterTest, self).setUp()

  def testStr(self):
    # Header row followed by the two data rows of the fixture.
    table_repr = '\n'.join((
        'foo,longer header',
        'a,3',
        'abc,123'))
    self.assertEquals(table_repr, str(self.formatter))
class NullFormatterTest(TableFormatterTest):
  """Tests for table_formatter.NullFormatter, which emits nothing."""

  def setUp(self):
    self.format_class = table_formatter.NullFormatter
    super(NullFormatterTest, self).setUp()

  def testStr(self):
    self.assertEquals('', str(self.formatter))

  def testUnicodeRow(self):
    # Overrides the base test: the null formatter renders nothing at all.
    self.assertEquals('', unicode(self.formatter))
if __name__ == '__main__':
  # Run all TestCase subclasses defined above.
  googletest.main()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import remove_pyreadline
import setuptools.command.easy_install as easy_install
import setuptools.package_index
import shutil
import sys
EASY_INSTALL_PTH_FILENAME = 'easy-install.pth'
BACKUP_SUFFIX = '.old'
def locate_package(name):
  """Return the installed distribution for name, or None.

  Args:
    name: (str) The distribution (project) name to look up.

  Returns:
    A pkg_resources Distribution for name, or None if not installed.
  """
  import pkg_resources
  try:
    # Fix: get_distribution is a pkg_resources API; the previous call via
    # setuptools.package_index relied on a non-public re-export.
    pkg = pkg_resources.get_distribution(name)
  except pkg_resources.DistributionNotFound:
    pkg = None
  return pkg
def find_package_consumers(name, deps_to_ignore=None):
installed_packages = list(setuptools.package_index.AvailableDistributions())
if deps_to_ignore is None:
deps_to_ignore = []
consumers = []
for package_name in installed_packages:
if name == package_name:
continue
package_info = setuptools.package_index.get_distribution(package_name)
if package_name in deps_to_ignore:
continue
for req in package_info.requires():
if req.project_name == name:
consumers.append(package_name)
break
return consumers
def remove_package(pkg):
  """Remove an installed distribution from site-packages.

  Backs up easy-install.pth, drops the package's entry from it, then
  deletes the egg directory or file itself.

  Args:
    pkg: A distribution object (as returned by locate_package) whose
      .location points at the installed egg.
  """
  # Fix: the egg basename from os.path.split was never used; dirname
  # expresses the intent directly.
  site_packages_dir = os.path.dirname(pkg.location)
  easy_install_pth_filename = os.path.join(site_packages_dir,
                                           EASY_INSTALL_PTH_FILENAME)
  # Keep a backup of the .pth file in case anything goes wrong below.
  backup_filename = easy_install_pth_filename + BACKUP_SUFFIX
  shutil.copy2(easy_install_pth_filename, backup_filename)
  pth_file = easy_install.PthDistributions(easy_install_pth_filename)
  pth_file.remove(pkg)
  pth_file.save()
  # Eggs may be unpacked directories or single zip files.
  if os.path.isdir(pkg.location):
    shutil.rmtree(pkg.location)
  else:
    os.unlink(pkg.location)
def y_or_n_p(prompt):
  """Prompt until the user answers 'y' or 'n'; return that answer."""
  answer = raw_input('%s (y/n) ' % (prompt,)).strip().lower()
  while answer not in ('y', 'n'):
    answer = raw_input('    Please answer y or n: ').strip().lower()
  return answer
def delete_pyreadline():
pkg = locate_package('pyreadline')
if pkg is None:
print "pyreadline not found, exiting."
return
consumers = find_package_consumers('pyreadline')
if consumers:
print 'pyreadline is a dependency of all the following packages:'
for p in consumers:
print ' %s' % (p,)
print
else:
print 'pyreadline is not a dependency of any installed packages.'
print
response = y_or_n_p('Continue and uninstall pyreadline?')
if response == 'n':
print 'Aborting uninstall of pyreadline.'
return
remove_package(pkg)
print 'pyreadline successfully uninstalled!'
def run_main():
print 'This script will attempt to remove pyreadline from your system.'
print
if platform.system() == 'Windows':
print
print '*** WARNING ***'
print 'This is a Windows system, and removal of pyreadline on a Windows'
print 'system is NOT recommended.'
response = y_or_n_p('Are you SURE you want to proceed?')
if response == 'n':
print 'Exiting.'
exit(0)
delete_pyreadline()
if __name__ == '__main__':
  # Allow running this module directly as a script.
  run_main()
| Python |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Bigquery Client library for Python."""
import abc
import datetime
import hashlib
import itertools
import json
import logging
import os
import pkgutil
import random
import re
import string
import sys
import textwrap
import time
import apiclient
from apiclient import discovery
from apiclient import http as http_request
from apiclient import model
import httplib2
# To configure apiclient logging.
import gflags as flags
# A unique non-None default, for use in kwargs that need to
# distinguish default from None.
_DEFAULT = object()
def _Typecheck(obj, types, message=None, method=None):
if not isinstance(obj, types):
if not message:
if method:
message = 'Invalid reference for %s: %r' % (method, obj)
else:
message = 'Type of %r is not one of %s' % (obj, types)
raise TypeError(message)
def _ToLowerCamel(name):
"""Convert a name with underscores to camelcase."""
return re.sub('_[a-z]', lambda match: match.group(0)[1].upper(), name)
def _ToFilename(url):
"""Converts a url to a filename."""
return ''.join([c for c in url if c in string.ascii_lowercase])
def _ApplyParameters(config, **kwds):
  """Adds all kwds to config dict, adjusting keys to camelcase.

  Note this does not remove entries that are set to None, however.

  Args:
    config: A configuration dict, updated in place.
    **kwds: Keys and values to set in the config; keys are converted
      from snake_case to lowerCamelCase, and None values are skipped.
  """
  config.update((_ToLowerCamel(k), v) for k, v in kwds.iteritems()
                if v is not None)
def ConfigurePythonLogger(apilog=None):
  """Sets up Python logger, which BigqueryClient logs with.

  Applications can configure logging however they want, but this
  captures one pattern of logging which seems useful when dealing with
  a single command line option for determining logging.

  Args:
    apilog: To log to sys.stdout, specify '', '-', '1', 'true', or
      'stdout'. To log to sys.stderr, specify 'stderr'. To log to a
      file, specify the file path. Specify None to disable logging.
  """
  if apilog is None:
    # Effectively turn off logging.
    logging.disable(logging.CRITICAL)
  else:
    if apilog in ('', '-', '1', 'true', 'stdout'):
      logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    elif apilog == 'stderr':
      logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    elif apilog:
      # Any other truthy value is treated as a file path.
      logging.basicConfig(filename=apilog, level=logging.INFO)
    else:
      # Only reachable for falsy non-None, non-'' values (e.g. 0);
      # falls back to logging's default stderr handler.
      logging.basicConfig(level=logging.INFO)
    # Turn on apiclient logging of http requests and responses.
    flags.FLAGS.dump_request_response = True
class BigqueryError(Exception):
  """Root of the BigQuery client exception hierarchy."""

  @staticmethod
  def Create(error, server_error, error_ls, job_ref=None):
    """Returns a BigqueryError for json error embedded in server_error.

    If error_ls contains any errors other than the given one, those
    are also included in the returned message.

    Args:
      error: The primary error to convert.
      server_error: The error returned by the server. (This is only used
        in the case that error is malformed.)
      error_ls: Additional errors to include in the error message.
      job_ref: JobReference, if this is an error associated with a job.

    Returns:
      BigqueryError representing error.
    """
    reason = error.get('reason')
    if job_ref:
      message = 'Error processing %r: %s' % (job_ref, error.get('message'))
    else:
      message = error.get('message')
    # We don't want to repeat the "main" error message.
    secondary_errors = [err for err in error_ls if err != error]
    if secondary_errors:
      message += '\nFailure details:\n'
      message += '\n'.join(
          textwrap.fill(err.get('message', ''),
                        initial_indent=' - ',
                        subsequent_indent=' ')
          for err in secondary_errors)
    if not reason or not message:
      return BigqueryInterfaceError(
          'Error reported by server with missing error fields. '
          'Server returned: %s' % (str(server_error),))
    # Well-known reasons map to specific exception classes; anything
    # else becomes the generic BigqueryServiceError.
    error_class_for_reason = {
        'notFound': BigqueryNotFoundError,
        'duplicate': BigqueryDuplicateError,
        'accessDenied': BigqueryAccessDeniedError,
        'invalidQuery': BigqueryInvalidQueryError,
        'termsOfServiceNotAccepted': BigqueryTermsOfServiceError,
        'backendError': BigqueryBackendError,
    }
    error_class = error_class_for_reason.get(reason, BigqueryServiceError)
    return error_class(message, error, error_ls, job_ref=job_ref)
# Concrete error hierarchy: communication/interface errors on one side,
# server-reported service errors (created via BigqueryError.Create) and
# client-usage errors on the other.


class BigqueryCommunicationError(BigqueryError):
  """Error communicating with the server."""
  pass


class BigqueryInterfaceError(BigqueryError):
  """Response from server missing required fields."""
  pass


class BigqueryServiceError(BigqueryError):
  """Base class of Bigquery-specific error responses.

  The BigQuery server received request and returned an error.
  """

  def __init__(self, message, error, error_list, job_ref=None,
               *args, **kwds):
    """Initializes a BigqueryServiceError.

    Args:
      message: A user-facing error message.
      error: The error dictionary, code may inspect the 'reason' key.
      error_list: A list of additional entries, for example a load job
        may contain multiple errors here for each error encountered
        during processing.
      job_ref: Optional JobReference, if this error was encountered
        while processing a job.
    """
    super(BigqueryServiceError, self).__init__(message, *args, **kwds)
    self.error = error
    self.error_list = error_list
    self.job_ref = job_ref

  def __repr__(self):
    return '%s: error=%s, error_list=%s, job_ref=%s' % (
        self.__class__.__name__, self.error, self.error_list, self.job_ref)


class BigqueryNotFoundError(BigqueryServiceError):
  """The requested resource or identifier was not found."""
  pass


class BigqueryDuplicateError(BigqueryServiceError):
  """The requested resource or identifier already exists."""
  pass


class BigqueryAccessDeniedError(BigqueryServiceError):
  """The user does not have access to the requested resource."""
  pass


class BigqueryInvalidQueryError(BigqueryServiceError):
  """The SQL statement is invalid."""
  pass


class BigqueryTermsOfServiceError(BigqueryAccessDeniedError):
  """User has not ACK'd ToS."""
  pass


class BigqueryBackendError(BigqueryServiceError):
  """A backend error typically corresponding to retriable HTTP 503 failures."""
  pass


class BigqueryClientError(BigqueryError):
  """Invalid use of BigqueryClient."""
  pass


class BigqueryClientConfigurationError(BigqueryClientError):
  """Invalid configuration of BigqueryClient."""
  pass


class BigquerySchemaError(BigqueryClientError):
  """Error in locating or parsing the schema."""
  pass
class BigqueryModel(model.JsonModel):
  """Adds optional global parameters to all requests."""

  def __init__(self, trace=None, **kwds):
    super(BigqueryModel, self).__init__(**kwds)
    # Optional tracing token forwarded with every outgoing request.
    self.trace = trace

  # pylint:disable-msg=C6409
  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing request."""
    # Attach the trace token unless the caller already supplied one.
    if self.trace and 'trace' not in query_params:
      query_params['trace'] = self.trace
    return super(BigqueryModel, self).request(
        headers, path_params, query_params, body_value)
  # pylint:enable-msg=C6409
class BigqueryHttp(http_request.HttpRequest):
  """Converts errors into Bigquery errors."""

  def __init__(self, bigquery_model, *args, **kwds):
    super(BigqueryHttp, self).__init__(*args, **kwds)
    # Kept so execute() can log error responses through the model.
    self._model = bigquery_model

  @staticmethod
  def Factory(bigquery_model):
    """Returns a function that creates a BigqueryHttp with the given model."""

    def _Construct(*args, **kwds):
      # Capture the model in the closure for each request object built.
      captured_model = bigquery_model
      return BigqueryHttp(captured_model, *args, **kwds)

    return _Construct

  def execute(self, **kwds):  # pylint:disable-msg=C6409
    # Translates apiclient HttpErrors: JSON payloads become Bigquery
    # errors (via BigqueryClient.RaiseError), anything else becomes a
    # BigqueryCommunicationError.
    try:
      return super(BigqueryHttp, self).execute(**kwds)
    except apiclient.errors.HttpError, e:
      # TODO(user): Remove this when apiclient supports logging
      # of error responses.
      self._model._log_response(e.resp, e.content)  # pylint:disable-msg=W0212
      if e.resp.get('content-type', '').startswith('application/json'):
        BigqueryClient.RaiseError(json.loads(e.content))
      else:
        raise BigqueryCommunicationError(
            ('Could not connect with BigQuery server.\n'
             'Http response status: %s\n'
             'Http response content:\n%s') % (
                 e.resp.get('status', '(unexpected)'), e.content))
class JobIdGenerator(object):
  """Base class for job id generators."""
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def Generate(self, job_configuration):
    """Generates a job_id to use for job_configuration."""


class JobIdGeneratorNone(JobIdGenerator):
  """Job id generator that returns None, letting the server pick the job id."""

  def Generate(self, unused_config):
    return None


class JobIdGeneratorRandom(JobIdGenerator):
  """Generates random job ids."""

  def Generate(self, unused_config):
    # Ids look like 'bqjob_r<random>_<millis>'.
    # NOTE(review): sys.maxint is Python 2 only.
    return 'bqjob_r%08x_%016x' % (random.randint(0, sys.maxint),
                                  int(time.time() * 1000))


class JobIdGeneratorFingerprint(JobIdGenerator):
  """Generates job ids that uniquely match the job config."""

  def _Hash(self, config, sha1):
    """Computes the sha1 hash of a dict.

    The sha1 object is updated in place; nothing is returned.
    NOTE(review): list elements are recursed into via _Hash and so appear
    to be assumed dict-like -- confirm scalar list entries never occur.
    """
    keys = config.keys()
    # Python dict enumeration ordering is random. Sort the keys
    # so that we will visit them in a stable order.
    keys.sort()
    for key in keys:
      sha1.update('%s' % (key,))
      v = config[key]
      if isinstance(v, dict):
        logging.info('Hashing: %s...', key)
        self._Hash(v, sha1)
      elif isinstance(v, list):
        logging.info('Hashing: %s ...', key)
        for inner_v in v:
          self._Hash(inner_v, sha1)
      else:
        logging.info('Hashing: %s:%s', key, v)
        sha1.update('%s' % (v,))

  def Generate(self, config):
    s1 = hashlib.sha1()
    self._Hash(config, s1)
    job_id = 'bqjob_c%s' % (s1.hexdigest(),)
    logging.info('Fingerprinting: %s:\n%s', config, job_id)
    return job_id


class JobIdGeneratorIncrementing(JobIdGenerator):
  """Generates job ids that increment each time we're asked."""

  def __init__(self, inner):
    # Wraps another generator and appends a retry ordinal to its output.
    self._inner = inner
    self._retry = 0

  def Generate(self, config):
    self._retry += 1
    return '%s_%d' % (self._inner.Generate(config), self._retry)
class BigqueryClient(object):
"""Class encapsulating interaction with the BigQuery service."""
  def __init__(self, **kwds):
    """Initializes BigqueryClient.

    Required keywords:
      api: the api to connect to, for example "bigquery".
      api_version: the version of the api to connect to, for example "v2".

    Optional keywords:
      project_id: a default project id to use. While not required for
        initialization, a project_id is required when calling any
        method that creates a job on the server. Methods that have
        this requirement pass through **kwds, and will raise
        BigqueryClientConfigurationError if no project_id can be
        found.
      dataset_id: a default dataset id to use.
      discovery_document: the discovery document to use. If None, one
        will be retrieved from the discovery api. If not specified,
        the built-in discovery document will be used.
      job_property: a list of "key=value" strings defining properties
        to apply to all job operations.
      trace: a tracing header to include in all bigquery api requests.
      sync: boolean, when inserting jobs, whether to wait for them to
        complete before returning from the insert request.
      wait_printer_factory: a function that returns a WaitPrinter.
        This will be called for each job that we wait on. See WaitJob().

    Raises:
      ValueError: if keywords are missing or incorrectly specified.
    """
    super(BigqueryClient, self).__init__()
    # Every keyword becomes an attribute; unknown keywords are accepted.
    for key, value in kwds.iteritems():
      setattr(self, key, value)
    self._apiclient = None
    for required_flag in ('api', 'api_version'):
      if required_flag not in kwds:
        raise ValueError('Missing required flag: %s' % (required_flag,))
    default_flag_values = {
        'project_id': '',
        'dataset_id': '',
        'discovery_document': _DEFAULT,
        'job_property': '',
        'trace': None,
        'sync': True,
        'wait_printer_factory': BigqueryClient.TransitionWaitPrinter,
        'job_id_generator': JobIdGeneratorIncrementing(JobIdGeneratorRandom()),
    }
    # Fill in defaults only for attributes the caller did not supply.
    for flagname, default in default_flag_values.iteritems():
      if not hasattr(self, flagname):
        setattr(self, flagname, default)
    if self.dataset_id and not self.project_id:
      raise ValueError('Cannot set dataset_id without project_id')
def GetHttp(self):
"""Returns the httplib2 Http to use."""
http = httplib2.Http()
return http
def GetDiscoveryUrl(self):
"""Returns the url to the discovery document for bigquery."""
discovery_url = self.api + '/discovery/v1/apis/{api}/{apiVersion}/rest'
return discovery_url
  @property
  def apiclient(self):
    """Return the apiclient attached to self."""
    if self._apiclient is None:
      # Lazily construct and cache the discovery-based client on first use.
      http = self.credentials.authorize(self.GetHttp())
      bigquery_model = BigqueryModel(self.trace)
      bigquery_http = BigqueryHttp.Factory(
          bigquery_model)
      discovery_document = self.discovery_document
      if discovery_document == _DEFAULT:
        # Use the api description packed with this client, if one exists.
        try:
          discovery_document = pkgutil.get_data(
              'bigquery_client', 'discovery/%s.bigquery.%s.rest.json'
              % (_ToFilename(self.api), self.api_version))
        except IOError:
          discovery_document = None
      if discovery_document is None:
        # No packaged document available: fetch one from the discovery api.
        try:
          self._apiclient = discovery.build(
              'bigquery', self.api_version, http=http,
              discoveryServiceUrl=self.GetDiscoveryUrl(),
              model=bigquery_model,
              requestBuilder=bigquery_http)
        except (httplib2.ServerNotFoundError, apiclient.errors.HttpError), e:
          # We can't find the specified server.
          raise BigqueryCommunicationError(
              'Cannot contact server. Please try again.\nError: %s'
              '\nContent: %s' % (str(e), e.content))
        except apiclient.errors.UnknownApiNameOrVersion, e:
          # We can't resolve the discovery url for the given server.
          raise BigqueryCommunicationError(
              'Invalid API name or version: %s' % (str(e),))
      else:
        self._apiclient = discovery.build_from_document(
            discovery_document, http=http,
            model=bigquery_model,
            requestBuilder=bigquery_http)
    return self._apiclient
#################################
## Utility methods
#################################
@staticmethod
def FormatTime(secs):
return time.strftime('%d %b %H:%M:%S', time.localtime(secs))
@staticmethod
def FormatAcl(acl):
"""Format a server-returned ACL for printing."""
acl_entries = {
'OWNER': [],
'WRITER': [],
'READER': [],
}
for entry in acl:
entry = entry.copy()
role = entry.pop('role', '')
if not role or len(entry.values()) != 1:
raise BigqueryServiceError(
'Invalid ACL returned by server: %s' % (acl,))
for key, value in entry.iteritems():
# TODO(user): Remove this if once we've switched
# to v2.
if key == 'allAuthenticatedUsers':
acl_entries[role].append(key)
else:
acl_entries[role].append(value)
result_lines = []
if acl_entries['OWNER']:
result_lines.extend([
'Owners:', ',\n'.join(' %s' % (o,) for o in acl_entries['OWNER'])])
if acl_entries['WRITER']:
result_lines.extend([
'Writers:', ',\n'.join(' %s' % (o,) for o in acl_entries['WRITER'])])
if acl_entries['READER']:
result_lines.extend([
'Readers:', ',\n'.join(' %s' % (o,) for o in acl_entries['READER'])])
return '\n'.join(result_lines)
@staticmethod
def FormatSchema(schema):
"""Format a schema for printing."""
def PrintFields(fields, indent=0):
"""Print all fields in a schema, recurring as necessary."""
lines = []
for field in fields:
prefix = '| ' * indent
junction = '|' if field.get('type', 'STRING') != 'RECORD' else '+'
entry = '%s- %s: %s' % (
junction, field['name'], field.get('type', 'STRING').lower())
if field.get('mode', 'NULLABLE') != 'NULLABLE':
entry += ' (%s)' % (field['mode'].lower(),)
lines.append(prefix + entry)
if 'fields' in field:
lines.extend(PrintFields(field['fields'], indent + 1))
return lines
return '\n'.join(PrintFields(schema.get('fields', [])))
  @staticmethod
  def NormalizeWait(wait):
    # Coerce wait to an int, replacing the generic conversion error with a
    # message that names the offending value.
    try:
      return int(wait)
    except ValueError:
      raise ValueError('Invalid value for wait: %s' % (wait,))
@staticmethod
def ValidatePrintFormat(print_format):
if print_format not in ['show', 'list']:
raise ValueError('Unknown format: %s' % (print_format,))
@staticmethod
def _ParseIdentifier(identifier):
"""Parses identifier into a tuple of (possibly empty) identifiers.
This will parse the identifier into a tuple of the form
(project_id, dataset_id, table_id) without doing any validation on
the resulting names; missing names are returned as ''. The
interpretation of these identifiers depends on the context of the
caller. For example, if you know the identifier must be a job_id,
then you can assume dataset_id is the job_id.
Args:
identifier: string, identifier to parse
Returns:
project_id, dataset_id, table_id: (string, string, string)
"""
# We need to handle the case of a lone project identifier of the
# form domain.com:proj separately.
if re.search('^\w[\w.]*\.[\w.]+:\w[\w\d_-]*:?$', identifier):
return identifier, '', ''
project_id, _, dataset_and_table_id = identifier.rpartition(':')
if project_id:
dataset_id, _, table_id = dataset_and_table_id.partition('.')
else:
dataset_id, _, table_id = dataset_and_table_id.rpartition('.')
return project_id, dataset_id, table_id
  def GetProjectReference(self, identifier=''):
    """Determine a project reference from an identifier and self.

    Args:
      identifier: string, a project id, or '' to use self.project_id.

    Returns:
      An ApiClientHelper.ProjectReference.

    Raises:
      BigqueryClientError: if no project can be determined.
    """
    project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
        identifier)
    try:
      # ParseIdentifier('foo') is just a table_id, but we want to read
      # it as a project_id.
      project_id = project_id or table_id or self.project_id
      if not dataset_id and project_id:
        return ApiClientHelper.ProjectReference.Create(projectId=project_id)
    except ValueError:
      # Create() rejected the id; fall through to the error below.
      pass
    raise BigqueryClientError('Cannot determine project described by %s' % (
        identifier,))
  def GetDatasetReference(self, identifier=''):
    """Determine a DatasetReference from an identifier and self.

    Args:
      identifier: string, of the form 'dataset', 'project:dataset', or ''.

    Returns:
      An ApiClientHelper.DatasetReference.

    Raises:
      BigqueryError: if no dataset can be determined.
    """
    project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
        identifier)
    if table_id and not project_id and not dataset_id:
      # identifier is 'foo'
      project_id = self.project_id
      dataset_id = table_id
    elif project_id and dataset_id and not table_id:
      # identifier is 'foo:bar'
      pass
    elif not identifier:
      # identifier is ''
      project_id = self.project_id
      dataset_id = self.dataset_id
    else:
      raise BigqueryError('Cannot determine dataset described by %s' % (
          identifier,))
    try:
      return ApiClientHelper.DatasetReference.Create(
          projectId=project_id, datasetId=dataset_id)
    except ValueError:
      raise BigqueryError('Cannot determine dataset described by %s' % (
          identifier,))
def GetTableReference(self, identifier=''):
"""Determine a TableReference from an identifier and self."""
project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
identifier)
try:
return ApiClientHelper.TableReference.Create(
projectId=project_id or self.project_id,
datasetId=dataset_id or self.dataset_id,
tableId=table_id,
)
except ValueError:
raise BigqueryError('Cannot determine table described by %s' % (
identifier,))
def GetReference(self, identifier=''):
"""Try to deduce a project/dataset/table reference from a string.
If the identifier is not compound, treat it as the most specific
identifier we don't have as a flag, or as the table_id. If it is
compound, fill in any unspecified part.
Args:
identifier: string, Identifier to create a reference for.
Returns:
A valid ProjectReference, DatasetReference, or TableReference.
Raises:
BigqueryError: if no valid reference can be determined.
"""
try:
return self.GetTableReference(identifier)
except BigqueryError:
pass
try:
return self.GetDatasetReference(identifier)
except BigqueryError:
pass
try:
return self.GetProjectReference(identifier)
except BigqueryError:
pass
raise BigqueryError('Cannot determine reference for "%s"' % (identifier,))
# TODO(user): consider introducing job-specific and possibly
# dataset- and project-specific parsers for the case of knowing what
# type we are looking for. Reinterpreting "dataset_id" as "job_id"
# is rather confusing.
  def GetJobReference(self, identifier=''):
    """Determine a JobReference from an identifier and self.

    Args:
      identifier: string, either 'job_id' or 'project:job_id'.

    Returns:
      An ApiClientHelper.JobReference.

    Raises:
      BigqueryError: if no job can be determined.
    """
    project_id, dataset_id, table_id = BigqueryClient._ParseIdentifier(
        identifier)
    if table_id and not project_id and not dataset_id:
      # identifier is 'foo'
      project_id = self.project_id
      job_id = table_id
    elif project_id and dataset_id and not table_id:
      # identifier is 'foo:bar'
      job_id = dataset_id
    else:
      job_id = None
    if job_id:
      try:
        return ApiClientHelper.JobReference.Create(
            projectId=project_id, jobId=job_id)
      except ValueError:
        # Create() rejected the ids; fall through to the error below.
        pass
    raise BigqueryError('Cannot determine job described by %s' % (
        identifier,))
  def GetObjectInfo(self, reference):
    """Get all data returned by the server about a specific object.

    Args:
      reference: a Project/Job/Dataset/Table reference.

    Returns:
      The server's resource dict for the referenced object.

    Raises:
      BigqueryNotFoundError: if a ProjectReference cannot be found in the
        project list.
      TypeError: if reference is not one of the supported reference types.
    """
    # Projects are handled separately, because we only have
    # bigquery.projects.list.
    if isinstance(reference, ApiClientHelper.ProjectReference):
      projects = self.ListProjects()
      for project in projects:
        if BigqueryClient.ConstructObjectReference(project) == reference:
          project['kind'] = 'bigquery#project'
          return project
      raise BigqueryNotFoundError('Unknown %r' % (reference,))
    if isinstance(reference, ApiClientHelper.JobReference):
      return self.apiclient.jobs().get(**dict(reference)).execute()
    elif isinstance(reference, ApiClientHelper.DatasetReference):
      return self.apiclient.datasets().get(**dict(reference)).execute()
    elif isinstance(reference, ApiClientHelper.TableReference):
      return self.apiclient.tables().get(**dict(reference)).execute()
    else:
      raise TypeError('Type of reference must be one of: ProjectReference, '
                      'JobReference, DatasetReference, or TableReference')
def GetTableSchema(self, table_dict):
table_info = self.apiclient.tables().get(**table_dict).execute()
return table_info.get('schema', {})
  def ReadTableRows(self, table_dict, max_rows=sys.maxint):
    """Read at most max_rows rows from a table.

    Args:
      table_dict: table reference dictionary (projectId/datasetId/tableId).
      max_rows: maximum number of rows to return; defaults to "all".

    Returns:
      A list of rows; each row is a list of cell values.

    Raises:
      BigqueryInterfaceError: if the server response is inconsistent with
        its own advertised row count.
    """
    page_token = None
    rows = []
    while len(rows) < max_rows:
      data = self.apiclient.tabledata().list(
          maxResults=max_rows - len(rows),
          pageToken=page_token,
          **table_dict).execute()
      page_token = data.get('pageToken', None)
      # Clamp max_rows to the table's actual size so the loop terminates.
      max_rows = min(max_rows, int(data['totalRows']))
      more_rows = data.get('rows', [])
      for row in more_rows:
        rows.append([entry.get('v', '') for entry in row.get('f', [])])
      # Sanity checks: if we are not done, the server must have supplied
      # both a continuation token and some data to continue with.
      if not page_token and len(rows) != max_rows:
        raise BigqueryInterfaceError(
            'PageToken missing for %r' % (
                ApiClientHelper.TableReference.Create(**table_dict),))
      if not more_rows and len(rows) != max_rows:
        raise BigqueryInterfaceError(
            'Not enough rows returned by server for %r' % (
                ApiClientHelper.TableReference.Create(**table_dict),))
    return rows
def ReadSchemaAndRows(self, table_dict, max_rows=sys.maxint):
"""Convenience method to get the schema and rows from a table.
Arguments:
table_dict: table reference dictionary.
max_rows: number of rows to read.
Returns:
A tuple where the first item is the list of fields and the
second item a list of rows.
"""
return (self.GetTableSchema(table_dict).get('fields', []),
self.ReadTableRows(table_dict, max_rows))
  @staticmethod
  def ConfigureFormatter(formatter, reference_type, print_format='list'):
    """Configure a formatter for a given reference type.

    If print_format is 'show', configures the formatter with several
    additional fields (useful for printing a single record).

    Arguments:
      formatter: TableFormatter object to configure.
      reference_type: Type of object this formatter will be used with.
      print_format: Either 'show' or 'list' to control what fields are
        included.

    Raises:
      ValueError: If reference_type or format is unknown.
    """
    # NOTE: the sequence of AddColumns calls below determines the order in
    # which columns are displayed, so the call ordering is deliberate.
    BigqueryClient.ValidatePrintFormat(print_format)
    if reference_type == ApiClientHelper.JobReference:
      if print_format == 'list':
        formatter.AddColumns(('jobId',))
      formatter.AddColumns(
          ('Job Type', 'State', 'Start Time', 'Duration',))
      if print_format == 'show':
        formatter.AddColumns(('Bytes Processed',))
    elif reference_type == ApiClientHelper.ProjectReference:
      if print_format == 'list':
        formatter.AddColumns(('projectId',))
      formatter.AddColumns(('friendlyName',))
    elif reference_type == ApiClientHelper.DatasetReference:
      if print_format == 'list':
        formatter.AddColumns(('datasetId',))
      if print_format == 'show':
        formatter.AddColumns(('Last modified', 'ACLs',))
    elif reference_type == ApiClientHelper.TableReference:
      if print_format == 'list':
        formatter.AddColumns(('tableId',))
      if print_format == 'show':
        formatter.AddColumns(('Last modified', 'Schema',
                              'Total Rows', 'Total Bytes',
                              'Expiration'))
    else:
      raise ValueError('Unknown reference type: %s' % (
          reference_type.__name__,))
@staticmethod
def RaiseError(result):
"""Raises an appropriate BigQuery error given the json error result."""
error = result.get('error', {}).get('errors', [{}])[0]
raise BigqueryError.Create(error, result, [])
@staticmethod
def IsFailedJob(job):
"""Predicate to determine whether or not a job failed."""
return 'errorResult' in job.get('status', {})
@staticmethod
def RaiseIfJobError(job):
"""Raises a BigQueryError if the job is in an error state.
Args:
job: a Job resource.
Returns:
job, if it is not in an error state.
Raises:
BigqueryError: A BigqueryError instance based on the job's error
description.
"""
if BigqueryClient.IsFailedJob(job):
error = job['status']['errorResult']
error_ls = job['status'].get('errors', [])
raise BigqueryError.Create(
error, error, error_ls,
job_ref=BigqueryClient.ConstructObjectReference(job))
return job
@staticmethod
def GetJobTypeName(job_info):
"""Helper for job printing code."""
job_names = set(('extract', 'load', 'query', 'copy'))
try:
return set(job_info.get('configuration', {}).keys()).intersection(
job_names).pop()
except KeyError:
return None
@staticmethod
def ProcessSources(source_string):
"""Take a source string and return a list of URIs.
The list will consist of either a single local filename, which
we check exists and is a file, or a list of gs:// uris.
Args:
source_string: A comma-separated list of URIs.
Returns:
List of one or more valid URIs, as strings.
Raises:
BigqueryClientError: if no valid list of sources can be determined.
"""
sources = [source.strip() for source in source_string.split(',')]
gs_uris = [source for source in sources if source.startswith('gs://')]
if not sources:
raise BigqueryClientError('No sources specified')
if gs_uris:
if len(gs_uris) != len(sources):
raise BigqueryClientError('All URIs must begin with "gs://" if any do.')
return sources
else:
source = sources[0]
if len(sources) > 1:
raise BigqueryClientError(
'Local upload currently supports only one file, found %d' % (
len(sources),))
if not os.path.exists(source):
raise BigqueryClientError('Source file not found: %s' % (source,))
if not os.path.isfile(source):
raise BigqueryClientError('Source path is not a file: %s' % (source,))
return sources
  @staticmethod
  def ReadSchema(schema):
    """Create a schema from a string or a filename.

    If schema does not contain ':' and is the name of an existing
    file, read it as a JSON schema. If not, it must be a
    comma-separated list of fields in the form name:type.

    Args:
      schema: A filename or schema.

    Returns:
      The new schema (as a dict).

    Raises:
      BigquerySchemaError:
        If the schema is invalid or the filename does not exist.
    """
    def NewField(entry):
      # Each entry is 'name[:type]'; the type defaults to STRING.
      name, _, field_type = entry.partition(':')
      if entry.count(':') > 1 or not name.strip():
        raise BigquerySchemaError('Invalid schema entry: %s' % (entry,))
      return {
          'name': name.strip(),
          'type': field_type.strip().upper() or 'STRING',
          }
    if not schema:
      raise BigquerySchemaError('Schema cannot be empty')
    elif os.path.exists(schema):
      with open(schema) as f:
        try:
          return json.load(f)
        except ValueError, e:
          raise BigquerySchemaError(
              ('Error decoding JSON schema from file %s: %s\n'
               'To specify a one-column schema, use "name:string".') % (
                   schema, e))
    elif re.match(r'[./\\]', schema) is not None:
      # We have something that looks like a filename, but we didn't
      # find it. Tell the user about the problem now, rather than wait
      # for a round-trip to the server.
      raise BigquerySchemaError(
          ('Error reading schema: "%s" looks like a filename, '
           'but was not found.') % (schema,))
    else:
      return [NewField(entry) for entry in schema.split(',')]
@staticmethod
def _KindToName(kind):
"""Convert a kind to just a type name."""
return kind.partition('#')[2]
@staticmethod
def FormatInfoByKind(object_info):
"""Format a single object_info (based on its 'kind' attribute)."""
kind = BigqueryClient._KindToName(object_info.get('kind'))
if kind == 'job':
return BigqueryClient.FormatJobInfo(object_info)
elif kind == 'project':
return BigqueryClient.FormatProjectInfo(object_info)
elif kind == 'dataset':
return BigqueryClient.FormatDatasetInfo(object_info)
elif kind == 'table':
return BigqueryClient.FormatTableInfo(object_info)
else:
raise ValueError('Unknown object type: %s' % (kind,))
  @staticmethod
  def FormatJobInfo(job_info):
    """Prepare a job_info for printing.

    Arguments:
      job_info: Job dict to format.

    Returns:
      The new job_info.
    """
    result = job_info.copy()
    reference = BigqueryClient.ConstructObjectReference(result)
    result.update(dict(reference))
    if 'startTime' in result.get('statistics', {}):
      # Server timestamps are in milliseconds; Python 2 integer division
      # truncates to whole seconds here.
      start = int(result['statistics']['startTime']) / 1000
      if 'endTime' in result['statistics']:
        duration_seconds = int(result['statistics']['endTime']) / 1000 - start
        result['Duration'] = str(datetime.timedelta(seconds=duration_seconds))
      result['Start Time'] = BigqueryClient.FormatTime(start)
    result['Job Type'] = BigqueryClient.GetJobTypeName(result)
    result['State'] = result['status']['state']
    if result['State'] == 'DONE':
      # Refine DONE into SUCCESS/FAILURE based on the job's error status.
      try:
        BigqueryClient.RaiseIfJobError(result)
        result['State'] = 'SUCCESS'
      except BigqueryError:
        result['State'] = 'FAILURE'
    if 'totalBytesProcessed' in result.get('statistics', {}):
      result['Bytes Processed'] = result['statistics']['totalBytesProcessed']
    return result
@staticmethod
def FormatProjectInfo(project_info):
"""Prepare a project_info for printing.
Arguments:
project_info: Project dict to format.
Returns:
The new project_info.
"""
result = project_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
return result
@staticmethod
def FormatDatasetInfo(dataset_info):
"""Prepare a dataset_info for printing.
Arguments:
dataset_info: Dataset dict to format.
Returns:
The new dataset_info.
"""
result = dataset_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
if 'lastModifiedTime' in result:
result['Last modified'] = BigqueryClient.FormatTime(
int(result['lastModifiedTime']) / 1000)
if 'access' in result:
result['ACLs'] = BigqueryClient.FormatAcl(result['access'])
return result
@staticmethod
def FormatTableInfo(table_info):
"""Prepare a table_info for printing.
Arguments:
table_info: Table dict to format.
Returns:
The new table_info.
"""
result = table_info.copy()
reference = BigqueryClient.ConstructObjectReference(result)
result.update(dict(reference))
if 'lastModifiedTime' in result:
result['Last modified'] = BigqueryClient.FormatTime(
int(result['lastModifiedTime']) / 1000)
if 'schema' in result:
result['Schema'] = BigqueryClient.FormatSchema(result['schema'])
if 'numBytes' in result:
result['Total Bytes'] = result['numBytes']
if 'numRows' in result:
result['Total Rows'] = result['numRows']
if 'expirationTime' in result:
result['Expiration'] = BigqueryClient.FormatTime(
int(result['expirationTime']) / 1000)
return result
@staticmethod
def ConstructObjectReference(object_info):
"""Construct a Reference from a server response."""
if 'kind' in object_info:
typename = BigqueryClient._KindToName(object_info['kind'])
lower_camel = typename + 'Reference'
if lower_camel not in object_info:
raise ValueError('Cannot find %s in object of type %s: %s' % (
lower_camel, typename, object_info))
else:
keys = [k for k in object_info if k.endswith('Reference')]
if len(keys) != 1:
raise ValueError('Expected one Reference, found %s: %s' % (
len(keys), keys))
lower_camel = keys[0]
upper_camel = lower_camel[0].upper() + lower_camel[1:]
reference_type = getattr(ApiClientHelper, upper_camel, None)
if reference_type is None:
raise ValueError('Unknown reference type: %s' % (typename,))
return reference_type.Create(**object_info[lower_camel])
@staticmethod
def ConstructObjectInfo(reference):
"""Construct an Object from an ObjectReference."""
typename = reference.__class__.__name__
lower_camel = typename[0].lower() + typename[1:]
return {lower_camel: dict(reference)}
def _PrepareListRequest(self, reference, max_results=None, page_token=None):
request = dict(reference)
if max_results is not None:
request['maxResults'] = max_results
if page_token is not None:
request['pageToken'] = page_token
return request
def _NormalizeProjectReference(self, reference):
if reference is None:
try:
return self.GetProjectReference()
except BigqueryClientError:
raise BigqueryClientError(
'Project reference or a default project is required')
return reference
def ListJobRefs(self, **kwds):
return map( # pylint:disable-msg=C6402
BigqueryClient.ConstructObjectReference, self.ListJobs(**kwds))
  def ListJobs(self, reference=None,
               max_results=None, state_filter=None,
               all_users=None):
    """Return a list of jobs.

    Args:
      reference: The ProjectReference to list jobs for.
      max_results: The maximum number of jobs to return.
      state_filter: A single state filter or a list of filters to
        apply. If not specified, no filtering is applied.
      all_users: Whether to list jobs for all users of the project. Requesting
        user must be an owner of the project to list all jobs.

    Returns:
      A list of jobs.
    """
    reference = self._NormalizeProjectReference(reference)
    _Typecheck(reference, ApiClientHelper.ProjectReference, method='ListJobs')
    request = self._PrepareListRequest(reference, max_results, None)
    if state_filter is not None:
      # The apiclient wants enum values as lowercase strings.
      if isinstance(state_filter, basestring):
        state_filter = state_filter.lower()
      else:
        state_filter = [s.lower() for s in state_filter]
    _ApplyParameters(request, projection='full',
                     state_filter=state_filter, all_users=all_users)
    jobs = self.apiclient.jobs().list(**request).execute()
    return jobs.get('jobs', [])
def ListProjectRefs(self, **kwds):
"""List the project references this user has access to."""
return map( # pylint:disable-msg=C6402
BigqueryClient.ConstructObjectReference, self.ListProjects(**kwds))
def ListProjects(self, max_results=None, page_token=None):
"""List the projects this user has access to."""
request = self._PrepareListRequest({}, max_results, page_token)
result = self.apiclient.projects().list(**request).execute()
return result.get('projects', [])
def ListDatasetRefs(self, **kwds):
return map( # pylint:disable-msg=C6402
BigqueryClient.ConstructObjectReference, self.ListDatasets(**kwds))
def ListDatasets(self, reference=None, max_results=None, page_token=None):
"""List the datasets associated with this reference."""
reference = self._NormalizeProjectReference(reference)
_Typecheck(reference, ApiClientHelper.ProjectReference,
method='ListDatasets')
request = self._PrepareListRequest(reference, max_results, page_token)
result = self.apiclient.datasets().list(**request).execute()
return result.get('datasets', [])
def ListTableRefs(self, **kwds):
return map( # pylint:disable-msg=C6402
BigqueryClient.ConstructObjectReference, self.ListTables(**kwds))
def ListTables(self, reference, max_results=None, page_token=None):
"""List the tables associated with this reference."""
_Typecheck(reference, ApiClientHelper.DatasetReference, method='ListTables')
request = self._PrepareListRequest(reference, max_results, page_token)
result = self.apiclient.tables().list(**request).execute()
return result.get('tables', [])
#################################
## Table and dataset management
#################################
def CopyTable(self, source_reference, dest_reference,
create_disposition=None, write_disposition=None,
ignore_already_exists=False, **kwds):
"""Copies a table.
Args:
source_reference: TableReference of source table.
dest_reference: TableReference of destination table.
create_disposition: Optional. Specifies the create_disposition for
the dest_reference.
write_disposition: Optional. Specifies the write_disposition for
the dest_reference.
ignore_already_exists: Whether to ignore "already exists" errors.
**kwds: Passed on to ExecuteJob.
Returns:
The job description, or None for ignored errors.
Raises:
BigqueryDuplicateError: when write_disposition 'WRITE_EMPTY' is
specified and the dest_reference table already exists.
"""
_Typecheck(source_reference, ApiClientHelper.TableReference,
method='CopyTable')
_Typecheck(dest_reference, ApiClientHelper.TableReference,
method='CopyTable')
copy_config = {
'destinationTable': dict(dest_reference),
'sourceTable': dict(source_reference),
}
_ApplyParameters(copy_config, create_disposition=create_disposition,
write_disposition=write_disposition)
try:
return self.ExecuteJob({'copy': copy_config}, **kwds)
except BigqueryDuplicateError, e:
if ignore_already_exists:
return None
raise e
def DatasetExists(self, reference):
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='DatasetExists')
try:
self.apiclient.datasets().get(**dict(reference)).execute()
return True
except BigqueryNotFoundError:
return False
def TableExists(self, reference):
_Typecheck(reference, ApiClientHelper.TableReference, method='TableExists')
try:
self.apiclient.tables().get(**dict(reference)).execute()
return True
except BigqueryNotFoundError:
return False
def CreateDataset(self, reference, ignore_existing=False, description=None,
friendly_name=None, acl=None):
"""Create a dataset corresponding to DatasetReference.
Args:
reference: the DatasetReference to create.
ignore_existing: (boolean, default False) If False, raise
an exception if the dataset already exists.
description: an optional dataset description.
friendly_name: an optional friendly name for the dataset.
acl: an optional ACL for the dataset, as a list of dicts.
Raises:
TypeError: if reference is not a DatasetReference.
BigqueryDuplicateError: if reference exists and ignore_existing
is False.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='CreateDataset')
body = BigqueryClient.ConstructObjectInfo(reference)
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if acl is not None:
body['access'] = acl
try:
self.apiclient.datasets().insert(
body=body,
**dict(reference.GetProjectReference())).execute()
except BigqueryDuplicateError:
if not ignore_existing:
raise
  def CreateTable(self, reference, ignore_existing=False, schema=None,
                  description=None, friendly_name=None, expiration=None):
    """Create a table corresponding to TableReference.

    Args:
      reference: the TableReference to create.
      ignore_existing: (boolean, default False) If False, raise
        an exception if the table already exists.
      schema: an optional schema.
      description: an optional table description.
      friendly_name: an optional friendly name for the table.
      expiration: optional expiration time in milliseconds since the epoch.

    Raises:
      TypeError: if reference is not a TableReference.
      BigqueryDuplicateError: if reference exists and ignore_existing
        is False.
    """
    _Typecheck(reference, ApiClientHelper.TableReference, method='CreateTable')
    try:
      body = BigqueryClient.ConstructObjectInfo(reference)
      if schema:
        body['schema'] = {'fields': schema}
      if friendly_name is not None:
        body['friendlyName'] = friendly_name
      if description is not None:
        body['description'] = description
      if expiration is not None:
        body['expirationTime'] = expiration
      self.apiclient.tables().insert(
          body=body,
          **dict(reference.GetDatasetReference())).execute()
    except BigqueryDuplicateError:
      # Duplicates are an error unless the caller opted out.
      if not ignore_existing:
        raise
def UpdateTable(self, reference, schema=None,
description=None, friendly_name=None, expiration=None):
"""Updates a table.
Args:
reference: the TableReference to update.
schema: an optional schema.
description: an optional table description.
friendly_name: an optional friendly name for the table.
expiration: optional expiration time in milliseconds since the epoch.
Raises:
TypeError: if reference is not a TableReference.
"""
_Typecheck(reference, ApiClientHelper.TableReference, method='UpdateTable')
body = BigqueryClient.ConstructObjectInfo(reference)
if schema:
body['schema'] = {'fields': schema}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
self.apiclient.tables().patch(body=body, **dict(reference)).execute()
def UpdateDataset(self, reference,
description=None, friendly_name=None, acl=None):
"""Updates a dataset.
Args:
reference: the DatasetReference to update.
description: an optional dataset description.
friendly_name: an optional friendly name for the dataset.
acl: an optional ACL for the dataset, as a list of dicts.
Raises:
TypeError: if reference is not a DatasetReference.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='UpdateDataset')
body = BigqueryClient.ConstructObjectInfo(reference)
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if acl is not None:
body['access'] = acl
self.apiclient.datasets().patch(body=body, **dict(reference)).execute()
def DeleteDataset(self, reference, ignore_not_found=False,
delete_contents=None):
"""Deletes DatasetReference reference.
Args:
reference: the DatasetReference to delete.
ignore_not_found: Whether to ignore "not found" errors.
delete_contents: [Boolean] Whether to delete the contents of
non-empty datasets. If not specified and the dataset has
tables in it, the delete will fail. If not specified, the
server default applies.
Raises:
TypeError: if reference is not a DatasetReference.
BigqueryNotFoundError: if reference does not exist and
ignore_not_found is False.
"""
_Typecheck(reference, ApiClientHelper.DatasetReference,
method='DeleteDataset')
args = dict(reference)
if delete_contents is not None:
args['deleteContents'] = delete_contents
try:
self.apiclient.datasets().delete(**args).execute()
except BigqueryNotFoundError:
if not ignore_not_found:
raise
def DeleteTable(self, reference, ignore_not_found=False):
"""Deletes TableReference reference.
Args:
reference: the TableReference to delete.
ignore_not_found: Whether to ignore "not found" errors.
Raises:
TypeError: if reference is not a TableReference.
BigqueryNotFoundError: if reference does not exist and
ignore_not_found is False.
"""
_Typecheck(reference, ApiClientHelper.TableReference, method='DeleteTable')
try:
self.apiclient.tables().delete(**dict(reference)).execute()
except BigqueryNotFoundError:
if not ignore_not_found:
raise
#################################
## Job control
#################################
def StartJob(self, configuration,
project_id=None, upload_file=None, job_id=None):
"""Start a job with the given configuration.
Args:
configuration: The configuration for a job.
project_id: The project_id to run the job under. If None,
self.project_id is used.
upload_file: A file to include as a media upload to this request.
Only valid on job requests that expect a media upload file.
job_id: A unique job_id to use for this job. If a
JobIdGenerator, a job id will be generated from the job configuration.
If None, a unique job_id will be created for this request.
Returns:
The job resource returned from the insert job request. If there is an
error, the jobReference field will still be filled out with the job
reference used in the request.
Raises:
BigqueryClientConfigurationError: if project_id and
self.project_id are None.
"""
project_id = project_id or self.project_id
if not project_id:
raise BigqueryClientConfigurationError(
'Cannot start a job without a project id.')
configuration = configuration.copy()
if self.job_property:
configuration['properties'] = dict(
prop.partition('=')[0::2] for prop in self.job_property)
job_request = {'configuration': configuration}
# Use the default job id generator if no job id was supplied.
job_id = job_id or self.job_id_generator
if isinstance(job_id, JobIdGenerator):
job_id = job_id.Generate(configuration)
if job_id is not None:
job_reference = {'jobId': job_id, 'projectId': project_id}
job_request['jobReference'] = job_reference
media_upload = ''
if upload_file:
resumable = True
media_upload = http_request.MediaFileUpload(
filename=upload_file, mimetype='application/octet-stream',
resumable=resumable)
result = self.apiclient.jobs().insert(
body=job_request, media_body=media_upload,
projectId=project_id).execute()
return result
def RunJobSynchronously(self, configuration, project_id=None,
upload_file=None, job_id=None):
result = self.StartJob(configuration, project_id=project_id,
upload_file=upload_file, job_id=job_id)
if result['status']['state'] != 'DONE':
job_reference = BigqueryClient.ConstructObjectReference(result)
result = self.WaitJob(job_reference)
return self.RaiseIfJobError(result)
def ExecuteJob(self, configuration, sync=None,
project_id=None, upload_file=None, job_id=None):
"""Execute a job, possibly waiting for results."""
if sync is None:
sync = self.sync
if sync:
job = self.RunJobSynchronously(
configuration, project_id=project_id, upload_file=upload_file,
job_id=job_id)
else:
job = self.StartJob(
configuration, project_id=project_id, upload_file=upload_file,
job_id=job_id)
self.RaiseIfJobError(job)
return job
  class WaitPrinter(object):
    """Base class that defines the WaitPrinter interface."""

    def Print(self, job_id, wait_time, status):
      """Prints status for the current job we are waiting on.

      Args:
        job_id: the identifier for this job.
        wait_time: the number of seconds we have been waiting so far.
        status: the status of the job we are waiting for.
      """
      raise NotImplementedError('Subclass must implement Print')

    def Done(self):
      """Waiting is done and no more Print calls will be made.

      This function should handle the case of Print not being called.
      """
      raise NotImplementedError('Subclass must implement Done')

  class WaitPrinterHelper(WaitPrinter):
    """A Done implementation that prints based off a property."""

    # Set to True by subclasses once they have produced output that needs
    # a closing newline (see VerboseWaitPrinter.Print below).
    print_on_done = False

    def Done(self):
      if self.print_on_done:
        # Bare Python 2 print statement: emits only a newline, closing
        # the in-place status line.
        print

  class QuietWaitPrinter(WaitPrinterHelper):
    """A WaitPrinter that prints nothing."""

    def Print(self, unused_job_id, unused_wait_time, unused_status):
      pass

  class VerboseWaitPrinter(WaitPrinterHelper):
    """A WaitPrinter that prints every update."""

    def Print(self, job_id, wait_time, status):
      self.print_on_done = True
      # '\r' rewinds to the start of the line and the trailing comma
      # suppresses the newline, so each update overwrites the previous one.
      print '\rWaiting on %s ... (%ds) Current status: %-7s' % (
          job_id, wait_time, status),
      sys.stdout.flush()

  class TransitionWaitPrinter(VerboseWaitPrinter):
    """A WaitPrinter that only prints status change updates."""

    # Last status string printed; None before the first update.
    _previous_status = None

    def Print(self, job_id, wait_time, status):
      if status != self._previous_status:
        self._previous_status = status
        # Fully-qualified super() argument because this class is nested
        # inside BigqueryClient.
        super(BigqueryClient.TransitionWaitPrinter, self).Print(
            job_id, wait_time, status)
  def WaitJob(self, job_reference, status='DONE',
              wait=sys.maxint, wait_printer_factory=None):
    """Poll for a job to run until it reaches the requested status.

    Arguments:
      job_reference: JobReference to poll.
      status: (optional, default 'DONE') Desired job status.
      wait: (optional, default maxint) Max wait time.
      wait_printer_factory: (optional, defaults to
        self.wait_printer_factory) Returns a subclass of WaitPrinter
        that will be called after each job poll.

    Returns:
      The job object returned by the final status call.

    Raises:
      StopIteration: If polling does not reach the desired state before
        timing out.
      ValueError: If given an invalid wait value.
    """
    _Typecheck(job_reference, ApiClientHelper.JobReference, method='WaitJob')
    start_time = time.time()
    job = None
    if wait_printer_factory:
      printer = wait_printer_factory()
    else:
      printer = self.wait_printer_factory()
    # This is a first pass at wait logic: we ping at 1s intervals a few
    # times, then increase to max(3, max_wait), and then keep waiting
    # that long until we've run out of time.
    waits = itertools.chain(
        itertools.repeat(1, 8),
        xrange(2, 30, 3),
        itertools.repeat(30))
    current_wait = 0
    current_status = 'UNKNOWN'
    # Note: the 'else' below belongs to the while loop — it fires only when
    # the loop exhausts its time budget without hitting 'break'.
    while current_wait <= wait:
      try:
        done, job = self.PollJob(job_reference, status=status, wait=wait)
        current_status = job['status']['state']
        if done:
          printer.Print(job_reference.jobId, current_wait, current_status)
          break
      except BigqueryCommunicationError, e:
        # Communication errors while waiting on a job are okay.
        logging.warning('Transient error during job status check: %s', e)
      except BigqueryBackendError, e:
        # Temporary server errors while waiting on a job are okay.
        logging.warning('Transient error during job status check: %s', e)
      # Sleep for the next interval one second at a time so the printer can
      # refresh the elapsed-time display between polls.
      for _ in xrange(waits.next()):
        current_wait = time.time() - start_time
        printer.Print(job_reference.jobId, current_wait, current_status)
        time.sleep(1)
    else:
      raise StopIteration(
          'Wait timed out. Operation not finished, in state %s' % (
              current_status,))
    printer.Done()
    return job
def PollJob(self, job_reference, status='DONE', wait=0):
"""Poll a job once for a specific status.
Arguments:
job_reference: JobReference to poll.
status: (optional, default 'DONE') Desired job status.
wait: (optional, default 0) Max server-side wait time for one poll call.
Returns:
Tuple (in_state, job) where in_state is True if job is
in the desired state.
Raises:
ValueError: If given an invalid wait value.
"""
_Typecheck(job_reference, ApiClientHelper.JobReference, method='PollJob')
wait = BigqueryClient.NormalizeWait(wait)
job = self.apiclient.jobs().get(**dict(job_reference)).execute()
current = job['status']['state']
return (current == status, job)
#################################
## Wrappers for job types
#################################
def RunQuery(self, **kwds):
"""Run a query job synchronously, and return the result.
Args:
**kwds: Passed on to self.Query and self.ExecuteJob.
Returns:
The rows in the query result as a list.
"""
new_kwds = dict(kwds)
new_kwds['sync'] = True
job = self.Query(**new_kwds)
return self.ReadTableRows(job['configuration']['query']['destinationTable'])
  def Query(self, query, destination_table=None,
            create_disposition=None, write_disposition=None,
            priority=None, preserve_nulls=None,
            allow_large_results=False,
            dry_run=None,
            use_cache=None,
            **kwds):
    # pylint:disable-msg=g-doc-args
    """Execute the given query, returning the created job.

    The job will execute synchronously if sync=True is provided as an
    argument or if self.sync is true.

    Args:
      query: Query to execute.
      destination_table: (default None) If provided, send the results to the
        given table.
      create_disposition: Optional. Specifies the create_disposition for
        the destination_table.
      write_disposition: Optional. Specifies the write_disposition for
        the destination_table.
      priority: Optional. Priority to run the query with. Either
        'INTERACTIVE' (default) or 'BATCH'.
      preserve_nulls: Optional. Indicates whether to preserve nulls in input
        data. Temporary flag; will be removed in a future version.
      allow_large_results: (default False) If provided, enables support for
        large (> 128M) results.
      dry_run: Optional. Indicates whether the query will only be validated and
        return processing statistics instead of actually running.
      use_cache: Optional. Whether to use the query cache. If create_disposition
        is CREATE_NEVER, will only run the query if the result is already
        cached.
      **kwds: Passed on to self.ExecuteJob.

    Raises:
      BigqueryClientError: if no query is provided.

    Returns:
      The resulting job info.
    """
    if not query:
      raise BigqueryClientError('No query string provided')
    query_config = {'query': query}
    # Unqualified table names in the query resolve against the client's
    # default dataset, when one is configured.
    if self.dataset_id:
      query_config['defaultDataset'] = dict(self.GetDatasetReference())
    if destination_table:
      try:
        reference = self.GetTableReference(destination_table)
      except BigqueryError, e:
        # Re-raise with the offending identifier for a clearer message.
        raise BigqueryError('Invalid value %s for destination_table: %s' % (
            destination_table, e))
      query_config['destinationTable'] = dict(reference)
    # Copy the optional settings into the query configuration.
    _ApplyParameters(
        query_config,
        allow_large_results=allow_large_results,
        create_disposition=create_disposition,
        preserve_nulls=preserve_nulls,
        priority=priority,
        write_disposition=write_disposition,
        use_query_cache=use_cache)
    request = {'query': query_config}
    # dry_run is applied at the request level, not inside the query config.
    _ApplyParameters(request, dry_run=dry_run)
    return self.ExecuteJob(request, **kwds)
  def Load(self, destination_table_reference, source,
           schema=None, create_disposition=None, write_disposition=None,
           field_delimiter=None, skip_leading_rows=None, encoding=None,
           quote=None, max_bad_records=None, allow_quoted_newlines=None,
           source_format=None,
           **kwds):
    """Load the given data into BigQuery.

    The job will execute synchronously if sync=True is provided as an
    argument or if self.sync is true.

    Args:
      destination_table_reference: TableReference to load data into.
      source: String specifying source data to load.
      schema: (default None) Schema of the created table. (Can be left blank
        for append operations.)
      create_disposition: Optional. Specifies the create_disposition for
        the destination_table_reference.
      write_disposition: Optional. Specifies the write_disposition for
        the destination_table_reference.
      field_delimiter: Optional. Specifies the single byte field delimiter.
      skip_leading_rows: Optional. Number of rows of initial data to skip.
      encoding: Optional. Specifies character encoding of the input data.
        May be "UTF-8" or "ISO-8859-1". Defaults to UTF-8 if not specified.
      quote: Optional. Quote character to use. Default is '"'. Note that
        quoting is done on the raw binary data before encoding is applied.
      max_bad_records: Optional. Maximum number of bad records that should
        be ignored before the entire job is aborted.
      allow_quoted_newlines: Optional. Whether to allow quoted newlines in csv
        import data.
      source_format: Optional. Format of source data. May be "CSV",
        "DATASTORE_BACKUP", or "NEWLINE_DELIMITED_JSON".
      **kwds: Passed on to self.ExecuteJob.

    Returns:
      The resulting job info.
    """
    _Typecheck(destination_table_reference, ApiClientHelper.TableReference)
    load_config = {'destinationTable': dict(destination_table_reference)}
    sources = BigqueryClient.ProcessSources(source)
    if sources[0].startswith('gs://'):
      # Data already lives in Cloud Storage: reference all URIs directly.
      load_config['sourceUris'] = sources
      upload_file = None
    else:
      # Local data: only the first source is sent as a media upload with
      # the job request.
      upload_file = sources[0]
    if schema is not None:
      load_config['schema'] = {'fields': BigqueryClient.ReadSchema(schema)}
    # Copy the optional settings into the load configuration.
    _ApplyParameters(
        load_config, create_disposition=create_disposition,
        write_disposition=write_disposition, field_delimiter=field_delimiter,
        skip_leading_rows=skip_leading_rows, encoding=encoding,
        quote=quote, max_bad_records=max_bad_records,
        source_format=source_format,
        allow_quoted_newlines=allow_quoted_newlines)
    return self.ExecuteJob(configuration={'load': load_config},
                           upload_file=upload_file, **kwds)
def Extract(self, source_table, destination_uri,
print_header=None, field_delimiter=None,
destination_format=None,
**kwds):
"""Extract the given table from BigQuery.
The job will execute synchronously if sync=True is provided as an
argument or if self.sync is true.
Args:
source_table: TableReference to read data from.
destination_uri: String specifying destination location.
print_header: Optional. Whether to print out a header row in the results.
field_delimiter: Optional. Specifies the single byte field delimiter.
destination_format: Optional. Format to extract table to. May be "CSV"
or "NEWLINE_DELIMITED_JSON".
**kwds: Passed on to self.ExecuteJob.
Returns:
The resulting job info.
Raises:
BigqueryClientError: if required parameters are invalid.
"""
_Typecheck(source_table, ApiClientHelper.TableReference)
if not destination_uri.startswith('gs://'):
raise BigqueryClientError('Extract only supports "gs://" uris.')
extract_config = {'sourceTable': dict(source_table)}
_ApplyParameters(
extract_config, destination_uri=destination_uri,
destination_format=destination_format,
print_header=print_header, field_delimiter=field_delimiter)
return self.ExecuteJob(configuration={'extract': extract_config}, **kwds)
class ApiClientHelper(object):
  """Static helper methods and classes not provided by the discovery client."""

  def __init__(self, *unused_args, **unused_kwds):
    raise NotImplementedError('Cannot instantiate static class ApiClientHelper')

  class Reference(object):
    """Base class for Reference objects returned by apiclient."""

    # Subclasses override these with their field set and display format.
    _required_fields = set()
    _format_str = ''

    def __init__(self, **kwds):
      if type(self) == ApiClientHelper.Reference:
        raise NotImplementedError(
            'Cannot instantiate abstract class ApiClientHelper.Reference')
      for field in self._required_fields:
        if not kwds.get(field, ''):
          raise ValueError('Missing required argument %s to %s' % (
              field, self.__class__.__name__))
        setattr(self, field, kwds[field])

    @classmethod
    def Create(cls, **kwds):
      """Factory method for this class."""
      filtered = dict((k, v) for k, v in kwds.iteritems()
                      if k in cls._required_fields)
      return cls(**filtered)

    def __iter__(self):
      # Yields (field, value) pairs so dict(reference) works.
      for field in self._required_fields:
        yield (field, getattr(self, field))

    def __str__(self):
      return self._format_str % dict(self)

    def __repr__(self):
      return "%s '%s'" % (self.typename, self)

    def __eq__(self, other):
      other_fields = dict(other)
      for field in self._required_fields:
        if getattr(self, field) != other_fields.get(field, ''):
          return False
      return True

  class JobReference(Reference):
    _required_fields = set(('projectId', 'jobId'))
    _format_str = '%(projectId)s:%(jobId)s'
    typename = 'job'

  class ProjectReference(Reference):
    _required_fields = set(('projectId',))
    _format_str = '%(projectId)s'
    typename = 'project'

  class DatasetReference(Reference):
    _required_fields = set(('projectId', 'datasetId'))
    _format_str = '%(projectId)s:%(datasetId)s'
    typename = 'dataset'

    def GetProjectReference(self):
      return ApiClientHelper.ProjectReference.Create(
          projectId=self.projectId)

  class TableReference(Reference):
    _required_fields = set(('projectId', 'datasetId', 'tableId'))
    _format_str = '%(projectId)s:%(datasetId)s.%(tableId)s'
    typename = 'table'

    def GetDatasetReference(self):
      return ApiClientHelper.DatasetReference.Create(
          projectId=self.projectId, datasetId=self.datasetId)

    def GetProjectReference(self):
      return ApiClientHelper.ProjectReference.Create(
          projectId=self.projectId)
| Python |
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Tests for bigquery_client.py."""
import itertools
import json
import StringIO
import tempfile
from google.apputils import googletest
import bigquery_client
class BigqueryClientTest(googletest.TestCase):
  """Tests for identifier parsing, schema reading and JSON helpers."""

  def setUp(self):
    self.client = bigquery_client.BigqueryClient(api='http', api_version='')
    # Maps identifier string -> expected (projectId, datasetId, tableId).
    self.reference_tests = {
        'prj:': ('prj', '', ''),
        'example.com:prj': ('example.com:prj', '', ''),
        'example.com:prj-2': ('example.com:prj-2', '', ''),
        'www.example.com:prj': ('www.example.com:prj', '', ''),
        'prj:ds': ('prj', 'ds', ''),
        'example.com:prj:ds': ('example.com:prj', 'ds', ''),
        'prj:ds.tbl': ('prj', 'ds', 'tbl'),
        'example.com:prj:ds.tbl': ('example.com:prj', 'ds', 'tbl'),
        }
    # _ParseIdentifier additionally accepts project-relative forms.
    self.parse_tests = self.reference_tests.copy()
    self.parse_tests.update({
        'ds.': ('', 'ds', ''),
        'ds.tbl': ('', 'ds', 'tbl'),
        'tbl': ('', '', 'tbl'),
        })
    self.field_names = ('projectId', 'datasetId', 'tableId')

  @staticmethod
  def _LengthToType(parts):
    # The number of non-empty parts determines the reference type.
    if len(parts) == 1:
      return bigquery_client.ApiClientHelper.ProjectReference
    if len(parts) == 2:
      return bigquery_client.ApiClientHelper.DatasetReference
    if len(parts) == 3:
      return bigquery_client.ApiClientHelper.TableReference
    return None

  def _GetReference(self, parts):
    """Builds a Reference of the appropriate type from a parts tuple."""
    parts = filter(bool, parts)
    reference_type = BigqueryClientTest._LengthToType(parts)
    args = dict(itertools.izip(self.field_names, parts))
    return reference_type(**args)

  def testToCamel(self):
    self.assertEqual('lowerCamel', bigquery_client._ToLowerCamel('lower_camel'))

  def testReadSchemaFromFile(self):
    # Test the filename case.
    with tempfile.NamedTemporaryFile() as f:
      # Write out the results.
      print >>f, '['
      print >>f, ' { "name": "Number", "type": "integer", "mode": "REQUIRED" },'
      print >>f, ' { "name": "Name", "type": "string", "mode": "REQUIRED" },'
      print >>f, ' { "name": "Other", "type": "string", "mode": "OPTIONAL" }'
      print >>f, ']'
      f.flush()
      # Read them as JSON.
      f.seek(0)
      result = json.load(f)
      # Compare the results.
      self.assertEqual(result, self.client.ReadSchema(f.name))

  def testReadSchemaFromString(self):
    # Check some cases that should pass.
    self.assertEqual(
        [{'name': 'foo', 'type': 'INTEGER'}],
        bigquery_client.BigqueryClient.ReadSchema('foo:integer'))
    self.assertEqual(
        [{'name': 'foo', 'type': 'INTEGER'},
         {'name': 'bar', 'type': 'STRING'}],
        bigquery_client.BigqueryClient.ReadSchema('foo:integer, bar:string'))
    # Type defaults to STRING when omitted.
    self.assertEqual(
        [{'name': 'foo', 'type': 'STRING'}],
        bigquery_client.BigqueryClient.ReadSchema('foo'))
    self.assertEqual(
        [{'name': 'foo', 'type': 'STRING'},
         {'name': 'bar', 'type': 'STRING'}],
        bigquery_client.BigqueryClient.ReadSchema('foo,bar'))
    self.assertEqual(
        [{'name': 'foo', 'type': 'INTEGER'},
         {'name': 'bar', 'type': 'STRING'}],
        bigquery_client.BigqueryClient.ReadSchema('foo:integer, bar'))
    # Check some cases that should fail.
    self.assertRaises(bigquery_client.BigquerySchemaError,
                      bigquery_client.BigqueryClient.ReadSchema,
                      '')
    self.assertRaises(bigquery_client.BigquerySchemaError,
                      bigquery_client.BigqueryClient.ReadSchema,
                      'foo,bar:int:baz')
    self.assertRaises(bigquery_client.BigquerySchemaError,
                      bigquery_client.BigqueryClient.ReadSchema,
                      'foo:int,,bar:string')
    self.assertRaises(bigquery_client.BigquerySchemaError,
                      bigquery_client.BigqueryClient.ReadSchema,
                      '../foo/bar/fake_filename')

  def testParseIdentifier(self):
    for identifier, parse in self.parse_tests.iteritems():
      self.assertEquals(parse, bigquery_client.BigqueryClient._ParseIdentifier(
          identifier))

  def testGetReference(self):
    for identifier, parse in self.reference_tests.iteritems():
      reference = self._GetReference(parse)
      self.assertEquals(reference, self.client.GetReference(identifier))

  def testParseDatasetReference(self):
    # Only identifiers with exactly project+dataset are valid here.
    dataset_parses = dict((k, v) for k, v in self.reference_tests.iteritems()
                          if len(filter(bool, v)) == 2)
    for identifier, parse in dataset_parses.iteritems():
      reference = self._GetReference(parse)
      self.assertEquals(reference, self.client.GetDatasetReference(identifier))
    for invalid in ['ds.tbl', 'prj:ds.tbl']:
      self.assertRaises(bigquery_client.BigqueryError,
                        self.client.GetDatasetReference, invalid)

  def testParseProjectReference(self):
    project_parses = dict((k, v) for k, v in self.reference_tests.iteritems()
                          if len(filter(bool, v)) == 1)
    for identifier, parse in project_parses.iteritems():
      reference = self._GetReference(parse)
      self.assertEquals(reference, self.client.GetProjectReference(identifier))
    invalid_projects = [
        'prj:ds', 'example.com:prj:ds', 'ds.', 'ds.tbl', 'prj:ds.tbl']
    for invalid in invalid_projects:
      self.assertRaises(bigquery_client.BigqueryError,
                        self.client.GetProjectReference, invalid)

  def testParseJobReference(self):
    self.assertTrue(self.client.GetJobReference('proj:job_id'))
    # Unqualified job ids require the client to carry a project id.
    self.client.project_id = None
    self.assertRaises(bigquery_client.BigqueryError,
                      self.client.GetJobReference, 'job_id')
    self.client.project_id = 'proj'
    self.assertTrue(self.client.GetJobReference('job_id'))
    invalid_job_ids = [
        'prj:', 'example.com:prj:ds.tbl', 'ds.tbl', 'prj:ds.tbl']
    for invalid in invalid_job_ids:
      self.assertRaises(bigquery_client.BigqueryError,
                        self.client.GetJobReference, invalid)

  def testRaiseError(self):
    # Confirm we handle arbitrary errors gracefully.
    try:
      bigquery_client.BigqueryClient.RaiseError({})
    except bigquery_client.BigqueryError as _:
      pass

  def testParseNewlineDelimitedJson(self):
    data = '{"a":1}\n{"b":2}'
    result = bigquery_client.ParseNewlineDelimitedJson(
        None, StringIO.StringIO(data))
    self.assertEquals(2, len(result))
    self.assertEquals([None, None], [x[0] for x in result])
    self.assertEquals(1, result[0][1]['a'])
    self.assertEquals(2, result[1][1]['b'])
    # With a prefix, keys encode the byte offset of each record.
    result = bigquery_client.ParseNewlineDelimitedJson(
        'foo/', StringIO.StringIO(data))
    self.assertEquals(['foo/0', 'foo/8'], [x[0] for x in result])

    def _Parse(s):
      bigquery_client.ParseNewlineDelimitedJson(
          None, StringIO.StringIO(s))
    self.assertRaisesRegexp(bigquery_client.BigqueryClientError,
                            r'Could not parse', _Parse, '_junk_')
    self.assertRaisesRegexp(bigquery_client.BigqueryClientError,
                            r'not a JSON object', _Parse, '[1, 2]')


if __name__ == '__main__':
  googletest.main()
| Python |
#!/usr/bin/env python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample app demonstrates extraction of GAE Datastore data to Google BigQuery
Uses the App Engine MapReduce mapper pipeline to read entities
out of the App Engine Datastore, write processed entities into
Cloud Storage in CSV format, then starts another pipeline that
creates a BigQuery ingestion job. Uses code from the log2bq
project: http://code.google.com/p/log2bq/
"""
__author__ = 'manoochehri@google.com (Michael Manoochehri)'
import time
import calendar
import datetime
import httplib2
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from mapreduce.lib import files
from mapreduce import base_handler
from mapreduce import mapreduce_pipeline
from apiclient.discovery import build
from oauth2client.appengine import AppAssertionCredentials
SCOPE = 'https://www.googleapis.com/auth/bigquery'
PROJECT_ID = 'XXXXXXXXXXXX' # Your Project ID here
BQ_DATASET_ID = 'datastore_data'
GS_BUCKET = 'datastore_csvoutput'
ENTITY_KIND = 'main.ProductSalesData'
class ProductSalesData(db.Model):
  """Sample Datastore model exported to BigQuery by this demo app.

  The property names mirror the BigQuery schema built in build_job_data():
  product_id (INTEGER), date (INTEGER, POSIX seconds), store (STRING).
  """
  product_id = db.IntegerProperty(required=True)
  # auto_now/auto_now_add: the timestamp is filled in automatically on put().
  date = db.DateTimeProperty(verbose_name=None,
                             auto_now=True,
                             auto_now_add=True)
  store = db.StringProperty(required=True)
class DatastoreMapperPipeline(base_handler.PipelineBase):
  """Maps Datastore entities to CSV shards in Cloud Storage, then loads them.

  Stage 1 runs main.datastore_map over every entity of the given kind and
  writes the CSV output to the GS_BUCKET bucket; stage 2 hands the resulting
  file list to CloudStorageToBigQuery.
  """

  def run(self, entity_type):
    output = yield mapreduce_pipeline.MapperPipeline(
        "Datastore Mapper %s" % entity_type,
        "main.datastore_map",
        "mapreduce.input_readers.DatastoreInputReader",
        output_writer_spec="mapreduce.output_writers.FileOutputWriter",
        params={
            "input_reader":{
                "entity_kind": entity_type,
                },
            "output_writer":{
                "filesystem": "gs",
                "gs_bucket_name": GS_BUCKET,
                "output_sharding":"none",
                }
            },
        shards=12)
    # 'output' resolves to the list of files written by the mapper above.
    yield CloudStorageToBigQuery(output)
class CloudStorageToBigQuery(base_handler.PipelineBase):
  """Pipeline stage that loads mapper CSV output into a BigQuery table."""

  def run(self, csv_output):
    """Starts a BigQuery load job for the given mapper output files.

    Args:
      csv_output: list of Files API paths ('/gs/bucket/object') produced by
        the preceding mapper pipeline.
    """
    credentials = AppAssertionCredentials(scope=SCOPE)
    http = credentials.authorize(httplib2.Http())
    bigquery_service = build("bigquery", "v2", http=http)
    jobs = bigquery_service.jobs()
    # Time-stamped table name so repeated runs don't collide.
    table_name = 'datastore_data_%s' % datetime.datetime.utcnow().strftime(
        '%m%d%Y_%H%M%S')
    # Convert Files API paths to gs:// URIs. Named gs_paths (not 'files')
    # so we don't shadow the 'files' module imported from mapreduce.lib.
    gs_paths = [str(f.replace('/gs/', 'gs://')) for f in csv_output]
    result = jobs.insert(projectId=PROJECT_ID,
                         body=build_job_data(table_name, gs_paths))
    result.execute()
def build_job_data(table_name, files):
  """Builds the request body for a BigQuery load job.

  Args:
    table_name: destination table id inside BQ_DATASET_ID.
    files: list of gs:// source URIs holding the CSV data.

  Returns:
    A dict suitable as the body of a jobs().insert() call.
  """
  schema_fields = [
      {"name": "product_id", "type": "INTEGER"},
      {"name": "date", "type": "INTEGER"},
      {"name": "store", "type": "STRING"},
  ]
  destination = {
      "projectId": PROJECT_ID,
      "datasetId": BQ_DATASET_ID,
      "tableId": table_name,
  }
  return {
      "projectId": PROJECT_ID,
      "configuration": {
          "load": {
              "sourceUris": files,
              "schema": {"fields": schema_fields},
              "destinationTable": destination,
              # Fail the job on the first malformed record.
              "maxBadRecords": 0,
          }
      }
  }
def datastore_map(entity_type):
  """Mapper callback: emits one quoted CSV line per Datastore entity."""
  entity = db.to_dict(entity_type)
  fields = (entity.get('product_id'),
            timestamp_to_posix(entity.get('date')),
            entity.get('store'))
  csv_line = ','.join('"%s"' % value for value in fields)
  yield('%s\n' % csv_line)
def timestamp_to_posix(timestamp):
  """Converts a naive UTC datetime to an integer POSIX timestamp.

  Datastore DateTimeProperty values are naive UTC datetimes. The previous
  time.mktime call interpreted the tuple in the server's local timezone,
  skewing every exported timestamp by the UTC offset; calendar.timegm is
  the UTC-correct inverse of time.gmtime (and explains the otherwise
  unused 'import calendar' at the top of this module).
  """
  return int(calendar.timegm(timestamp.timetuple()))
class DatastoretoBigQueryStart(webapp.RequestHandler):
  """Kicks off the Datastore -> BigQuery mapper pipeline."""

  def get(self):
    pipeline = DatastoreMapperPipeline(ENTITY_KIND)
    pipeline.start()
    # Redirect to the pipeline status page for this run.
    path = pipeline.base_path + "/status?root=" + pipeline.pipeline_id
    self.redirect(path)
class AddDataHandler(webapp.RequestHandler):
  """Populates the Datastore with sample ProductSalesData entities."""

  def get(self):
    # Creates entities #0..#8 (nine samples).
    for i in range(0, 9):
      data = ProductSalesData(product_id=i,
                              store='Store %s' % str(i))
      # Persist the entity before reporting success, so the message is
      # accurate if put() raises (original wrote the message first).
      data.put()
      self.response.out.write('Added sample Datastore entity #%s<br />' % str(i))
    self.response.out.write('<a href="/start">Click here</a> to start the Datastore to BigQuery pipeline.')
# URL routes for the demo's two entry points.
application = webapp.WSGIApplication(
    [('/start', DatastoretoBigQueryStart),
     ('/add_data', AddDataHandler)],
    debug=True)


def main():
  # CGI entry point for the App Engine Python 2.5 runtime.
  run_wsgi_app(application)


if __name__ == "__main__":
  main()
| Python |
import httplib2
from apiclient.discovery import build
from oauth2client.appengine import oauth2decorator_from_clientsecrets
class BigQueryClient(object):
    """Thin wrapper over the BigQuery v2 discovery service.

    Each request method obtains a freshly authorized http object from the
    OAuth2 decorator, because credentials only exist inside a handler
    protected by the decorator (see the note in getTableData).
    """

    def __init__(self, http, decorator):
        """Creates the BigQuery client connection"""
        self.service = build('bigquery', 'v2', http=http)
        self.decorator = decorator

    def getTableData(self, project, dataset, table):
        # The credentials must already exist before you call decorator.http()
        # So you cannot pre-generate 'decorated' in the BigQueryClient
        # constructor, only from within a method protected by .oauth_required
        authorized_http = self.decorator.http()
        request = self.service.tables().get(
            projectId=project, datasetId=dataset, tableId=table)
        return request.execute(authorized_http)

    def getLastModTime(self, project, dataset, table):
        """Returns the table's lastModifiedTime, or None if unavailable."""
        table_data = self.getTableData(project, dataset, table)
        if table_data is None or 'lastModifiedTime' not in table_data:
            return None
        return table_data['lastModifiedTime']

    def Query(self, query, project, timeout_ms=10000):
        """Runs a synchronous query and returns the raw JSON response."""
        request_body = {
            'query': query,
            'timeoutMs': timeout_ms
        }
        authorized_http = self.decorator.http()
        return (self.service.jobs()
                .query(projectId=project, body=request_body)
                .execute(authorized_http))
| Python |
#!/usr/bin/env python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bqclient
import httplib2
import logging
import os
from django.utils import simplejson as json
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp.template import render
from oauth2client.appengine import oauth2decorator_from_clientsecrets
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# BILLING_PROJECT_ID for a project where you and your users
# are viewing members. This is where the bill will be sent.
# During the limited availability preview, there is no bill.
# Replace the BILLING_PROJECT_ID value with the Client ID value
# from your project, the same numeric value you used in client_secrets.json
BILLING_PROJECT_ID = "99999999999"
DATA_PROJECT_ID = "publicdata"
DATASET = "samples"
TABLE = "natality"
QUERY = """
select state, SUM(gestation_weeks) / COUNT(gestation_weeks) as weeks
from publicdata:samples.natality
where year > 1990 and year < 2005 and IS_EXPLICITLY_DEFINED(gestation_weeks)
group by state order by weeks
"""
decorator = oauth2decorator_from_clientsecrets(CLIENT_SECRETS,
'https://www.googleapis.com/auth/bigquery')
http = httplib2.Http(memcache)
bq = bqclient.BigQueryClient(http, decorator)
class MainHandler(webapp.RequestHandler):
  """Renders the natality geomap page from a BigQuery query result."""

  def _bq2geo(self, bqdata):
    """geodata output for region maps must be in the format region, value.
    Assume the BigQuery query output is in this format and get names from
    schema.
    """
    logging.info(bqdata)
    fields = bqdata['schema']['fields']
    geo_name = fields[0]['name']
    val_name = fields[1]['name']
    logging.info("Column Names=%s, %s" % (geo_name, val_name))
    geodata = {'cols': ({'id': geo_name, 'label': geo_name, 'type': 'string'},
                        {'id': val_name, 'label': val_name, 'type': 'number'}),
               'rows': []}
    logging.info(geodata)
    for row in bqdata['rows']:
      cells = row['f']
      # Prefix 'US-' so the region column matches the geochart's codes.
      geodata['rows'].append(
          {'c': [{'v': 'US-' + cells[0]['v']},
                 {'v': cells[1]['v']}]})
    logging.info('FINAL GEODATA---')
    logging.info(geodata)
    return json.dumps(geodata)

  @decorator.oauth_required
  def get(self):
    logging.info('Last mod time: %s' % bq.getLastModTime(
        DATA_PROJECT_ID, DATASET, TABLE))
    data = {'data': self._bq2geo(bq.Query(QUERY, BILLING_PROJECT_ID)),
            'query': QUERY}
    template = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(render(template, data))
# Single route: the geomap page.
application = webapp.WSGIApplication([
    ('/', MainHandler),
    ], debug=True)


def main():
  # CGI entry point for the App Engine Python 2.5 runtime.
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
"""BigQuery App Engine demo.
Demos how to start a BigQuery job running, then poll the job
to get the results when it's complete.
"""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import bigqueryv2
import errors
import httplib2
import os
import simplejson
from oauth2client.appengine import oauth2decorator_from_clientsecrets
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
# Project ID for your BigQuery Project in the API Console
PROJECT_ID = '[YOUR PROJECT ID]'
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
decorator = oauth2decorator_from_clientsecrets(
filename=CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/bigquery',
message=template.render(
os.path.join(os.path.dirname(__file__), 'templates/error.html'),
{'clientSecrets': CLIENT_SECRETS}))
BQ = bigqueryv2.BigQueryClient(PROJECT_ID)
class MainHandler(webapp.RequestHandler):
  """Display the index page."""

  @decorator.oauth_aware
  def get(self):
    """Main handler.

    Displays index page if logged in.
    Otherwise, starts OAuth 2.0 dance.
    """
    path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
    if decorator.has_credentials():
      self.redirect('/about')
      # redirect() only sets status/headers; without this return the
      # handler would fall through and also write the login page into
      # the redirect response body.
      return
    variables = {'url': decorator.authorize_url()}
    self.response.out.write(template.render(path, variables))
class QueryPage(webapp.RequestHandler):
  """Serves the static query-entry page."""

  @decorator.oauth_required
  def get(self):
    """Render templates/query.html with an empty template context."""
    template_path = os.path.join(
        os.path.dirname(__file__), 'templates/query.html')
    self.response.out.write(template.render(template_path, {}))
class QueryHandler(webapp.RequestHandler):
  """Handle queries to BigQuery."""

  @decorator.oauth_required
  def get(self):
    """Poll the job to see if it's complete."""
    authorized_http = decorator.http()
    job_id = self.request.get('jobId')
    try:
      poll_response = BQ.poll(authorized_http, job_id)
    except errors.PollError:
      self.response.set_status(500, 'Error during Poll')
    else:
      payload = simplejson.dumps(poll_response)
      self.response.headers['Content-Type'] = 'application/json'
      self.response.out.write(payload)

  @decorator.oauth_required
  def post(self):
    """Post a new query job to BigQuery."""
    authorized_http = decorator.http()
    query = self.request.get('query')
    try:
      job_id = BQ.query(authorized_http, query)
    except errors.QueryError:
      self.response.set_status(500, 'Error during Query')
    else:
      payload = simplejson.dumps({'jobId': job_id})
      self.response.headers['Content-Type'] = 'application/json'
      self.response.out.write(payload)
# URL routing for the demo's three handlers.
app = webapp.WSGIApplication(
    [
        ('/', MainHandler),
        ('/about', QueryPage),
        ('/query', QueryHandler),
    ],
    debug=True
)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
"""Any errors occurring during execution.
- Error during query.
- Error during poll.
"""
class Error(Exception):
  """Base exception for this application."""


class QueryError(Error):
  """Exception raised for errors during query."""


class PollError(Error):
  """Exception raised for errors during job poll."""
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
This query provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.